Commit fcbdd8d

chore: Update lock file; it was getting stuck and causing build issues for folks locally
1 parent 95aad4b commit fcbdd8d

3 files changed: +1954 −1912 lines (the regenerated lock file accounts for the bulk of these counts; its diff is not rendered below)


pyproject.toml

Lines changed: 37 additions & 35 deletions
@@ -6,8 +6,7 @@ requires = [
     "ninja>=1.11.0",
     "pyyaml>=6.0",
     "cffi>=1.15.1",
-    "torch>=2.10.0.dev,<2.11.0; platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)",
-    "torch>=2.8.0,<2.9.0; platform_machine == 'aarch64' and 'tegra' in platform_release",
+    "torch>=2.10.0.dev,<2.11.0",
     "pybind11==2.6.2",
 ]
 build-backend = "setuptools.build_meta"
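Note on the hunk above: the two deleted torch pins were gated by PEP 508 environment markers, while the surviving pin now applies everywhere. As a quick illustration (not part of the commit) of how such markers evaluate, here is a minimal sketch using the real packaging library; the environment values are made up:

from packaging.markers import Marker

marker = Marker(
    "platform_machine != 'aarch64' or "
    "(platform_machine == 'aarch64' and 'tegra' not in platform_release)"
)

# Keys supplied to evaluate() override the running interpreter's defaults.
print(marker.evaluate({"platform_machine": "x86_64", "platform_release": "6.8.0"}))
# -> True: the torch>=2.10.0.dev pin applied on ordinary machines.
print(marker.evaluate({"platform_machine": "aarch64", "platform_release": "5.15.148-tegra"}))
# -> False: Jetson/Tegra boxes fell through to the torch>=2.8.0,<2.9.0 pin.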
@@ -33,7 +32,7 @@ classifiers = [
     "Topic :: Software Development :: Libraries",
 ]
 readme = { file = "README.md", content-type = "text/markdown" }
-requires-python = ">=3.9"
+requires-python = ">=3.10"
 keywords = [
     "pytorch",
     "torch",
@@ -100,12 +99,10 @@ index-strategy = "unsafe-best-match"

 [tool.uv.sources]
 torch = [
-    { index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
-    { index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+    { index = "pytorch-nightly-cu130" },
 ]
 torchvision = [
-    { index = "pytorch-nightly-cu130", marker = "platform_machine != 'aarch64' or (platform_machine == 'aarch64' and 'tegra' not in platform_release)" },
-    { index = "jetson-containers", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+    { index = "pytorch-nightly-cu130" },
 ]

 [[tool.uv.index]]
@@ -114,50 +111,55 @@ url = "https://download.pytorch.org/whl/nightly/cu130"
 explicit = false

 [[tool.uv.index]]
-name = "pytorch-nightly-cu129"
-url = "https://download.pytorch.org/whl/nightly/cu129"
+name = "pytorch-nightly-cu128"
+url = "https://download.pytorch.org/whl/nightly/cu128"
 explicit = false

 [[tool.uv.index]]
-name = "jetson-containers"
-url = "https://pypi.jetson-ai-lab.io/jp6/cu126"
+name = "pytorch-nightly-cu126"
+url = "https://download.pytorch.org/whl/nightly/cu126"
 explicit = false

 [[tool.uv.index]]
-name = "nvidia"
-url = "https://pypi.nvidia.com"
+name = "pytorch-test-cu130"
+url = "https://download.pytorch.org/whl/test/cu130"
 explicit = false

-# [[tool.uv.index]]
-# name = "pytorch-nightly-cu124"
-# url = "https://download.pytorch.org/whl/nightly/cu124"
-# explicit = true
+[[tool.uv.index]]
+name = "pytorch-test-cu128"
+url = "https://download.pytorch.org/whl/test/cu128"
+explicit = false

-# [[tool.uv.index]]
-# name = "pytorch-nightly-cu118"
-# url = "https://download.pytorch.org/whl/nightly/cu118"
-# explicit = true
+[[tool.uv.index]]
+name = "pytorch-test-cu126"
+url = "https://download.pytorch.org/whl/test/cu126"
+explicit = false

-# [[tool.uv.index]]
-# name = "pytorch-test-cu124"
-# url = "https://download.pytorch.org/whl/test/cu124"
-# explicit = false
+[[tool.uv.index]]
+name = "pytorch-release-cu130"
+url = "https://download.pytorch.org/whl/release/cu130"
+explicit = false

-# [[tool.uv.index]]
-# name = "pytorch-test-cu118"
-# url = "https://download.pytorch.org/whl/test/cu118"
-# explicit = false
+[[tool.uv.index]]
+name = "pytorch-release-cu128"
+url = "https://download.pytorch.org/whl/release/cu128"
+explicit = false

-# [[tool.uv.index]]
-# name = "pytorch-release-cu124"
-# url = "https://download.pytorch.org/whl/cu124"
-# explicit = false
+[[tool.uv.index]]
+name = "pytorch-release-cu126"
+url = "https://download.pytorch.org/whl/release/cu126"
+explicit = false

 # [[tool.uv.index]]
-# name = "pytorch-release-cu118"
-# url = "https://download.pytorch.org/whl/cu118"
+# name = "jetson-containers"
+# url = "https://pypi.jetson-ai-lab.io/jp6/cu126"
 # explicit = false

+[[tool.uv.index]]
+name = "nvidia"
+url = "https://pypi.nvidia.com"
+explicit = false
+

 [tool.ruff]
 # NOTE: Synchronize the ignores with .flake8
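The net effect of this hunk: the per-platform sources (jetson-containers and the marker-gated entries) give way to a flat list of nightly/test/release indexes keyed by CUDA version. As a rough orientation (not part of the commit), the index URL matching a local torch build can be derived like this; channel is an illustrative parameter, while torch.version.cuda is the real attribute:

import torch

channel = "nightly"  # or "test" / "release", mirroring the [[tool.uv.index]] names above
cuda = torch.version.cuda  # e.g. "13.0" on a cu130 build; None on CPU-only builds
if cuda is None:
    raise RuntimeError("CPU-only torch build; no CUDA wheel index applies")
suffix = "cu" + cuda.replace(".", "")  # "13.0" -> "cu130"
print(f"https://download.pytorch.org/whl/{channel}/{suffix}")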

setup.py

Lines changed: 68 additions & 54 deletions
@@ -725,71 +725,85 @@ def run(self):
 with open(os.path.join(get_root_dir(), "README.md"), "r", encoding="utf-8") as fh:
     long_description = fh.read()

-base_requirements = [
-    "packaging>=23",
-    "typing-extensions>=4.7.0",
-    "dllist",
-    "psutil",
-    # dummy package as a WAR for the tensorrt dependency on nvidia-cuda-runtime-cu13
-    "nvidia-cuda-runtime-cu13==0.0.0a0",
-]

+def get_jetpack_requirements(base_requirements):
+    requirements = base_requirements + ["numpy<2.0.0"]
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        return requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]

-def get_requirements():
-    if IS_JETPACK:
-        requirements = get_jetpack_requirements()
-    elif IS_SBSA:
-        requirements = get_sbsa_requirements()
+
+def get_sbsa_requirements(base_requirements):
+    requirements = base_requirements + ["numpy"]
+    if IS_DLFW_CI:
+        return requirements
     else:
-        # standard linux and windows requirements
-        requirements = base_requirements + ["numpy"]
-        if not IS_DLFW_CI:
-            requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
-            if USE_TRT_RTX:
+        # TensorRT does not currently publish wheels for Tegra, so Thor uses the local
+        # tensorrt install from the tarball. Because Thor reuses the SBSA torch_tensorrt
+        # wheel, the SBSA wheel only needs to declare the tensorrt dependency.
+        return requirements + [
+            "torch>=2.10.0.dev,<2.11.0",
+            "tensorrt>=10.14.1,<10.15.0",
+        ]
+
+
+def get_x86_64_requirements(base_requirements):
+    requirements = base_requirements + ["numpy"]
+
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
+        if USE_TRT_RTX:
+            return requirements + [
+                "tensorrt_rtx>=1.2.0.54",
+            ]
+        else:
+            requirements = requirements + [
+                "tensorrt>=10.14.1,<10.15.0",
+            ]
+            cuda_version = torch.version.cuda
+            if cuda_version.startswith("12"):
+                # A bare tensorrt>=10.14.1,<10.15.0 in a cu12* environment pulls in both
+                # tensorrt_cu12 and tensorrt_cu13, which conflicts once cuda-toolkit 13
+                # is also pulled in, so pin tensorrt_cu12 explicitly here.
+                tensorrt_prefix = "tensorrt-cu12"
+                requirements = requirements + [
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
+                ]
+            elif cuda_version.startswith("13"):
+                tensorrt_prefix = "tensorrt-cu13"
                 requirements = requirements + [
-                    "tensorrt_rtx>=1.2.0.54",
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
                 ]
             else:
-                cuda_version = torch.version.cuda
-                if cuda_version.startswith("12"):
-                    # directly use tensorrt>=10.14.1,<10.15.0 in cu12* env, it will pull both tensorrt_cu12 and tensorrt_cu13
-                    # which will cause the conflict due to cuda-toolkit 13 is also pulled in, so we need to specify tensorrt_cu12 here
-                    tensorrt_prefix = "tensorrt-cu12"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
-                    ]
-                elif cuda_version.startswith("13"):
-                    tensorrt_prefix = "tensorrt-cu13"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
-                    ]
-                else:
-                    raise ValueError(f"Unsupported CUDA version: {cuda_version}")
-    return requirements
+                raise ValueError(f"Unsupported CUDA version: {cuda_version}")

-
-def get_jetpack_requirements():
-    jetpack_requirements = base_requirements + ["numpy<2.0.0"]
-    if IS_DLFW_CI:
-        return jetpack_requirements
-    return jetpack_requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]
+    return requirements


-def get_sbsa_requirements():
-    sbsa_requirements = base_requirements + ["numpy"]
-    if IS_DLFW_CI:
-        return sbsa_requirements
-    # TensorRT does not currently build wheels for Tegra, so we need to use the local tensorrt install from the tarball for thor
-    # also due to we use sbsa torch_tensorrt wheel for thor, so when we build sbsa wheel, we need to only include tensorrt dependency.
-    return sbsa_requirements + [
-        "torch>=2.10.0.dev,<2.11.0",
-        "tensorrt>=10.14.1,<10.15.0",
+def get_requirements():
+    base_requirements = [
+        "packaging>=23",
+        "typing-extensions>=4.7.0",
+        "dllist",
+        "psutil",
+        # dummy package as a WAR for the tensorrt dependency on nvidia-cuda-runtime-cu13
+        "nvidia-cuda-runtime-cu13==0.0.0a0",
     ]

+    if IS_JETPACK:
+        requirements = get_jetpack_requirements(base_requirements)
+    elif IS_SBSA:
+        requirements = get_sbsa_requirements(base_requirements)
+    else:
+        # standard linux and windows requirements
+        requirements = get_x86_64_requirements(base_requirements)
+    return requirements
+

 setup(
     name="torch_tensorrt",
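A detail worth flagging in the new cu13 branch: the pins add !=10.14.1.48, carving one specific build out of an otherwise open range. A small sketch with the real packaging library (illustrative, not part of the commit) shows the effect:

from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=10.14.1,<10.15.0,!=10.14.1.48")
print("10.14.1.48" in spec)  # False -- this one build is excluded from the range
print("10.14.1.49" in spec)  # True  -- neighboring builds still satisfy the pin
print("10.15.0" in spec)     # False -- the upper bound is exclusive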
