@@ -725,71 +725,85 @@ def run(self):
 with open(os.path.join(get_root_dir(), "README.md"), "r", encoding="utf-8") as fh:
     long_description = fh.read()
 
-base_requirements = [
-    "packaging>=23",
-    "typing-extensions>=4.7.0",
-    "dllist",
-    "psutil",
-    # dummy package as a WAR for the tensorrt dependency on nvidia-cuda-runtime-cu13
-    "nvidia-cuda-runtime-cu13==0.0.0a0",
-]
 
+def get_jetpack_requirements(base_requirements):
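+    # JetPack stays on older pins (torch 2.8.x, TensorRT 10.3.x) and caps
+    # numpy below 2.0; on DLFW CI, torch and tensorrt are presumably provided
+    # by the container image, so only the numpy pin is added there.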
+    requirements = base_requirements + ["numpy<2.0.0"]
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        return requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]
 
-def get_requirements():
-    if IS_JETPACK:
-        requirements = get_jetpack_requirements()
-    elif IS_SBSA:
-        requirements = get_sbsa_requirements()
+
+def get_sbsa_requirements(base_requirements):
+    requirements = base_requirements + ["numpy"]
+    if IS_DLFW_CI:
+        return requirements
     else:
-        # standard linux and windows requirements
-        requirements = base_requirements + ["numpy"]
-        if not IS_DLFW_CI:
-            requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
-            if USE_TRT_RTX:
+        # TensorRT does not currently publish wheels for Tegra, so Thor uses the
+        # local tensorrt install from the tarball. And because Thor reuses the
+        # SBSA torch_tensorrt wheel, the SBSA wheel declares only the standard
+        # tensorrt dependency.
+        return requirements + [
+            "torch>=2.10.0.dev,<2.11.0",
+            "tensorrt>=10.14.1,<10.15.0",
+        ]
+
+
+def get_x86_64_requirements(base_requirements):
+    requirements = base_requirements + ["numpy"]
+
+    if IS_DLFW_CI:
+        return requirements
+    else:
+        requirements = requirements + ["torch>=2.10.0.dev,<2.11.0"]
+        if USE_TRT_RTX:
+            return requirements + [
+                "tensorrt_rtx>=1.2.0.54",
+            ]
+        else:
+            requirements = requirements + [
+                "tensorrt>=10.14.1,<10.15.0",
+            ]
+            cuda_version = torch.version.cuda
+            if cuda_version.startswith("12"):
+                # Using the plain tensorrt>=10.14.1,<10.15.0 spec in a cu12* env
+                # pulls in both tensorrt_cu12 and tensorrt_cu13, which conflicts
+                # because cuda-toolkit 13 is dragged in as well, so pin
+                # tensorrt-cu12 explicitly here.
+                tensorrt_prefix = "tensorrt-cu12"
+                requirements = requirements + [
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
+                ]
+            elif cuda_version.startswith("13"):
+                tensorrt_prefix = "tensorrt-cu13"
                 requirements = requirements + [
-                    "tensorrt_rtx>=1.2.0.54",
+                    f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
+                    f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
                 ]
             else:
-                cuda_version = torch.version.cuda
-                if cuda_version.startswith("12"):
-                    # directly use tensorrt>=10.14.1,<10.15.0 in cu12* env, it will pull both tensorrt_cu12 and tensorrt_cu13
-                    # which will cause the conflict due to cuda-toolkit 13 is also pulled in, so we need to specify tensorrt_cu12 here
-                    tensorrt_prefix = "tensorrt-cu12"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0",
-                    ]
-                elif cuda_version.startswith("13"):
-                    tensorrt_prefix = "tensorrt-cu13"
-                    requirements = requirements + [
-                        f"{tensorrt_prefix}>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-bindings>=10.14.1,<10.15.0,!=10.14.1.48",
-                        f"{tensorrt_prefix}-libs>=10.14.1,<10.15.0,!=10.14.1.48",
-                    ]
-                else:
-                    raise ValueError(f"Unsupported CUDA version: {cuda_version}")
-    return requirements
+                raise ValueError(f"Unsupported CUDA version: {cuda_version}")
 
-
-def get_jetpack_requirements():
-    jetpack_requirements = base_requirements + ["numpy<2.0.0"]
-    if IS_DLFW_CI:
-        return jetpack_requirements
-    return jetpack_requirements + ["torch>=2.8.0,<2.9.0", "tensorrt>=10.3.0,<10.4.0"]
+    return requirements
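+    # Illustrative result (mirrors the pins above) for a cu13 env without RTX
+    # and outside DLFW CI: base_requirements + ["numpy",
+    # "torch>=2.10.0.dev,<2.11.0", "tensorrt>=10.14.1,<10.15.0",
+    # "tensorrt-cu13>=10.14.1,<10.15.0,!=10.14.1.48", ...].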
 
 
-def get_sbsa_requirements():
-    sbsa_requirements = base_requirements + ["numpy"]
-    if IS_DLFW_CI:
-        return sbsa_requirements
-    # TensorRT does not currently build wheels for Tegra, so we need to use the local tensorrt install from the tarball for thor
-    # also due to we use sbsa torch_tensorrt wheel for thor, so when we build sbsa wheel, we need to only include tensorrt dependency.
-    return sbsa_requirements + [
-        "torch>=2.10.0.dev,<2.11.0",
-        "tensorrt>=10.14.1,<10.15.0",
+def get_requirements():
+    base_requirements = [
+        "packaging>=23",
+        "typing-extensions>=4.7.0",
+        "dllist",
+        "psutil",
+        # dummy package as a workaround (WAR) for the tensorrt dependency on nvidia-cuda-runtime-cu13
+        "nvidia-cuda-runtime-cu13==0.0.0a0",
     ]
 
+    if IS_JETPACK:
+        requirements = get_jetpack_requirements(base_requirements)
+    elif IS_SBSA:
+        requirements = get_sbsa_requirements(base_requirements)
+    else:
+        # standard Linux and Windows (x86_64) requirements
+        requirements = get_x86_64_requirements(base_requirements)
+    return requirements
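+# IS_JETPACK, IS_SBSA, IS_DLFW_CI, and USE_TRT_RTX are platform/build flags set
+# earlier in setup.py (outside this hunk); passing base_requirements in
+# explicitly keeps the per-platform helpers free of module-level state.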
+
 
 setup(
     name="torch_tensorrt",
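
To sanity-check the new CUDA-version dispatch in isolation, here is a minimal standalone sketch. The helper name pick_tensorrt_pins is hypothetical (it does not exist in the commit); the pins simply mirror the cu12/cu13 branch of get_x86_64_requirements above.

def pick_tensorrt_pins(cuda_version):
    # Hypothetical helper: reproduces the cu12/cu13 branch of
    # get_x86_64_requirements() for quick inspection.
    if cuda_version.startswith("12"):
        prefix, spec = "tensorrt-cu12", ">=10.14.1,<10.15.0"
    elif cuda_version.startswith("13"):
        # cu13 additionally excludes 10.14.1.48, matching the != pin in the commit.
        prefix, spec = "tensorrt-cu13", ">=10.14.1,<10.15.0,!=10.14.1.48"
    else:
        raise ValueError(f"Unsupported CUDA version: {cuda_version}")
    return [f"{prefix}{spec}", f"{prefix}-bindings{spec}", f"{prefix}-libs{spec}"]

assert pick_tensorrt_pins("12.8")[1] == "tensorrt-cu12-bindings>=10.14.1,<10.15.0"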