From 8f027788a004b493c7f5cf5219cc633331d733f7 Mon Sep 17 00:00:00 2001 From: lanluo-nvidia Date: Sat, 22 Nov 2025 20:20:19 -0800 Subject: [PATCH 1/6] change trt_ep_path so that parallel test execution won't mix up --- tests/py/dynamo/models/test_export_serde.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/py/dynamo/models/test_export_serde.py b/tests/py/dynamo/models/test_export_serde.py index c5b007e34b..1d58f4fb3f 100644 --- a/tests/py/dynamo/models/test_export_serde.py +++ b/tests/py/dynamo/models/test_export_serde.py @@ -141,6 +141,8 @@ def test_no_compile(ir): This tests export serde functionality on a model which won't convert to TRT because of min_block_size=5 constraint """ + tmp_dir = tempfile.mkdtemp(prefix="test_no_compile") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class MyModule(torch.nn.Module): def __init__(self): From 2fddc94ea21105fcf00e57c3e81ed8fa55e1c645 Mon Sep 17 00:00:00 2001 From: lanluo-nvidia Date: Sat, 22 Nov 2025 19:34:52 -0800 Subject: [PATCH 2/6] change trt_ep_path so that parallel test execution won't mix up --- .../dynamo/models/test_export_kwargs_serde.py | 15 ++++++--- tests/py/dynamo/models/test_export_serde.py | 31 +++++++++++++++++-- tests/py/dynamo/models/test_model_refit.py | 6 ++-- tests/py/dynamo/models/test_reexport.py | 29 +++++++++++++++-- 4 files changed, 68 insertions(+), 13 deletions(-) diff --git a/tests/py/dynamo/models/test_export_kwargs_serde.py b/tests/py/dynamo/models/test_export_kwargs_serde.py index dabbad3cc8..3efe498223 100644 --- a/tests/py/dynamo/models/test_export_kwargs_serde.py +++ b/tests/py/dynamo/models/test_export_kwargs_serde.py @@ -75,7 +75,8 @@ def forward(self, x, b=5, c=None, d=None): ) # Save the module - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_custom_model") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env torch._dynamo.reset() @@ -137,7 +138,8 @@ def forward(self, x, b=5, c=None, d=None): ) # Save the module - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env torch._dynamo.reset() @@ -208,7 +210,8 @@ def forward(self, x, b=5, c=None, d=None): ) # Save the module - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_dynamic") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env torch._dynamo.reset() @@ -298,7 +301,8 @@ def forward(self, x, b=None, c=None, d=None, e=[]): msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) # Save the module - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env torch._dynamo.reset() @@ -388,7 +392,8 @@ def forward(self, x, b=None, c=None, d=None, e=[]): msg=f"CustomKwargs Module TRT outputs don't match with the original model. 
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) # Save the module - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env torch._dynamo.reset() diff --git a/tests/py/dynamo/models/test_export_serde.py b/tests/py/dynamo/models/test_export_serde.py index 1d58f4fb3f..1979fca298 100644 --- a/tests/py/dynamo/models/test_export_serde.py +++ b/tests/py/dynamo/models/test_export_serde.py @@ -17,9 +17,6 @@ if importlib.util.find_spec("torchvision"): import torchvision.models as models -trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep") - - @pytest.mark.unit @pytest.mark.critical def test_base_full_compile(ir): @@ -27,6 +24,8 @@ def test_base_full_compile(ir): This tests export serde functionality on a base model which is fully TRT convertible """ + tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class MyModule(torch.nn.Module): def __init__(self): @@ -82,6 +81,9 @@ def test_base_full_compile_multiple_outputs(ir): with multiple outputs which is fully TRT convertible """ + tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile_multiple_outputs") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -204,6 +206,9 @@ def test_hybrid_relu_fallback(ir): fallback """ + tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_relu_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -264,6 +269,9 @@ def test_resnet18(ir): """ This tests export save and load functionality on Resnet18 model """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + model = models.resnet18().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -309,6 +317,9 @@ def test_resnet18_cpu_offload(ir): """ This tests export save and load functionality on Resnet18 model """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_cpu_offload") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + model = models.resnet18().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -361,6 +372,9 @@ def test_resnet18_dynamic(ir): """ This tests export save and load functionality on Resnet18 model """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_dynamic") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + model = models.resnet18().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -401,6 +415,9 @@ def test_resnet18_torch_exec_ops_serde(ir): """ This tests export save and load functionality on Resnet18 model """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_torch_exec_ops_serde") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + model = models.resnet18().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -434,6 +451,9 @@ def test_hybrid_conv_fallback(ir): model where a conv (a weighted layer) has been forced to fallback to Pytorch. """ + tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_conv_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -495,6 +515,9 @@ def test_hybrid_conv_fallback_cpu_offload(ir): model where a conv (a weighted layer) has been forced to fallback to Pytorch. 
""" + tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_conv_fallback_cpu_offload") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -557,6 +580,8 @@ def test_arange_export(ir): Here the arange output is a static constant (which is registered as input to the graph) in the exporter. """ + tmp_dir = tempfile.mkdtemp(prefix="test_arange_export") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class MyModule(torch.nn.Module): def __init__(self): diff --git a/tests/py/dynamo/models/test_model_refit.py b/tests/py/dynamo/models/test_model_refit.py index e6b7f6e2a4..fe6d4c2ce5 100644 --- a/tests/py/dynamo/models/test_model_refit.py +++ b/tests/py/dynamo/models/test_model_refit.py @@ -532,7 +532,8 @@ def test_refit_one_engine_bert_with_weightmap(): ) @pytest.mark.unit def test_refit_one_engine_inline_runtime_with_weightmap(): - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_with_weightmap") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") model = models.resnet18(pretrained=False).eval().to("cuda") model2 = models.resnet18(pretrained=True).eval().to("cuda") inputs = [torch.randn((1, 3, 224, 224)).to("cuda")] @@ -889,7 +890,8 @@ def test_refit_one_engine_bert_without_weightmap(): ) @pytest.mark.unit def test_refit_one_engine_inline_runtime_without_weightmap(): - trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep") + tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_without_weightmap") + trt_ep_path = os.path.join(tmp_dir, "compiled.ep") model = models.resnet18(pretrained=True).eval().to("cuda") model2 = models.resnet18(pretrained=False).eval().to("cuda") inputs = [torch.randn((1, 3, 224, 224)).to("cuda")] diff --git a/tests/py/dynamo/models/test_reexport.py b/tests/py/dynamo/models/test_reexport.py index 7c414def52..e8149dbe2e 100644 --- a/tests/py/dynamo/models/test_reexport.py +++ b/tests/py/dynamo/models/test_reexport.py @@ -14,8 +14,6 @@ if importlib.util.find_spec("torchvision"): import torchvision.models as models -trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep") - @pytest.mark.unit @pytest.mark.critical @@ -56,6 +54,9 @@ def forward(self, x): # Reexport trt_exp_program = torch.export.export(trt_module, (input,), strict=False) + tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + torch.export.save(trt_exp_program, trt_ep_path) deser_trt_module = torchtrt.load(trt_ep_path).module() @@ -82,6 +83,9 @@ def test_base_full_compile_multiple_outputs(ir): with multiple outputs which is fully TRT convertible """ + tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile_multiple_outputs") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -156,7 +160,8 @@ def forward(self, x): conv = conv * 0.5 relu = self.relu(conv) return conv, relu - + tmp_dir = tempfile.mkdtemp(prefix="test_no_compile") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") model = MyModule().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -207,6 +212,9 @@ def test_hybrid_relu_fallback(ir): fallback """ + tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_relu_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + class MyModule(torch.nn.Module): def __init__(self): super().__init__() @@ -270,6 +278,9 @@ def test_resnet18(ir): """ This tests export save and load functionality on 
Resnet18 model """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") + model = models.resnet18().eval().cuda() input = torch.randn((1, 3, 224, 224)).to("cuda") @@ -315,6 +326,8 @@ def test_hybrid_conv_fallback(ir): This tests export save and load functionality on a hybrid model where a conv (a weighted layer) has been forced to fallback to Pytorch. """ + tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_conv_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class MyModule(torch.nn.Module): def __init__(self): @@ -378,6 +391,8 @@ def test_arange_export(ir): Here the arange output is a static constant (which is registered as input to the graph) in the exporter. """ + tmp_dir = tempfile.mkdtemp(prefix="test_arange_export") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class MyModule(torch.nn.Module): def __init__(self): @@ -440,6 +455,8 @@ def test_resnet18_dynamic(ir): """ This tests export save and load functionality on Resnet18 model with dynamic shapes """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_dynamic") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") model = models.resnet18().eval().cuda() input_bs2 = torch.randn((2, 3, 224, 224)).to("cuda") @@ -514,6 +531,8 @@ def test_resnet18_dynamic_fallback(ir): """ This tests export save and load functionality on Resnet18 model with dynamic shapes and fallback """ + tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_dynamic_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") model = models.resnet18().eval().cuda() input_bs2 = torch.randn((2, 3, 224, 224)).to("cuda") @@ -588,6 +607,8 @@ def test_bitwise_and_dynamic_fallback(ir): """ This tests export save and load functionality on a bitwise_and model with dynamic shapes and fallback """ + tmp_dir = tempfile.mkdtemp(prefix="test_bitwise_and_dynamic_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class bitwise_and(torch.nn.Module): def forward(self, lhs_val, rhs_val): @@ -668,6 +689,8 @@ def test_random_dynamic_fallback(ir): """ This tests export save and load functionality on a random model with dynamic shapes and fallback """ + tmp_dir = tempfile.mkdtemp(prefix="test_random_dynamic_fallback") + trt_ep_path = os.path.join(tmp_dir, "trt.ep") class NeuralNetwork(nn.Module): def __init__(self): From f66d951f7e5225a33f968e9669ed7ec86169d174 Mon Sep 17 00:00:00 2001 From: lanluo-nvidia Date: Sat, 22 Nov 2025 19:04:36 -0800 Subject: [PATCH 3/6] restore nonezero test for thor --- py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py index 9ee84643c9..2acebea673 100644 --- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py +++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py @@ -429,7 +429,7 @@ def index_nonbool_validator( node: Node, settings: Optional[CompilationSettings] = None ) -> bool: # for thor and tensorrt_rtx, we don't support boolean indices, due to nonzero op not supported - if is_thor() or ENABLED_FEATURES.tensorrt_rtx: + if ENABLED_FEATURES.tensorrt_rtx: index = node.args[1] for ind in index: if ind is not None: From c35de076858c8fcda76e82a197370ec65b26a7cd Mon Sep 17 00:00:00 2001 From: lanluo-nvidia Date: Sat, 22 Nov 2025 18:14:23 -0800 Subject: [PATCH 4/6] restore nonezero test for thor --- .../py/dynamo/conversion/test_arange_aten.py | 6 ++--- .../py/dynamo/conversion/test_cumsum_aten.py | 6 
++--- tests/py/dynamo/conversion/test_index_aten.py | 22 ++++++++----------- .../py/dynamo/conversion/test_nonzero_aten.py | 6 ++--- tests/py/dynamo/conversion/test_sym_size.py | 5 ----- .../dynamo/runtime/test_output_allocator.py | 10 ++++----- 6 files changed, 23 insertions(+), 32 deletions(-) diff --git a/tests/py/dynamo/conversion/test_arange_aten.py b/tests/py/dynamo/conversion/test_arange_aten.py index 968611a4ec..d7b5298be5 100644 --- a/tests/py/dynamo/conversion/test_arange_aten.py +++ b/tests/py/dynamo/conversion/test_arange_aten.py @@ -5,14 +5,14 @@ import torch_tensorrt from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt._utils import is_tegra_platform, is_thor +from torch_tensorrt._utils import is_tegra_platform from .harness import DispatchTestCase @unittest.skipIf( - is_thor() or is_tegra_platform(), - "Skipped on Thor and Tegra platforms", + is_tegra_platform(), + "Skipped on Tegra platforms", ) class TestArangeConverter(DispatchTestCase): @parameterized.expand( diff --git a/tests/py/dynamo/conversion/test_cumsum_aten.py b/tests/py/dynamo/conversion/test_cumsum_aten.py index a677212cb1..e5e0d7df29 100644 --- a/tests/py/dynamo/conversion/test_cumsum_aten.py +++ b/tests/py/dynamo/conversion/test_cumsum_aten.py @@ -5,14 +5,14 @@ import torch_tensorrt from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt._utils import is_tegra_platform, is_thor +from torch_tensorrt._utils import is_tegra_platform from .harness import DispatchTestCase @unittest.skipIf( - is_thor() or is_tegra_platform(), - "Skipped on Thor and Tegra platforms", + is_tegra_platform(), + "Skipped on Tegra platforms", ) class TestCumsumConverter(DispatchTestCase): @parameterized.expand( diff --git a/tests/py/dynamo/conversion/test_index_aten.py b/tests/py/dynamo/conversion/test_index_aten.py index e34dc48dd5..ee6b61305f 100644 --- a/tests/py/dynamo/conversion/test_index_aten.py +++ b/tests/py/dynamo/conversion/test_index_aten.py @@ -6,7 +6,7 @@ from parameterized import parameterized from torch.testing._internal.common_utils import run_tests from torch_tensorrt import ENABLED_FEATURES, Input -from torch_tensorrt._utils import is_tegra_platform, is_thor +from torch_tensorrt._utils import is_tegra_platform from .harness import DispatchTestCase @@ -114,8 +114,8 @@ def forward(self, input): ] ) @unittest.skipIf( - is_thor() or ENABLED_FEATURES.tensorrt_rtx, - "Skipped on Thor or tensorrt_rtx due to nonzero not supported", + ENABLED_FEATURES.tensorrt_rtx, + "Skipped on tensorrt_rtx due to nonzero not supported", ) def test_index_constant_bool_mask(self, _, index, input): class TestModule(torch.nn.Module): @@ -149,8 +149,8 @@ def forward(self, x, index0): ) @unittest.skipIf( - is_thor() or ENABLED_FEATURES.tensorrt_rtx, - "Skipped on Thor or tensorrt_rtx due to nonzero not supported", + iENABLED_FEATURES.tensorrt_rtx, + "Skipped on tensorrt_rtx due to nonzero not supported", ) def test_index_zero_two_dim_ITensor_mask(self): class TestModule(nn.Module): @@ -163,10 +163,6 @@ def forward(self, x, index0): index0 = torch.tensor([True, False]) self.run_test(TestModule(), [input, index0], enable_passes=True) - @unittest.skipIf( - is_thor(), - "Skipped on Thor due to nonzero not supported", - ) def test_index_zero_index_three_dim_ITensor(self): class TestModule(nn.Module): def forward(self, x, index0): @@ -180,8 +176,8 @@ def forward(self, x, index0): self.run_test(TestModule(), [input, index0]) 
     @unittest.skipIf(
-        is_thor() or ENABLED_FEATURES.tensorrt_rtx,
-        "Skipped on Thor or tensorrt_rtx due to nonzero not supported",
+        ENABLED_FEATURES.tensorrt_rtx,
+        "Skipped on tensorrt_rtx due to nonzero not supported",
     )
     def test_index_zero_index_three_dim_mask_ITensor(self):
         class TestModule(nn.Module):
@@ -252,8 +248,8 @@ def forward(self, input):
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
-    "nonzero is not supported for tensorrt_rtx",
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
+    "nonzero is not supported for tensorrt_rtx or Tegra platforms",
 )
 class TestIndexDynamicInputNonDynamicIndexConverter(DispatchTestCase):
     def test_index_input_non_dynamic_index_dynamic(self):
diff --git a/tests/py/dynamo/conversion/test_nonzero_aten.py b/tests/py/dynamo/conversion/test_nonzero_aten.py
index 641cc7c098..646f000a8d 100644
--- a/tests/py/dynamo/conversion/test_nonzero_aten.py
+++ b/tests/py/dynamo/conversion/test_nonzero_aten.py
@@ -6,14 +6,14 @@
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
 from torch_tensorrt import Input
-from torch_tensorrt._utils import is_tegra_platform, is_thor
+from torch_tensorrt._utils import is_tegra_platform
 
 from .harness import DispatchTestCase
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
-    "nonzero is not supported for tensorrt_rtx",
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
+    "nonzero is not supported for tensorrt_rtx or Tegra platforms",
 )
 class TestNonZeroConverter(DispatchTestCase):
     @parameterized.expand(
diff --git a/tests/py/dynamo/conversion/test_sym_size.py b/tests/py/dynamo/conversion/test_sym_size.py
index 770dd75504..968ecd322b 100644
--- a/tests/py/dynamo/conversion/test_sym_size.py
+++ b/tests/py/dynamo/conversion/test_sym_size.py
@@ -4,15 +4,10 @@
 import torch.nn as nn
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt._utils import is_thor
 
 from .harness import DispatchTestCase
 
 
-@unittest.skipIf(
-    is_thor(),
-    "Skipped on Thor",
-)
 class TestSymSizeConverter(DispatchTestCase):
     @parameterized.expand(
         [
diff --git a/tests/py/dynamo/runtime/test_output_allocator.py b/tests/py/dynamo/runtime/test_output_allocator.py
index ce00135c99..28d8ad3e94 100644
--- a/tests/py/dynamo/runtime/test_output_allocator.py
+++ b/tests/py/dynamo/runtime/test_output_allocator.py
@@ -5,7 +5,7 @@
 import torch_tensorrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import TestCase, run_tests
-from torch_tensorrt._utils import is_tegra_platform, is_thor
+from torch_tensorrt._utils import is_tegra_platform
 
 from ..testing_utilities import DECIMALS_OF_AGREEMENT
 
@@ -45,7 +45,7 @@ def forward(self, input):
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorStaticModel(TestCase):
@@ -158,7 +158,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSModel(TestCase):
@@ -268,7 +268,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSOpWithReductionOpModel(TestCase):
@@ -382,7 +382,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):
 
 
 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSModelWithGraphBreak(TestCase):

From 1669fd708f5b2a55994453686cd372e0a8b646c6 Mon Sep 17 00:00:00 2001
From: lanluo-nvidia
Date: Sat, 22 Nov 2025 20:46:18 -0800
Subject: [PATCH 5/6] test

---
 tests/py/dynamo/conversion/test_index_aten.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/py/dynamo/conversion/test_index_aten.py b/tests/py/dynamo/conversion/test_index_aten.py
index ee6b61305f..fd8c35de53 100644
--- a/tests/py/dynamo/conversion/test_index_aten.py
+++ b/tests/py/dynamo/conversion/test_index_aten.py
@@ -149,7 +149,7 @@ def forward(self, x, index0):
     )
 
     @unittest.skipIf(
-        iENABLED_FEATURES.tensorrt_rtx,
+        ENABLED_FEATURES.tensorrt_rtx,
         "Skipped on tensorrt_rtx due to nonzero not supported",
     )
     def test_index_zero_two_dim_ITensor_mask(self):

From e553b1c285ecadd02ab0b6b916a86e6c5ba90d94 Mon Sep 17 00:00:00 2001
From: lanluo-nvidia
Date: Sat, 22 Nov 2025 21:06:49 -0800
Subject: [PATCH 6/6] lint

---
 tests/py/dynamo/models/test_export_kwargs_serde.py | 8 ++++++--
 tests/py/dynamo/models/test_export_serde.py        | 1 +
 tests/py/dynamo/models/test_model_refit.py         | 8 ++++++--
 tests/py/dynamo/models/test_reexport.py            | 1 +
 4 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/tests/py/dynamo/models/test_export_kwargs_serde.py b/tests/py/dynamo/models/test_export_kwargs_serde.py
index 3efe498223..81121cbccd 100644
--- a/tests/py/dynamo/models/test_export_kwargs_serde.py
+++ b/tests/py/dynamo/models/test_export_kwargs_serde.py
@@ -301,7 +301,9 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
         msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
     )
     # Save the module
-    tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic")
+    tmp_dir = tempfile.mkdtemp(
+        prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic"
+    )
     trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
@@ -392,7 +394,9 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
         msg=f"CustomKwargs Module TRT outputs don't match with the original model.
Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}", ) # Save the module - tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic") + tmp_dir = tempfile.mkdtemp( + prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic" + ) trt_ep_path = os.path.join(tmp_dir, "compiled.ep") torchtrt.save(trt_gm, trt_ep_path, retrace=False) # Clean up model env diff --git a/tests/py/dynamo/models/test_export_serde.py b/tests/py/dynamo/models/test_export_serde.py index 1979fca298..8c00d1ec3d 100644 --- a/tests/py/dynamo/models/test_export_serde.py +++ b/tests/py/dynamo/models/test_export_serde.py @@ -17,6 +17,7 @@ if importlib.util.find_spec("torchvision"): import torchvision.models as models + @pytest.mark.unit @pytest.mark.critical def test_base_full_compile(ir): diff --git a/tests/py/dynamo/models/test_model_refit.py b/tests/py/dynamo/models/test_model_refit.py index fe6d4c2ce5..c9d4595d43 100644 --- a/tests/py/dynamo/models/test_model_refit.py +++ b/tests/py/dynamo/models/test_model_refit.py @@ -532,7 +532,9 @@ def test_refit_one_engine_bert_with_weightmap(): ) @pytest.mark.unit def test_refit_one_engine_inline_runtime_with_weightmap(): - tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_with_weightmap") + tmp_dir = tempfile.mkdtemp( + prefix="test_refit_one_engine_inline_runtime_with_weightmap" + ) trt_ep_path = os.path.join(tmp_dir, "compiled.ep") model = models.resnet18(pretrained=False).eval().to("cuda") model2 = models.resnet18(pretrained=True).eval().to("cuda") @@ -890,7 +892,9 @@ def test_refit_one_engine_bert_without_weightmap(): ) @pytest.mark.unit def test_refit_one_engine_inline_runtime_without_weightmap(): - tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_without_weightmap") + tmp_dir = tempfile.mkdtemp( + prefix="test_refit_one_engine_inline_runtime_without_weightmap" + ) trt_ep_path = os.path.join(tmp_dir, "compiled.ep") model = models.resnet18(pretrained=True).eval().to("cuda") model2 = models.resnet18(pretrained=False).eval().to("cuda") diff --git a/tests/py/dynamo/models/test_reexport.py b/tests/py/dynamo/models/test_reexport.py index e8149dbe2e..0ede5a9226 100644 --- a/tests/py/dynamo/models/test_reexport.py +++ b/tests/py/dynamo/models/test_reexport.py @@ -160,6 +160,7 @@ def forward(self, x): conv = conv * 0.5 relu = self.relu(conv) return conv, relu + tmp_dir = tempfile.mkdtemp(prefix="test_no_compile") trt_ep_path = os.path.join(tmp_dir, "trt.ep") model = MyModule().eval().cuda()
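
The recurring change in this series is the same small pattern: each test creates its own scratch directory with tempfile.mkdtemp(prefix=<test name>) and writes its serialized engine there, instead of sharing a single trt.ep/compiled.ep under tempfile.gettempdir() that parallel test workers could overwrite. A minimal sketch of that pattern, for reference only (the test name, the body, and the explicit cleanup step are illustrative assumptions, not part of the diffs above):

import os
import shutil
import tempfile


def test_example_roundtrip():
    # Per-test directory: parallel workers get distinct paths, so one test's
    # saved engine can no longer clobber another's.
    tmp_dir = tempfile.mkdtemp(prefix="test_example_roundtrip")
    trt_ep_path = os.path.join(tmp_dir, "trt.ep")
    try:
        # compile, then save/load as the tests above do:
        #   torchtrt.save(trt_gm, trt_ep_path, retrace=False)
        #   deser_trt_module = torchtrt.load(trt_ep_path).module()
        ...
    finally:
        # tempfile.mkdtemp() does not delete the directory on its own; removing
        # it here keeps repeated runs from accumulating stale engine files.
        shutil.rmtree(tmp_dir, ignore_errors=True)

pytest's tmp_path fixture would give the same per-test isolation with automatic cleanup; the patches above keep the plain tempfile.mkdtemp() call instead.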