thor nonzero is fixed in TRT 14, re-enable it #3928
Conversation
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_export_serde.py 2025-11-23 04:44:15.040051+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_export_serde.py 2025-11-23 04:44:58.015912+00:00
@@ -15,10 +15,11 @@
assertions = unittest.TestCase()
if importlib.util.find_spec("torchvision"):
import torchvision.models as models
+
@pytest.mark.unit
@pytest.mark.critical
def test_base_full_compile(ir):
"""
This tests export serde functionality on a base model
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_export_kwargs_serde.py 2025-11-23 04:44:15.040051+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_export_kwargs_serde.py 2025-11-23 04:44:58.214570+00:00
@@ -299,11 +299,13 @@
assertions.assertTrue(
cos_sim > COSINE_THRESHOLD,
msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
)
# Save the module
- tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic")
+ tmp_dir = tempfile.mkdtemp(
+ prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic"
+ )
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
@@ -390,11 +392,13 @@
assertions.assertTrue(
cos_sim > COSINE_THRESHOLD,
msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
)
# Save the module
- tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic")
+ tmp_dir = tempfile.mkdtemp(
+ prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic"
+ )
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_model_refit.py 2025-11-23 04:44:15.040051+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_model_refit.py 2025-11-23 04:44:58.911484+00:00
@@ -530,11 +530,13 @@
not torch_trt.ENABLED_FEATURES.refit,
"Refit feature is not supported in Python 3.13 or higher",
)
@pytest.mark.unit
def test_refit_one_engine_inline_runtime_with_weightmap():
- tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_with_weightmap")
+ tmp_dir = tempfile.mkdtemp(
+ prefix="test_refit_one_engine_inline_runtime_with_weightmap"
+ )
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
model = models.resnet18(pretrained=False).eval().to("cuda")
model2 = models.resnet18(pretrained=True).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
enabled_precisions = {torch.float}
@@ -888,11 +890,13 @@
not torch_trt.ENABLED_FEATURES.refit,
"Refit feature is not supported in Python 3.13 or higher",
)
@pytest.mark.unit
def test_refit_one_engine_inline_runtime_without_weightmap():
- tmp_dir = tempfile.mkdtemp(prefix="test_refit_one_engine_inline_runtime_without_weightmap")
+ tmp_dir = tempfile.mkdtemp(
+ prefix="test_refit_one_engine_inline_runtime_without_weightmap"
+ )
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
enabled_precisions = {torch.float}
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_reexport.py 2025-11-23 04:44:15.040051+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_reexport.py 2025-11-23 04:44:59.288504+00:00
@@ -158,10 +158,11 @@
def forward(self, x):
conv = self.conv(x)
conv = conv * 0.5
relu = self.relu(conv)
return conv, relu
+
tmp_dir = tempfile.mkdtemp(prefix="test_no_compile")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")
model = MyModule().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")
Description
Two changes:
Re-enable the Thor nonzero test now that the issue is fixed in TensorRT 14.
Switch to a unique tmp dir per test so that tests running in parallel don't mix up each other's saved artifacts (a sketch follows below).
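To illustrate the tmp-dir change, here is a minimal sketch (not code from this PR; the helper name is hypothetical) of why a uniquely named directory per test keeps parallel workers from overwriting each other's saved engines.

import os
import tempfile


def unique_engine_path(test_name: str, filename: str = "compiled.ep") -> str:
    # Hypothetical helper mirroring the pattern in the touched tests:
    # mkdtemp appends a random suffix to the prefix, so two workers running
    # the same test at the same time always get different directories.
    tmp_dir = tempfile.mkdtemp(prefix=test_name)
    return os.path.join(tmp_dir, filename)


# Usage, wrapped the way the formatter expects for long prefixes:
trt_ep_path = unique_engine_path(
    "test_refit_one_engine_inline_runtime_with_weightmap"
)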
Fixes # (issue)
Type of change
Please delete options that are not relevant and/or add your own.
Checklist: