From 77028b6b334a37c2a3f65d56bc72198b7f81b28e Mon Sep 17 00:00:00 2001
From: Nikita Shulga
Date: Mon, 23 Jun 2025 08:24:47 -0700
Subject: [PATCH] Undo part of #9017

Not sure what the reason was for unrolling the template, but it caused a
regression in Linux CI, as one can observe in
https://hud.pytorch.org/hud/pytorch/vision/342eb9255dbf27f185432bfd4b252e1e125061d7/1?per_page=50&name_filter=unittests-linux%20(3.9%2C%20&mergeEphemeralLF=true
---
 test/test_ops.py | 68 +++++--------------------------------------------
 1 file changed, 7 insertions(+), 61 deletions(-)

diff --git a/test/test_ops.py b/test/test_ops.py
index eeed3345834..26d13bbe208 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1201,67 +1201,13 @@ def test_forward_scriptability(self):
         torch.jit.script(ops.DeformConv2d(in_channels=8, out_channels=8, kernel_size=3))
 
 
-@pytest.mark.parametrize("dtype", (torch.float16, torch.float32, torch.float64))
-@pytest.mark.parametrize("device", cpu_and_cuda())
-@pytest.mark.parametrize("requires_grad", (True, False))
-def test_deform_conv2d_opcheck(dtype, device, requires_grad):
-    batch_size, channels_in, height, width = 1, 6, 10, 10
-    kernel_size = (3, 3)
-    stride = (1, 1)
-    padding = (1, 1)
-    dilation = (1, 1)
-    groups = 2
-    out_channels = 4
-    out_h = (height + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
-    out_w = (width + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1
-    x = torch.randn(batch_size, channels_in, height, width, dtype=dtype, device=device, requires_grad=requires_grad)
-    offset = torch.randn(
-        batch_size,
-        2 * kernel_size[0] * kernel_size[1],
-        out_h,
-        out_w,
-        dtype=dtype,
-        device=device,
-        requires_grad=requires_grad,
-    )
-    weight = torch.randn(
-        out_channels,
-        channels_in // groups,
-        kernel_size[0],
-        kernel_size[1],
-        dtype=dtype,
-        device=device,
-        requires_grad=requires_grad,
-    )
-    bias = torch.randn(out_channels, dtype=dtype, device=device, requires_grad=requires_grad)
-    use_mask = True
-    mask = torch.sigmoid(
-        torch.randn(
-            batch_size,
-            kernel_size[0] * kernel_size[1],
-            out_h,
-            out_w,
-            dtype=dtype,
-            device=device,
-            requires_grad=requires_grad,
-        )
-    )
-    kwargs = {
-        "offset": offset,
-        "weight": weight,
-        "bias": bias,
-        "stride_h": stride[0],
-        "stride_w": stride[1],
-        "pad_h": padding[0],
-        "pad_w": padding[1],
-        "dilation_h": dilation[0],
-        "dilation_w": dilation[1],
-        "groups": groups,
-        "offset_groups": 1,
-        "use_mask": use_mask,
-        "mask": mask,  # no modulation in this test
-    }
-    optests.opcheck(torch.ops.torchvision.deform_conv2d, args=(x,), kwargs=kwargs)
+optests.generate_opcheck_tests(
+    testcase=TestDeformConv,
+    namespaces=["torchvision"],
+    failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
 
 
 class TestFrozenBNT:
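
The deleted function exercised the kernel through the internal optests.opcheck helper, while the patch has optests.generate_opcheck_tests derive the equivalent checks from the existing TestDeformConv cases. For a one-off local check outside the generated suite, a minimal standalone sketch along the following lines should work; it reuses the shapes and argument names from the deleted test, and it assumes PyTorch >= 2.4 (which exposes the public torch.library.opcheck wrapper around the same machinery) plus a torchvision build that registers torch.ops.torchvision.deform_conv2d. The helper name spot_check_deform_conv2d is illustrative and not part of the repository.

# Standalone sketch, not part of the patch: a one-off check of the
# torchvision::deform_conv2d custom op through the public opcheck API.
# Assumes PyTorch >= 2.4 (torch.library.opcheck) and a torchvision build
# whose extension registers torch.ops.torchvision.deform_conv2d.
import torch
import torchvision  # noqa: F401  (importing torchvision registers its custom ops)


def spot_check_deform_conv2d(device="cpu", dtype=torch.float32):
    # Shapes and argument names mirror the deleted test_deform_conv2d_opcheck.
    batch_size, channels_in, height, width = 1, 6, 10, 10
    kernel_size, stride, padding, dilation = (3, 3), (1, 1), (1, 1), (1, 1)
    groups, out_channels = 2, 4
    out_h = (height + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
    out_w = (width + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1

    x = torch.randn(batch_size, channels_in, height, width, dtype=dtype, device=device, requires_grad=True)
    offset = torch.randn(batch_size, 2 * kernel_size[0] * kernel_size[1], out_h, out_w, dtype=dtype, device=device)
    weight = torch.randn(out_channels, channels_in // groups, *kernel_size, dtype=dtype, device=device)
    bias = torch.randn(out_channels, dtype=dtype, device=device)
    mask = torch.sigmoid(
        torch.randn(batch_size, kernel_size[0] * kernel_size[1], out_h, out_w, dtype=dtype, device=device)
    )

    kwargs = {
        "offset": offset,
        "weight": weight,
        "bias": bias,
        "stride_h": stride[0],
        "stride_w": stride[1],
        "pad_h": padding[0],
        "pad_w": padding[1],
        "dilation_h": dilation[0],
        "dilation_w": dilation[1],
        "groups": groups,
        "offset_groups": 1,
        "use_mask": True,
        "mask": mask,
    }
    # Runs the schema, fake-tensor, autograd-registration and AOT-dispatch checks
    # and raises if any of them fails.
    torch.library.opcheck(torch.ops.torchvision.deform_conv2d, args=(x,), kwargs=kwargs)


if __name__ == "__main__":
    spot_check_deform_conv2d()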