@@ -1667,23 +1667,29 @@ func.func @test_lpnormalization(%arg0: !torch.vtensor<[3,4,5,6,7],f32>) -> !torc
16671667
16681668// -----
16691669
1670- // CHECK-LABEL: func.func @test_maxunpool_export_without_output_shape
1671- func.func @test_maxunpool_export_without_output_shape(%arg0: !torch.vtensor<[1,1,2,2],f32>, %arg1: !torch.vtensor<[1,1,2,2],si64>) -> !torch.vtensor<[1,1,4,4],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
1670+ // CHECK-LABEL: func.func @test_maxunpool_2d_export_without_output_shape
1671+ func.func @test_maxunpool_2d_export_without_output_shape(%arg0: !torch.vtensor<[1,1,2,2],f32>, %arg1: !torch.vtensor<[1,1,2,2],si64>) -> !torch.vtensor<[1,1,4,4],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
16721672 // CHECK: %[[INT1:.*]] = torch.constant.int 1
16731673 // CHECK: %[[INT1_0:.*]] = torch.constant.int 1
16741674 // CHECK: %[[INT4:.*]] = torch.constant.int 4
16751675 // CHECK: %[[INT4_0:.*]] = torch.constant.int 4
16761676 // CHECK: %[[OUTPUT_SHAPE:.*]] = torch.prim.ListConstruct %[[INT1]], %[[INT1_0]], %[[INT4]], %[[INT4_0]] : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
1677- // CHECK: %[[RESULT:.*]] = torch.aten.max_unpool2d %arg0, %arg1, %[[OUTPUT_SHAPE]] : !torch.vtensor<[1,1,2,2],f32>, !torch.vtensor<[1,1,2,2],si64>, !torch.list<int> -> !torch.vtensor<[1,1,4,4],f32>
1677+ // CHECK: %[[INT0:.*]] = torch.constant.int 0
1678+ // CHECK: %[[INT0_1:.*]] = torch.constant.int 0
1679+ // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[INT0]], %[[INT0_1]] : (!torch.int, !torch.int) -> !torch.list<int>
1680+ // CHECK: %[[INT2:.*]] = torch.constant.int 2
1681+ // CHECK: %[[INT2_1:.*]] = torch.constant.int 2
1682+ // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[INT2]], %[[INT2_1]] : (!torch.int, !torch.int) -> !torch.list<int>
1683+ // CHECK: %[[RESULT:.*]] = torch.aten.max_unpool3d %arg0, %arg1, %[[OUTPUT_SHAPE]], %[[STRIDE]], %[[PADDING]] : !torch.vtensor<[1,1,2,2],f32>, !torch.vtensor<[1,1,2,2],si64>, !torch.list<int>, !torch.list<int>, !torch.list<int> -> !torch.vtensor<[1,1,4,4],f32>
16781684 // return %[[RESULT]] : !torch.vtensor<[1,1,4,4],f32>
16791685 %0 = torch.operator "onnx.MaxUnpool"(%arg0, %arg1) {torch.onnx.kernel_shape = [2 : si64, 2 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,1,2,2],f32>, !torch.vtensor<[1,1,2,2],si64>) -> !torch.vtensor<[1,1,4,4],f32>
16801686 return %0 : !torch.vtensor<[1,1,4,4],f32>
16811687}
16821688
16831689// -----
16841690
1685- // CHECK-LABEL: func.func @test_maxunpool3d_export_without_output_shape
1686- func.func @test_maxunpool3d_export_without_output_shape(%arg0: !torch.vtensor<[1,1,2,2,2],f32>, %arg1: !torch.vtensor<[1,1,2,2,2],si64>) -> !torch.vtensor<[1,1,4,4,4],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
1691+ // CHECK-LABEL: func.func @test_maxunpool_3d_export_without_output_shape
1692+ func.func @test_maxunpool_3d_export_without_output_shape(%arg0: !torch.vtensor<[1,1,2,2,2],f32>, %arg1: !torch.vtensor<[1,1,2,2,2],si64>) -> !torch.vtensor<[1,1,4,4,4],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
16871693 // CHECK: %[[INT1:.*]] = torch.constant.int 1
16881694 // CHECK: %[[INT1_0:.*]] = torch.constant.int 1
16891695 // CHECK: %[[INT4:.*]] = torch.constant.int 4
0 commit comments