@@ -265,3 +265,39 @@ func.func @test_hardsigmoid_default(%arg0: !torch.vtensor<[3,4,5],f32>) -> !torc
   %0 = torch.operator "onnx.HardSigmoid"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
   return %0 : !torch.vtensor<[3,4,5],f32>
 }
+
+// -----
+
+// CHECK-LABEL: @test_globalaveragepool
+func.func @test_globalaveragepool(%arg0: !torch.vtensor<[1,3,5,5],f32>) -> !torch.vtensor<[1,3,1,1],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 1 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: %[[C0:.*]] = torch.constant.int 0
+  // CHECK: %[[C1:.*]] = torch.constant.int 1
+  // CHECK: %[[C5:.*]] = torch.constant.int 5
+  // CHECK: %[[C5_0:.*]] = torch.constant.int 5
+  // CHECK: %[[KERNELSIZE:.*]] = torch.prim.ListConstruct %[[C5]], %[[C5_0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[FALSE:.*]] = torch.constant.bool false
+  // CHECK: %[[NONE:.*]] = torch.constant.none
+  // CHECK: torch.aten.avg_pool2d %arg0, %[[KERNELSIZE]], %[[STRIDE]], %[[PADDING]], %[[FALSE]], %[[FALSE]], %[[NONE]] : !torch.vtensor<[1,3,5,5],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,3,1,1],f32>
+  %0 = torch.operator "onnx.GlobalAveragePool"(%arg0) : (!torch.vtensor<[1,3,5,5],f32>) -> !torch.vtensor<[1,3,1,1],f32>
+  return %0 : !torch.vtensor<[1,3,1,1],f32>
+}
+
+// -----
+
+// CHECK-LABEL: @test_globalaveragepool_precomputed
+func.func @test_globalaveragepool_precomputed(%arg0: !torch.vtensor<[1,1,3,3],f32>) -> !torch.vtensor<[1,1,1,1],f32> attributes {torch.onnx_meta.ir_version = 3 : si64, torch.onnx_meta.opset_version = 1 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
+  // CHECK: %[[C0:.*]] = torch.constant.int 0
+  // CHECK: %[[C1:.*]] = torch.constant.int 1
+  // CHECK: %[[C3:.*]] = torch.constant.int 3
+  // CHECK: %[[C3_0:.*]] = torch.constant.int 3
+  // CHECK: %[[KERNELSIZE:.*]] = torch.prim.ListConstruct %[[C3]], %[[C3_0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1]] : (!torch.int, !torch.int) -> !torch.list<int>
+  // CHECK: %[[FALSE:.*]] = torch.constant.bool false
+  // CHECK: %[[NONE:.*]] = torch.constant.none
+  // CHECK: torch.aten.avg_pool2d %arg0, %[[KERNELSIZE]], %[[STRIDE]], %[[PADDING]], %[[FALSE]], %[[FALSE]], %[[NONE]] : !torch.vtensor<[1,1,3,3],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,1,1],f32>
+  %0 = torch.operator "onnx.GlobalAveragePool"(%arg0) : (!torch.vtensor<[1,1,3,3],f32>) -> !torch.vtensor<[1,1,1,1],f32>
+  return %0 : !torch.vtensor<[1,1,1,1],f32>
+}
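
A minimal sketch, not part of the diff above, showing in eager PyTorch the equivalence these CHECK lines pin down: GlobalAveragePool is an avg_pool2d whose kernel covers the whole spatial extent, with stride 1, padding 0, ceil_mode false, count_include_pad false, and no divisor override. It assumes torch.nn.functional.avg_pool2d mirrors the torch.aten.avg_pool2d semantics the test expects.

    # Sketch only: checks the GlobalAveragePool -> avg_pool2d equivalence
    # encoded by the CHECK lines, using the 1x3x5x5 shape from the test.
    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 3, 5, 5)

    # ONNX GlobalAveragePool reference semantics: mean over H and W, keep dims.
    global_avg = x.mean(dim=(2, 3), keepdim=True)

    # Lowered form: kernel_size equals the input's spatial dims.
    pooled = F.avg_pool2d(x, kernel_size=[5, 5], stride=[1, 1], padding=[0, 0],
                          ceil_mode=False, count_include_pad=False)

    assert pooled.shape == (1, 3, 1, 1)
    assert torch.allclose(global_avg, pooled)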