 from torch.testing._comparison import BooleanPair, NonePair, not_close_error_metas, NumberPair, TensorLikePair
 from torchvision import io, tv_tensors
 from torchvision.transforms._functional_tensor import _max_value as get_max_value
-from torchvision.transforms.v2.functional import clamp_bounding_boxes, to_image, to_pil_image
+from torchvision.transforms.v2.functional import to_image, to_pil_image


 IN_OSS_CI = any(os.getenv(var) == "true" for var in ["CIRCLECI", "GITHUB_ACTIONS"])
@@ -410,7 +410,7 @@ def make_bounding_boxes(
     canvas_size=DEFAULT_SIZE,
     *,
     format=tv_tensors.BoundingBoxFormat.XYXY,
-    clamping_mode="hard",  # TODOBB
+    clamping_mode="soft",
     num_boxes=1,
     dtype=None,
     device="cpu",
@@ -424,13 +424,6 @@ def sample_position(values, max_value):
     format = tv_tensors.BoundingBoxFormat[format]

     dtype = dtype or torch.float32
-    int_dtype = dtype in (
-        torch.uint8,
-        torch.int8,
-        torch.int16,
-        torch.int32,
-        torch.int64,
-    )

     h, w = (torch.randint(1, s, (num_boxes,)) for s in canvas_size)
     y = sample_position(h, canvas_size[0])
@@ -457,33 +450,18 @@ def sample_position(values, max_value):
     elif format is tv_tensors.BoundingBoxFormat.XYXYXYXY:
         r_rad = r * torch.pi / 180.0
         cos, sin = torch.cos(r_rad), torch.sin(r_rad)
-        x1 = torch.round(x) if int_dtype else x
-        y1 = torch.round(y) if int_dtype else y
-        x2 = torch.round(x1 + w * cos) if int_dtype else x1 + w * cos
-        y2 = torch.round(y1 - w * sin) if int_dtype else y1 - w * sin
-        x3 = torch.round(x2 + h * sin) if int_dtype else x2 + h * sin
-        y3 = torch.round(y2 + h * cos) if int_dtype else y2 + h * cos
-        x4 = torch.round(x1 + h * sin) if int_dtype else x1 + h * sin
-        y4 = torch.round(y1 + h * cos) if int_dtype else y1 + h * cos
+        x1 = x
+        y1 = y
+        x2 = x1 + w * cos
+        y2 = y1 - w * sin
+        x3 = x2 + h * sin
+        y3 = y2 + h * cos
+        x4 = x1 + h * sin
+        y4 = y1 + h * cos
         parts = (x1, y1, x2, y2, x3, y3, x4, y4)
     else:
         raise ValueError(f"Format {format} is not supported")
     out_boxes = torch.stack(parts, dim=-1).to(dtype=dtype, device=device)
-    if tv_tensors.is_rotated_bounding_format(format):
-        # The rotated bounding boxes are not guaranteed to be within the canvas by design,
-        # so we apply clamping. We also add a 2 buffer to the canvas size to avoid
-        # numerical issues during the testing
-        buffer = 4
-        out_boxes = clamp_bounding_boxes(
-            out_boxes,
-            format=format,
-            canvas_size=(canvas_size[0] - buffer, canvas_size[1] - buffer),
-            clamping_mode=clamping_mode,
-        )
-        if format is tv_tensors.BoundingBoxFormat.XYWHR or format is tv_tensors.BoundingBoxFormat.CXCYWHR:
-            out_boxes[:, :2] += buffer // 2
-        elif format is tv_tensors.BoundingBoxFormat.XYXYXYXY:
-            out_boxes[:, :] += buffer // 2
     return tv_tensors.BoundingBoxes(out_boxes, format=format, canvas_size=canvas_size, clamping_mode=clamping_mode)
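The new XYXYXYXY branch derives the remaining corners by stepping from the first corner along the rotated width and height directions, with no integer rounding or canvas clamping. Below is a minimal standalone sketch of that corner math; the rotated_corners helper is hypothetical and not part of this change, it only mirrors the formulas in the hunk above.

import torch

def rotated_corners(x, y, w, h, r_deg):
    # Convert the rotation angle to radians; a positive angle moves the second
    # corner toward smaller y (upward in image coordinates), as in the diff above.
    r_rad = torch.as_tensor(r_deg) * torch.pi / 180.0
    cos, sin = torch.cos(r_rad), torch.sin(r_rad)
    x1, y1 = x, y                           # first corner
    x2, y2 = x1 + w * cos, y1 - w * sin     # step along the rotated width edge
    x3, y3 = x2 + h * sin, y2 + h * cos     # then along the rotated height edge
    x4, y4 = x1 + h * sin, y1 + h * cos     # height edge taken from the first corner
    return torch.stack([x1, y1, x2, y2, x3, y3, x4, y4], dim=-1)

# With r_deg=0 this reduces to the axis-aligned corners of a 4x2 box at (10, 20):
# tensor([10., 20., 14., 20., 14., 22., 10., 22.])
print(rotated_corners(torch.tensor(10.0), torch.tensor(20.0), 4.0, 2.0, 0.0))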