
Commit 0a675e5

Use scalar_type to replace deprecated type API
1 parent 6d24aa5 commit 0a675e5
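
For context: passing the result of at::Tensor::type() to the AT_DISPATCH_* macros is deprecated in recent PyTorch releases; the macros expect the at::ScalarType returned by at::Tensor::scalar_type(). A minimal before/after sketch of the pattern touched throughout this commit (my_kernel, feat, n, blocks, and threads are placeholders, not names from this repository):

    // Deprecated form: passes DeprecatedTypeProperties and triggers a
    // deprecation warning on newer PyTorch versions.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        feat.type(), "my_op_cuda", ([&] {
          my_kernel<scalar_t><<<blocks, threads>>>(feat.data_ptr<scalar_t>(), n);
        }));

    // Preferred form: passes the at::ScalarType directly.
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        feat.scalar_type(), "my_op_cuda", ([&] {
          my_kernel<scalar_t><<<blocks, threads>>>(feat.data_ptr<scalar_t>(), n);
        }));

The runtime behavior is unchanged; only the argument handed to the dispatch macro differs.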

File tree

3 files changed: 12 additions, 12 deletions


torchsparse/backend/convolution/convolution_gather_scatter_cuda.cu

Lines changed: 6 additions & 6 deletions
@@ -547,7 +547,7 @@ at::Tensor conv_forward_gather_scatter_cuda_latest(
 
   // all gather
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_all_kernel_pad_sep_with_mask<scalar_t>
             <<<ceil((double)(n_in_feats * n_in_channels) /
                         (256 << (sizeof(scalar_t) == 2) + 2)),
@@ -779,7 +779,7 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
   // gather n_active_feats dense features from N sparse input features with c
   // feature dimensions
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
@@ -796,7 +796,7 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
   // scatter n_active_feats dense features into n_out_feats output features of
   // dimension n_out_channels
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         scatter_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                 n_active_feats, n_out_feats, n_out_channels,
@@ -877,7 +877,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
   }
   // gather
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                 n_active_feats, n_out_feats, n_out_channels,
@@ -886,7 +886,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
                 neighbor_map.data_ptr<int>() + cur_offset, !transpose);
       }));
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         gather_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
@@ -902,7 +902,7 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
                 out_grad_buffer_activated);
   // scatter
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", ([&] {
         scatter_kernel<scalar_t>
             <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                 n_active_feats, n_in_feats, n_in_channels,
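
Why scalar_type() is the natural argument here: AT_DISPATCH_FLOATING_TYPES_AND_HALF is essentially a switch over at::ScalarType that defines scalar_t for the lambda body and then invokes it. A much simplified sketch of the idea, using the first dispatch above as an example (illustrative only, not the actual expansion from ATen/Dispatch.h):

    // Roughly what the macro does with in_feat.scalar_type() (simplified).
    switch (in_feat.scalar_type()) {
      case at::ScalarType::Double: { using scalar_t = double;   /* run lambda */ break; }
      case at::ScalarType::Float:  { using scalar_t = float;    /* run lambda */ break; }
      case at::ScalarType::Half:   { using scalar_t = at::Half; /* run lambda */ break; }
      default:
        AT_ERROR("conv_forward_gather_scatter_cuda not implemented for this dtype");
    }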

torchsparse/backend/devoxelize/devoxelize_cuda.cu

Lines changed: 2 additions & 2 deletions
@@ -68,7 +68,7 @@ at::Tensor devoxelize_forward_cuda(const at::Tensor feat,
       torch::zeros({N, c}, at::device(feat.device()).dtype(feat.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      feat.type(), "devoxelize_forward_cuda", ([&] {
+      feat.scalar_type(), "devoxelize_forward_cuda", ([&] {
         devoxelize_forward_kernel<scalar_t><<<N, c>>>(
             N, c, indices.data_ptr<int>(), weight.data_ptr<scalar_t>(),
             feat.data_ptr<scalar_t>(), out.data_ptr<scalar_t>());
@@ -88,7 +88,7 @@ at::Tensor devoxelize_backward_cuda(const at::Tensor top_grad,
       {n, c}, at::device(top_grad.device()).dtype(top_grad.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "devoxelize_backward_cuda", ([&] {
+      top_grad.scalar_type(), "devoxelize_backward_cuda", ([&] {
         devoxelize_backward_kernel<scalar_t><<<N, c>>>(
             N, n, c, indices.data_ptr<int>(), weight.data_ptr<scalar_t>(),
             top_grad.data_ptr<scalar_t>(), bottom_grad.data_ptr<scalar_t>());

torchsparse/backend/voxelize/voxelize_cuda.cu

Lines changed: 4 additions & 4 deletions
@@ -86,7 +86,7 @@ at::Tensor voxelize_forward_cuda(const at::Tensor inputs, const at::Tensor idx,
       torch::zeros({N1, c}, at::device(idx.device()).dtype(inputs.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      inputs.type(), "voxelize_forward_cuda", ([&]
+      inputs.scalar_type(), "voxelize_forward_cuda", ([&]
         { voxelize_forward_kernel<scalar_t><<<N, c>>>(
             N, c, N1, inputs.data_ptr<scalar_t>(), idx.data_ptr<int>(),
             counts.data_ptr<int>(), out.data_ptr<scalar_t>()); }));
@@ -105,7 +105,7 @@ at::Tensor voxelize_backward_cuda(const at::Tensor top_grad,
       torch::zeros({N, c}, at::device(idx.device()).dtype(top_grad.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "voxelize_backward_cuda", ([&]
+      top_grad.scalar_type(), "voxelize_backward_cuda", ([&]
         { voxelize_backward_kernel<scalar_t><<<N, c>>>(
             N, c, N1, top_grad.data_ptr<scalar_t>(), idx.data_ptr<int>(),
             counts.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); }));
@@ -120,7 +120,7 @@ void to_dense_forward_cuda(const at::Tensor inputs, const at::Tensor idx,
   int c = inputs.size(1);
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      inputs.type(), "to_dense_forward_cuda", ([&]
+      inputs.scalar_type(), "to_dense_forward_cuda", ([&]
         { to_dense_forward_kernel<scalar_t><<<(N * c + 255) / 256, 256>>>(
             N, c, inputs.data_ptr<scalar_t>(), idx.data_ptr<int>(),
             range.data_ptr<int>(), outputs.data_ptr<scalar_t>()); }));
@@ -134,7 +134,7 @@ void to_dense_backward_cuda(const at::Tensor top_grad,
   int c = bottom_grad.size(1);
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "to_dense_backward_cuda", ([&]
+      top_grad.scalar_type(), "to_dense_backward_cuda", ([&]
         { to_dense_backward_kernel<scalar_t><<<(N * c + 255) / 256, 256>>>(
             N, c, top_grad.data_ptr<scalar_t>(), idx.data_ptr<int>(),
             range.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); }));
