
Commit 2c8067b

CodemodService Bot authored and facebook-github-bot committed

aitemplate

Reviewed By: muchulee8
Differential Revision: D81664736
1 parent b636955 commit 2c8067b

40 files changed: +553 −584 lines changed
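This commit is a mechanical typing codemod: `Optional[X]` annotations become `X | None` (PEP 604), `typing.Tuple`/`Dict`/`List` become the built-in generics `tuple`/`dict`/`list` (PEP 585), the now-unused `typing` imports are dropped, and one `str.format` call becomes an f-string. A minimal before/after sketch of the pattern, assuming Python 3.10+ at runtime (or `from __future__ import annotations` on older interpreters); the `resize` helper is hypothetical, shown only to illustrate the rewrite, and is not code from this commit:

# Before: typing-module generics (hypothetical helper, for illustration only)
from typing import Dict, Optional, Tuple

def resize(shape: Tuple[int, int], scale: Optional[float] = None) -> Dict[str, int]:
    h, w = shape
    factor = 1.0 if scale is None else scale
    return {"height": int(h * factor), "width": int(w * factor)}

# After: built-in generics (PEP 585) and union syntax (PEP 604); no typing import needed
def resize(shape: tuple[int, int], scale: float | None = None) -> dict[str, int]:
    h, w = shape
    factor = 1.0 if scale is None else scale
    return {"height": int(h * factor), "width": int(w * factor)}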

examples/02_detectron2/modeling/backbone/utils.py

Lines changed: 4 additions & 5 deletions
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 from dataclasses import dataclass
-from typing import Optional


 @dataclass
@@ -24,7 +23,7 @@ class ShapeSpec:
     to complement the lack of shape inference ability among pytorch modules.
     """

-    channels: Optional[int] = None
-    height: Optional[int] = None
-    width: Optional[int] = None
-    stride: Optional[int] = None
+    channels: int | None = None
+    height: int | None = None
+    width: int | None = None
+    stride: int | None = None

examples/02_detectron2/modeling/roi_heads/box_head.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414
#
15-
from typing import Tuple
1615

1716
from aitemplate.compiler import ops
1817
from aitemplate.frontend import nn
@@ -32,7 +31,7 @@ def __init__(
3231
feat_dim: int,
3332
fc_dim: int,
3433
pooled_size: int,
35-
im_shape: Tuple[int, int],
34+
im_shape: tuple[int, int],
3635
):
3736
super().__init__()
3837
self.num_rois = num_rois

examples/02_detectron2/modeling/roi_heads/fast_rcnn.py

Lines changed: 1 addition & 2 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -21,7 +20,7 @@
 class fast_rcnn_inference:
     def __init__(
         self,
-        im_shape: Tuple[int, int],
+        im_shape: tuple[int, int],
         num_rois: int,
         num_classes: int,
         clip_box: bool = True,

examples/02_detectron2/modeling/roi_heads/mask_head.py

Lines changed: 1 addition & 2 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn
@@ -31,7 +30,7 @@ def __init__(
         feat_dim: int,
         conv_dim: int,
         pooled_size: int,
-        im_shape: Tuple[int, int],
+        im_shape: tuple[int, int],
     ):
         super().__init__()
         HH, WW = im_shape

examples/02_detectron2/modeling/roi_heads/roi_heads.py

Lines changed: 1 addition & 2 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Dict

 from aitemplate.compiler import ops

@@ -58,7 +57,7 @@ def get_shape(self, x):
         shape = [it.value() for it in x._attrs["shape"]]
         return shape

-    def forward(self, features: Dict[str, Tensor], rois: Tensor, proposals: Tensor):
+    def forward(self, features: dict[str, Tensor], rois: Tensor, proposals: Tensor):
         box_features = [features[f] for f in self.in_features]
         roi_feat = self.box_head(box_features, rois)
         detections = self.box_predictor(roi_feat, proposals)

examples/02_detectron2/predictor/predictor.py

Lines changed: 1 addition & 2 deletions
@@ -14,7 +14,6 @@
 #
 import itertools
 import os
-from typing import Tuple

 import cv2
 import numpy as np
@@ -117,7 +116,7 @@ def apply_bbox(self, bbox, im_w, im_h):
     @staticmethod
     def get_output_shape(
         oldh: int, oldw: int, short_edge_length: int, max_size: int
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
        """
        Compute the output size given input size and target short edge length.
        """

examples/03_bert/benchmark_ait.py

Lines changed: 5 additions & 7 deletions
@@ -14,8 +14,6 @@
 #
 import os

-from typing import Dict, List
-
 import click
 import numpy as np
 import torch
@@ -35,12 +33,12 @@ def mark_output(y: Tensor) -> None:
         y[i]._attrs["is_output"] = True
         y[i]._attrs["name"] = "output_%d" % (i)
         y_shape = [d._attrs["values"][0] for d in y[i]._attrs["shape"]]
-        print("output_{} shape: {}".format(i, y_shape))
+        print(f"output_{i} shape: {y_shape}")


 def create_bert_inputs(
     batch_size: int, seq_length: int, dtype: str = "int64"
-) -> List[Tensor]:
+) -> list[Tensor]:
     input_ids = Tensor(
         shape=[batch_size, seq_length],
         name="input_ids",
@@ -76,7 +74,7 @@ def create_bert_encoders_input(

 def create_bert_inputs_pt(
     batch_size: int, seq_length: int, dtype: torch.dtype = torch.int64
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     input_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
     token_type_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
     position_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
@@ -90,14 +88,14 @@ def create_bert_inputs_pt(

 def create_bert_encoders_inputs_pt(
     batch_size: int, seq_length: int, hidden_size: int
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     encoder_input = torch.randn([batch_size, seq_length, hidden_size]).cuda().half()
     return {"input": encoder_input}


 def map_pt_params(
     ait_bert, pt_bert, batch_size: int, seq_length: int
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     pt_params = dict(pt_bert.named_parameters())
     mapped_pt_params = {}
     for name, _ in ait_bert.named_parameters():
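Besides the annotation updates, this file also replaces a `str.format` call with an f-string; the two forms render identically. A quick equivalence check (the values below are hypothetical, not taken from the benchmark):

i, y_shape = 0, [1, 64, 768]  # hypothetical values, for illustration only
assert "output_{} shape: {}".format(i, y_shape) == f"output_{i} shape: {y_shape}"
print(f"output_{i} shape: {y_shape}")  # prints: output_0 shape: [1, 64, 768]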

examples/03_bert/modeling/bert.py

Lines changed: 1 addition & 2 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -70,7 +69,7 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-    ) -> Tuple[Tensor]:
+    ) -> tuple[Tensor]:
         self_output = self.self(hidden_states, hidden_states)
         attention_output = self.output(self_output)
         outputs = (attention_output,)

examples/05_stable_diffusion/src/modeling/attention.py

Lines changed: 1 addition & 3 deletions
@@ -17,8 +17,6 @@
 Implementations are translated from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py.
 """

-from typing import Optional
-
 from aitemplate.compiler.ops import reshape
 from aitemplate.frontend import nn, Tensor

@@ -46,7 +44,7 @@ def __init__(
         height: int,
         width: int,
         channels: int,
-        num_head_channels: Optional[int] = None,
+        num_head_channels: int | None = None,
         num_groups: int = 32,
         rescale_output_factor: float = 1.0,
         eps: float = 1e-5,

examples/05_stable_diffusion/src/modeling/clip.py

Lines changed: 17 additions & 18 deletions
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 from inspect import isfunction
-from typing import Optional

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -279,10 +278,10 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-        attention_mask: Optional[Tensor] = None,
-        causal_attention_mask: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = False,
-        residual: Optional[Tensor] = None,
+        attention_mask: Tensor | None = None,
+        causal_attention_mask: Tensor | None = None,
+        output_attentions: bool | None = False,
+        residual: Tensor | None = None,
     ):
         if residual is not None:
             self_output = self.attn(hidden_states, residual)
@@ -399,7 +398,7 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-        output_attentions: Optional[bool] = False,
+        output_attentions: bool | None = False,
     ):
         """
         Args:
@@ -469,11 +468,11 @@ def __init__(
     def forward(
         self,
         inputs_embeds,
-        attention_mask: Optional[Tensor] = None,
-        causal_attention_mask: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
+        attention_mask: Tensor | None = None,
+        causal_attention_mask: Tensor | None = None,
+        output_attentions: bool | None = None,
+        output_hidden_states: bool | None = None,
+        return_dict: bool | None = None,
     ):
         r"""
         Args:
@@ -548,7 +547,7 @@ def forward(
         self,
         input_ids: Tensor,
         position_ids: Tensor,
-        inputs_embeds: Optional[Tensor] = None,
+        inputs_embeds: Tensor | None = None,
     ) -> Tensor:
         input_shape = ops.size()(input_ids)

@@ -612,12 +611,12 @@ def __init__(

     def forward(
         self,
-        input_ids: Optional[Tensor] = None,
-        attention_mask: Optional[Tensor] = None,
-        position_ids: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
+        input_ids: Tensor | None = None,
+        attention_mask: Tensor | None = None,
+        position_ids: Tensor | None = None,
+        output_attentions: bool | None = None,
+        output_hidden_states: bool | None = None,
+        return_dict: bool | None = None,
     ):
         r"""
         Returns:
