9 changes: 4 additions & 5 deletions examples/02_detectron2/modeling/backbone/utils.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 from dataclasses import dataclass
-from typing import Optional


 @dataclass
@@ -24,7 +23,7 @@ class ShapeSpec:
     to complement the lack of shape inference ability among pytorch modules.
     """

-    channels: Optional[int] = None
-    height: Optional[int] = None
-    width: Optional[int] = None
-    stride: Optional[int] = None
+    channels: int | None = None
+    height: int | None = None
+    width: int | None = None
+    stride: int | None = None
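
A note on the pattern this PR applies throughout: the `X | None` spelling (PEP 604) is evaluated when the annotated module is imported, so without a `from __future__ import annotations` escape hatch these files will only import on Python 3.10+. A minimal standalone sketch of the backwards-compatible variant (class name hypothetical, not from this PR):

    # Runs on Python 3.7+: PEP 563 keeps annotations as unevaluated strings.
    from __future__ import annotations

    from dataclasses import dataclass


    @dataclass
    class ShapeSpecSketch:
        # Same meaning as Optional[int]; without the __future__ import,
        # this annotation needs Python 3.10+ (PEP 604).
        channels: int | None = None

One caveat with the `__future__` route: `typing.get_type_hints()` still has to evaluate the stored strings, so it raises `TypeError` on `int | None` under 3.9 and earlier, even though imports and dataclass construction work fine.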
3 changes: 1 addition & 2 deletions examples/02_detectron2/modeling/roi_heads/box_head.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn
@@ -32,7 +31,7 @@ def __init__(
         feat_dim: int,
         fc_dim: int,
         pooled_size: int,
-        im_shape: Tuple[int, int],
+        im_shape: tuple[int, int],
     ):
         super().__init__()
         self.num_rois = num_rois
3 changes: 1 addition & 2 deletions examples/02_detectron2/modeling/roi_heads/fast_rcnn.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -21,7 +20,7 @@
 class fast_rcnn_inference:
     def __init__(
         self,
-        im_shape: Tuple[int, int],
+        im_shape: tuple[int, int],
         num_rois: int,
         num_classes: int,
         clip_box: bool = True,
3 changes: 1 addition & 2 deletions examples/02_detectron2/modeling/roi_heads/mask_head.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn
@@ -31,7 +30,7 @@ def __init__(
         feat_dim: int,
         conv_dim: int,
         pooled_size: int,
-        im_shape: Tuple[int, int],
+        im_shape: tuple[int, int],
     ):
         super().__init__()
         HH, WW = im_shape
3 changes: 1 addition & 2 deletions examples/02_detectron2/modeling/roi_heads/roi_heads.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Dict

 from aitemplate.compiler import ops

@@ -58,7 +57,7 @@ def get_shape(self, x):
         shape = [it.value() for it in x._attrs["shape"]]
         return shape

-    def forward(self, features: Dict[str, Tensor], rois: Tensor, proposals: Tensor):
+    def forward(self, features: dict[str, Tensor], rois: Tensor, proposals: Tensor):
         box_features = [features[f] for f in self.in_features]
         roi_feat = self.box_head(box_features, rois)
         detections = self.box_predictor(roi_feat, proposals)
3 changes: 1 addition & 2 deletions examples/02_detectron2/predictor/predictor.py
@@ -14,7 +14,6 @@
 #
 import itertools
 import os
-from typing import Tuple

 import cv2
 import numpy as np
@@ -117,7 +116,7 @@ def apply_bbox(self, bbox, im_w, im_h):
     @staticmethod
     def get_output_shape(
         oldh: int, oldw: int, short_edge_length: int, max_size: int
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
         """
         Compute the output size given input size and target short edge length.
         """
12 changes: 5 additions & 7 deletions examples/03_bert/benchmark_ait.py
@@ -14,8 +14,6 @@
 #
 import os

-from typing import Dict, List
-
 import click
 import numpy as np
 import torch
@@ -35,12 +33,12 @@ def mark_output(y: Tensor) -> None:
         y[i]._attrs["is_output"] = True
         y[i]._attrs["name"] = "output_%d" % (i)
         y_shape = [d._attrs["values"][0] for d in y[i]._attrs["shape"]]
-        print("output_{} shape: {}".format(i, y_shape))
+        print(f"output_{i} shape: {y_shape}")


 def create_bert_inputs(
     batch_size: int, seq_length: int, dtype: str = "int64"
-) -> List[Tensor]:
+) -> list[Tensor]:
     input_ids = Tensor(
         shape=[batch_size, seq_length],
         name="input_ids",
@@ -76,7 +74,7 @@ def create_bert_encoders_input(

 def create_bert_inputs_pt(
     batch_size: int, seq_length: int, dtype: torch.dtype = torch.int64
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     input_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
     token_type_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
     position_ids = torch.randn(batch_size, seq_length).to(dtype).cuda()
@@ -90,14 +88,14 @@ def create_bert_inputs_pt(

 def create_bert_encoders_inputs_pt(
     batch_size: int, seq_length: int, hidden_size: int
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     encoder_input = torch.randn([batch_size, seq_length, hidden_size]).cuda().half()
     return {"input": encoder_input}


 def map_pt_params(
     ait_bert, pt_bert, batch_size: int, seq_length: int
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     pt_params = dict(pt_bert.named_parameters())
     mapped_pt_params = {}
     for name, _ in ait_bert.named_parameters():
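
The `List`/`Dict` rewrites in this file rely on PEP 585, which made the builtin `list` and `dict` subscriptable starting with Python 3.9; because these annotations sit in function signatures, they are evaluated at import time. A standalone sketch of the pattern (function name hypothetical, not from this PR), assuming Python 3.9+ and PyTorch installed:

    import torch


    def make_encoder_inputs(
        batch_size: int, seq_length: int, hidden_size: int
    ) -> dict[str, torch.Tensor]:
        # dict[str, torch.Tensor] is a valid runtime annotation on 3.9+
        # (PEP 585), with no import from typing needed.
        return {"input": torch.randn(batch_size, seq_length, hidden_size)}

The same hunk also swaps `str.format` for an f-string; that change is purely stylistic and has no runtime-version implications.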
3 changes: 1 addition & 2 deletions examples/03_bert/modeling/bert.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Tuple

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -70,7 +69,7 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-    ) -> Tuple[Tensor]:
+    ) -> tuple[Tensor]:
         self_output = self.self(hidden_states, hidden_states)
         attention_output = self.output(self_output)
         outputs = (attention_output,)
4 changes: 1 addition & 3 deletions examples/05_stable_diffusion/src/modeling/attention.py
@@ -17,8 +17,6 @@
 Implementations are translated from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py.
 """

-from typing import Optional
-
 from aitemplate.compiler.ops import reshape
 from aitemplate.frontend import nn, Tensor

@@ -46,7 +44,7 @@ def __init__(
         height: int,
         width: int,
         channels: int,
-        num_head_channels: Optional[int] = None,
+        num_head_channels: int | None = None,
         num_groups: int = 32,
         rescale_output_factor: float = 1.0,
         eps: float = 1e-5,
35 changes: 17 additions & 18 deletions examples/05_stable_diffusion/src/modeling/clip.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 from inspect import isfunction
-from typing import Optional

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn, Tensor
@@ -279,10 +278,10 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-        attention_mask: Optional[Tensor] = None,
-        causal_attention_mask: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = False,
-        residual: Optional[Tensor] = None,
+        attention_mask: Tensor | None = None,
+        causal_attention_mask: Tensor | None = None,
+        output_attentions: bool | None = False,
+        residual: Tensor | None = None,
     ):
         if residual is not None:
             self_output = self.attn(hidden_states, residual)
@@ -399,7 +398,7 @@ def __init__(
     def forward(
         self,
         hidden_states: Tensor,
-        output_attentions: Optional[bool] = False,
+        output_attentions: bool | None = False,
     ):
         """
         Args:
@@ -469,11 +468,11 @@ def __init__(
     def forward(
         self,
         inputs_embeds,
-        attention_mask: Optional[Tensor] = None,
-        causal_attention_mask: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
+        attention_mask: Tensor | None = None,
+        causal_attention_mask: Tensor | None = None,
+        output_attentions: bool | None = None,
+        output_hidden_states: bool | None = None,
+        return_dict: bool | None = None,
     ):
         r"""
         Args:
@@ -548,7 +547,7 @@ def forward(
         self,
         input_ids: Tensor,
         position_ids: Tensor,
-        inputs_embeds: Optional[Tensor] = None,
+        inputs_embeds: Tensor | None = None,
     ) -> Tensor:
         input_shape = ops.size()(input_ids)

@@ -612,12 +611,12 @@ def __init__(

     def forward(
         self,
-        input_ids: Optional[Tensor] = None,
-        attention_mask: Optional[Tensor] = None,
-        position_ids: Optional[Tensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
+        input_ids: Tensor | None = None,
+        attention_mask: Tensor | None = None,
+        position_ids: Tensor | None = None,
+        output_attentions: bool | None = None,
+        output_hidden_states: bool | None = None,
+        return_dict: bool | None = None,
     ):
         r"""
         Returns:
(file summary line not captured; judging by the classes below, this is the ControlNet modeling file under examples/05_stable_diffusion/src/modeling/)
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Optional, Tuple, Union

 from aitemplate.compiler import ops
 from aitemplate.frontend import nn
@@ -82,26 +81,26 @@ def __init__(
         in_channels: int = 4,
         flip_sin_to_cos: bool = True,
         freq_shift: int = 0,
-        down_block_types: Tuple[str] = (
+        down_block_types: tuple[str] = (
             "CrossAttnDownBlock2D",
             "CrossAttnDownBlock2D",
             "CrossAttnDownBlock2D",
             "DownBlock2D",
         ),
-        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+        block_out_channels: tuple[int] = (320, 640, 1280, 1280),
         layers_per_block: int = 2,
         downsample_padding: int = 1,
         mid_block_scale_factor: float = 1,
         act_fn: str = "silu",
-        norm_num_groups: Optional[int] = 32,
+        norm_num_groups: int | None = 32,
         norm_eps: float = 1e-5,
         cross_attention_dim: int = 768,
-        attention_head_dim: Union[int, Tuple[int]] = 8,
+        attention_head_dim: int | tuple[int] = 8,
         use_linear_projection: bool = False,
         upcast_attention: bool = False,
         resnet_time_scale_shift: str = "default",
         controlnet_conditioning_channel_order: str = "rgb",
-        conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
+        conditioning_embedding_out_channels: tuple[int] | None = (16, 32, 96, 256),
         global_pool_conditions: bool = False,
     ):
         super().__init__()
@@ -199,7 +198,7 @@ def forward(
         encoder_hidden_states,
         controlnet_cond,
         conditioning_scale: float = 1.0,
-    ) -> Tuple:
+    ) -> tuple:
         t_emb = self.time_proj(timestep)
         emb = self.time_embedding(t_emb)

@@ -302,33 +301,33 @@ class ControlNetUNet2DConditionModel(nn.Module):

     def __init__(
         self,
-        sample_size: Optional[int] = None,
+        sample_size: int | None = None,
         in_channels: int = 4,
         out_channels: int = 4,
         center_input_sample: bool = False,
         flip_sin_to_cos: bool = True,
         freq_shift: int = 0,
-        down_block_types: Tuple[str] = (
+        down_block_types: tuple[str] = (
             "CrossAttnDownBlock2D",
             "CrossAttnDownBlock2D",
             "CrossAttnDownBlock2D",
             "DownBlock2D",
         ),
-        up_block_types: Tuple[str] = (
+        up_block_types: tuple[str] = (
             "UpBlock2D",
             "CrossAttnUpBlock2D",
             "CrossAttnUpBlock2D",
             "CrossAttnUpBlock2D",
         ),
-        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
+        block_out_channels: tuple[int] = (320, 640, 1280, 1280),
         layers_per_block: int = 2,
         downsample_padding: int = 1,
         mid_block_scale_factor: float = 1,
         act_fn: str = "silu",
         norm_num_groups: int = 32,
         norm_eps: float = 1e-5,
         cross_attention_dim: int = 1280,
-        attention_head_dim: Union[int, Tuple[int]] = 8,
+        attention_head_dim: int | tuple[int] = 8,
         use_linear_projection: bool = False,
     ):
         super().__init__()
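
A typing nuance these hunks carry over unchanged: to a static checker, `tuple[int]` (like the `Tuple[int]` it replaces) denotes a tuple of exactly one int, while the defaults here are 4-tuples; the variable-length form is spelled `tuple[int, ...]`. The PR keeps each annotation semantically identical to the old one rather than tightening it. A standalone sketch of the distinct spellings (variable names hypothetical, not from this PR):

    # A 1-tuple versus a variable-length homogeneous tuple:
    one_int: tuple[int] = (8,)
    many_ints: tuple[int, ...] = (320, 640, 1280, 1280)

    # PEP 604 unions subsume both Union[...] and Optional[...]:
    head_dim: int | tuple[int, ...] = 8
    sample_size: int | None = None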