
Commit 1a7f3bd

Generate opset24 ops (#2523)
- Generate opset24 from onnx 1.19
- Minor tweaks on style to enable newer versions of the ruff linter

Signed-off-by: Justin Chu <[email protected]>
1 parent ce34dce commit 1a7f3bd

37 files changed (+3076, -1149 lines)

onnxscript/onnx_opset/__init__.py

Lines changed: 8 additions & 12 deletions
@@ -2,13 +2,11 @@
 # ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️
 # ⚙️ Generated by 'python -m opgen'
 # --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
+# Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -40,14 +38,12 @@
 from onnxscript.onnx_opset._impl.opset21 import Opset21
 from onnxscript.onnx_opset._impl.opset22 import Opset22
 from onnxscript.onnx_opset._impl.opset23 import Opset23
+from onnxscript.onnx_opset._impl.opset24 import Opset24
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml4 import Opset_ai_onnx_ml4
 from onnxscript.onnx_opset._impl.opset_ai_onnx_ml5 import Opset_ai_onnx_ml5
-from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import (
-    Opset_ai_onnx_preview_training1,
-)
 from onnxscript.values import Opset

 __all__ = [
@@ -75,12 +71,12 @@
     "opset21",
     "opset22",
     "opset23",
+    "opset24",
     "opset_ai_onnx_ml1",
     "opset_ai_onnx_ml2",
     "opset_ai_onnx_ml3",
     "opset_ai_onnx_ml4",
     "opset_ai_onnx_ml5",
-    "opset_ai_onnx_preview_training1",
 ]


@@ -113,12 +109,12 @@
 opset21 = Opset21()
 opset22 = Opset22()
 opset23 = Opset23()
+opset24 = Opset24()
 opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
 opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
 opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
 opset_ai_onnx_ml4 = Opset_ai_onnx_ml4()
 opset_ai_onnx_ml5 = Opset_ai_onnx_ml5()
-opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1()
 all_opsets: Mapping[Tuple[str, int], Opset] = {
     (
         "",
@@ -212,6 +208,10 @@
         "",
         23,
     ): opset23,
+    (
+        "",
+        24,
+    ): opset24,
     (
         "ai.onnx.ml",
         1,
@@ -232,8 +232,4 @@
         "ai.onnx.ml",
         5,
     ): opset_ai_onnx_ml5,
-    (
-        "ai.onnx.preview.training",
-        1,
-    ): opset_ai_onnx_preview_training1,
 }
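Net effect of the hunks above: `Opset24` is imported from `onnxscript.onnx_opset._impl.opset24`, instantiated as `opset24`, exported through `__all__`, and registered under the `("", 24)` key of `all_opsets`, while the `ai.onnx.preview.training` entry is dropped from the registry. A minimal sketch of what downstream code sees, assuming a build that includes this change (the asserts mirror the diff; nothing here is taken from the PR's own tests):

```python
from onnxscript.onnx_opset import all_opsets, opset24

# The registry maps (domain, version) pairs to Opset instances.
assert all_opsets[("", 24)] is opset24                    # added by this commit
assert ("ai.onnx.preview.training", 1) not in all_opsets  # removed by this commit
```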

onnxscript/onnx_opset/_impl/opset1.py

Lines changed: 107 additions & 26 deletions
@@ -2,13 +2,12 @@
 # ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️
 # ⚙️ Generated by 'python -m opgen'
 # --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
+# Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D214, D402, D405, D411, D416, D417
 # --------------------------------------------------------------------------

 from __future__ import annotations
@@ -398,7 +397,18 @@ def BatchNormalization(
     )

 T2_Cast: TypeAlias = Union[
-    BOOL, DOUBLE, FLOAT, FLOAT16, INT16, INT32, INT64, INT8, UINT16, UINT32, UINT64, UINT8
+    BOOL,
+    DOUBLE,
+    FLOAT,
+    FLOAT16,
+    INT16,
+    INT32,
+    INT64,
+    INT8,
+    UINT16,
+    UINT32,
+    UINT64,
+    UINT8,
 ]

 def Cast(self, input: T1_Cast, *, to: str) -> T2_Cast:
@@ -837,7 +847,11 @@ def Dropout(
 T_Elu = TypeVar("T_Elu", DOUBLE, FLOAT, FLOAT16)

 def Elu(
-    self, X: T_Elu, *, alpha: float = 1.0, consumed_inputs: Optional[Sequence[int]] = None
+    self,
+    X: T_Elu,
+    *,
+    alpha: float = 1.0,
+    consumed_inputs: Optional[Sequence[int]] = None,
 ) -> T_Elu:
     r"""[🌐 Elu(1)](https://onnx.ai/onnx/operators/onnx__Elu.html#elu-1 "Online Documentation")

@@ -849,7 +863,7 @@ def Elu(


     Args:
-        X: 1D input tensor
+        X: Input tensor

         alpha: Coefficient of ELU default to 1.0.

@@ -859,7 +873,9 @@ def Elu(
     schema = get_schema("Elu", 1, "")
     op = Op(self, "Elu", schema)
     return op(
-        *self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs
+        *self._prepare_inputs(schema, X),
+        alpha=alpha,
+        consumed_inputs=consumed_inputs,
     )

 T_Equal = TypeVar("T_Equal", BOOL, INT32, INT64)
@@ -1338,7 +1354,12 @@ def GlobalMaxPool(self, X: T_GlobalMaxPool) -> T_GlobalMaxPool:
 T1_Greater: TypeAlias = BOOL

 def Greater(
-    self, A: T_Greater, B: T_Greater, *, axis: Optional[int] = None, broadcast: int = 0
+    self,
+    A: T_Greater,
+    B: T_Greater,
+    *,
+    axis: Optional[int] = None,
+    broadcast: int = 0,
 ) -> T1_Greater:
     r"""[🌐 Greater(1)](https://onnx.ai/onnx/operators/onnx__Greater.html#greater-1 "Online Documentation")

@@ -1603,7 +1624,11 @@ def LRN(
     schema = get_schema("LRN", 1, "")
     op = Op(self, "LRN", schema)
     return op(
-        *self._prepare_inputs(schema, X), alpha=alpha, beta=beta, bias=bias, size=size
+        *self._prepare_inputs(schema, X),
+        alpha=alpha,
+        beta=beta,
+        bias=bias,
+        size=size,
     )

 T_LSTM = TypeVar("T_LSTM", DOUBLE, FLOAT, FLOAT16)
@@ -1822,7 +1847,9 @@ def LeakyRelu(
     schema = get_schema("LeakyRelu", 1, "")
     op = Op(self, "LeakyRelu", schema)
     return op(
-        *self._prepare_inputs(schema, X), alpha=alpha, consumed_inputs=consumed_inputs
+        *self._prepare_inputs(schema, X),
+        alpha=alpha,
+        consumed_inputs=consumed_inputs,
     )

 T_Less = TypeVar("T_Less", DOUBLE, FLOAT, FLOAT16)
@@ -1935,7 +1962,11 @@ def LogSoftmax(self, input: T_LogSoftmax, *, axis: int = 1) -> T_LogSoftmax:
     )

 def Loop(
-    self, M: Optional[I_Loop], cond: Optional[B_Loop], *v_initial: V_Loop, body: GraphProto
+    self,
+    M: Optional[I_Loop],
+    cond: Optional[B_Loop],
+    *v_initial: V_Loop,
+    body: GraphProto,
 ) -> V_Loop:
     r"""[🌐 Loop(1)](https://onnx.ai/onnx/operators/onnx__Loop.html#loop-1 "Online Documentation")

@@ -1954,7 +1985,7 @@ def Loop(
     This table summarizes the operating modes of this operator with equivalent
     C-style code:

-    Operator inputs defined as (max_trip_count, condition_var).
+    Operator inputs defined as (max_trip_count, condition_var).

     input ("", ""):
         for (int i=0; ; ++i) {
@@ -2493,7 +2524,11 @@ def Or(self, A: T_Or, B: T_Or, *, axis: Optional[int] = None, broadcast: int = 0
 T_PRelu = TypeVar("T_PRelu", DOUBLE, FLOAT, FLOAT16)

 def PRelu(
-    self, X: T_PRelu, slope: T_PRelu, *, consumed_inputs: Optional[Sequence[int]] = None
+    self,
+    X: T_PRelu,
+    slope: T_PRelu,
+    *,
+    consumed_inputs: Optional[Sequence[int]] = None,
 ) -> T_PRelu:
     r"""[🌐 PRelu(1)](https://onnx.ai/onnx/operators/onnx__PRelu.html#prelu-1 "Online Documentation")

@@ -2567,7 +2602,10 @@ def Pad(
     schema = get_schema("Pad", 1, "")
     op = Op(self, "Pad", schema)
     return op(
-        *self._prepare_inputs(schema, data), mode=mode, paddings=paddings, value=value
+        *self._prepare_inputs(schema, data),
+        mode=mode,
+        paddings=paddings,
+        value=value,
     )

 T_Pow = TypeVar("T_Pow", DOUBLE, FLOAT, FLOAT16)
@@ -2975,7 +3013,11 @@ def RandomUniformLike(
     schema = get_schema("RandomUniformLike", 1, "")
     op = Op(self, "RandomUniformLike", schema)
     return op(
-        *self._prepare_inputs(schema, input), dtype=dtype, high=high, low=low, seed=seed
+        *self._prepare_inputs(schema, input),
+        dtype=dtype,
+        high=high,
+        low=low,
+        seed=seed,
     )

 T_Reciprocal = TypeVar("T_Reciprocal", DOUBLE, FLOAT, FLOAT16)
@@ -3004,7 +3046,11 @@ def Reciprocal(
 T_ReduceL1 = TypeVar("T_ReduceL1", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

 def ReduceL1(
-    self, data: T_ReduceL1, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceL1,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceL1:
     r"""[🌐 ReduceL1(1)](https://onnx.ai/onnx/operators/onnx__ReduceL1.html#reducel1-1 "Online Documentation")

@@ -3034,7 +3080,11 @@ def ReduceL1(
 T_ReduceL2 = TypeVar("T_ReduceL2", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

 def ReduceL2(
-    self, data: T_ReduceL2, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceL2,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceL2:
     r"""[🌐 ReduceL2(1)](https://onnx.ai/onnx/operators/onnx__ReduceL2.html#reducel2-1 "Online Documentation")

@@ -3066,7 +3116,11 @@ def ReduceL2(
     )

 def ReduceLogSum(
-    self, data: T_ReduceLogSum, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceLogSum,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceLogSum:
     r"""[🌐 ReduceLogSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceLogSum.html#reducelogsum-1 "Online Documentation")

@@ -3132,7 +3186,11 @@ def ReduceLogSumExp(
 T_ReduceMax = TypeVar("T_ReduceMax", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

 def ReduceMax(
-    self, data: T_ReduceMax, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceMax,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceMax:
     r"""[🌐 ReduceMax(1)](https://onnx.ai/onnx/operators/onnx__ReduceMax.html#reducemax-1 "Online Documentation")

@@ -3164,7 +3222,11 @@ def ReduceMax(
     )

 def ReduceMean(
-    self, data: T_ReduceMean, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceMean,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceMean:
     r"""[🌐 ReduceMean(1)](https://onnx.ai/onnx/operators/onnx__ReduceMean.html#reducemean-1 "Online Documentation")

@@ -3194,7 +3256,11 @@ def ReduceMean(
 T_ReduceMin = TypeVar("T_ReduceMin", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

 def ReduceMin(
-    self, data: T_ReduceMin, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceMin,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceMin:
     r"""[🌐 ReduceMin(1)](https://onnx.ai/onnx/operators/onnx__ReduceMin.html#reducemin-1 "Online Documentation")

@@ -3226,7 +3292,11 @@ def ReduceMin(
     )

 def ReduceProd(
-    self, data: T_ReduceProd, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceProd,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceProd:
     r"""[🌐 ReduceProd(1)](https://onnx.ai/onnx/operators/onnx__ReduceProd.html#reduceprod-1 "Online Documentation")

@@ -3256,7 +3326,11 @@ def ReduceProd(
 T_ReduceSum = TypeVar("T_ReduceSum", DOUBLE, FLOAT, FLOAT16, INT32, INT64, UINT32, UINT64)

 def ReduceSum(
-    self, data: T_ReduceSum, *, axes: Optional[Sequence[int]] = None, keepdims: int = 1
+    self,
+    data: T_ReduceSum,
+    *,
+    axes: Optional[Sequence[int]] = None,
+    keepdims: int = 1,
 ) -> T_ReduceSum:
     r"""[🌐 ReduceSum(1)](https://onnx.ai/onnx/operators/onnx__ReduceSum.html#reducesum-1 "Online Documentation")

@@ -3371,7 +3445,9 @@ def Reshape(
     schema = get_schema("Reshape", 1, "")
     op = Op(self, "Reshape", schema)
     return op(
-        *self._prepare_inputs(schema, data), consumed_inputs=consumed_inputs, shape=shape
+        *self._prepare_inputs(schema, data),
+        consumed_inputs=consumed_inputs,
+        shape=shape,
     )

 T_Selu = TypeVar("T_Selu", DOUBLE, FLOAT, FLOAT16)
@@ -3632,7 +3708,7 @@ def Softplus(self, X: T_Softplus) -> T_Softplus:


     Args:
-        X: (differentiable) 1D input tensor
+        X: (differentiable) Input tensor
     """

     schema = get_schema("Softplus", 1, "")
@@ -4019,7 +4095,12 @@ def Unsqueeze(self, data: T_Unsqueeze, *, axes: Sequence[int]) -> T_Unsqueeze:
 T_Upsample = TypeVar("T_Upsample", BOOL, DOUBLE, FLOAT, FLOAT16, INT32, INT64)

 def Upsample(
-    self, X: T_Upsample, *, height_scale: float, mode: str = "nearest", width_scale: float
+    self,
+    X: T_Upsample,
+    *,
+    height_scale: float,
+    mode: str = "nearest",
+    width_scale: float,
 ) -> T_Upsample:
     r"""[🌐 Upsample(1)](https://onnx.ai/onnx/operators/onnx__Upsample.html#upsample-1 "Online Documentation")


onnxscript/onnx_opset/_impl/opset10.py

Lines changed: 2 additions & 3 deletions
@@ -2,13 +2,12 @@
 # ⚠️ WARNING - AUTO-GENERATED CODE - DO NOT EDIT ⚠️
 # ⚙️ Generated by 'python -m opgen'
 # --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
+# Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # --------------------------------------------------------------------------
 # pylint: disable=W0221,W0222,R0901,W0237
 # mypy: disable-error-code=override
-# ruff: noqa: N801,E741
-# ruff: noqa: D214,D402,D405,D411,D412,D416,D417
+# ruff: noqa: D402
 # --------------------------------------------------------------------------

 from __future__ import annotations
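Only three of the 37 changed files are shown here. For orientation, a hedged sketch of authoring a function against the newly registered opset, written in the `@script` style from the onnxscript README rather than anything in this diff (the function name `relu_model` is made up for illustration):

```python
from onnxscript import FLOAT, script
from onnxscript.onnx_opset import opset24 as op  # registered by this commit

@script()
def relu_model(X: FLOAT["N"]) -> FLOAT["N"]:
    # Relu resolves through the regenerated opset wrapper classes.
    return op.Relu(X)
```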
