Commit 428d4b2

9.0 Release (#2616)
1 parent ea1d2de commit 428d4b2

14 files changed: +174 -24 lines

BUILDING.md

Lines changed: 2 additions & 2 deletions
@@ -19,7 +19,7 @@ Follow these steps:
 1. Fork and clone the GitHub [coremltools repository](https://github.com/apple/coremltools).
 
 2. Run the [build.sh](scripts/build.sh) script to build `coremltools`.
-   * By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as a argument to change the Python version.
+   * By default this script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`, `3.13`) as a argument to change the Python version.
    * The script creates a new `build` folder with the coremltools distribution, and a `dist` folder with Python wheel files.
 
 3. Run the [test.sh](scripts/test.sh) script to test the build.
@@ -45,7 +45,7 @@ The following build targets help you configure the development environment. If y
 * `test_slow` | Run all non-fast tests.
 * `wheel` | Build wheels in release mode.
 
-The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`) as a argument to change the Python version.
+The script uses Python 3.7, but you can include `--python=3.8` (or `3.9`, `3.10`, `3.11`, `3.12`, `3.13`) as a argument to change the Python version.
 
 ## Resources

CMakeLists.txt

Lines changed: 6 additions & 1 deletion
@@ -70,7 +70,7 @@ add_library(modelpackage
     modelpackage/src/utils/JsonMap.cpp
     modelpackage/src/ModelPackagePython.cpp
 )
-
+
 target_compile_definitions(modelpackage
     PRIVATE
     CPU_ONLY=1
@@ -197,8 +197,13 @@ set(KMEANS_DIR "${PROJECT_SOURCE_DIR}/deps/kmeans1d")
 execute_process(
     COMMAND python3 setup.py build_ext --inplace
     WORKING_DIRECTORY ${KMEANS_DIR}
+    RESULT_VARIABLE KMEANS1D_BUILD_STATUS
 )
 
+if(NOT KMEANS1D_BUILD_STATUS EQUAL 0)
+    message(FATAL_ERROR "Could not build kmeans1d dependency")
+endif()
+
 # Somehow Python's setuptools is building this shared object file so that it tries to load the C++
 # standard library using an rpath that only exist on the build machine. Change that so it gets
 # loaded from the standard location.
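
For readers less familiar with CMake, the effect of the added `RESULT_VARIABLE` / `FATAL_ERROR` pair is roughly the following Python sketch (repository-root working directory assumed; an illustration, not part of the commit):

```python
import subprocess

# Rough equivalent of the new CMake check: build the kmeans1d extension in place
# and abort configuration if the build command fails.
result = subprocess.run(
    ["python3", "setup.py", "build_ext", "--inplace"],
    cwd="deps/kmeans1d",  # ${KMEANS_DIR} in CMakeLists.txt
)
if result.returncode != 0:
    raise RuntimeError("Could not build kmeans1d dependency")
```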

coremltools/converters/mil/frontend/_utils.py

Lines changed: 6 additions & 1 deletion
@@ -126,7 +126,12 @@ def pymil_broadcast_to(tensor: Var, shape: Union[Var, VARIABLE_SHAPE_TYPE], name
 
     if any_symbolic(tensor.shape) or shape_var.val is None:
         tensor_shape = mb.shape(x=tensor)
-        reps = mb.real_div(x=shape_var, y=tensor_shape)
+        reps = mb.select(
+            cond = mb.equal(x=shape_var, y=-1),
+            a = tensor_shape,
+            b = shape_var,
+        )
+        reps = mb.real_div(x=reps, y=tensor_shape)
         reps = mb.cast(x=reps, dtype="int32")
         res = mb.tile(x=tensor, reps=reps, name=name)
     else:
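
For intuition, a minimal NumPy sketch of the new behavior (illustration only, not the MIL ops themselves): a `-1` entry in the requested shape is treated as "keep the input dimension", so it is swapped for the actual dimension before the tile repetitions are computed.

```python
import numpy as np

# Sketch of the reps computation in pymil_broadcast_to after this change:
# select(-1 -> input dim), then real_div, then cast to int32.
def broadcast_reps(tensor_shape, target_shape):
    tensor_shape = np.array(tensor_shape)
    target_shape = np.array(target_shape)
    resolved = np.where(target_shape == -1, tensor_shape, target_shape)  # mb.select
    return (resolved / tensor_shape).astype(np.int32)                    # mb.real_div + mb.cast

print(broadcast_reps((1, 1, 3), (5, 7, -1)))  # [5 7 1]
```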

coremltools/converters/mil/frontend/torch/ssa_passes/torch_upsample_to_core_upsample.py

Lines changed: 67 additions & 6 deletions
@@ -42,12 +42,67 @@ def _torch_upsample_to_core_upsample_block(block):
 
         if op.op_type in target_ops:
             if _try_replace_with_core_upsample(op):
-                logger.info("Successfully map {} to core upsample".format(op.op_type))
+                msg = f"Successfully map {op.op_type} to core upsample"
+                logger.info(msg)
             else:
                 raise ValueError("Unable to map {} to core upsample".format(op.op_type))
 
-
-def _try_get_upsample_factor(output_size):
+def _try_get_upsample_factor_pattern_2(output_size, expected_gather_indices, target_op):
+    """
+    This is the pattern corresponds to the python source code:
+
+    class UpsampleBilinear(nn.Module):
+        def forward(self, x):
+            b, c, h, w = x.shape
+            return F.interpolate(x, size=(h*2, w*2), mode='bilinear', align_corners=False)
+
+    The resulting pymil program is:
+
+    function[CoreML5](%x: (1, 3, is0, is1, fp32)(Tensor)) {
+      block0() {
+        %3_shape: (4,int32)^(Tensor) = shape(x=%x, name="3_shape")
+        %gather_0: (int32)^(Scalar) = gather(x=%3_shape, indices=2, axis=0, name="gather_0")
+        %6_shape: (4,int32)^(Tensor) = shape(x=%x, name="6_shape")
+        %gather_1: (int32)^(Scalar) = gather(x=%6_shape, indices=3, axis=0, name="gather_1")
+        %9: (int32)(Scalar) = mul(x=%gather_0, y=2, name="9")
+        %10: (int32)(Scalar) = cast(x=%9, dtype="int32", name="10")
+        %12: (int32)(Scalar) = mul(x=%gather_1, y=2, name="12")
+        %13: (int32)(Scalar) = cast(x=%12, dtype="int32", name="13")
+        %17: (1, 3, is2, is3, fp32)(Tensor) = torch_upsample_bilinear(x=%x, output_height=%10, output_width=%13, align_corners=False, name="17")
+
+    We do a pattern matching to extract the scale value.
+    """
+    # cast op
+    op = output_size
+    if op.op_type != "cast" or op.dtype.val != "int32":
+        return None
+
+    # mul op
+    mul_op = op.x.op
+    if mul_op.op_type != "mul":
+        return None
+    mul_op_y = mul_op.y
+
+    # gather op
+    gather_op = mul_op.x.op
+    if gather_op.op_type != "gather":
+        return None
+    if gather_op.indices.val != expected_gather_indices:
+        return None
+    if gather_op.axis.val != 0:
+        return None
+
+    # shape op
+    shape_op = gather_op.x.op
+    if shape_op.op_type != "shape":
+        return None
+    if shape_op.x != target_op:
+        return None
+
+    return mul_op_y.val
+
+
+def _try_get_upsample_factor_pattern_1(output_size):
     op = output_size
     # If the output has value, then the upsample op itself is derived from the upsample_1d op,
     # so we can just return scale factor 1 for that case
@@ -103,9 +158,15 @@ def _try_replace_with_core_upsample(op):
     assert op.op_type in target_ops
 
     # 2d upsampling
-    if op.op_type in ["torch_upsample_nearest_neighbor", "torch_upsample_bilinear"]:
-        scales_h = _try_get_upsample_factor(op.output_height.op)
-        scales_w = _try_get_upsample_factor(op.output_width.op)
+    if op.op_type in target_ops:
+
+        # try to resolve the scaling factor - pattern 1
+        scales_h = _try_get_upsample_factor_pattern_1(op.output_height.op)
+        scales_w = _try_get_upsample_factor_pattern_1(op.output_width.op)
+
+        if scales_h is None or scales_w is None:
+            scales_h = _try_get_upsample_factor_pattern_2(op.output_height.op, 2, op.x)
+            scales_w = _try_get_upsample_factor_pattern_2(op.output_width.op, 3, op.x)
 
     if scales_h is None or scales_w is None:
         return False
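
As a quick illustration of the source-level idioms involved (a sketch; only the `size=(h*2, w*2)` form is confirmed by the docstring above, and the `scale_factor` form is assumed to go through the pre-existing matcher):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class UpsampleByScale(nn.Module):
    # Scale given directly; assumed to be resolved by the existing pattern-1 matcher.
    def forward(self, x):
        return F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)

class UpsampleBySize(nn.Module):
    # Size computed from x.shape; this is the idiom the new pattern-2 matcher targets.
    def forward(self, x):
        b, c, h, w = x.shape
        return F.interpolate(x, size=(h * 2, w * 2), mode="bilinear", align_corners=False)

x = torch.rand(1, 3, 32, 32)
# Both modules double the spatial dimensions, so both should reduce to a
# static upsample scale factor of 2 once the pattern is recognized.
assert UpsampleByScale()(x).shape == UpsampleBySize()(x).shape == (1, 3, 64, 64)
```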

coremltools/converters/mil/frontend/torch/test/test_torch_export_conversion_api.py

Lines changed: 1 addition & 0 deletions
@@ -334,6 +334,7 @@ def forward(self, x):
         assert mlmodel.user_defined_metadata[_METADATA_SOURCE_DIALECT] == dialect_name
 
 
+@pytest.mark.skipif((version_info.major, version_info.minor) == (3, 13), reason="rdar://158079341")
 class TestExecuTorchExamples(TorchBaseTest):
     @pytest.mark.parametrize(
         "compute_unit, backend, frontend, dynamic",

coremltools/converters/mil/frontend/torch/test/test_torch_ops.py

Lines changed: 70 additions & 0 deletions
@@ -2668,6 +2668,41 @@ def forward(self, x):
             compute_unit=compute_unit,
         )
 
+    @pytest.mark.parametrize(
+        "compute_unit, backend, frontend",
+        itertools.product(compute_units, backends, frontends),
+    )
+    def test_upsample_with_shape_gather_pattern(self, compute_unit, backend, frontend):
+        if frontend == TorchFrontend.TORCHEXPORT:
+            pytest.xfail("CoreML model not runnable for the torch export frontend.")
+
+        input_shape = (1, 3, 32, 32)
+
+        class UpsampleBilinear(nn.Module):
+            def forward(self, x):
+                b, c, h, w = x.shape
+                return nn.functional.interpolate(
+                    x, size=(h * 2, w * 2), mode="bilinear", align_corners=False
+                )
+
+        model = UpsampleBilinear().eval()
+
+        h_dim = torch.export.Dim(name="height", min=16, max=128)
+        w_dim = torch.export.Dim(name="width", min=16, max=128)
+        torch_export_dynamic_shapes = {"x": {2: h_dim, 3: w_dim}}
+
+        self.run_compare_torch(
+            input_shape,
+            model,
+            frontend=frontend,
+            backend=backend,
+            compute_unit=compute_unit,
+            converter_input_type=[
+                TensorType(shape=(1, 3, ct.RangeDim(16, 128), ct.RangeDim(16, 128))),
+            ],
+            torch_export_dynamic_shapes=torch_export_dynamic_shapes,
+        )
+
     @pytest.mark.parametrize(
         "compute_unit, backend, frontend, output_size",
         itertools.product(compute_units, backends, frontends, [10, 170]),
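
The test above uses the internal `run_compare_torch` helper; a hypothetical end-to-end conversion of the same model through the public API might look like this (input name and dimension ranges assumed):

```python
import coremltools as ct
import torch
import torch.nn as nn
import torch.nn.functional as F

class UpsampleBilinear(nn.Module):
    def forward(self, x):
        b, c, h, w = x.shape
        return F.interpolate(x, size=(h * 2, w * 2), mode="bilinear", align_corners=False)

# Trace with a representative input, then convert with flexible spatial dims.
traced = torch.jit.trace(UpsampleBilinear().eval(), torch.rand(1, 3, 32, 32))
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=(1, 3, ct.RangeDim(16, 128), ct.RangeDim(16, 128)))],
)
```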
@@ -5234,7 +5269,42 @@ def forward(self, x, y):
             input_shapes, model, compute_unit=compute_unit, backend=backend, frontend=frontend
         )
 
+    @pytest.mark.parametrize(
+        "compute_unit, backend, frontend, input_shape",
+        itertools.product(
+            compute_units,
+            backends,
+            frontends,
+            [
+                (ct.RangeDim(3, 21), ),
+                (15, )
+            ]
+        ),
+    )
+    def test_expand_dynamic_shape4(self, compute_unit, backend, frontend, input_shape):
+        if frontend in TORCH_EXPORT_BASED_FRONTENDS:
+            pytest.xfail(
+                "torch.export refuses to make size-1 dim dynamic, "
+                "and cannot expand one dynamic dimension into another dynamic dimension"
+            )
+
+        class TestModel(nn.Module):
+            def forward(self, x):
+                return x.reshape(-1, 1, 3).expand(-1, 7, -1)
+
+        converter_input_type = [ct.TensorType(name = "x", shape=input_shape, dtype=types.fp32)]
+        model = TestModel()
 
+        self.run_compare_torch(
+            torch.rand(15),
+            model,
+            input_as_shape=False,
+            converter_input_type=converter_input_type,
+            frontend=frontend,
+            backend=backend,
+            compute_unit=compute_unit,
+        )
+
 class TestExpandDims(TorchBaseTest):
     @pytest.mark.parametrize(
         "compute_unit, backend, frontend, rank_and_axis",

coremltools/test/ml_program/test_utils.py

Lines changed: 5 additions & 1 deletion
@@ -9,6 +9,7 @@
 import platform
 import shutil
 import tempfile
+from sys import version_info
 from typing import Dict, Tuple
 
 import numpy as np
@@ -1496,8 +1497,11 @@ def validate_inference(multifunction_mlpackage_path: str) -> None:
     shutil.rmtree(multifunction_mlpackage_path)
 
 
+@pytest.mark.skipif(
+    (version_info.major, version_info.minor) == (3, 13),
+    reason="rdar://157488825 (Python 3.13 Unit Test Segmentation Fault)",
+)
 class TestBisectModel:
-
     @staticmethod
     def check_spec_op_type(model_path, expected_ops):
         spec = load_spec(model_path)

coremltools/version.py

Lines changed: 1 addition & 1 deletion
@@ -4,4 +4,4 @@
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
 
 
-__version__ = "9.0b1" # VERSION_STRING
+__version__ = "9.0" # VERSION_STRING
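
After installing the release, the bumped version string can be sanity-checked from Python (a trivial check, shown only because the string above is the single source of the package version):

```python
import coremltools

# Expected to print "9.0" for this release.
print(coremltools.__version__)
```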

docs-guides/source/installing-coremltools.md

Lines changed: 7 additions & 7 deletions
@@ -4,7 +4,7 @@ This page describes how to install the [`coremltools`](https://github.com/apple/
 
 ```{admonition} Supported Python and MacOS Versions
 
-The current version of coremltools ([version 8.0](https://github.com/apple/coremltools)) includes wheels for Python 3.7, 3.8, 3.9, 3.10, 3.11, and 3.12. The last stable release of coremltools to support Python 2 is version 4.0.
+The current version of coremltools ([version 9.0b1](https://github.com/apple/coremltools)) includes wheels for Python 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13. The last stable release of coremltools to support Python 2 is version 4.0.
 
 The supported MacOS versions are as follows:
@@ -19,7 +19,7 @@ The supported MacOS versions are as follows:
 If you are using macOS, you should already be familiar with the [Mac Terminal app command line](https://developer.apple.com/library/archive/documentation/OpenSource/Conceptual/ShellScripting/CommandLInePrimer/CommandLine.html#//apple_ref/doc/uid/TP40004268-CH271-BBCBEAJD "Command Line Primer") to perform tasks such as installations and updates. If you are using Linux, you should already be familiar with [basic Shell commands in Linux](https://www.geeksforgeeks.org/basic-shell-commands-in-linux/).
 ```
 
-Before installing coremltools, you need [Python](https://www.python.org/downloads/ "Python Downloads") and the [`pip`](https://pip.pypa.io/en/stable/) installer. 
+Before installing coremltools, you need [Python](https://www.python.org/downloads/ "Python Downloads") and the [`pip`](https://pip.pypa.io/en/stable/) installer.
 
 The `coremltools` package supports [Python 3](https://www.python.org/download/releases/3.0/). We recommend that you install Python 3.6 or newer. Use a Python package manager such as [Conda](https://docs.conda.io/en/latest/index.html) or [venv](https://docs.python.org/3/library/venv.html) to install the newest version of Python and other dependencies. [Conda](https://docs.conda.io/en/latest/index.html) is recommended because it is the most reliable way to install all required dependencies.
@@ -82,7 +82,7 @@ python -m venv coremltools-venv
 source coremltools-venv/bin/activate
 ```
 
-4. Follow the instructions in [Install Core ML Tools](#install-core-ml-tools). 
+4. Follow the instructions in [Install Core ML Tools](#install-core-ml-tools).
 
 ## Install Core ML Tools
@@ -103,17 +103,17 @@ The continuous integration (CI) system linked to the `coremltools` repo builds a
 To access the wheel for a particular `coremltools` release, follow these steps:
 
 1. Go to the [`coremltools` repository](https://github.com/apple/coremltools) on GitHub, scroll down to the **README.md** heading, and click the **build passing** button. The **Branches** tab appears:
-   
+
    ![Branches tab](images/repo-readme-build-passing-button-annot.png)
-   
+
   ![Branches passed](images/repo-branches-passed-button.png)
 
 2. Click the **passed** button to show the **Pipeline** tab:
-   
+
   ![Pipeline tab](images/repo-build-wheel-selected.png)
 
 3. Click a wheel in the **Build** column. For example, in the previous figure, the **build_wheel_macos_py38** wheel is highlighted for clicking. After clicking a wheel, the raw job log appears, with the **Download** and **Browse** buttons in the right column:
-   
+
   ![Download and Browse](images/repo-job-artifacts.png)
 
 4. Click the **Download** button to download the `dist` folder with the wheel files.

reqs/build.pip

Lines changed: 4 additions & 1 deletion
@@ -1,8 +1,11 @@
 numpy==1.21.0; platform_machine == "arm64" and python_version < "3.9"
 numpy<1.20; platform_machine != "arm64" and python_version < "3.9"
-numpy==2.0.0; python_version >= "3.9"
+numpy==2.0.0; python_version >= "3.9" and python_version < "3.13"
+numpy==2.1.0; python_version >= "3.13"
 
+protobuf
 pytest
+setuptools; python_version >= "3.13"
 six
 sympy
 tqdm
