
Commit 97be3f4

Merge pull request #472 from aseemw/dev/coremltools_3.0_release
3.0 release
2 parents: 33c1914 + 46bfaba

File tree: 8 files changed (+177, -20 lines)


README.md

Lines changed: 3 additions & 3 deletions
@@ -5,12 +5,12 @@ Core ML community tools contains all supporting tools for Core ML model
 conversion and validation. This includes scikit-learn, LIBSVM, Caffe,
 Keras and XGBoost.
 
-coremltools 3.0 beta
+coremltools 3.0
 --------------------
-[Release notes](https://github.com/apple/coremltools/releases/tag/v3.0-beta)
+[Release notes](https://github.com/apple/coremltools/releases/)
 ```shell
 # Install using pip
-pip install coremltools==3.0b6
+pip install coremltools==3.0
 ```
 
 API
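
For context on the install and conversion workflow the README points to, here is a minimal sketch, not part of this commit, of converting a small Keras model with coremltools 3.0. The model architecture, the input/output names, and the file name are made up for illustration.

```python
# Minimal sketch (illustrative, not from this commit): convert a small Keras
# model to Core ML with coremltools 3.0.
import coremltools
from keras.models import Sequential
from keras.layers import Dense

kmodel = Sequential()
kmodel.add(Dense(32, input_shape=(16,), activation='relu'))
kmodel.add(Dense(10, activation='softmax'))

# Convert and save; 'data', 'output', and the file name are placeholders.
mlmodel = coremltools.converters.keras.convert(
    kmodel, input_names=['data'], output_names=['output']
)
mlmodel.save('small_classifier.mlmodel')
```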

coremltools/models/model.py

Lines changed: 0 additions & 5 deletions
@@ -102,11 +102,6 @@ def _get_proxy_and_spec(filename, use_cpu_only=False):
             # version of the engine can support so we'll not try to have a proxy object
             return None, specification
 
-        # check if there are custom layers
-        if _has_custom_layer(specification):
-            # custom layers can't be supported directly by compiling and loading the model here
-            return None, specification
-
         try:
             return _MLModelProxy(filename, use_cpu_only), specification
         except RuntimeError as e:
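
The deleted guard relied on the private helper `_has_custom_layer`. For callers who still want a similar check before loading, a minimal sketch follows; it assumes a plain `neuralNetwork` spec and would need extending for pipelines and the classifier/regressor variants, and `model.mlmodel` is a placeholder path.

```python
# Minimal sketch (assumptions noted above, not from this commit): detect custom
# layers by walking the protobuf spec directly.
import coremltools

def has_custom_layer(spec):
    # Only the plain neuralNetwork case is handled here.
    if spec.WhichOneof('Type') != 'neuralNetwork':
        return False
    return any(layer.WhichOneof('layer') == 'custom'
               for layer in spec.neuralNetwork.layers)

spec = coremltools.utils.load_spec('model.mlmodel')  # placeholder path
print(has_custom_layer(spec))
```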

coremltools/test/test_keras2.py

Lines changed: 169 additions & 1 deletion
@@ -1159,13 +1159,87 @@ def test_updatable_model_flag_no_loss_optimizer(self):
         """
         import coremltools
         from keras.layers import Dense
-        from keras.losses import categorical_crossentropy
+        updatable = Sequential()
+        updatable.add(Dense(128, input_shape=(16,)))
+        updatable.add(Dense(10, name="foo", activation='softmax',
+                            trainable=True))
+        input = ['data']
+        output = ['output']
+        cml = coremltools.converters.keras.convert(
+            updatable, input, output, respect_trainable=True
+        )
+        spec = cml.get_spec()
+        self.assertTrue(spec.isUpdatable)
+        layers = spec.neuralNetwork.layers
+        self.assertIsNotNone(layers[1].innerProduct)
+        self.assertTrue(layers[1].innerProduct)
+        self.assertTrue(layers[1].isUpdatable)
+
+    # when loss was specified as a string the converter had failed to work.
+    def test_updatable_model_flag_mse_string_adam(self):
+        """
+        Tests the 'respect_trainable' flag when used along with string
+        for the loss(here mse), conversion is successful
+        """
+        import coremltools
+        from keras.layers import Dense
+        from keras.optimizers import Adam
+
+        updatable = Sequential()
+        updatable.add(Dense(128, input_shape=(16,)))
+        updatable.add(Dense(10, name="foo", activation='relu',
+                            trainable=True))
+        updatable.compile(loss='mean_squared_error',
+                          optimizer=Adam(lr=1.0, beta_1=0.5, beta_2=0.75,
+                                         epsilon=0.25),
+                          metrics=['accuracy'])
+        input = ['data']
+        output = ['output']
+        cml = coremltools.converters.keras.convert(
+            updatable, input, output, respect_trainable=True
+        )
+        spec = cml.get_spec()
+        self.assertTrue(spec.isUpdatable)
+        layers = spec.neuralNetwork.layers
+        self.assertIsNotNone(layers[1].innerProduct)
+        self.assertTrue(layers[1].innerProduct)
+        self.assertTrue(layers[1].isUpdatable)
+
+        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
+        # check that mean squared error input name and output name is set
+        # check length is non-zero for mse
+        self.assertTrue(len(spec.neuralNetwork.updateParams.lossLayers[
+                                0].meanSquaredErrorLossLayer.input))
+        self.assertTrue(len(spec.neuralNetwork.updateParams.lossLayers[
+                                0].meanSquaredErrorLossLayer.target))
+        # check length is 0 for cce
+        self.assertFalse(len(spec.neuralNetwork.updateParams.lossLayers[
+                                 0].categoricalCrossEntropyLossLayer.input))
+        self.assertFalse(len(spec.neuralNetwork.updateParams.lossLayers[
+                                 0].categoricalCrossEntropyLossLayer.target))
+
+        adopt = spec.neuralNetwork.updateParams.optimizer.adamOptimizer
+        # verify default values
+        self.assertEqual(adopt.learningRate.defaultValue, 1.0)
+        self.assertEqual(adopt.beta1.defaultValue, 0.5)
+        self.assertEqual(adopt.beta2.defaultValue, 0.75)
+        self.assertEqual(adopt.eps.defaultValue, 0.25)
+
+    def test_updatable_model_flag_cce_string_sgd(self):
+        """
+        Tests the 'respect_trainable' flag when used along with string
+        for the loss(here cce), conversion is successful
+        """
+        import coremltools
+        from keras.layers import Dense
         from keras.optimizers import SGD
 
         updatable = Sequential()
         updatable.add(Dense(128, input_shape=(16,)))
         updatable.add(Dense(10, name="foo", activation='softmax',
                             trainable=True))
+        updatable.compile(loss='categorical_crossentropy',
+                          optimizer=SGD(lr=1.0), metrics=['accuracy'])
         input = ['data']
         output = ['output']
         cml = coremltools.converters.keras.convert(
@@ -1177,3 +1251,97 @@ def test_updatable_model_flag_no_loss_optimizer(self):
         self.assertIsNotNone(layers[1].innerProduct)
         self.assertTrue(layers[1].innerProduct)
         self.assertTrue(layers[1].isUpdatable)
+        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
+
+        # check that cce input name and output name is set
+        # check length is non-zero for cce
+        self.assertTrue(len(spec.neuralNetwork.updateParams.lossLayers[
+                                0].categoricalCrossEntropyLossLayer.input))
+        self.assertTrue(len(spec.neuralNetwork.updateParams.lossLayers[
+                                0].categoricalCrossEntropyLossLayer.target))
+        # check length is 0 for mse
+        self.assertFalse(len(spec.neuralNetwork.updateParams.lossLayers[
+                                 0].meanSquaredErrorLossLayer.input))
+        self.assertFalse(len(spec.neuralNetwork.updateParams.lossLayers[
+                                 0].meanSquaredErrorLossLayer.target))
+
+        sgdopt = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
+        self.assertEqual(sgdopt.learningRate.defaultValue, 1.0)
+        self.assertEqual(sgdopt.miniBatchSize.defaultValue, 16)
+        self.assertEqual(sgdopt.momentum.defaultValue, 0.0)
+
+
+    def test_updatable_model_flag_cce_sgd_string(self):
+        """
+        Tests the 'respect_trainable' flag when used along with string
+        for the optimizer(keras internally creates an instance, here sgd),
+        conversion is successful
+        """
+        import coremltools
+        from keras.layers import Dense, Input
+        from keras.losses import categorical_crossentropy
+        input = ['data']
+        output = ['output']
+
+        # This should result in an updatable model.
+        inputs = Input(shape=(16,))
+        d1 = Dense(128)(inputs)
+        d2 = Dense(10, name="foo", activation='softmax', trainable=True)(d1)
+        kmodel = Model(inputs=inputs, outputs=d2)
+        kmodel.compile(loss=categorical_crossentropy,
+                       optimizer='sgd', metrics=['accuracy'])
+        cml = coremltools.converters.keras.convert(
+            kmodel, input, output, respect_trainable=True
+        )
+        spec = cml.get_spec()
+        self.assertTrue(spec.isUpdatable)
+        layers = spec.neuralNetwork.layers
+        self.assertIsNotNone(layers[1].innerProduct)
+        self.assertTrue(layers[1].innerProduct)
+        self.assertTrue(layers[1].isUpdatable)
+        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
+        sgdopt = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
+        # use almost equal for default verification with at least 5 decimal
+        # places of closeness
+        self.assertAlmostEqual(sgdopt.learningRate.defaultValue, 0.01,
+                               places=5)
+        self.assertEqual(sgdopt.miniBatchSize.defaultValue, 16)
+        self.assertEqual(sgdopt.momentum.defaultValue, 0.0)
+
+    def test_updatable_model_flag_cce_adam_string(self):
+        """
+        Tests the 'respect_trainable' flag when used along with string
+        for the optimizer(keras internally creates an instance, here adam),
+        conversion is successful
+        """
+        import coremltools
+        from keras.layers import Dense, Input
+        from keras.losses import categorical_crossentropy
+        input = ['data']
+        output = ['output']
+
+        # This should result in an updatable model.
+        inputs = Input(shape=(16,))
+        d1 = Dense(128)(inputs)
+        d2 = Dense(10, name="foo", activation='softmax', trainable=True)(d1)
+        kmodel = Model(inputs=inputs, outputs=d2)
+        kmodel.compile(loss=categorical_crossentropy,
+                       optimizer='adam', metrics=['accuracy'])
+        cml = coremltools.converters.keras.convert(
+            kmodel, input, output, respect_trainable=True
+        )
+        spec = cml.get_spec()
+        self.assertTrue(spec.isUpdatable)
+        layers = spec.neuralNetwork.layers
+        self.assertIsNotNone(layers[1].innerProduct)
+        self.assertTrue(layers[1].innerProduct)
+        self.assertTrue(layers[1].isUpdatable)
+        self.assertEqual(len(spec.neuralNetwork.updateParams.lossLayers), 1)
+        adopt = spec.neuralNetwork.updateParams.optimizer.adamOptimizer
+        # use almost equal for default verification with at least 5 decimal
+        # places of closeness
+        self.assertAlmostEqual(adopt.learningRate.defaultValue, 0.001, places=5)
+        self.assertAlmostEqual(adopt.miniBatchSize.defaultValue, 16)
+        self.assertAlmostEqual(adopt.beta1.defaultValue, 0.90, places=5)
+        self.assertAlmostEqual(adopt.beta2.defaultValue, 0.999, places=5)
+
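Condensed from the new tests above, a standalone sketch of the workflow they exercise: compile a Keras model, convert it with `respect_trainable=True`, and inspect the update parameters on the resulting spec. The layer sizes and feature names are illustrative.

```python
# Condensed from the tests above: an updatable conversion and a quick look at
# the generated update parameters.
import coremltools
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(128, input_shape=(16,)))
model.add(Dense(10, name="foo", activation='softmax', trainable=True))
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1.0), metrics=['accuracy'])

cml = coremltools.converters.keras.convert(
    model, ['data'], ['output'], respect_trainable=True
)
spec = cml.get_spec()
print(spec.isUpdatable)  # True
print(spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue)  # 1.0
```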
docs/conf.py

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@
 try:
     version = pkg_resources.require("coremltools")[0].version
 except:
-    version = "3.0b6"
+    version = "3.0"
 
 # The short X.Y version.
 version = version

examples/updatable_models/README.md

Lines changed: 1 addition & 1 deletion
@@ -18,4 +18,4 @@ The model is a 'linked' pipeline composed of a 'linked' drawing embedding model
 - [Updatable Nearest Neighbor Classifier](https://github.com/apple/coremltools/tree/master/examples/updatable_models/updatable_nearest_neighbor_classifier.ipynb)
 This notebook makes an empty updatable nearest neighbor classifier. Before updating with training examples it predicts 'defaultLabel' for all input.
 
-In addition of the above examples, a short document on CoreML 3.0 Update Task API usage is provided [here](https://github.com/apple/coremltools/tree/master/examples/updatable_models/OnDeviceTraining_API_Usage.md).
+In addition of the above examples, a short document on CoreML 3.0 Update Task API usage is provided [here](https://github.com/apple/coremltools/tree/master/examples/updatable_models/OnDeviceTraining_API_Usage.md).
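
As a rough sketch, not part of this diff, of what the linked nearest neighbor notebook builds: an empty updatable classifier that predicts the default label until samples are added. The argument names follow the coremltools 3.0 `KNearestNeighborsClassifierBuilder` API as I understand it; the dimension, feature names, and file name are made up.

```python
# Rough sketch (API usage is an assumption; names and sizes are placeholders).
import coremltools
from coremltools.models.nearest_neighbors import KNearestNeighborsClassifierBuilder

builder = KNearestNeighborsClassifierBuilder(input_name='features',
                                             output_name='label',
                                             number_of_dimensions=128,
                                             default_class_label='defaultLabel')

# With no training samples added, the model predicts 'defaultLabel' for any input.
coremltools.models.utils.save_spec(builder.spec, 'UpdatableKNN.mlmodel')
```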

mlmodel/format/NeuralNetwork.proto

Lines changed: 1 addition & 1 deletion
@@ -3517,7 +3517,7 @@ message BatchedMatMulLayerParams {
  *
  * y = ConcatNDLayer(x1,x2,....)
  *
- * Requires at least 1 input and produces 1 output.
+ * Requires at least 2 input and produces 1 output.
  *
  * Input
  * A Sequence of N-dimensional tensors. The rank of the input tensors must match and all dimensions except 'axis' must be equal.
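
To illustrate the corrected requirement, here is a rough sketch, not part of this diff, of building a ConcatND layer with two inputs through the Python builder. The feature names and shapes are made up, and `add_concat_nd` plus `disable_rank5_shape_mapping` are assumed from the coremltools 3.0 `NeuralNetworkBuilder` API.

```python
# Rough sketch: ConcatND needs at least two inputs; join two (1, 4) tensors
# along the last axis to get (1, 8). Builder API usage is an assumption.
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

input_features = [('x1', datatypes.Array(1, 4)), ('x2', datatypes.Array(1, 4))]
output_features = [('y', datatypes.Array(1, 8))]

builder = NeuralNetworkBuilder(input_features, output_features,
                               disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat', input_names=['x1', 'x2'],
                      output_name='y', axis=-1)
```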

mlmodel/src/NeuralNetwork/NeuralNetworkLayerValidator.cpp

Lines changed: 0 additions & 6 deletions
@@ -1048,12 +1048,6 @@ Result NeuralNetworkSpecValidator::validateGRULayer(const Specification::NeuralN
     const auto& params = layer.gru();
     bool hasBiasVectors = params.hasbiasvectors();
 
-    // Validate that all weightParam types match
-    WeightParamType gateWeightMatrixValueType, gateRecursionMatrixValueType, gateBiasVectorValueType;
-    gateWeightMatrixValueType = valueType(params.updategateweightmatrix());
-    gateRecursionMatrixValueType = valueType(params.updategaterecursionmatrix());
-    gateBiasVectorValueType = valueType(params.updategatebiasvector());
-
     std::vector<CoreML::WeightParamType> weightTypeList;
     weightTypeList.push_back(valueType(params.updategateweightmatrix()));
     weightTypeList.push_back(valueType(params.updategaterecursionmatrix()));

setup.py

Lines changed: 2 additions & 2 deletions
@@ -10,7 +10,7 @@
     long_description = f.read()
 
 setup(name='coremltools',
-      version='3.0b6',
+      version='3.0',
       description='Community Tools for CoreML',
       long_description=long_description,
       author='Apple Inc.',
@@ -60,7 +60,7 @@
           'console_scripts': ['coremlconverter = coremltools:_main']
       },
       classifiers=[
-          'Development Status :: 4 - Beta',
+          'Development Status :: 5 - Production/Stable',
           'Intended Audience :: End Users/Desktop',
           'Intended Audience :: Developers',
           'Operating System :: MacOS :: MacOS X',
