Commit 65cf611

Merge pull request #584 from 1duo/master
Mark coremltools 3.2
2 parents 21e8b38 + 53f1068 commit 65cf611

29 files changed: +756 -279 lines changed

README.md

Lines changed: 3 additions & 1 deletion
@@ -74,7 +74,7 @@ sure to install virtualenv using your system pip.
 pip install virtualenv
 ```
 
-The method for installing *coremltools* follows the
+The method for installing `coremltools` follows the
 [standard python package installation steps](https://packaging.python.org/installing/).
 To create a Python virtual environment called `pythonenv` follow these steps:
 
@@ -88,6 +88,7 @@ virtualenv pythonenv
 ```
 
 To activate your new virtual environment and install `coremltools` in this environment, follow these steps:
+
 ```
 # Active your virtual environment
 source pythonenv/bin/activate
@@ -215,6 +216,7 @@ Finally, to run the most important unit tests, you can use:
 ```shell
 pytest -rs
 ```
+
 some tests are marked as slow because they test a lot of combinations.
 If you want to run, all tests, you can use:
 

coremltools/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@
 For more information: http://developer.apple.com/documentation/coreml
 """
 
-__version__ = '3.1'
+__version__ = '3.2'
 
 # This is the basic Core ML specification format understood by iOS 11.0
 SPECIFICATION_VERSION = 1

coremltools/converters/caffe/_caffe_converter.py

Lines changed: 5 additions & 1 deletion
@@ -3,7 +3,7 @@
 # Use of this source code is governed by a BSD-3-clause license that can be
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
 
-import sys as _sys
+import os
 import six as _six
 from ...models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION, _VALID_MLMODEL_PRECISION_TYPES
 
@@ -191,6 +191,10 @@ def convert(model, image_input_names=[], is_bgr=False,
                               green_bias, gray_bias, image_scale, class_labels,
                               predicted_feature_name)
     model = MLModel(model_path)
+    try:
+        os.remove(model_path)
+    except OSError:
+        pass
 
     if model_precision == _MLMODEL_HALF_PRECISION and model is not None:
         model = convert_neural_network_weights_to_fp16(model)

coremltools/converters/keras/_keras2_converter.py

Lines changed: 39 additions & 19 deletions
@@ -10,6 +10,7 @@
 from ...models.neural_network.update_optimizer_utils import SgdParams
 from ...proto import FeatureTypes_pb2 as _FeatureTypes_pb2
 from collections import OrderedDict as _OrderedDict
+from ...proto import Model_pb2 as _Model_pb2
 from ...models import datatypes
 from ...models import MLModel as _MLModel
 from ...models.utils import save_spec as _save_spec
@@ -32,7 +33,7 @@
 
         _keras.layers.convolutional.Conv2D: _layers2.convert_convolution,
         _keras.layers.convolutional.Conv2DTranspose: _layers2.convert_convolution,
-        _keras.layers.convolutional.SeparableConv2D: _layers2.convert_separable_convolution,
+        _keras.layers.convolutional.SeparableConv2D: _layers2.convert_separable_convolution,
         _keras.layers.pooling.AveragePooling2D: _layers2.convert_pooling,
         _keras.layers.pooling.MaxPooling2D: _layers2.convert_pooling,
         _keras.layers.pooling.GlobalAveragePooling2D: _layers2.convert_pooling,
@@ -63,7 +64,7 @@
         _keras.layers.Maximum: _layers2.convert_merge,
         _keras.layers.Concatenate: _layers2.convert_merge,
         _keras.layers.Dot: _layers2.convert_merge,
-
+
         _keras.layers.core.Flatten: _layers2.convert_flatten,
         _keras.layers.core.Permute:_layers2.convert_permute,
         _keras.layers.core.Reshape:_layers2.convert_reshape,
@@ -91,7 +92,7 @@
     _KERAS_LAYER_REGISTRY[_keras.engine.topology.InputLayer] = \
         _layers2.default_skip
 # end if _HAS_KERAS2_TF
-
+
 
 def _is_merge_layer(layer):
     if _HAS_KERAS2_TF:
@@ -106,7 +107,7 @@ def _is_activation_layer(layer):
            isinstance(layer, _keras.layers.advanced_activations.LeakyReLU) or
            isinstance(layer, _keras.layers.advanced_activations.PReLU) or
            isinstance(layer, _keras.layers.advanced_activations.ELU) or
-           isinstance(layer,
+           isinstance(layer,
                       _keras.layers.advanced_activations.ThresholdedReLU) or
            isinstance(layer, _keras.layers.advanced_activations.Softmax))
 
@@ -163,7 +164,7 @@ def _load_keras_model(model_network_path, model_weight_path, custom_objects=None
        Path where the model network weights are (hd5 file)
 
    custom_objects:
-       A dictionary of layers or other custom classes
+       A dictionary of layers or other custom classes
        or functions used by the model
 
    Returns
@@ -300,7 +301,8 @@ def _convert(model,
              custom_objects=None,
              input_shapes=None,
              output_shapes=None,
-             respect_trainable=False):
+             respect_trainable=False,
+             use_float_arraytype=False):
 
     # Check Keras format
     if _keras.backend.image_data_format() == 'channels_first':
@@ -309,48 +311,48 @@ def _convert(model,
             "Changing to 'channels_last', but your model may not be converted "
             "converted properly.")
         _keras.backend.set_image_data_format('channels_last')
-
+
     # Check custom conversion functions / custom objects
     add_custom_layers = custom_conversion_functions is not None
 
     if isinstance(model, _string_types):
         model = _keras.models.load_model(model, custom_objects = custom_objects)
     elif isinstance(model, tuple):
         model = _load_keras_model(model[0], model[1])
-
+
     # Check valid versions
     _check_unsupported_layers(model, add_custom_layers)
-
+
     # Build network graph to represent Keras model
     graph = _topology2.NetGraph(model)
     graph.build()
 
     # The graph should be finalized before executing this
     graph.generate_blob_names()
     graph.add_recurrent_optionals()
-
+
     inputs = graph.get_input_layers()
     outputs = graph.get_output_layers()
-
+
     # check input / output names validity
-    if input_names is not None:
+    if input_names is not None:
         if isinstance(input_names, _string_types):
             input_names = [input_names]
-    else:
+    else:
         input_names = ['input' + str(i+1) for i in range(len(inputs))]
 
-    if output_names is not None:
+    if output_names is not None:
         if isinstance(output_names, _string_types):
             output_names = [output_names]
-    else:
+    else:
         output_names = ['output' + str(i+1) for i in range(len(outputs))]
-
+
     if image_input_names is not None and isinstance(image_input_names, _string_types):
         image_input_names = [image_input_names]
-
+
     graph.reset_model_input_names(input_names)
     graph.reset_model_output_names(output_names)
-
+
     # Keras -> Core ML input dimension dictionary
     # (None, None) -> [1, 1, 1, 1, 1]
     # (None, D) -> [D] or [D, 1, 1, 1, 1]
@@ -478,7 +480,8 @@ def _convert(model,
     input_features = list(zip(input_names, input_types))
     output_features = list(zip(output_names, output_types))
 
-    builder = _NeuralNetworkBuilder(input_features, output_features, mode = mode)
+    builder = _NeuralNetworkBuilder(input_features, output_features, mode = mode,
+                                    use_float_arraytype=use_float_arraytype)
 
     for iter, layer in enumerate(graph.layer_list):
         keras_layer = graph.keras_layer_map[layer]
@@ -548,4 +551,21 @@ def _convert(model,
 
     # Return the protobuf spec
     spec = builder.spec
+
+    # If the model has multi-arrays of type double, recommend to the user the utility function
+    # coremltools.models.utils.convert_double_to_float_multiarray_type(spec)
+    has_double_multiarray = False
+    for feature in list(spec.description.input) + list(spec.description.output):
+        if feature.type.HasField('multiArrayType'):
+            if feature.type.multiArrayType.dataType == _Model_pb2.ArrayFeatureType.DOUBLE:
+                has_double_multiarray = True
+                break
+
+    if has_double_multiarray:
+        print("\n\nRecommendation: This model has at least one multiarray input/output of type double.\n"
+              "For large sized arrays, multiarrays of type float32 are more efficient.\n"
+              "In future, float input/output multiarrays will be produced by default by the converter.\n"
+              "Please use, either the flag 'use_float_arraytype' during the call to convert or\n"
+              "the utility 'coremltools.utils.convert_double_to_float_multiarray_type(spec)', post-conversion.\n\n")
+
     return spec
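
The recommendation message above refers to a post-conversion workflow. A minimal sketch of that workflow, assuming a placeholder Keras model file `model.h5` (the file name is illustrative, not part of this change):

```python
import coremltools

# Convert a Keras model; with the defaults the multiarray inputs/outputs
# may be typed as double, which triggers the recommendation above.
mlmodel = coremltools.converters.keras.convert('model.h5')

# Post-conversion: rewrite double multiarray types to float32 in the spec,
# as the recommendation message suggests.
spec = mlmodel.get_spec()
coremltools.utils.convert_double_to_float_multiarray_type(spec)
mlmodel = coremltools.models.MLModel(spec)
mlmodel.save('model_float32.mlmodel')
```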

coremltools/converters/keras/_keras_converter.py

Lines changed: 16 additions & 4 deletions
@@ -361,7 +361,8 @@ def convertToSpec(model,
                   custom_objects=None,
                   input_shapes = None,
                   output_shapes = None,
-                  respect_trainable = False):
+                  respect_trainable = False,
+                  use_float_arraytype = False):
 
     """
     Convert a Keras model to Core ML protobuf specification (.mlmodel).
@@ -485,6 +486,10 @@ def convertToSpec(model,
         If True, then Keras layers that are marked 'trainable' will
         automatically be marked updatable in the Core ML model.
 
+    use_float_arraytype: bool
+        If true, the datatype of input/output multiarrays is set to Float32 instead
+        of double.
+
     Returns
     -------
     model: MLModel
@@ -576,7 +581,8 @@ def convertToSpec(model,
                           custom_objects=custom_objects,
                           input_shapes=input_shapes,
                           output_shapes=output_shapes,
-                          respect_trainable=respect_trainable)
+                          respect_trainable=respect_trainable,
+                          use_float_arraytype=use_float_arraytype)
     else:
         raise RuntimeError(
             'Keras not found or unsupported version or backend found. keras conversion API is disabled.')
@@ -606,7 +612,8 @@ def convert(model,
             custom_conversion_functions = None,
             input_shapes = None,
             output_shapes = None,
-            respect_trainable = False):
+            respect_trainable = False,
+            use_float_arraytype = False):
 
     """
     Convert a Keras model to Core ML protobuf specification (.mlmodel).
@@ -719,6 +726,10 @@ def convert(model,
        If yes, then Keras layers marked 'trainable' will automatically be
        marked updatable in the Core ML model.
 
+    use_float_arraytype: bool
+        If true, the datatype of input/output multiarrays is set to Float32 instead
+        of double.
+
     Returns
     -------
     model: MLModel
@@ -789,6 +800,7 @@ def convert(model,
                          custom_conversion_functions=custom_conversion_functions,
                          input_shapes=input_shapes,
                          output_shapes=output_shapes,
-                         respect_trainable=respect_trainable)
+                         respect_trainable=respect_trainable,
+                         use_float_arraytype=use_float_arraytype)
 
     return _MLModel(spec)
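
The same result can be requested at conversion time with the flag introduced in this commit. A minimal sketch, again using a placeholder `model.h5`:

```python
import coremltools

# use_float_arraytype=True (added in this commit) makes the converter
# emit Float32 multiarray inputs/outputs instead of double.
mlmodel = coremltools.converters.keras.convert(
    'model.h5',
    use_float_arraytype=True,
)
mlmodel.save('model_float32.mlmodel')
```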

coremltools/converters/nnssa/commons/basic_graph_ops.py

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ def check_connections(gd):
         for i in v.control_inputs:
             assert (k in gd[i].control_outputs)
         for i in v.control_outputs:
-            message = f"Node {k} not in {i} control_inputs"
+            message = "Node " + k + " not in " + i + " control_inputs"
             assert (k in gd[i].control_inputs), message
 
 

coremltools/converters/nnssa/coreml/graph_pass/__init__.py

Lines changed: 3 additions & 11 deletions
@@ -3,14 +3,6 @@
 from __future__ import division as _
 from __future__ import absolute_import as _
 
-from .op_removals import remove_no_ops_and_shift_control_dependencies
-from .op_removals import constant_weight_link_removal
-from .op_removals import remove_single_isolated_node
-from .op_removals import remove_identity, remove_oneway_split
-
-from .op_fusions import fuse_bias_add, transform_nhwc_to_nchw, \
-    onehot_matmul_to_embedding, fuse_layer_norm, fuse_gelu, \
-    fuse_batch_norm, fuse_pad_into_conv, \
-    spatial_reduce_to_global_pool, fuse_batch_to_space_or_space_to_batch
-
-from .mlmodel_passes import remove_disconnected_layers
+from .op_removals import *
+from .op_fusions import *
+from .mlmodel_passes import *

coremltools/converters/nnssa/coreml/graph_pass/op_fusions.py

Lines changed: 7 additions & 4 deletions
@@ -285,8 +285,8 @@ def transform_nhwc_to_nchw(nnssa):
         inp_node_format = graph[inp_node_name].attr.get('data_format')
         symbolic_value = graph[inp_node_name].attr['symbolic_value']
         if (graph[inp_node_name].op == 'Const' or
-                len(graph[inp_node_name].datatype.get_shape()) != 4 or
-                ( symbolic_value and not any_symbolic_or_unknown(symbolic_value))):
+                len(graph[inp_node_name].datatype.get_shape()) != 4 or
+                (symbolic_value and not any_symbolic_or_unknown(symbolic_value))):
             # Const weights and parameters
             continue
 
@@ -649,7 +649,11 @@ def _match_batch_norm_pattern(graph, entry_node, pattern_ops):
             return None
         if not _check_number_inputs(node, 2):
             return None
-        const_node = graph[node.inputs[1]]
+        node_inputs = [graph[n].op.lower() for n in node.inputs]
+        try:
+            const_node = graph[node.inputs[node_inputs.index('const')]]
+        except ValueError:
+            return None
         if not _check_single_out_vector_constant_node(const_node):
             return None
         if not _check_rank_matches(const_node, node):
@@ -702,7 +706,6 @@ def _merge_batch_norm(graph, nodes, pattern_id=1):
     control_inputs = []
     control_outputs = []
     bn_node_names = [x.name for x in nodes]
-    print(f"\n\nProcessing Fused Batch Norm: {fused_bn_node.name}")
 
     for name in bn_node_names:
         control_inputs += graph[name].control_inputs
