5.0b5 Release (#1299)
* 5.0b5 Release

* Define missing variable

* Remove links

* Unit tests: do not call predict on an ML package unless on macOS 12+

* CI: increase time out for tests

* Unit tests: more skipping predict on an ML package unless on macOS 12+

* Remove usage of six module

* Remove Linux CI jobs which are taking too long

* Unit tests: even more skipping predict on an ML package unless on macOS 12+

* Unit tests: even more skipping predict on an ML package unless on macOS 12+
TobyRoseman committed Sep 22, 2021
1 parent 9827d42 commit 30ea1da
Showing 89 changed files with 5,021 additions and 2,380 deletions.
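Several of the commits listed above guard calls to `predict` on an ML package behind a macOS 12+ check. A minimal sketch of that kind of pytest guard follows; the version helper, test name, and skip reason are assumptions for illustration, not the repository's actual test code:

```python
# Hypothetical sketch of the "skip predict on an ML package unless on
# macOS 12+" pattern described in the commit messages above; the helper
# name and skip reason are assumptions, not code from this commit.
import platform

import pytest


def _macos_version():
    # (major, minor) on macOS, empty tuple on other platforms.
    if platform.system() == "Darwin":
        major_minor = (platform.mac_ver()[0] + ".0").split(".")[:2]
        return tuple(int(v) for v in major_minor)
    return ()


@pytest.mark.skipif(
    _macos_version() < (12, 0),
    reason="predict on an ML package requires macOS 12+",
)
def test_ml_package_predict():
    ...  # build an mlprogram model and call model.predict here
```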
46 changes: 2 additions & 44 deletions .gitlab-ci.yml
@@ -122,13 +122,6 @@ build_wheel_macos_py39:
- zsh -e scripts/test.sh --wheel-path=${WHEEL_PATH} --python=${PYTHON}
--test-package=${TEST_PACKAGE} --fast

-.test_linux_pkg_with_reqs: &test_linux_pkg_with_reqs
-  stage: test
-  script:
-    - export PATH=$PATH:/opt/anaconda/bin/
-    - zsh -e scripts/test.sh --wheel-path=${WHEEL_PATH} --python=${PYTHON}
-      --test-package=${TEST_PACKAGE} --requirements=${REQUIREMENTS} --fast

test_linux_py37_coremltools_test:
<<: *test_linux_pkg
tags:
@@ -153,56 +146,21 @@ test_linux_py37_pytorch:
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
WHEEL_PATH: build/dist/coremltools-*-none-manylinux1_x86_64.whl

-test_linux_py37_tf1:
-  <<: *test_linux_pkg
-  tags:
-    - docker
-  image: registry.gitlab.com/zach_nation/coremltools/build-image-ubuntu-18.04:1.0.0
-  dependencies:
-    - build_wheel_linux_py37
-  variables:
-    PYTHON: "3.7"
-    TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow
-    WHEEL_PATH: build/dist/coremltools-*-none-manylinux1_x86_64.whl

-test_linux_py37_tf2:
-  <<: *test_linux_pkg_with_reqs
-  tags:
-    - docker
-  image: registry.gitlab.com/zach_nation/coremltools/build-image-ubuntu-18.04:1.0.0
-  dependencies:
-    - build_wheel_linux_py37
-  variables:
-    PYTHON: "3.7"
-    REQUIREMENTS: reqs/test_tf2.pip
-    TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow2
-    WHEEL_PATH: build/dist/coremltools-*-none-manylinux1_x86_64.whl

-test_linux_py37_mil:
-  <<: *test_linux_pkg
-  tags:
-    - docker
-  image: registry.gitlab.com/zach_nation/coremltools/build-image-ubuntu-18.04:1.0.0
-  dependencies:
-    - build_wheel_linux_py37
-  variables:
-    PYTHON: "3.7"
-    TEST_PACKAGE: coremltools.converters.mil
-    WHEEL_PATH: build/dist/coremltools-*-none-manylinux1_x86_64.whl

#########################################################################
##
## macOS - Test
##
#########################################################################
.test_macos_pkg: &test_macos_pkg
stage: test
+  timeout: 9h
script:
- zsh -e scripts/test.sh --wheel-path=${WHEEL_PATH} --python=${PYTHON}
--test-package=${TEST_PACKAGE} --fast

.test_macos_pkg_with_reqs: &test_macos_pkg_with_reqs
stage: test
+  timeout: 9h
script:
- zsh -e scripts/test.sh --wheel-path=${WHEEL_PATH} --python=${PYTHON}
--test-package=${TEST_PACKAGE} --requirements=${REQUIREMENTS} --fast
2 changes: 1 addition & 1 deletion README.md
@@ -34,7 +34,7 @@ The coremltools 5 package offers several performance improvements over previous
To install coremltools 5, use the following command:

```shell
-pip install coremltools==5.0b4
+pip install coremltools==5.0b5
```


14 changes: 0 additions & 14 deletions coremlpython/CoreMLPython.h
@@ -11,7 +11,6 @@
#pragma clang diagnostic pop

#import <CoreML/CoreML.h>
#import "Validation/NeuralNetwork/NeuralNetworkShapes.hpp"

namespace py = pybind11;

@@ -32,18 +31,5 @@ namespace CoreML {
static int32_t maximumSupportedSpecificationVersion();
std::string toString() const;
};


class NeuralNetworkShapeInformation {
private:
std::unique_ptr<NeuralNetworkShaper> shaper;
public:
NeuralNetworkShapeInformation(const std::string& filename);
NeuralNetworkShapeInformation(const std::string& filename, bool useInputAndOutputConstraints);
void init(const std::string& filename);
py::dict shape(const std::string& name);
std::string toString() const;
void print() const;
};
}
}
33 changes: 0 additions & 33 deletions coremlpython/CoreMLPython.mm
@@ -112,33 +112,6 @@
return CoreML::MLMODEL_SPECIFICATION_VERSION_NEWEST;
}

NeuralNetworkShapeInformation::NeuralNetworkShapeInformation(const std::string& filename) {
CoreML::Specification::Model model;
Result r = CoreML::loadSpecificationPath(model, filename);
shaper = std::unique_ptr<NeuralNetworkShaper>(new NeuralNetworkShaper(model));
}

NeuralNetworkShapeInformation::NeuralNetworkShapeInformation(const std::string& filename, bool useInputAndOutputConstraints) {
CoreML::Specification::Model model;
Result r = CoreML::loadSpecificationPath(model, filename);
shaper = std::unique_ptr<NeuralNetworkShaper>(new NeuralNetworkShaper(model, useInputAndOutputConstraints));
}

void NeuralNetworkShapeInformation::init(const std::string& filename) {
CoreML::Specification::Model model;
Result r = CoreML::loadSpecificationPath(model, filename);
shaper.reset(new NeuralNetworkShaper(model));
}

py::dict NeuralNetworkShapeInformation::shape(const std::string& name) {
const ShapeConstraint& constraint = shaper->shape(name);
return Utils::shapeConstraintToPyDict(constraint);
}

void NeuralNetworkShapeInformation::print() const {
shaper->print();
}


/*
*
@@ -155,12 +128,6 @@
.def_static("auto_set_specification_version", &Model::autoSetSpecificationVersion)
.def_static("maximum_supported_specification_version", &Model::maximumSupportedSpecificationVersion);

py::class_<NeuralNetworkShapeInformation>(m, "_NeuralNetworkShaperProxy")
.def(py::init<const std::string&>())
.def(py::init<const std::string&, bool>())
.def("shape", &NeuralNetworkShapeInformation::shape)
.def("print", &NeuralNetworkShapeInformation::print);

return m.ptr();
}

6 changes: 4 additions & 2 deletions coremlpython/CoreMLPythonUtils.h
@@ -1,3 +1,7 @@
+// Copyright (c) 2021, Apple Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-3-clause license that can be
+// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
#include <string>

#pragma clang diagnostic push
@@ -37,8 +41,6 @@ namespace CoreML {
py::object convertDictionaryValueToPython(NSDictionary<NSObject *,NSNumber *> * value);
py::object convertImageValueToPython(CVPixelBufferRef value);
py::object convertSequenceValueToPython(MLSequence *seq) API_AVAILABLE(macos(10.14));
py::dict shapeConstraintToPyDict(const ShapeConstraint& constraint);

}
}
}
18 changes: 4 additions & 14 deletions coremlpython/CoreMLPythonUtils.mm
@@ -1,3 +1,7 @@
+// Copyright (c) 2021, Apple Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-3-clause license that can be
+// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
#import "CoreMLPythonArray.h"
#import "CoreMLPythonUtils.h"

@@ -590,17 +594,3 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
}
return py::object();
}



py::dict Utils::shapeConstraintToPyDict(const ShapeConstraint& constraint) {
@autoreleasepool {
py::dict ret;
ret[py::str("S")] = py::make_tuple((int)constraint.sequenceRange().minimumValue(), (constraint.sequenceRange().maximumValue().isUnbound() ? -1 : (int)constraint.sequenceRange().maximumValue().value()));
ret[py::str("B")] = py::make_tuple((int)constraint.batchRange().minimumValue(), (constraint.batchRange().maximumValue().isUnbound() ? -1 : (int)constraint.batchRange().maximumValue().value()));
ret[py::str("C")] = py::make_tuple((int)constraint.channelRange().minimumValue(), (constraint.channelRange().maximumValue().isUnbound() ? -1 : (int)constraint.channelRange().maximumValue().value()));
ret[py::str("H")] = py::make_tuple((int)constraint.heightRange().minimumValue(), (constraint.heightRange().maximumValue().isUnbound() ? -1 : (int)constraint.heightRange().maximumValue().value()));
ret[py::str("W")] = py::make_tuple((int)constraint.widthRange().minimumValue(), (constraint.widthRange().maximumValue().isUnbound() ? -1 : (int)constraint.widthRange().maximumValue().value()));
return ret;
}
}
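For orientation, the removed `shapeConstraintToPyDict` mapped each of the S/B/C/H/W axes to a `(minimum, maximum)` tuple, encoding an unbounded maximum as `-1`. A purely hypothetical example of the dictionary it used to return:

```python
# Hypothetical return value of the removed shapeConstraintToPyDict;
# each axis maps to (min, max), with -1 meaning the max is unbounded.
shape_info = {
    "S": (1, 1),     # sequence range
    "B": (1, -1),    # batch range, unbounded above
    "C": (3, 3),     # channel range
    "H": (64, 512),  # height range
    "W": (64, 512),  # width range
}
```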
42 changes: 42 additions & 0 deletions coremltools/converters/mil/backend/backend_helper.py
@@ -0,0 +1,42 @@
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

from coremltools.converters.mil.mil.passes.name_sanitization_utils import NameSanitizer

def _get_probability_var_for_classifier(prog, classifier_config):
'''
Return the var which will be used to construct the dictionary for the classifier.
:param prog: mil program
:param classifier_config: an instance of coremltools.ClassifierConfig class
:return: var
'''
block = prog.functions["main"]
probability_var = None
if classifier_config.predicted_probabilities_output is None \
or classifier_config.predicted_probabilities_output == "":
# user has not indicated which tensor in the program to use as probabilities
# (i.e which tensor to link to the classifier output)
# in this case, attach the last non const op to the classify op
for op in reversed(block.operations):
if op.op_type != 'const' and len(op.outputs) == 1:
probability_var = op.outputs[0]
break
if probability_var is None:
raise ValueError("Unable to determine the tensor in the graph "
"that corresponds to the probabilities for the classifier output")
else:
# user has indicated which tensor in the program to use as probabilities
# (i.e which tensor to link to the classifier output)
# Verify that it corresponds to a var produced in the program
predicted_probabilities_output = NameSanitizer().sanitize_name(classifier_config.predicted_probabilities_output)
for op in block.operations:
for out in op.outputs:
if out.name == predicted_probabilities_output:
probability_var = out
break
if probability_var is None:
msg = "'predicted_probabilities_output', '{}', provided in 'ClassifierConfig', does not exist in the MIL program."
raise ValueError(msg.format(predicted_probabilities_output))
return probability_var
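This new helper centralizes the classifier probability-output lookup that was previously inlined in the MIL backend's `load.py` (refactored below). A minimal usage sketch, assuming `prog` and `classifier_config` exist as they do at that call site:

```python
# Usage sketch mirroring the call site in
# coremltools/converters/mil/backend/mil/load.py; `prog` and
# `classifier_config` are assumed to be in scope as in that file.
from coremltools.converters.mil.backend.backend_helper import (
    _get_probability_var_for_classifier,
)

probability_var = _get_probability_var_for_classifier(prog, classifier_config)
# probability_var is the MIL var the classify op will consume.
```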
57 changes: 3 additions & 54 deletions coremltools/converters/mil/backend/mil/helper.py
@@ -1,13 +1,12 @@
-import six
import numpy as np
-import coremltools.proto.MIL_pb2 as pm
import os
import re

from coremltools.converters.mil.mil import types
from coremltools.converters.mil.mil.types import builtin_to_proto_types
from coremltools.models.model import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME
import coremltools.proto.FeatureTypes_pb2 as ft
+import coremltools.proto.MIL_pb2 as pm

from coremltools.converters.mil.mil.types import (
type_to_builtin_type,
@@ -67,7 +66,7 @@ def create_valuetype_tensor(shape, data_type):


def set_proto_dim(proto_dim, dim):
-    if isinstance(dim, (six.integer_types, np.integer)):
+    if isinstance(dim, (int, np.integer)):
proto_dim.constant.size = dim
else:
dim_str = str(dim)
Expand Down Expand Up @@ -275,57 +274,6 @@ def types_to_proto(valuetype):
return create_valuetype_scalar(types_to_proto_primitive(valuetype))


class NameSanitizer(object):

def __init__(self, prefix=None):
# to hold all names encountered,
# to make sure that all new names are unique
self.all_names = set()
self.prefix = "_" if prefix is None else prefix

def sanitize_name(self, name):
"""
Sanitize the input string and return it back.
Input string should be of the format: [a-zA-Z_][a-zA-Z0-9_]*
If it is not, then it is sanitized in the following manner:
- first, any character that is not [a-zA-Z0-9_] is replaced with "_"
- if the starting character is not [a-zA-Z_], it is prefixed with self.prefix
- the resulting string must be unique. If it has been encountered before,
it is appended by "_0" or "_1" and so on, until it becomes unique.
:name: str
current name
:return: str
updated name. Returns the same string, if sanitization not required.
"""

# replace any character that is not [a-zA-Z0-9_] with an underscore
new_name = re.sub("[^a-zA-Z0-9_]", "_", name)

# now check if the name starts with anything but [A-Za-z_]
# if so, then add the prefix
if re.match("[^a-zA-Z_]", new_name):
new_name = self.prefix + new_name

if new_name == name:
# return if nothing has changed
self.all_names.add(name)
return name
else:
# name has changed
# make sure it is unique, then return
if new_name in self.all_names:
idx = 0
new_name += "_" + str(idx)
while new_name in self.all_names:
idx += 1
new_name += "_" + str(idx)
# now we have a unique name
self.all_names.add(new_name)
return new_name

def create_file_value(output_var, blob_writer):
if output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 4:
offset = blob_writer.write_float_data(output_var.val.flatten())
Expand Down Expand Up @@ -365,3 +313,4 @@ def cast_to_framework_io_dtype(var, is_output):
ioname2 = "outputs" if is_output else "inputs"
raise NotImplementedError(ioname + var.name + " has data type " + builtin_to_string(var.dtype) + \
". ML Program models only support fp32 and int32 " + ioname2 + ".")

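Two themes in this file's diff recur across the release: the removed `six` shims map directly onto Python 3 builtins, and the deleted `NameSanitizer` class lives on through the `name_sanitization_utils` import seen in `backend_helper.py` above. A small sketch of the `six` replacement pattern, using illustrative values:

```python
# Python-3-only replacements for the removed six calls; `dim` and the
# list below are illustrative values, not code from the repository.
import numpy as np

dim = np.int64(3)
# Before: isinstance(dim, (six.integer_types, np.integer))
assert isinstance(dim, (int, np.integer))

# Before: isinstance(x, (_six.integer_types, _six.string_types))
assert all(isinstance(x, (int, str)) for x in [1, "label"])
```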
36 changes: 4 additions & 32 deletions coremltools/converters/mil/backend/mil/load.py
@@ -7,8 +7,10 @@
import numpy as _np
import os
import tempfile
+import shutil

from coremltools.converters.mil.backend.mil.helper import *
+from coremltools.converters.mil.backend.backend_helper import _get_probability_var_for_classifier
from .passes import mil_passes
import coremltools.proto.MIL_pb2 as pm
from coremltools.converters.mil.mil import types
@@ -35,11 +37,9 @@
import coremltools.proto.FeatureTypes_pb2 as ft
from coremltools.converters.mil.input_types import ImageType, TensorType, EnumeratedShapes, RangeDim
from coremltools.models.model import _WEIGHTS_FILE_NAME
-import shutil
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import mil_list
from coremltools import _SPECIFICATION_VERSION_IOS_15
-import six as _six

def should_use_weight_file(val):
return (
@@ -198,39 +198,11 @@ def _add_classify_op(prog, classifier_config):
classes = classes.splitlines()
elif isinstance(classes_in, list): # list[int or str]
classes = classes_in
-        assert all([isinstance(x, \
-            (_six.integer_types, _six.string_types)) for x in classes]), message
+        assert all([isinstance(x, (int, str)) for x in classes]), message
else:
raise ValueError(message)

-    probability_var = None
-    if classifier_config.predicted_probabilities_output is None \
-            or classifier_config.predicted_probabilities_output == "":
-        # user has not indicated which tensor in the program to use as probabilities
-        # (i.e which tensor to link to the classifier output)
-        # in this case, attach the last non const op to the classify op
-        for op in reversed(block.operations):
-            if op.op_type != 'const' and len(op.outputs) == 1:
-                probability_var = op.outputs[0]
-                break
-        if probability_var is None:
-            raise ValueError("Unable to determine the tensor in the graph "
-                             "that corresponds to the probabilities for the classifier output")
-    else:
-        # user has indicated which tensor in the program to use as probabilities
-        # (i.e which tensor to link to the classifier output)
-        # Verify that it corresponds to a var produced in the program
-        probability_var_found = False
-        predicted_probabilities_output = NameSanitizer().sanitize_name(classifier_config.predicted_probabilities_output)
-        for op in block.operations:
-            for out in op.outputs:
-                if out.name == predicted_probabilities_output:
-                    probability_var_found = True
-                    probability_var = out
-                    break
-        if not probability_var_found:
-            msg = "'predicted_probabilities_output', '{}', provided in 'ClassifierConfig', does not exist in the MIL program."
-            raise ValueError(msg.format(predicted_probabilities_output))
+    probability_var = _get_probability_var_for_classifier(prog, classifier_config)

# add the classify op now
with block: