6.0b1 Release (#1508)
TobyRoseman committed Jun 7, 2022
1 parent 6adf95b commit 973eae6
Showing 262 changed files with 8,671 additions and 31,435 deletions.
11 changes: 0 additions & 11 deletions .gitlab-ci.yml
@@ -38,12 +38,6 @@ check_python_flake8:
paths:
- build/dist/

build_wheel_linux_py35:
<<: *build_linux
image: registry.gitlab.com/zach_nation/coremltools/build-image-ubuntu-18.04:1.0.0
variables:
PYTHON: "3.5"

build_wheel_linux_py36:
<<: *build_linux
image: registry.gitlab.com/zach_nation/coremltools/build-image-ubuntu-18.04:1.0.0
@@ -85,11 +79,6 @@ build_wheel_linux_py39:
paths:
- build/dist/

build_wheel_macos_py35:
<<: *build_macos
variables:
PYTHON: "3.5"

build_wheel_macos_py36:
<<: *build_macos
variables:
3 changes: 0 additions & 3 deletions .pylintrc
@@ -316,8 +316,6 @@ ignore-on-opaque-inference=yes
ignored-classes=
optparse.Values,
sympy.core.mul.Mul,
test.model_zoo.onnx.test_latham_lstm.TestLathamLSTM,
test.model_zoo.onnx.test_transformer.TestMT,
thread._local,
_thread._local

@@ -328,7 +326,6 @@ ignored-classes=
ignored-modules=
LazyLoader,
matplotlib.cm,
onnx.onnx_*_ml_pb2,
tensorflow,
tensorflow.core.framework.*_pb2,
tensorflow.tools.api.generator.api.contrib,
4 changes: 2 additions & 2 deletions BUILDING.md
@@ -19,7 +19,7 @@ Follow these steps:
1. Fork and clone the GitHub [coremltools repository](https://github.com/apple/coremltools).

2. Run the [build.sh](scripts/build.sh) script to build `coremltools`.
* By default this script uses Python 3.7, but you can include `--python=3.5` (or `3.6`, `3.8`, and so on) as a argument to change the Python version.
* By default this script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`) as an argument to change the Python version.
* The script creates a new `build` folder with the coremltools distribution, and a `dist` folder with Python wheel files.

3. Run the [test.sh](scripts/test.sh) script to test the build.
@@ -45,7 +45,7 @@ The following build targets help you configure the development environment. If y
* `test_slow` | Run all non-fast tests.
* `wheel` | Build wheels in release mode.

The script uses Python 3.7, but you can include `--python=3.5` (or `3.6`, `3.8`, and so on) as a argument to change the Python version.
The script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`) as an argument to change the Python version.

## Resources

1 change: 0 additions & 1 deletion README.md
@@ -10,7 +10,6 @@ Use *coremltools* to convert machine learning models from third-party libraries
* [TensorFlow 1.x](https://www.tensorflow.org/versions/r1.15/api_docs/python/tf)
* [TensorFlow 2.x](https://www.tensorflow.org/api_docs)
* [PyTorch](https://pytorch.org/)
* [TensorFlow's Keras APIs](https://keras.io/)
* Non-neural network frameworks:
* [scikit-learn](https://scikit-learn.org/stable/)
* [XGBoost](https://xgboost.readthedocs.io/en/latest/)
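The removed bullet covered the old standalone Keras converter; models built with `tf.keras` still go through the TensorFlow 2.x converter listed above. A minimal sketch of that path (the toy layer sizes are placeholders):

```python
import tensorflow as tf
import coremltools as ct

# tf.keras models still convert via the TensorFlow 2 converter.
keras_model = tf.keras.Sequential(
    [tf.keras.layers.Dense(4, activation="relu", input_shape=(8,))]
)
mlmodel = ct.convert(keras_model)
```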
2 changes: 1 addition & 1 deletion coremlpython/CoreMLPython.h
@@ -26,7 +26,7 @@ namespace CoreML {
Model& operator=(const Model&) = delete;
~Model();
explicit Model(const std::string& urlStr, const std::string& computeUnits);
py::dict predict(const py::dict& input, bool useCPUOnly);
py::dict predict(const py::dict& input);
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
static int32_t maximumSupportedSpecificationVersion();
std::string toString() const;
5 changes: 1 addition & 4 deletions coremlpython/CoreMLPython.mm
@@ -78,15 +78,12 @@
}
}

py::dict Model::predict(const py::dict& input, bool useCPUOnly) {
py::dict Model::predict(const py::dict& input) {
@autoreleasepool {
NSError *error = nil;
MLDictionaryFeatureProvider *inFeatures = Utils::dictToFeatures(input, &error);
Utils::handleError(error);
MLPredictionOptions *options = [[MLPredictionOptions alloc] init];
options.usesCPUOnly = useCPUOnly;
id<MLFeatureProvider> outFeatures = [m_model predictionFromFeatures:static_cast<MLDictionaryFeatureProvider * _Nonnull>(inFeatures)
options:options
error:&error];
Utils::handleError(error);
return Utils::featuresToDict(outFeatures);
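With `useCPUOnly` gone from `predict`, restricting execution to the CPU is done when the model is loaded, via the `compute_units` argument (backed by the `ComputeUnit` enum shown later in this diff). A minimal sketch; the model path and input name are placeholders:

```python
import numpy as np
import coremltools as ct

# Compute-unit selection now happens at load time rather than per predict() call.
model = ct.models.MLModel(
    "MyModel.mlpackage",                    # placeholder path
    compute_units=ct.ComputeUnit.CPU_ONLY,
)
out = model.predict({"input": np.zeros((1, 10), dtype=np.float32)})  # placeholder input name/shape
```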
13 changes: 12 additions & 1 deletion coremlpython/CoreMLPythonUtils.mm
@@ -433,6 +433,7 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
case MLMultiArrayDataTypeInt32:
return sizeof(int32_t);
case MLMultiArrayDataTypeFloat32:
case MLMultiArrayDataTypeFloat16:
return sizeof(float);
case MLMultiArrayDataTypeDouble:
return sizeof(double);
@@ -449,7 +450,7 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
MLMultiArrayDataType type = value.dataType;
std::vector<size_t> shape = Utils::convertNSArrayToCpp(value.shape);
std::vector<size_t> strides = Utils::convertNSArrayToCpp(value.strides);

// convert strides to numpy (bytes) instead of mlkit (elements)
for (size_t& stride : strides) {
stride *= sizeOfArrayElement(type);
@@ -460,6 +461,16 @@ static size_t sizeOfArrayElement(MLMultiArrayDataType type) {
return py::array(shape, strides, static_cast<const int32_t*>(value.dataPointer));
case MLMultiArrayDataTypeFloat32:
return py::array(shape, strides, static_cast<const float*>(value.dataPointer));
case MLMultiArrayDataTypeFloat16:
{
// create a float32 array, cast float16 values and copy into it
// TODO: rdar://92239209 : return np.float16 instead of np.float32 when multiarray type is Float16
std::vector<float> value_fp32(value.count, 0.0);
for (size_t i=0; i<value.count; i++) {
value_fp32[i] = [value[i] floatValue];
}
return py::array(shape, strides, value_fp32.data());
}
case MLMultiArrayDataTypeDouble:
return py::array(shape, strides, static_cast<const double*>(value.dataPointer));
default:
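As the new branch above indicates, Float16 multiarray outputs are currently copied into a float32 NumPy array on the way back to Python (see the rdar TODO). A small illustration of the observable behavior, assuming a hypothetical ML Program whose output `y` is declared FLOAT16:

```python
import numpy as np
import coremltools as ct

model = ct.models.MLModel("fp16_output_model.mlpackage")        # hypothetical model
out = model.predict({"x": np.zeros((1, 3), dtype=np.float32)})  # hypothetical input name/shape

# Until the TODO above is resolved, the FLOAT16 output arrives as float32.
print(out["y"].dtype)  # -> float32
```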
20 changes: 18 additions & 2 deletions coremltools/__init__.py
@@ -14,8 +14,8 @@
Core MLTools in a python package for creating, examining, and testing models in the .mlmodel
format. In particular, it can be used to:
* Convert existing models to .mlmodel format from popular machine learning tools including:
Keras, scikit-learn, libsvm, and XGBoost.
* Convert existing models to .mlpackage or .mlmodel formats from popular machine learning tools including:
PyTorch, TensorFlow, scikit-learn, XGBoost and libsvm.
* Express models in .mlmodel format through a simple API.
* Make predictions with an .mlmodel (on select platforms for testing purposes).
@@ -60,6 +60,9 @@
# New versions for iOS 15.0
_SPECIFICATION_VERSION_IOS_15 = 6

# New versions for iOS 16.0
_SPECIFICATION_VERSION_IOS_16 = 7

class ComputeUnit(_Enum):
'''
The set of processing-unit configurations the model can use to make predictions.
@@ -68,16 +71,29 @@ class ComputeUnit(_Enum):
CPU_AND_GPU = 2 # Allows the model to use both the CPU and GPU, but not the neural engine
CPU_ONLY = 3 # Limit the model to only use the CPU

# A dictionary that maps the CoreML model specification version to the MLProgram/MIL opset string
_OPSET = {
_SPECIFICATION_VERSION_IOS_15: "CoreML5",
_SPECIFICATION_VERSION_IOS_16: "CoreML6",
}

# Default specification version for each backend
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK = _SPECIFICATION_VERSION_IOS_13
_LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM = _SPECIFICATION_VERSION_IOS_15


# expose sub packages as directories
from . import converters
from . import proto
from . import models
from .models import utils
from .models.ml_program import compression_utils

# expose unified converter in coremltools package level
from .converters import convert
from .converters import (
ClassifierConfig,
ColorLayout as colorlayout,
TensorType,
ImageType,
RangeDim,
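The new `_SPECIFICATION_VERSION_IOS_16` constant and its `CoreML6` entry in `_OPSET` are what get selected when a conversion targets iOS 16. A hedged sketch of that flow; the toy model and tensor name are placeholders:

```python
import torch
import coremltools as ct

class TinyNet(torch.nn.Module):          # placeholder model for illustration
    def forward(self, x):
        return torch.relu(x)

example = torch.zeros(1, 8)
traced = torch.jit.trace(TinyNet().eval(), example)

# Targeting iOS 16 selects specification version 7, i.e. the "CoreML6" opset
# in the _OPSET table above.
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=example.shape)],
    convert_to="mlprogram",
    minimum_deployment_target=ct.target.iOS16,
)
```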
84 changes: 2 additions & 82 deletions coremltools/_deps/__init__.py
@@ -100,7 +100,7 @@ def __get_sklearn_version(version):
_TF_1_MIN_VERSION = "1.12.0"
_TF_1_MAX_VERSION = "1.15.0"
_TF_2_MIN_VERSION = "2.1.0"
_TF_2_MAX_VERSION = "2.6.2"
_TF_2_MAX_VERSION = "2.8.0"

try:
import tensorflow
@@ -143,82 +143,9 @@ def __get_sklearn_version(version):
MSG_TF1_NOT_FOUND = "TensorFlow 1.x not found."
MSG_TF2_NOT_FOUND = "TensorFlow 2.x not found."

# ---------------------------------------------------------------------------------------
_HAS_KERAS_TF = True
_HAS_KERAS2_TF = True
_KERAS_MIN_VERSION = "1.2.2"
_KERAS_MAX_VERSION = "2.6.0"
MSG_KERAS1_NOT_FOUND = "Keras 1 not found."
MSG_KERAS2_NOT_FOUND = "Keras 2 not found."

try:
# Prevent keras from printing things that are not errors to standard error.
import sys

import io

temp = io.StringIO()
stderr = sys.stderr
try:
sys.stderr = temp
import keras
except:
# Print out any actual error message and re-raise.
sys.stderr = stderr
sys.stderr.write(temp.getvalue())
raise
finally:
sys.stderr = stderr
import tensorflow

k_ver = _get_version(keras.__version__)

# keras 1 version too old
if k_ver < _StrictVersion(_KERAS_MIN_VERSION):
_HAS_KERAS_TF = False
_HAS_KERAS2_TF = False
_logging.warning(
(
"Keras version %s is not supported. Minimum required version: %s ."
"Keras conversion will be disabled."
)
% (keras.__version__, _KERAS_MIN_VERSION)
)
# keras version too new
if k_ver > _StrictVersion(_KERAS_MAX_VERSION):
_HAS_KERAS_TF = False
_logging.warning(
(
"Keras version %s has not been tested with coremltools. You may run into unexpected errors. "
"Keras %s is the most recent version that has been tested."
)
% (keras.__version__, _KERAS_MAX_VERSION)
)
# Using Keras 2 rather than 1
if k_ver >= _StrictVersion("2.0.0"):
_HAS_KERAS_TF = False
_HAS_KERAS2_TF = True
# Using Keras 1 rather than 2
else:
_HAS_KERAS_TF = True
_HAS_KERAS2_TF = False
if keras.backend.backend() != "tensorflow":
_HAS_KERAS_TF = False
_HAS_KERAS2_TF = False
_logging.warning(
(
"Unsupported Keras backend (only TensorFlow is currently supported). "
"Keras conversion will be disabled."
)
)

except:
_HAS_KERAS_TF = False
_HAS_KERAS2_TF = False

# ---------------------------------------------------------------------------------------
_HAS_TORCH = True
_TORCH_MAX_VERSION = "1.10.2"
_TORCH_MAX_VERSION = "1.11.0"
try:
import torch
_warn_if_above_max_supported_version("Torch", torch.__version__, _TORCH_MAX_VERSION)
@@ -228,13 +155,6 @@ def __get_sklearn_version(version):


# ---------------------------------------------------------------------------------------
_HAS_ONNX = True
try:
import onnx
except:
_HAS_ONNX = False
MSG_ONNX_NOT_FOUND = "ONNX not found."

try:
import scipy
except:
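The blocks above all follow the same optional-dependency pattern: try the import, record a `_HAS_*` flag, and warn when the installed version exceeds the newest tested release. A simplified, illustrative sketch of that pattern (not the actual coremltools helper):

```python
import logging

_TORCH_MAX_VERSION = "1.11.0"
_HAS_TORCH = True

def _version_tuple(v):
    # "1.11.0" or "1.11.0+cu113" -> (1, 11, 0); simplified parsing for illustration only
    return tuple(int(p) for p in v.split("+")[0].split(".") if p.isdigit())

try:
    import torch
    if _version_tuple(torch.__version__) > _version_tuple(_TORCH_MAX_VERSION):
        logging.warning(
            "Torch version %s has not been tested with coremltools. "
            "You may run into unexpected errors.", torch.__version__
        )
except ImportError:
    _HAS_TORCH = False
```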
3 changes: 1 addition & 2 deletions coremltools/converters/__init__.py
@@ -7,11 +7,10 @@
from . import libsvm
from . import sklearn
from . import xgboost
from . import keras
from . import onnx
from ._converters_entry import convert
from .mil import (
ClassifierConfig,
ColorLayout,
TensorType,
ImageType,
RangeDim,
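`ColorLayout` is now re-exported from `coremltools.converters` (and aliased as `coremltools.colorlayout` in `__init__.py` above), so image inputs can declare their pixel layout directly. A short sketch; the input name and shape are placeholders:

```python
import coremltools as ct

# Declare an RGB image input; BGR and grayscale layouts are chosen the same way.
image_input = ct.ImageType(
    name="image",                  # placeholder input name
    shape=(1, 3, 224, 224),        # placeholder NCHW shape
    color_layout=ct.colorlayout.RGB,
)
# The ImageType is then passed to ct.convert(..., inputs=[image_input]).
```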