Skip to content

Commit

Permalink
unify UT
Browse files Browse the repository at this point in the history
  • Loading branch information
wejoncy committed Sep 19, 2024
1 parent 3944fd6 commit 4f935e7
Show file tree
Hide file tree
Showing 8 changed files with 123 additions and 151 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,9 @@ namespace coreml {

// Once all ops support FP16, we can remove it. Before that, we keep a set of ops to
// filter supported ones.
static std::set<const std::string> Float16Ops = {
static std::set<std::string> Float16Ops = {

Check warning on line 18 in onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Add #include <string> for string [build/include_what_you_use] [4] Raw Output: onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc:18: Add #include <string> for string [build/include_what_you_use] [4]
"Add", "Mul", "Sub", "Div", "Pow", "Sqrt", "Reciprocal",
"Sigmoid", "Tanh", "Relu", "LeakyRelu", "Concat", "GridSample", "GlobalAveragePool",
"Sigmoid", "Tanh", "Relu", "LeakyRelu", "Concat", "GridSample", "GlobalAveragePool", "Clip", "DepthToSpace", "Resize", "Slice",

Check warning on line 20 in onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Lines should be <= 120 characters long [whitespace/line_length] [2] Raw Output: onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc:20: Lines should be <= 120 characters long [whitespace/line_length] [2]
"GlobalMaxPool", "AveragePool", "MaxPool", "Reshape", "Split", "Transpose"};

namespace {
Expand Down Expand Up @@ -91,7 +91,8 @@ bool BaseOpBuilder::HasSupportedInputs(const Node& node, const OpBuilderInputPar
}

/* static */
bool BaseOpBuilder::IsInputDtypeSupport(const Node& node, size_t idx, const OpBuilderInputParams& input_params,
bool BaseOpBuilder::IsInputDtypeSupport(const Node& node, size_t idx,
[[maybe_unused]] const OpBuilderInputParams& input_params,
const logging::Logger& logger) {
if (idx >= node.InputDefs().size()) {
LOGS(logger, VERBOSE) << "Input index [" << idx << "] is out of range";
Expand Down
18 changes: 18 additions & 0 deletions onnxruntime/core/providers/coreml/builders/impl/builder_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,15 @@ void CreateCoreMLWeightConvertingDataToFloats(CoreML::Specification::WeightParam
[](T v) { return narrow<float>(v); });
*weight.mutable_floatvalue() = std::move(weight_floats);
}

// Converts each element of `data` to MLFloat16 (via an intermediate float,
// range-checked with narrow<>) and stores the result in the CoreML weight.
template <typename T>
void CreateCoreMLWeightConvertingDataToFloat16s(CoreML::Specification::WeightParams& weight, gsl::span<const T> data) {
  std::vector<MLFloat16> converted{};
  converted.reserve(data.size());
  for (const T& element : data) {
    converted.push_back(MLFloat16(narrow<float>(element)));
  }
  CreateCoreMLWeight(weight, converted);
}
} // namespace

void CreateCoreMLWeight(CoreML::Specification::WeightParams& weight, gsl::span<const int32_t> data) {
Expand Down Expand Up @@ -203,6 +212,13 @@ void CopyDataToTensorValue<float>(MILSpec::TensorValue& tensor_value, gsl::span<
tensor_value.mutable_floats()->mutable_values()->Add(data.begin(), data.end());
}

// Specialization for MLFloat16: the fp16 payload is copied into the raw
// `bytes` field of the TensorValue (there is no typed fp16 repeated field,
// unlike the float/int specializations above).
template <>
void CopyDataToTensorValue<MLFloat16>(MILSpec::TensorValue& tensor_value, gsl::span<const MLFloat16> data) {
  // Named casts instead of C-style casts; size_bytes() avoids the manual
  // element-count * sizeof() computation.
  const char* begin = reinterpret_cast<const char*>(data.data());
  const char* end = begin + data.size_bytes();
  tensor_value.mutable_bytes()->mutable_values()->assign(begin, end);
}

template <>
void CopyDataToTensorValue<int32_t>(MILSpec::TensorValue& tensor_value, gsl::span<const int32_t> data) {
tensor_value.mutable_ints()->mutable_values()->Add(data.begin(), data.end());
Expand Down Expand Up @@ -300,6 +316,8 @@ template MILSpec::Value CreateTensorValue<int64_t, int32_t>(gsl::span<const int6
std::optional<gsl::span<const int64_t>> shape);
template MILSpec::Value CreateTensorValue<float, float>(gsl::span<const float> data,
std::optional<gsl::span<const int64_t>> shape);
template MILSpec::Value CreateTensorValue<MLFloat16, MLFloat16>(gsl::span<const MLFloat16> data,
std::optional<gsl::span<const int64_t>> shape);
template MILSpec::Value CreateTensorValue<bool, bool>(gsl::span<const bool> data,
std::optional<gsl::span<const int64_t>> shape);
template MILSpec::Value CreateTensorValue<std::string, std::string>(gsl::span<const std::string> data,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,12 @@ Status UnaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
AddOperationInput(*op, "x", input_defs[0]->Name());
if (op_type == "Reciprocal") {
float epsilon = 1e-4; // epsilon: const T (Optional, default=1e-4)
AddOperationInput(*op, "epsilon", model_builder.AddScalarConstant(op->type(), "epsilon", epsilon));
auto dtype = node.InputDefs()[0]->TypeAsProto()->tensor_type().elem_type();
if (dtype == ONNX_NAMESPACE::TensorProto_DataType_FLOAT) {
AddOperationInput(*op, "epsilon", model_builder.AddScalarConstant(op->type(), "epsilon", epsilon));
} else if (dtype == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
AddOperationInput(*op, "epsilon", model_builder.AddScalarConstant(op->type(), "epsilon", MLFloat16(epsilon)));
}
}

AddOperationOutput(*op, *node.OutputDefs()[0]);
Expand Down
8 changes: 8 additions & 0 deletions onnxruntime/core/providers/coreml/builders/model_builder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -639,6 +639,14 @@ std::string_view ModelBuilder::AddConstantImpl(std::string_view op_type, std::st
return AddTensorValueAsConstantOperation(op_type, value_type, std::move(input_value));
}

// MLFloat16 specialization: wraps the fp16 span in a TensorValue and registers
// it as a constant operation, returning the name callers use to reference it.
template <>
std::string_view ModelBuilder::AddConstantImpl(std::string_view op_type, std::string_view value_type,
                                               gsl::span<const MLFloat16> value,
                                               std::optional<gsl::span<const int64_t>> shape) {
  return AddTensorValueAsConstantOperation(op_type, value_type,
                                           CreateTensorValue<MLFloat16>(value, shape));
}

template <>
std::string_view ModelBuilder::AddConstantImpl(std::string_view op_type, std::string_view value_type,
gsl::span<const int64_t> value,
Expand Down
3 changes: 2 additions & 1 deletion onnxruntime/core/providers/coreml/builders/model_builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -107,11 +107,12 @@ class ModelBuilder {
std::string_view AddConstant(std::string_view op_type, std::string_view value_type, gsl::span<const T> value,
std::optional<gsl::span<const int64_t>> shape = std::nullopt) {
static_assert(std::is_same_v<T, float> ||
std::is_same_v<T, MLFloat16> ||
std::is_same_v<T, int64_t> ||
std::is_same_v<T, std::string> ||
std::is_same_v<T, bool>,
// add specialization in AddConstantImpl for new types if needed
"AddConstant currently supports float, int64_t, std::string and bool.");
"AddConstant currently supports float/MLFloat16, int64_t, std::string and bool.");
return AddConstantImpl(op_type, value_type, value, shape);
}

Expand Down
92 changes: 0 additions & 92 deletions onnxruntime/test/providers/coreml/coreml_basic_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -257,98 +257,6 @@ TEST(CoreMLExecutionProviderTest, TestNameSanitization) {
// TensorRT does not support Clip opset 11 yet.
test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
}

TEST(CoreMLExecutionProviderTest, TestBinaryFp16) {
  // Runs one elementwise binary op with fp16 inputs/outputs through the CoreML
  // EP (MLProgram path). The expected output is computed on the CPU from the
  // fp16-rounded operands so the reference sees the same values as the EP.
  auto run_binary_op = [](const std::string& op) {
    OpTester test(op, 11);

    constexpr int kNumElements = 9;
    std::vector<int64_t> dims{3, 3};
    std::vector<float> input1 = {-1.0f, 0.0f, 1.0f,
                                 -6.0f, 0.0f, 6.0f,
                                 -5.4f, 2.0f, 6.0f};
    std::vector<float> input2 = {-1.0f, 0.0f, 1.0f,
                                 -5.0f, 0.0f, 5.0f,
                                 -5.0f, 2.0f, 5.0f};
    // Round both inputs to fp16 before computing the reference output.
    std::vector<MLFloat16> input1_fp16(kNumElements);
    std::vector<MLFloat16> input2_fp16(kNumElements);
    ConvertFloatToMLFloat16(input1.data(), input1_fp16.data(), kNumElements);
    ConvertFloatToMLFloat16(input2.data(), input2_fp16.data(), kNumElements);

    std::vector<float> expected(kNumElements);
    for (int i = 0; i < kNumElements; i++) {
      if (op == "Add") {
        expected[i] = input1_fp16[i] + input2_fp16[i];
      } else if (op == "Sub") {
        expected[i] = input1_fp16[i] - input2_fp16[i];
      } else if (op == "Mul") {
        expected[i] = input1_fp16[i] * input2_fp16[i];
      } else if (op == "Div") {
        expected[i] = input1_fp16[i] / input2_fp16[i];
      }
    }
    std::vector<MLFloat16> expected_fp16(kNumElements);
    ConvertFloatToMLFloat16(expected.data(), expected_fp16.data(), kNumElements);

    test.AddInput<MLFloat16>("0", dims, input1_fp16);
    test.AddInput<MLFloat16>("1.min", dims, input2_fp16);
    test.AddOutput<MLFloat16>("3", dims, expected_fp16);

    // Run only against the CoreML EP (MLProgram) so the fp16 path under test
    // is actually exercised.
    std::vector<std::unique_ptr<IExecutionProvider>> coreml_ep;
    coreml_ep.emplace_back(MakeCoreMLExecutionProvider(COREML_FLAG_CREATE_MLPROGRAM));
    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &coreml_ep);
  };
  run_binary_op("Add");
  run_binary_op("Sub");
  run_binary_op("Div");
  run_binary_op("Mul");
}

TEST(CoreMLExecutionProviderTest, TestUnaryFp16) {
  // Runs one elementwise unary op with fp16 input/output through the CoreML
  // EP (MLProgram path). The expected output is computed on the CPU from the
  // fp16-rounded input so the reference sees the same values as the EP.
  // NOTE: the lambda was previously (and misleadingly) named test_binary_op.
  auto test_unary_op = [](std::string op) {
    OpTester test(op, 11);

    std::vector<int64_t> dims{3, 3};
    std::vector<float> input1 = {-1.0f, 0.0f, 1.0f,
                                 -6.0f, 0.2f, 6.0f,
                                 -5.4f, 2.0f, 6.0f};
    std::vector<MLFloat16> input1_fp16(9);
    ConvertFloatToMLFloat16(input1.data(), input1_fp16.data(), 9);

    std::vector<float> output(9);
    if (op == "Sqrt") {
      for (int i = 0; i < 9; i++) {
        output[i] = sqrt(input1_fp16[i]);
      }
    } else if (op == "Reciprocal") {
      // Include the same 1e-4 epsilon the CoreML builder adds to Reciprocal's
      // denominator (see UnaryOpBuilder::AddToModelBuilderImpl).
      for (int i = 0; i < 9; i++) {
        output[i] = 1.0f / (1e-4 + input1_fp16[i]);
      }
    } else if (op == "Relu") {
      for (int i = 0; i < 9; i++) {
        output[i] = fmax(0.0f, input1_fp16[i]);
      }
    }
    std::vector<MLFloat16> output_fp16(9);
    ConvertFloatToMLFloat16(output.data(), output_fp16.data(), 9);

    test.AddInput<MLFloat16>("0", dims, input1_fp16);
    test.AddOutput<MLFloat16>("3", dims, output_fp16);

    // Run only against the CoreML EP (MLProgram) so the fp16 path under test
    // is actually exercised.
    std::vector<std::unique_ptr<IExecutionProvider>> coreml_ep;
    coreml_ep.emplace_back(MakeCoreMLExecutionProvider(COREML_FLAG_CREATE_MLPROGRAM));
    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &coreml_ep);
  };
  test_unary_op("Sqrt");
  test_unary_op("Reciprocal");
  test_unary_op("Relu");
}
#endif

} // namespace test
Expand Down
Loading

0 comments on commit 4f935e7

Please sign in to comment.