/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_
#include <algorithm>  // std::find, used in GetCoreMLOp below
#include <utility>
#include <vector>
#include <string>
#include <set>
#include <memory>
#include <unordered_map>
#include "proto/Model.pb.h"
#include "proto/NeuralNetwork.pb.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "include/api/types.h"
#include "include/api/data_type.h"
#include "src/common/log_adapter.h"
#include "src/common/log_util.h"
#include "nnacl/op_base.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NOT_SUPPORT;
using mindspore::lite::RET_OK;
namespace mindspore::lite {
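// Permutations used to transpose tensors between MindSpore Lite's NHWC layout and CoreML's NCHW layout.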
inline const std::vector<int> NHWC2NCHW_PERM = {0, 3, 1, 2};
inline const std::vector<int> NCHW2NHWC_PERM = {0, 2, 3, 1};
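// Axis indices of convolution-style weights: CoreML stores them as (Cout, Cin, H, W),
// while MindSpore Lite stores them as (Cout, H, W, Cin).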
enum COREML_WEIGHT_SHAPE { ML_WT_COUT = 0, ML_WT_CIN = 1, ML_WT_H = 2, ML_WT_W = 3 };
enum MSLITE_WEIGHT_SHAPE { MS_WT_COUT = 0, MS_WT_H = 1, MS_WT_W = 2, MS_WT_CIN = 3 };
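// Index order of a four-element pad list: {up, down, left, right}.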
enum PAD { PAD_UP = 0, PAD_DOWN = 1, PAD_LEFT = 2, PAD_RIGHT = 3 };
constexpr int REPEAT_TIMES2 = 2;
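// Base class of CoreML delegate ops. Each instance wraps one MindSpore Lite primitive and builds the
// CoreML NeuralNetworkLayer proto(s) that implement it.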
class CoreMLOp {
 public:
  CoreMLOp(const schema::Primitive *primitive, std::vector<mindspore::MSTensor> in_tensors,
           std::vector<mindspore::MSTensor> out_tensors, std::string name)
      : op_primitive_(primitive),
        in_tensors_(std::move(in_tensors)),
        out_tensors_(std::move(out_tensors)),
        name_(std::move(name)) {
    if (primitive != nullptr) {
      type_ = primitive->value_type();
    }
  }

  // The op will be managed by the CoreML model; there is no need to destruct it manually.
  virtual ~CoreMLOp() = default;

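  // Returns RET_OK if this op can be mapped to CoreML; GetCoreMLOp discards ops that fail this check.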
  virtual int IsSupport() { return RET_OK; }

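  // One-shot initialization, invoked by GetCoreMLOp once the support check has passed.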
  virtual int Init();

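  // Optional hook for parsing attributes from the primitive; the default is a no-op.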
  virtual int InitParams() { return RET_OK; }

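  // Optional hook, e.g. for adjusting axis attributes after the NHWC -> NCHW layout change; default is a no-op.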
  virtual int HandleAxis() { return RET_OK; }

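  // Builds the CoreML layer(s) for this op; the default is a no-op.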
  virtual int BuildLayer() { return RET_OK; }

  // Override this method if the op has tensors that do not need to be added to the graph, e.g., const tensors.
  virtual void SetMLOpInOut();

  // Transfers ownership of the op's layers to the CoreML model. One op may build multiple layers, hence the
  // vector return type.
  virtual std::vector<CoreML::Specification::NeuralNetworkLayer *> GetLayers();

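  // Builds a separate activation layer (act_op_) for a fused activation of the given type.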
  virtual int SetActivation(schema::ActivationType act_type);

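  // Builds an explicit padding layer (pad_op_) from pad_list, indexed by the PAD enum above.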
  virtual int SetPadding(std::vector<int> pad_list);

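  // Wraps a constant input tensor as a load-constant layer, tracked in const_ops_.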
  virtual int SetConstInput(const mindspore::MSTensor &in_tensor);

  void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { this->in_tensors_ = in_tensors; }

  void set_input(const mindspore::MSTensor &in_tensor, int index) {
    MS_ASSERT(static_cast<size_t>(index) < in_tensors_.size());
    this->in_tensors_[index] = in_tensor;
  }

  void set_outputs(const std::vector<mindspore::MSTensor> &out_tensors) { this->out_tensors_ = out_tensors; }

  const std::vector<mindspore::MSTensor> &inputs() { return this->in_tensors_; }

  const std::vector<mindspore::MSTensor> &outputs() { return this->out_tensors_; }

  void set_in_ops(const std::vector<CoreMLOp *> &in_ops) { this->in_ops_ = in_ops; }

  void set_out_ops(const std::vector<CoreMLOp *> &out_ops) { this->out_ops_ = out_ops; }

  const std::vector<CoreMLOp *> &in_ops() const { return this->in_ops_; }

  const std::vector<CoreMLOp *> &out_ops() const { return this->out_ops_; }

  schema::PrimitiveType type() const { return type_; }

  std::string name() const { return this->name_; }

  void set_name(const std::string &name) { this->name_ = name; }

 protected:
  const schema::Primitive *op_primitive_ = nullptr;
  std::vector<mindspore::MSTensor> in_tensors_;
  std::vector<mindspore::MSTensor> out_tensors_;
  std::vector<CoreMLOp *> in_ops_;
  std::vector<CoreMLOp *> out_ops_;
  schema::PrimitiveType type_ = schema::PrimitiveType_NONE;
  std::string name_;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> op_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> pad_op_ = nullptr;
  std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> act_op_ = nullptr;
  std::unordered_map<std::string, std::unique_ptr<CoreML::Specification::NeuralNetworkLayer>> const_ops_ = {};
};

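// Factory signature for creating a concrete CoreML op; GetCoreMLOp<T> below matches it.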
typedef CoreMLOp *(*CoreMLGetOp)(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                                 const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name);

template <class T>
CoreMLOp *GetCoreMLOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
                      const std::vector<mindspore::MSTensor> &out_tensors, const std::string &name) {
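  // CoreML compiles a static graph, so reject ops whose output shape contains a dynamic (-1) dimension.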
  auto shape = out_tensors.front().Shape();
  if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
    MS_LOG(ERROR) << "CoreML does not support shapes that are inferred at runtime.";
    return nullptr;
  }
  auto *op = new (std::nothrow) T(primitive, in_tensors, out_tensors, name);
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is nullptr.";
    return nullptr;
  }
  auto ret = op->IsSupport();
  if (ret != RET_OK) {
    MS_LOG(WARNING) << "CoreML op is not supported.";
    delete op;
    return nullptr;
  }
  ret = op->Init();
  if (ret != RET_OK) {
    MS_LOG(WARNING) << "CoreML op init failed.";
    delete op;
    return nullptr;
  }
  return op;
}
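
// Usage sketch (CoreMLConvolutionOp stands in for any concrete subclass; the name is illustrative):
//   CoreMLGetOp getter = GetCoreMLOp<CoreMLConvolutionOp>;
//   auto *op = getter(primitive, in_tensors, out_tensors, "conv_0");
//   // Returns nullptr if the output shape is dynamic, the op is unsupported, or Init() failed.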
}  // namespace mindspore::lite
#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_OP_COREML_OP_