/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/delegate/coreml/op/coreml_op.h"
#include <algorithm>
#include "nnacl/base/cast_base.h"
namespace mindspore::lite {
Init()20 int CoreMLOp::Init() {
21   auto ret = InitParams();
22   if (ret != RET_OK) {
23     MS_LOG(ERROR) << "CoreML op " << name_ << "'s parameter initialization failed.";
24     return RET_ERROR;
25   }
26   op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
27   if (op_ == nullptr) {
28     MS_LOG(ERROR) << "New CoreML op " << name_ << " failed.";
29     return RET_ERROR;
30   }
31   op_->set_name("CoreML_" + name_);
32   return RET_OK;
33 }
34 
SetActivation(schema::ActivationType act_type)35 int CoreMLOp::SetActivation(schema::ActivationType act_type) {
36   act_op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
37   if (act_op_ == nullptr) {
38     MS_LOG(ERROR) << "New CoreML op " << name_ << "_activation failed.";
39     return RET_ERROR;
40   }
41   act_op_->set_name("CoreML_" + name_ + "_activation");
42   switch (act_type) {
43     case schema::ActivationType_RELU:
44       act_op_->mutable_activation()->mutable_relu();
45       break;
46     case schema::ActivationType_RELU6: {
47       auto clip_param = act_op_->mutable_clip();
48       clip_param->set_minval(0);
49       clip_param->set_maxval(kValueThreshold6);
50       break;
51     }
52     case schema::ActivationType_TANH:
53       act_op_->mutable_activation()->mutable_tanh();
54       break;
55     case schema::ActivationType_SIGMOID:
56       act_op_->mutable_activation()->mutable_sigmoid();
57       break;
58     default:
59       MS_LOG(ERROR) << "Unsupported activation type.";
60       return RET_ERROR;
61   }
62   return RET_OK;
63 }
64 
SetPadding(std::vector<int> pad_list)65 int CoreMLOp::SetPadding(std::vector<int> pad_list) {
66   pad_op_ = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
67   if (pad_op_ == nullptr) {
68     MS_LOG(ERROR) << "New CoreML op " << name_ << "_pad failed.";
69     return RET_ERROR;
70   }
71   pad_op_->set_name("CoreML_" + name_ + "_pad");
72   auto pad_param = pad_op_->mutable_padding();
73   pad_param->mutable_constant();
74   auto height_border = pad_param->mutable_paddingamounts()->add_borderamounts();
75   auto width_border = pad_param->mutable_paddingamounts()->add_borderamounts();
76   height_border->set_startedgesize(pad_list[PAD_UP]);
77   height_border->set_endedgesize(pad_list[PAD_DOWN]);
78   width_border->set_startedgesize(pad_list[PAD_LEFT]);
79   width_border->set_endedgesize(pad_list[PAD_RIGHT]);
80   return RET_OK;
81 }
82 
SetConstInput(const mindspore::MSTensor & in_tensor)83 int CoreMLOp::SetConstInput(const mindspore::MSTensor &in_tensor) {
84   MS_CHECK_TRUE_MSG(in_tensor.IsConst(), RET_ERROR, "Only constant tensor can be set as CoreML Const op.");
85   std::string const_op_name = "CoreML_" + in_tensor.Name() + "_const";
86   auto const_op = std::make_unique<CoreML::Specification::NeuralNetworkLayer>();
87   if (const_op == nullptr) {
88     MS_LOG(ERROR) << "New CoreML const op " << const_op_name << " for op " << name_ << " failed.";
89     return RET_ERROR;
90   }
91   const_op->set_name(const_op_name);
92   auto const_param = const_op->mutable_loadconstantnd();
93   for (auto i : in_tensor.Shape()) {
94     const_param->add_shape(static_cast<uint64_t>(i));
95   }
96   if (in_tensor.Shape().empty()) {
97     const_param->add_shape(1);
98   }
99   // set const data
100   auto org_data = in_tensor.Data().get();
101   auto *ml_data_container = const_param->mutable_data()->mutable_floatvalue();
102   ml_data_container->Resize(in_tensor.ElementNum(), 0);
103   auto *ml_data = reinterpret_cast<float *>(ml_data_container->mutable_data());
104   if (in_tensor.DataType() == DataType::kNumberTypeInt32) {
105     Int32ToFloat32(reinterpret_cast<const int *>(org_data), ml_data, in_tensor.ElementNum());
106   } else if (in_tensor.DataType() == DataType::kNumberTypeFloat32) {
107     memcpy(ml_data, org_data, in_tensor.DataSize());
108   } else {
109     MS_LOG(ERROR) << "Unsupported const input data type: " << static_cast<int>(in_tensor.DataType());
110     return RET_ERROR;
111   }
112   const_ops_[in_tensor.Name()] = std::move(const_op);
113   return RET_OK;
114 }
115 
SetMLOpInOut()116 void CoreMLOp::SetMLOpInOut() {
117   MS_ASSERT(op_ != nullptr);
118   auto input_name = in_tensors_.at(0).Name();
119   if (pad_op_ != nullptr) {
120     std::string pad_name = op_->name() + "_pad_0";
121     pad_op_->add_input(input_name);
122     pad_op_->add_output(pad_name);
123     op_->add_input(pad_name);
124   } else {
125     op_->add_input(input_name);
126   }
127   auto output_name = out_tensors_.at(0).Name();
128   if (act_op_ != nullptr) {
129     std::string act_name = op_->name() + "_act_0";
130     op_->add_output(act_name);
131     act_op_->add_input(act_name);
132     act_op_->add_output(output_name);
133   } else {
134     op_->add_output(output_name);
135   }
136 }
137 
GetLayers()138 std::vector<CoreML::Specification::NeuralNetworkLayer *> CoreMLOp::GetLayers() {
139   MS_ASSERT(op_ != nullptr);
140   std::vector<CoreML::Specification::NeuralNetworkLayer *> ret_ops;
141   if (pad_op_ != nullptr) {
142     ret_ops.push_back(pad_op_.release());
143   }
144   if (!const_ops_.empty()) {
145     for (auto it = const_ops_.begin(); it != const_ops_.end(); it++) {
146       ret_ops.push_back(it->second.release());
147     }
148   }
149   ret_ops.push_back(op_.release());
150   if (act_op_ != nullptr) {
151     ret_ops.push_back(act_op_.release());
152   }
153   return ret_ops;
154 }
}  // namespace mindspore::lite