/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "depthwise_conv2d_native_builder.h"

#include "frameworks/native/transform.h"
#include "frameworks/native/validation.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
static const int INPUT_NUM = 3;
static const int OUTPUT_NUM = 1;
static const int PAD_MODE_SIZE = 1;
static const int PAD_LIST_SIZE = 4;
static const int IN_CHANNEL_IN_INPUT = 3;
static const int OUT_CHANNEL_IN_WEIGHT = 0;
static const int HEIGHT_IN_WEIGHT = 1;
static const int WIDTH_IN_WEIGHT = 2;
static const int INPUT_RANK = 4;
static const int INPUT_X = 0;
static const int INPUT_WEIGHT = 1;
static const int SCALE_LENGTH = 1;
static const std::string OP_NAME = "DepthwiseConv2DNative";

DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {}

DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {}

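// Determines whether the padding parameter tensor holds a pad mode (a single element)
// or an explicit pad list (four elements); any other element count is rejected.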
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr<NNTensor> tensor,
    bool& isPadMode)
{
    if (tensor->GetElementCount() == PAD_MODE_SIZE) {
        isPadMode = true;
    } else if (tensor->GetElementCount() != PAD_LIST_SIZE) {
        LOGE("[DepthwiseConv2DNative] The element size of padMode should be 1 or "
             "the element size of padList should be 4.");
        return OH_NN_INVALID_PARAMETER;
    }

    return OH_NN_SUCCESS;
}

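// Parses the activation-type parameter: it must be a scalar OH_NN_INT8 tensor holding a
// valid OH_NN_FuseType, which is then converted to the MindSpore Lite activation type.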
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    // Set ActivationType
    if (tensor->GetElementCount() != SCALE_LENGTH) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, the activation should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT8) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, the activationType should have type OH_NN_INT8.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetActivation GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    int8_t* pFuseData = static_cast<int8_t*>(buffer);
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, activation input is invalid.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_activationType = NNToMS::TransfromFusionType(static_cast<OH_NN_FuseType>(*pFuseData));

    return OH_NN_SUCCESS;
}

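// Derives the kernel size (height, width) and the output channel count from the
// 4-dimensional weight tensor.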
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector<uint32_t>& inputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    // Set kernelSize and outChannel
    auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions();
    if (weightShape.size() != INPUT_RANK) {
        LOGE("[DepthwiseConv2DNative] SetKernelSize failed, invalid rank of weight shape; it should be 4-dimensional.");
        return OH_NN_INVALID_PARAMETER;
    }

    m_outChannel = weightShape[OUT_CHANNEL_IN_WEIGHT];
    m_kernelSize.clear();
    m_kernelSize.emplace_back(weightShape[HEIGHT_IN_WEIGHT]);
    m_kernelSize.emplace_back(weightShape[WIDTH_IN_WEIGHT]);
    return OH_NN_SUCCESS;
}

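// Parses the strides parameter, which must be an OH_NN_INT64 tensor.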
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[DepthwiseConv2DNative] SetStrides failed, the strides should have type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetStrides GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
    int stridesSize = tensor->GetElementCount();
    m_strides.assign(pStrides, pStrides + stridesSize);

    return OH_NN_SUCCESS;
}

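// Parses the dilation parameter, which must be an OH_NN_INT64 tensor.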
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[DepthwiseConv2DNative] SetDilation failed, the dilation should have type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetDilation GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
    int dilationSize = tensor->GetElementCount();
    m_dilation.assign(pDilation, pDilation + dilationSize);

    return OH_NN_SUCCESS;
}

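// Parses the padding parameter, which is either a pad mode (OH_NN_INT8 scalar)
// or an explicit pad list (OH_NN_INT64).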
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(
    std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();

    bool isPadMode = false;
    OH_NN_ReturnCode ret = SetIsPadMode(tensor, isPadMode);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (isPadMode) {
        if (tensor->GetDataType() != OH_NN_INT8) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padMode should have type OH_NN_INT8.");
            return OH_NN_INVALID_PARAMETER;
        }

        int8_t* pPad = static_cast<int8_t*>(buffer);
        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, invalid pad mode.");
            return OH_NN_INVALID_PARAMETER;
        }
        m_padMode = NNToMS::TransformPadModeValue(*pPad);
    } else {
        if (tensor->GetDataType() != OH_NN_INT64) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padList should have type OH_NN_INT64.");
            return OH_NN_INVALID_PARAMETER;
        }

        const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
        int padListSize = tensor->GetElementCount();
        m_pad.assign(pPadList, pPadList + padListSize);
    }
    return OH_NN_SUCCESS;
}

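// Validates the input and output indices against the expected counts and records them.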
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetInputAndOutput(
    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[DepthwiseConv2DNative] SetInputAndOutput failed, passed invalid input or output index.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    return OH_NN_SUCCESS;
}

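// Validates inputs and outputs, derives the input channel count and kernel size from the
// input and weight tensors, and parses all operator parameters (strides, dilation,
// padding, activation).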
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[DepthwiseConv2DNative] Build failed, the operation has already been built; it cannot be built again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = SetInputAndOutput(inputsIndex, outputsIndex, allTensors);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions();
    if (inputShape.size() != INPUT_RANK) {
        LOGE("[DepthwiseConv2DNative] Build failed, invalid rank of input shape; it should be 4-dimensional.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_inChannel = inputShape[IN_CHANNEL_IN_INPUT];
    // Set kernel size
    ret = SetKernelSize(inputsIndex, allTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[DepthwiseConv2DNative] Build failed, SetKernelSize failed.");
        return ret;
    }

    for (uint32_t i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i]; // parameter tensor
        switch (tensor->GetType()) {
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES:
                ret = SetStrides(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION:
                ret = SetDilation(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE:
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD:
                ret = SetPadModeOrPaddings(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE:
                ret = SetActivation(tensor);
                break;
            default:
                LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType());
                return OH_NN_INVALID_PARAMETER;
        }
        if (ret != OH_NN_SUCCESS) {
            LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param.");
            return ret;
        }
    }

    SetQuantType(outputsIndex, allTensors);

    m_isBuild = true;
    m_name = OP_NAME;
    return OH_NN_SUCCESS;
}

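// Creates the MindSpore Lite Conv2DFusion primitive from the attributes collected in Build();
// the input channel count is also passed as the group count, as required for a depthwise convolution.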
LiteGraphPrimitvePtr DepthwiseConv2DNativeBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[DepthwiseConv2DNative] GetPrimitive failed, cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides,
        m_dilation, m_padMode, m_pad, m_inChannel, m_inChannel, m_outChannel, m_activationType);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

REGISTER_OPS(DepthwiseConv2DNativeBuilder, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS