/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "depthwise_conv2d_native_builder.h"

#include "transform.h"
#include "validation.h"
#include "ops_validation.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
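// Index constants used throughout this builder. They imply the layouts the
// builder expects: the rank-4 input is indexed as NHWC (in-channel read from
// dimension 3), and the weight shape is read as [outChannel, kernelHeight, kernelWidth, ...].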
static const int INPUT_NUM = 3;
static const int OUTPUT_NUM = 1;
static const int PAD_MODE_SIZE = 1;
static const int PAD_LIST_SIZE = 4;
static const int IN_CHANNEL_IN_INPUT = 3;
static const int OUT_CHANNEL_IN_WEIGHT = 0;
static const int HEIGHT_IN_WEIGHT = 1;
static const int WIDTH_IN_WEIGHT = 2;
static const int INPUT_RANK = 4;
static const int INPUT_X = 0;
static const int INPUT_WEIGHT = 1;
static const int SCALE_LENGTH = 1;
static const std::string OP_NAME = "DepthwiseConv2DNative";

DepthwiseConv2DNativeBuilder::DepthwiseConv2DNativeBuilder() {}

DepthwiseConv2DNativeBuilder::~DepthwiseConv2DNativeBuilder() {}

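// A pad parameter with a single element is interpreted as a pad mode; a
// four-element parameter is interpreted as an explicit pad list.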
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetIsPadMode(std::shared_ptr<NNTensor> tensor,
    bool &isPadMode)
{
    if (tensor->GetElementCount() == PAD_MODE_SIZE) {
        isPadMode = true;
    } else if (tensor->GetElementCount() != PAD_LIST_SIZE) {
        LOGE("[DepthwiseConv2DNative] The element size of padMode should be 1 or "
            "the element size of padList should be 4.");
        return OH_NN_INVALID_PARAMETER;
    }

    return OH_NN_SUCCESS;
}

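// Reads the activation (fuse) type from a scalar OH_NN_INT8 tensor, validates it
// with ValidateFuseType, and stores the value converted by NNToMS::TransfromFusionType.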
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetActivation(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    // Set ActivationType
    if (tensor->GetElementCount() != SCALE_LENGTH) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, the activation should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT8) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, the activationType should have type OH_NN_INT8.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetActivation GetBuffer return nullptr");
        return OH_NN_INVALID_PARAMETER;
    }
    int8_t* pFuseData = static_cast<int8_t*>(buffer);
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
        LOGE("[DepthwiseConv2DNative] SetActivation failed, activation input is invalid.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_activationType = NNToMS::TransfromFusionType(static_cast<OH_NN_FuseType>(*pFuseData));

    return OH_NN_SUCCESS;
}

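// Derives m_kernelSize (height, width) and m_outChannel from the rank-4 weight tensor.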
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetKernelSize(const std::vector<uint32_t>& inputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    // Set kernelSize and outChannel
    auto weightShape = allTensors[inputsIndex[INPUT_WEIGHT]]->GetDimensions();
    if (weightShape.size() != INPUT_RANK) {
        LOGE("[DepthwiseConv2DNative] SetKernelSize failed, the weight should be a 4-dimensional tensor.");
        return OH_NN_INVALID_PARAMETER;
    }

    m_outChannel = weightShape[OUT_CHANNEL_IN_WEIGHT];
    m_kernelSize.clear();
    m_kernelSize.emplace_back(weightShape[HEIGHT_IN_WEIGHT]);
    m_kernelSize.emplace_back(weightShape[WIDTH_IN_WEIGHT]);
    return OH_NN_SUCCESS;
}

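// SetStrides and SetDilation both copy an OH_NN_INT64 tensor parameter into the
// corresponding member vector.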
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[DepthwiseConv2DNative] SetStrides failed, the stride should have type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetStrides GetBuffer return nullptr");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pStrides = reinterpret_cast<const int64_t*>(buffer);
    uint32_t stridesSize = tensor->GetElementCount();
    m_strides.assign(pStrides, pStrides + stridesSize);

    return OH_NN_SUCCESS;
}

OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetDilation(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[DepthwiseConv2DNative] SetDilation failed, the dilation should have type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetDilation GetBuffer return nullptr");
        return OH_NN_INVALID_PARAMETER;
    }
    const int64_t* pDilation = reinterpret_cast<const int64_t*>(buffer);
    uint32_t dilationSize = tensor->GetElementCount();
    m_dilation.assign(pDilation, pDilation + dilationSize);

    return OH_NN_SUCCESS;
}

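// Stores either m_padMode (a single OH_NN_INT8 element) or m_pad (an OH_NN_INT64 pad list),
// depending on the element count checked by SetIsPadMode.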
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetPadModeOrPaddings(
    std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();

    bool isPadMode = false;
    OH_NN_ReturnCode ret = SetIsPadMode(tensor, isPadMode);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings GetBuffer return nullptr");
        return OH_NN_INVALID_PARAMETER;
    }

    if (isPadMode) {
        if (tensor->GetDataType() != OH_NN_INT8) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padMode should have type OH_NN_INT8.");
            return OH_NN_INVALID_PARAMETER;
        }

        int8_t* pPad = static_cast<int8_t*>(buffer);
        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPad)) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, invalid pad mode.");
            return OH_NN_INVALID_PARAMETER;
        }
        m_padMode = NNToMS::TransformPadModeValue(*pPad);
    } else {
        if (tensor->GetDataType() != OH_NN_INT64) {
            LOGE("[DepthwiseConv2DNative] SetPadModeOrPaddings failed, the padList should have type OH_NN_INT64.");
            return OH_NN_INVALID_PARAMETER;
        }

        const int64_t* pPadList = reinterpret_cast<const int64_t*>(buffer);
        uint32_t padListSize = tensor->GetElementCount();
        m_pad.assign(pPadList, pPadList + padListSize);
    }
    return OH_NN_SUCCESS;
}

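// Validates the input/output indices against the expected counts (3 inputs, 1 output)
// before recording them.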
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::SetInputAndOutput(
    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[DepthwiseConv2DNative] SetInputAndOutput failed, passed invalid input or output index.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    return OH_NN_SUCCESS;
}

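// Build caches the input/output indices, derives the input channel and kernel size from
// the input and weight shapes, parses the operation parameters (strides, dilation,
// pad/pad mode, activation type), and finally records the quantization type and marks
// the builder as built.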
OH_NN_ReturnCode DepthwiseConv2DNativeBuilder::Build(const std::vector<uint32_t>& paramsIndex,
    const std::vector<uint32_t>& inputsIndex, const std::vector<uint32_t>& outputsIndex,
    const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[DepthwiseConv2DNative] Build failed, the operation has already been built, cannot build again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode ret = SetInputAndOutput(inputsIndex, outputsIndex, allTensors);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    auto inputShape = allTensors[inputsIndex[INPUT_X]]->GetDimensions();
    if (inputShape.size() != INPUT_RANK) {
        LOGE("[DepthwiseConv2DNative] Build failed, the input should be a 4-dimensional tensor.");
        return OH_NN_INVALID_PARAMETER;
    }
    m_inChannel = inputShape[IN_CHANNEL_IN_INPUT];
    // Set Kernel Size
    ret = SetKernelSize(inputsIndex, allTensors);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[DepthwiseConv2DNative] Build failed, SetKernelSize failed.");
        return ret;
    }

    for (uint32_t i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i];  // parameter tensor
        switch (tensor->GetType()) {
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES:
                ret = SetStrides(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION:
                ret = SetDilation(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE:
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD:
                ret = SetPadModeOrPaddings(tensor);
                break;
            case OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE:
                ret = SetActivation(tensor);
                break;
            default:
                LOGE("[DepthwiseConv2DNative] Build failed, param invalid, type = %d.", tensor->GetType());
                return OH_NN_INVALID_PARAMETER;
        }
        if (ret != OH_NN_SUCCESS) {
            LOGE("[DepthwiseConv2DNative] Build failed, passed invalid param.");
            return ret;
        }
    }

    SetQuantType(outputsIndex, allTensors);

    m_isBuild = true;
    m_name = OP_NAME;
    return OH_NN_SUCCESS;
}

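// Creates a Conv2DFusion primitive from the collected attributes. m_inChannel is passed
// twice; one of the repeated arguments presumably supplies the group count, so that
// group == inChannel, which is what makes the fused convolution depthwise.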
LiteGraphPrimitvePtr DepthwiseConv2DNativeBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[DepthwiseConv2DNative] GetPrimitive failed, cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    auto primitive = MindIR_Conv2DFusion_CreatePrimitive(m_kernelSize, m_strides,
        m_dilation, m_padMode, m_pad, m_inChannel, m_inChannel, m_outChannel, m_activationType);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

REGISTER_OPS(DepthwiseConv2DNativeBuilder, OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS