1 /*
2 * Copyright (c) 2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "detection_post_process_builder.h"
17
18 #include "transform.h"
19 #include "validation.h"
20 #include "mindir.h"
21 #include "ops_registry.h"
22
23 namespace OHOS {
24 namespace NeuralNetworkRuntime {
25 namespace Ops {
// Operator metadata: a DetectionPostProcess node consumes INPUT_NUM input
// tensors and produces OUTPUT_NUM outputs (checked in Build via CheckIOIndex);
// it accepts at most PARAM_MAX_NUM parameter tensors (CheckParamIndex), and
// each scalar parameter tensor must hold exactly SCALAR_LENGTH element.
static const int INPUT_NUM = 3;
static const int OUTPUT_NUM = 4;
static const int PARAM_MAX_NUM = 10;
static const int SCALAR_LENGTH = 1;
static const std::string OP_NAME = "DetectionPostProcess";
31
DetectionPostProcessBuilder()32 DetectionPostProcessBuilder::DetectionPostProcessBuilder() {}
33
~DetectionPostProcessBuilder()34 DetectionPostProcessBuilder::~DetectionPostProcessBuilder() {}
35
SetInputSize(const std::shared_ptr<NNTensor> & tensor)36 OH_NN_ReturnCode DetectionPostProcessBuilder::SetInputSize(const std::shared_ptr<NNTensor>& tensor)
37 {
38 if (tensor->GetDataType() != OH_NN_INT64) {
39 LOGE("[DetectionPostProcess] The inputSize should be type OH_NN_INT64.");
40 return OH_NN_INVALID_PARAMETER;
41 }
42
43 if (tensor->GetElementCount() != SCALAR_LENGTH) {
44 LOGE("[DetectionPostProcess] The inputSize should be scalar.");
45 return OH_NN_INVALID_PARAMETER;
46 }
47
48 void* buffer = tensor->GetBuffer();
49 if (buffer == nullptr) {
50 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
51 return OH_NN_INVALID_PARAMETER;
52 }
53 m_inputSize = *(static_cast<const int64_t*>(buffer));
54
55 return OH_NN_SUCCESS;
56 }
57
SetScale(const std::shared_ptr<NNTensor> & tensor)58 OH_NN_ReturnCode DetectionPostProcessBuilder::SetScale(const std::shared_ptr<NNTensor>& tensor)
59 {
60 if (tensor->GetDataType() != OH_NN_FLOAT32) {
61 LOGE("[DetectionPostProcess] The scale should be type OH_NN_FLOAT32.");
62 return OH_NN_INVALID_PARAMETER;
63 }
64
65 m_scale.clear();
66
67 void* buffer = tensor->GetBuffer();
68 if (buffer == nullptr) {
69 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
70 return OH_NN_INVALID_PARAMETER;
71 }
72
73 float* pScale = static_cast<float*>(buffer);
74
75 uint32_t elementCount = tensor->GetElementCount();
76 for (uint32_t i = 0; i < elementCount; ++i) {
77 m_scale.emplace_back(*pScale);
78 ++pScale;
79 }
80
81 return OH_NN_SUCCESS;
82 }
83
SetNmsIoUThreshold(const std::shared_ptr<NNTensor> & tensor)84 OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsIoUThreshold(const std::shared_ptr<NNTensor>& tensor)
85 {
86 if (tensor->GetDataType() != OH_NN_FLOAT32) {
87 LOGE("[DetectionPostProcess] The nmsIoUThreshold should be type OH_NN_FLOAT32.");
88 return OH_NN_INVALID_PARAMETER;
89 }
90
91 if (tensor->GetElementCount() != SCALAR_LENGTH) {
92 LOGE("[DetectionPostProcess] The nmsIoUThreshold should be scalar.");
93 return OH_NN_INVALID_PARAMETER;
94 }
95
96 void* buffer = tensor->GetBuffer();
97 if (buffer == nullptr) {
98 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
99 return OH_NN_INVALID_PARAMETER;
100 }
101 m_nmsIoUThreshold = *(static_cast<const float*>(buffer));
102
103 return OH_NN_SUCCESS;
104 }
105
SetNmsScoreThreshold(const std::shared_ptr<NNTensor> & tensor)106 OH_NN_ReturnCode DetectionPostProcessBuilder::SetNmsScoreThreshold(const std::shared_ptr<NNTensor>& tensor)
107 {
108 if (tensor->GetDataType() != OH_NN_FLOAT32) {
109 LOGE("[DetectionPostProcess] The scoreThreshold should be type OH_NN_FLOAT32.");
110 return OH_NN_INVALID_PARAMETER;
111 }
112
113 if (tensor->GetElementCount() != SCALAR_LENGTH) {
114 LOGE("[DetectionPostProcess] The scoreThreshold should be scalar.");
115 return OH_NN_INVALID_PARAMETER;
116 }
117
118 void* buffer = tensor->GetBuffer();
119 if (buffer == nullptr) {
120 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
121 return OH_NN_INVALID_PARAMETER;
122 }
123 m_nmsScoreThreshold = *(static_cast<const float*>(buffer));
124
125 return OH_NN_SUCCESS;
126 }
127
SetMaxDetections(const std::shared_ptr<NNTensor> & tensor)128 OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxDetections(const std::shared_ptr<NNTensor>& tensor)
129 {
130 if (tensor->GetDataType() != OH_NN_INT64) {
131 LOGE("[DetectionPostProcess] The maxDetections should be type OH_NN_INT64.");
132 return OH_NN_INVALID_PARAMETER;
133 }
134
135 if (tensor->GetElementCount() != SCALAR_LENGTH) {
136 LOGE("[DetectionPostProcess] The maxDetections should be scalar.");
137 return OH_NN_INVALID_PARAMETER;
138 }
139
140 void* buffer = tensor->GetBuffer();
141 if (buffer == nullptr) {
142 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
143 return OH_NN_INVALID_PARAMETER;
144 }
145 m_maxDetections = *(static_cast<const int64_t*>(buffer));
146
147 return OH_NN_SUCCESS;
148 }
149
SetDetectionsPerClass(const std::shared_ptr<NNTensor> & tensor)150 OH_NN_ReturnCode DetectionPostProcessBuilder::SetDetectionsPerClass(const std::shared_ptr<NNTensor>& tensor)
151 {
152 if (tensor->GetDataType() != OH_NN_INT64) {
153 LOGE("[DetectionPostProcess] The detectionsPerClass should be type OH_NN_INT64.");
154 return OH_NN_INVALID_PARAMETER;
155 }
156
157 if (tensor->GetElementCount() != SCALAR_LENGTH) {
158 LOGE("[DetectionPostProcess] The detectionsPerClass should be scalar.");
159 return OH_NN_INVALID_PARAMETER;
160 }
161
162 void* buffer = tensor->GetBuffer();
163 if (buffer == nullptr) {
164 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
165 return OH_NN_INVALID_PARAMETER;
166 }
167 m_detectionsPerClass = *(static_cast<const int64_t*>(buffer));
168
169 return OH_NN_SUCCESS;
170 }
171
SetMaxClassesPerDetection(const std::shared_ptr<NNTensor> & tensor)172 OH_NN_ReturnCode DetectionPostProcessBuilder::SetMaxClassesPerDetection(const std::shared_ptr<NNTensor>& tensor)
173 {
174 if (tensor->GetDataType() != OH_NN_INT64) {
175 LOGE("[DetectionPostProcess] The maxClassesPerDetection should be type OH_NN_INT64.");
176 return OH_NN_INVALID_PARAMETER;
177 }
178
179 if (tensor->GetElementCount() != SCALAR_LENGTH) {
180 LOGE("[DetectionPostProcess] The maxClassesPerDetection should be scalar.");
181 return OH_NN_INVALID_PARAMETER;
182 }
183
184 void* buffer = tensor->GetBuffer();
185 if (buffer == nullptr) {
186 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
187 return OH_NN_INVALID_PARAMETER;
188 }
189 m_maxClassesPerDetection = *(static_cast<const int64_t*>(buffer));
190
191 return OH_NN_SUCCESS;
192 }
193
SetNumClasses(const std::shared_ptr<NNTensor> & tensor)194 OH_NN_ReturnCode DetectionPostProcessBuilder::SetNumClasses(const std::shared_ptr<NNTensor>& tensor)
195 {
196 if (tensor->GetDataType() != OH_NN_INT64) {
197 LOGE("[DetectionPostProcess] The numClasses should be type OH_NN_INT64.");
198 return OH_NN_INVALID_PARAMETER;
199 }
200
201 if (tensor->GetElementCount() != SCALAR_LENGTH) {
202 LOGE("[DetectionPostProcess] The numClasses should be scalar.");
203 return OH_NN_INVALID_PARAMETER;
204 }
205
206 void* buffer = tensor->GetBuffer();
207 if (buffer == nullptr) {
208 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
209 return OH_NN_INVALID_PARAMETER;
210 }
211 m_numClasses = *(static_cast<const int64_t*>(buffer));
212
213 return OH_NN_SUCCESS;
214 }
215
SetUseRegularNms(const std::shared_ptr<NNTensor> & tensor)216 OH_NN_ReturnCode DetectionPostProcessBuilder::SetUseRegularNms(const std::shared_ptr<NNTensor>& tensor)
217 {
218 if (tensor->GetDataType() != OH_NN_BOOL) {
219 LOGE("[DetectionPostProcess] The useRegularNms should be type OH_NN_BOOL.");
220 return OH_NN_INVALID_PARAMETER;
221 }
222
223 if (tensor->GetElementCount() != SCALAR_LENGTH) {
224 LOGE("[DetectionPostProcess] The useRegularNms should be scalar.");
225 return OH_NN_INVALID_PARAMETER;
226 }
227
228 void* buffer = tensor->GetBuffer();
229 if (buffer == nullptr) {
230 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
231 return OH_NN_INVALID_PARAMETER;
232 }
233 m_useRegularNms = *(static_cast<bool*>(buffer));
234
235 return OH_NN_SUCCESS;
236 }
237
SetOutQuantized(const std::shared_ptr<NNTensor> & tensor)238 OH_NN_ReturnCode DetectionPostProcessBuilder::SetOutQuantized(const std::shared_ptr<NNTensor>& tensor)
239 {
240 if (tensor->GetDataType() != OH_NN_BOOL) {
241 LOGE("[DetectionPostProcess] The outQuantized should be type OH_NN_BOOL.");
242 return OH_NN_INVALID_PARAMETER;
243 }
244
245 if (tensor->GetElementCount() != SCALAR_LENGTH) {
246 LOGE("[DetectionPostProcess] The outQuantized should be scalar.");
247 return OH_NN_INVALID_PARAMETER;
248 }
249
250 void* buffer = tensor->GetBuffer();
251 if (buffer == nullptr) {
252 LOGE("[DetectionPostProcess] Tensor buffer is nullptr.");
253 return OH_NN_INVALID_PARAMETER;
254 }
255 m_outQuantized = *(static_cast<bool*>(buffer));
256
257 return OH_NN_SUCCESS;
258 }
259
260
Build(const std::vector<uint32_t> & paramsIndex,const std::vector<uint32_t> & inputsIndex,const std::vector<uint32_t> & outputsIndex,const std::vector<std::shared_ptr<NNTensor>> & allTensors)261 OH_NN_ReturnCode DetectionPostProcessBuilder::Build(const std::vector<uint32_t>& paramsIndex,
262 const std::vector<uint32_t>& inputsIndex,
263 const std::vector<uint32_t>& outputsIndex,
264 const std::vector<std::shared_ptr<NNTensor>>& allTensors)
265 {
266 if (m_isBuild) {
267 LOGE("[DetectionPostProcess] Build failed, the detectionPostProcess operation has been build. \
268 cannot build again.");
269 return OH_NN_OPERATION_FORBIDDEN;
270 }
271
272 auto ret = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
273 if (ret != OH_NN_SUCCESS) {
274 LOGE("[DetectionPostProcess] Build failed, passed invalid input or output index.");
275 return ret;
276 }
277
278 m_inputsIndex = inputsIndex;
279 m_outputsIndex = outputsIndex;
280
281 ret = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
282 if (ret != OH_NN_SUCCESS) {
283 LOGE("[DetectionPostProcess] Build failed, passed invalid param index.");
284 return ret;
285 }
286
287 for (int i : paramsIndex) {
288 std::shared_ptr<NNTensor> tensor = allTensors[i];
289 tensor->IdentifyOpParameter();
290 if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
291 ret = (this->*(m_paramMap[tensor->GetType()]))(tensor);
292 } else {
293 LOGE("[DetectionPostProcess] Build failed, param invalid, type=%d", tensor->GetType());
294 return OH_NN_INVALID_PARAMETER;
295 }
296
297 if (ret != OH_NN_SUCCESS) {
298 LOGE("[DetectionPostProcess] Build failed, passed invalid param.");
299 return ret;
300 }
301 }
302
303 m_name = OP_NAME;
304 m_isBuild = true;
305 return OH_NN_SUCCESS;
306 }
307
GetPrimitive()308 LiteGraphPrimitvePtr DetectionPostProcessBuilder::GetPrimitive()
309 {
310 if (!m_isBuild) {
311 LOGE("[DetectionPostProcess] GetPrimitive failed, cannot get primitive before call build.");
312 return {nullptr, DestroyLiteGraphPrimitive};
313 }
314
315 mindspore::lite::Format format {mindspore::lite::FORMAT_NCHW};
316
317 void* primitive = mindspore::lite::MindIR_DetectionPostProcess_CreatePrimitive(format, m_inputSize, m_scale,
318 m_nmsIoUThreshold, m_nmsScoreThreshold, m_maxDetections, m_detectionsPerClass, m_maxClassesPerDetection,
319 m_numClasses, m_useRegularNms, m_outQuantized);
320 LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive) ;
321 return graphPrimitivePtr;
322 }
323
324 REGISTER_OPS(DetectionPostProcessBuilder, OH_NN_OPS_DETECTION_POST_PROCESS);
325 } // namespace Ops
326 } // namespace NeuralNetworkRuntime
327 } // namespace OHOS