/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "batchnorm_builder.h"

#include "mindir.h"

#include "frameworks/native/ops_registry.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
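// BatchNorm expects five input tensors (typically the data tensor plus scale, offset, mean and variance)
// and produces a single output. The epsilon parameter is a single float value.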
static const int INPUT_NUM = 5;
static const int OUTPUT_NUM = 1;
static const int SCALAR_LENGTH = 1;
const std::string OP_NAME = "BatchNorm";

BatchNormBuilder::BatchNormBuilder() {}

BatchNormBuilder::~BatchNormBuilder() {}

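// Validates the epsilon parameter tensor (a float32 scalar with a non-null buffer) and caches its value.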
OH_NN_ReturnCode BatchNormBuilder::SetEpsilon(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetDataType() != OH_NN_FLOAT32) {
        LOGE("[BatchNorm] SetEpsilon failed, the epsilon should be of type OH_NN_FLOAT32.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetElementCount() != SCALAR_LENGTH) {
        LOGE("[BatchNorm] SetEpsilon failed, the epsilon should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[BatchNorm] SetEpsilon failed, the epsilon tensor passed an empty buffer.");
        return OH_NN_INVALID_PARAMETER;
    }

    m_epsilon = *static_cast<float*>(buffer);
    return OH_NN_SUCCESS;
}

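// Binds the input and output tensor indices, parses the epsilon parameter and marks the operator as built.
// Build() may only be called once per builder instance.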
OH_NN_ReturnCode BatchNormBuilder::Build(const std::vector<uint32_t>& paramsIndex,
                                         const std::vector<uint32_t>& inputsIndex,
                                         const std::vector<uint32_t>& outputsIndex,
                                         const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[BatchNorm] Build failed, the batchNorm operation has already been built, cannot build again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[BatchNorm] Build failed, passed invalid input or output index.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    for (int i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i];
        switch (tensor->GetType()) {
            case OH_NN_BATCH_NORM_EPSILON:
                returnCode = SetEpsilon(tensor);
                break;
            default:
                LOGE("[BatchNorm] Parameter type is invalid, type=%d", tensor->GetType());
                return OH_NN_INVALID_PARAMETER;
        }

        if (returnCode != OH_NN_SUCCESS) {
            LOGE("[BatchNorm] Build failed, passed invalid param.");
            return returnCode;
        }
    }

    // The quantization type of the first output determines that of the operator.
    SetQuantType(outputsIndex, allTensors);

    m_isBuild = true;
    m_name = OP_NAME;
    return OH_NN_SUCCESS;
}

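// Wraps the built operator in a MindIR FusedBatchNorm primitive; returns a null primitive if Build() has not run.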
LiteGraphPrimitvePtr BatchNormBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[BatchNorm] GetPrimitive failed, cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    void* primitive = mindspore::lite::MindIR_FusedBatchNorm_CreatePrimitive(m_epsilon);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

REGISTER_OPS(BatchNormBuilder, OH_NN_OPS_BATCH_NORM);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS