/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pad_builder.h"

#include "transform.h"
#include "validation.h"
#include "ops_registry.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
static const int INPUT_NUM = 2;
static const int OUTPUT_NUM = 1;
static const int PARAM_MAX_NUM = 2;
static const int SCALE_LENGTH = 1;
static const std::string OP_NAME = "Pad";
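// Maps the integer value of the paddingMode parameter tensor to the
// corresponding mindspore::lite::PaddingMode enumerator.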
static const std::unordered_map<int, mindspore::lite::PaddingMode> paddingList = {
    {0, mindspore::lite::PADDING_MODE_CONSTANT},
    {1, mindspore::lite::PADDING_MODE_REFLECT},
    {2, mindspore::lite::PADDING_MODE_SYMMETRIC},
    {3, mindspore::lite::PADDING_MODE_RESERVED}};

PadBuilder::PadBuilder() {}

PadBuilder::~PadBuilder() {}
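
// Parses the paddingMode parameter tensor. The value must be a scalar of type
// OH_NN_INT32 in the range [0, 3]; it is mapped through paddingList to the
// corresponding MindSpore Lite padding mode.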
OH_NN_ReturnCode PadBuilder::SetPaddingMode(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetElementCount() != SCALE_LENGTH) {
        LOGE("[Pad] SetPaddingMode failed, the paddingMode should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT32) {
        LOGE("[Pad] SetPaddingMode failed, the paddingMode should be type OH_NN_INT32.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Pad] SetPaddingMode failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    int paddingModeKey = *(static_cast<int*>(buffer));
    auto it = paddingList.find(paddingModeKey);
    if (it != paddingList.end()) {
        m_paddingMode = it->second;
    } else {
        LOGE("[Pad] SetPaddingMode failed, the paddingMode value should be in [0, 3], but got %d.", paddingModeKey);
        LOGE("[Pad] paddingMode values: 0-PADDING_MODE_CONSTANT, 1-PADDING_MODE_REFLECT, "
             "2-PADDING_MODE_SYMMETRIC, 3-PADDING_MODE_RESERVED");
        return OH_NN_INVALID_PARAMETER;
    }

    return OH_NN_SUCCESS;
}
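
// Parses the constant_value parameter tensor. The value must be a scalar of type
// OH_NN_FLOAT32; it is used as the fill value when the padding mode is
// PADDING_MODE_CONSTANT.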
OH_NN_ReturnCode PadBuilder::SetConstantValue(const std::shared_ptr<NNTensor>& tensor)
{
    tensor->IdentifyOpParameter();
    if (tensor->GetElementCount() != SCALE_LENGTH) {
        LOGE("[Pad] SetConstantValue failed. The constant_value should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_FLOAT32) {
        LOGE("[Pad] SetConstantValue failed. The constant_value should be type OH_NN_FLOAT32.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[Pad] SetConstantValue failed, the constant_value passed an empty buffer.");
        return OH_NN_INVALID_PARAMETER;
    }

    m_constantValue = *static_cast<float*>(buffer);
    return OH_NN_SUCCESS;
}
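
// Validates the input, output and parameter tensor indices of the Pad operation,
// parses the parameter tensors, and records the operation name. Build() may only
// be called once per builder instance.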
OH_NN_ReturnCode PadBuilder::Build(const std::vector<uint32_t>& paramsIndex,
                                   const std::vector<uint32_t>& inputsIndex,
                                   const std::vector<uint32_t>& outputsIndex,
                                   const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[Pad] Build failed. The operation has already been built and cannot be built again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[Pad] Build failed. Passed invalid input or output index of the Pad operation.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    returnCode = CheckParamIndex(paramsIndex, allTensors, PARAM_MAX_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[Pad] Build failed. Passed invalid param index of the Pad operation.");
        return returnCode;
    }

    for (int i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i];
        if (m_paramMap.find(tensor->GetType()) != m_paramMap.end()) {
            returnCode = (this->*(m_paramMap[tensor->GetType()]))(tensor);
        } else {
            LOGE("[Pad] Build failed, param invalid, type=%d", tensor->GetType());
            return OH_NN_INVALID_PARAMETER;
        }

        if (returnCode != OH_NN_SUCCESS) {
            LOGE("[Pad] Build failed. Passed invalid param.");
            return returnCode;
        }
    }

    // The quantization type of the first output determines that of the operator.
    SetQuantType(outputsIndex, allTensors);

    m_name = OP_NAME;
    m_isBuild = true;
    return OH_NN_SUCCESS;
}
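
// Creates the MindIR PadFusion primitive from the cached attributes. The paddings
// attribute is left empty here; the padding sizes are expected to come from the
// operation's second input tensor rather than from the primitive.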
LiteGraphPrimitvePtr PadBuilder::GetPrimitive()
{
    if (!m_isBuild) {
        LOGE("[Pad] GetPrimitive failed. Cannot get primitive before calling Build.");
        return {nullptr, DestroyLiteGraphPrimitive};
    }

    std::vector<std::vector<int64_t>> paddings;

    void* primitive = MindIR_PadFusion_CreatePrimitive(paddings, m_paddingMode, m_constantValue);
    LiteGraphPrimitvePtr graphPrimitivePtr(primitive, DestroyLiteGraphPrimitive);
    return graphPrimitivePtr;
}

REGISTER_OPS(PadBuilder, OH_NN_OPS_PAD);
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS