/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pooling_builder.h"

#include "transform.h"
#include "validation.h"
#include "ops_validation.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Ops {
static const int INPUT_NUM = 1;
static const int OUTPUT_NUM = 1;
static const int NUM_ELEMENT_PAD_MODE = 1;
static const int NUM_ELEMENT_PAD_LIST = 4;
static const int ACTIVATION_LENGTH = 1;

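// Common build entry for average and max pooling: validates and caches the input/output indices,
// then parses kernel size, stride, pad mode/paddings and activation from the given parameter tensors.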
OH_NN_ReturnCode PoolingBuilder::PoolingBuild(const std::vector<uint32_t>& paramsIndex,
                                              const std::vector<uint32_t>& inputsIndex,
                                              const std::vector<uint32_t>& outputsIndex,
                                              const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    if (m_isBuild) {
        LOGE("[PoolingBuilder] PoolingBuild failed, operation has been built, cannot build again.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    // Set input and output
    OH_NN_ReturnCode returnCode = SetInputAndOutput(inputsIndex, outputsIndex, allTensors);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[PoolingBuilder] PoolingBuild failed, the SetInputAndOutput failed.");
        return returnCode;
    }

    for (uint32_t i : paramsIndex) {
        std::shared_ptr<NNTensor> tensor = allTensors[i];
        switch (tensor->GetType()) {
            case OH_NN_AVG_POOL_KERNEL_SIZE:
            case OH_NN_MAX_POOL_KERNEL_SIZE:
                returnCode = SetKernel(tensor);
                break;
            case OH_NN_AVG_POOL_STRIDE:
            case OH_NN_MAX_POOL_STRIDE:
                returnCode = SetStrides(tensor);
                break;
            case OH_NN_AVG_POOL_PAD_MODE:
            case OH_NN_MAX_POOL_PAD_MODE:
            case OH_NN_MAX_POOL_PAD:
            case OH_NN_AVG_POOL_PAD:
                returnCode = SetPadModeOrPaddings(tensor);
                break;
            case OH_NN_AVG_POOL_ACTIVATION_TYPE:
            case OH_NN_MAX_POOL_ACTIVATION_TYPE:
                returnCode = SetActivation(tensor);
                break;
            default:
                LOGE("[PoolingBuilder] PoolingBuild failed, invalid param type, type = %d.", tensor->GetType());
                return OH_NN_INVALID_PARAMETER;
        }
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("[PoolingBuilder] PoolingBuild failed, passed invalid param.");
            return returnCode;
        }
    }

    // The quantization type of the first output determines that of the operator.
    SetQuantType(outputsIndex, allTensors);

    return OH_NN_SUCCESS;
}

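// Checks that exactly one input and one output index are supplied and records them on the builder.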
OH_NN_ReturnCode PoolingBuilder::SetInputAndOutput(const std::vector<uint32_t>& inputsIndex,
                                                   const std::vector<uint32_t>& outputsIndex,
                                                   const std::vector<std::shared_ptr<NNTensor>>& allTensors)
{
    OH_NN_ReturnCode returnCode = CheckIOIndex(inputsIndex, outputsIndex, allTensors, INPUT_NUM, OUTPUT_NUM);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("[PoolingBuilder] SetInputAndOutput failed, passed invalid input or output index.");
        return returnCode;
    }

    m_inputsIndex = inputsIndex;
    m_outputsIndex = outputsIndex;

    return OH_NN_SUCCESS;
}

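// Copies the int64 kernel-size values from the parameter tensor into m_kernelSize.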
OH_NN_ReturnCode PoolingBuilder::SetKernel(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    // Set kernelSize
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[PoolingBuilder] SetKernel failed, the KernelSize should be type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[PoolingBuilder] SetKernel failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    const int64_t* pKernelSize = static_cast<const int64_t*>(buffer);
    uint32_t kernelCount = tensor->GetElementCount();
    m_kernelSize.assign(pKernelSize, pKernelSize + kernelCount);

    return OH_NN_SUCCESS;
}

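// Copies the int64 stride values from the parameter tensor into m_strides.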
OH_NN_ReturnCode PoolingBuilder::SetStrides(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    // Set Strides
    if (tensor->GetDataType() != OH_NN_INT64) {
        LOGE("[PoolingBuilder] SetStrides failed, the Strides should be type OH_NN_INT64.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[PoolingBuilder] SetStrides failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    const int64_t* pStrides = static_cast<const int64_t*>(buffer);
    uint32_t stridesCount = tensor->GetElementCount();
    m_strides.assign(pStrides, pStrides + stridesCount);

    return OH_NN_SUCCESS;
}

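// A single-element int8 tensor is interpreted as a pad mode; a four-element int64 tensor as explicit paddings.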
OH_NN_ReturnCode PoolingBuilder::SetPadModeOrPaddings(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    size_t tensorElementCount = tensor->GetElementCount();
    // Set PadMode or PadList
    if (tensorElementCount == NUM_ELEMENT_PAD_MODE) {
        // PadMode
        if (tensor->GetDataType() != OH_NN_INT8) {
            LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padMode should be OH_NN_INT8.");
            return OH_NN_INVALID_PARAMETER;
        }

        int8_t* pPadMode = static_cast<int8_t*>(buffer);
        if (!OHOS::NeuralNetworkRuntime::Validation::ValidatePadMode(*pPadMode)) {
            LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid pad mode.");
            return OH_NN_INVALID_PARAMETER;
        }
        m_padMode = NNToMS::TransformPadModeValue(*pPadMode);
    } else if (tensorElementCount == NUM_ELEMENT_PAD_LIST) {
        // PadList
        if (tensor->GetDataType() != OH_NN_INT64) {
            LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, the type of padList should be OH_NN_INT64.");
            return OH_NN_INVALID_PARAMETER;
        }

        int64_t* pPad = static_cast<int64_t*>(buffer);
        m_pad.clear();
        for (int i = 0; i < NUM_ELEMENT_PAD_LIST; i++) {
            m_pad.emplace_back(pPad[i]);
        }
    } else {
        LOGE("[PoolingBuilder] SetPadModeOrPaddings failed, invalid element count of padMode or padList, "
            "padMode should be a single value, and padList should have 4 elements.");
        return OH_NN_INVALID_PARAMETER;
    }
    return OH_NN_SUCCESS;
}

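// Validates the scalar int8 fuse type and converts it to the runtime's activation type.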
OH_NN_ReturnCode PoolingBuilder::SetActivation(std::shared_ptr<NNTensor> tensor)
{
    tensor->IdentifyOpParameter();
    // Set ActivationType
    if (tensor->GetElementCount() != ACTIVATION_LENGTH) {
        LOGE("[PoolingBuilder] SetActivation failed, the Activation should be a scalar.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (tensor->GetDataType() != OH_NN_INT8) {
        LOGE("[PoolingBuilder] SetActivation failed, the ActivationType should be type OH_NN_INT8.");
        return OH_NN_INVALID_PARAMETER;
    }

    void* buffer = tensor->GetBuffer();
    if (buffer == nullptr) {
        LOGE("[PoolingBuilder] SetActivation failed, GetBuffer returned nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    int8_t* pFuseData = static_cast<int8_t*>(buffer);
    if (!OHOS::NeuralNetworkRuntime::Validation::ValidateFuseType(static_cast<OH_NN_FuseType>(*pFuseData))) {
        LOGE("[PoolingBuilder] SetActivation failed, activation input is invalid.");
        return OH_NN_INVALID_PARAMETER;
    }
    auto fuseType = static_cast<OH_NN_FuseType>(*pFuseData);
    m_activationType = NNToMS::TransfromFusionType(fuseType);

    return OH_NN_SUCCESS;
}
} // namespace Ops
} // namespace NeuralNetworkRuntime
} // namespace OHOS