/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_UTILS_H
#define ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_UTILS_H

#include <algorithm>
#include <cstdint>
#include <vector>

#include "nnapi/TypeUtils.h"
#include "nnapi/Types.h"

namespace android {
namespace nn {

// DEPRECATED. Use NN_RET_CHECK instead.
#define NN_CHECK(x) NN_RET_CHECK(x)
#define NN_OPS_CHECK(x) NN_RET_CHECK(x)

// DEPRECATED. Use NN_RET_CHECK_EQ instead.
#define NN_CHECK_EQ(x, y) NN_RET_CHECK_EQ(x, y)

// An 8-bit boolean type (sizeof(bool) is implementation-defined).
typedef uint8_t bool8;

enum PaddingScheme {
    kPaddingUnknown = 0,
    kPaddingSame = 1,
    kPaddingValid = 2,
};

// Stores operand type information. "Shape" is a historical name.
struct Shape {
    OperandType type = OperandType::FLOAT32;
    std::vector<uint32_t> dimensions;
    float scale = 0.0f;
    int32_t offset = 0;
    Operand::ExtraParams extraParams;
};
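
// Illustrative example (not part of the API), assuming the TENSOR_QUANT8_ASYMM
// enumerator from nnapi/Types.h: an asymmetric quantized 8-bit tensor of
// dimensions {1, 224, 224, 3} with scale 0.0078125 and zero point 128 could be
// described as:
//
//   Shape s;
//   s.type = OperandType::TENSOR_QUANT8_ASYMM;
//   s.dimensions = {1, 224, 224, 3};
//   s.scale = 0.0078125f;
//   s.offset = 128;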

// Provides information available during graph creation to validate an operation.
class IOperationValidationContext {
   public:
    virtual ~IOperationValidationContext() {}

    virtual const char* getOperationName() const = 0;

    virtual uint32_t getNumInputs() const = 0;
    virtual OperandType getInputType(uint32_t index) const = 0;
    virtual Shape getInputShape(uint32_t index) const = 0;
    virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;

    virtual uint32_t getNumOutputs() const = 0;
    virtual OperandType getOutputType(uint32_t index) const = 0;
    virtual Shape getOutputShape(uint32_t index) const = 0;
};

// Provides inputs and outputs during operation execution.
class IOperationExecutionContext {
   public:
    virtual ~IOperationExecutionContext() {}

    virtual uint32_t getNumInputs() const = 0;
    virtual OperandType getInputType(uint32_t index) const = 0;
    virtual Shape getInputShape(uint32_t index) const = 0;
    virtual const void* getInputBuffer(uint32_t index) const = 0;
    virtual const Operand::ExtraParams& getInputExtraParams(uint32_t index) const = 0;

    virtual uint32_t getNumOutputs() const = 0;
    virtual OperandType getOutputType(uint32_t index) const = 0;
    virtual Shape getOutputShape(uint32_t index) const = 0;
    virtual void* getOutputBuffer(uint32_t index) = 0;

    // Updates the output shape, allocating the buffer if necessary.
    virtual bool setOutputShape(uint32_t index, const Shape& shape) = 0;

    virtual bool isOmittedInput(uint32_t index) const = 0;
    virtual bool isOmittedOutput(uint32_t index) const = 0;

    template <typename T>
    const T* getInputBuffer(uint32_t index) const {
        return reinterpret_cast<const T*>(getInputBuffer(index));
    }

    template <typename T>
    T* getOutputBuffer(uint32_t index) {
        return reinterpret_cast<T*>(getOutputBuffer(index));
    }

    template <typename T>
    T getInputValue(uint32_t index) const {
        return getInputBuffer<T>(index)[0];
    }
};
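
// Illustrative usage sketch (not part of this header): an operation's execute
// function typically reads tensor data through the typed accessors above. The
// operation layout below (two float tensor inputs, one output) is hypothetical.
//
//   bool executeAdd(IOperationExecutionContext* context) {
//       const float* a = context->getInputBuffer<float>(0);
//       const float* b = context->getInputBuffer<float>(1);
//       float* out = context->getOutputBuffer<float>(0);
//       const uint32_t n = getNumberOfElements(context->getOutputShape(0));
//       for (uint32_t i = 0; i < n; ++i) out[i] = a[i] + b[i];
//       return true;
//   }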

// Verifies that the number and types of operation inputs are as expected.
bool validateInputTypes(const IOperationValidationContext* context,
                        const std::vector<OperandType>& expectedTypes);

// Verifies that the number and types of operation outputs are as expected.
bool validateOutputTypes(const IOperationValidationContext* context,
                         const std::vector<OperandType>& expectedTypes);

// Verifies that the HAL version specified in the context is greater than or
// equal to the minimum supported HAL version.
bool validateVersion(const IOperationValidationContext* context, Version contextVersion,
                     Version minSupportedVersion);

// Verifies that the two shapes are the same.
bool SameShape(const Shape& in1, const Shape& in2);

// Sets out to the same shape as in.
bool SetShape(const Shape& in, Shape* out);

// Returns the total number of elements, i.e. all the dimensions multiplied
// together. For a scalar, returns one.
uint32_t getNumberOfElements(const Shape& shape);
uint32_t getNumberOfElements(const Shape& shape, size_t firstAxisInclusive,
                             size_t lastAxisExclusive);

uint32_t getNumberOfDimensions(const Shape& shape);

uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);

uint32_t hasKnownRank(const Shape& shape);

// Converts an axis index from the range [-dims, dims) into the range [0, dims).
bool handleNegativeAxis(int32_t numberOfDimensions, int32_t* axis);

inline bool handleNegativeAxis(const Shape& shape, int32_t* axis) {
    return handleNegativeAxis(getNumberOfDimensions(shape), axis);
}

inline int32_t computeOutSize(int32_t imageSize, int32_t filterSize, int32_t stride,
                              int32_t paddingHead, int32_t paddingTail) {
    return (imageSize - filterSize + stride + paddingHead + paddingTail) / stride;
}
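
// Illustrative example (not part of the API): a 7-wide image with a 3-wide filter,
// stride 2, and no padding yields (7 - 3 + 2 + 0 + 0) / 2 = 3 output elements.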

inline int32_t computeOutSize(int32_t imageSize, int32_t filterSize, int32_t stride,
                              int32_t dilationRate, int32_t paddingHead, int32_t paddingTail) {
    int32_t effectiveFilterSize = ((filterSize - 1) * dilationRate + 1);
    return (imageSize - effectiveFilterSize + stride + paddingHead + paddingTail) / stride;
}
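
// Illustrative example (not part of the API): with a dilation rate of 2, a 3-wide
// filter has an effective size of (3 - 1) * 2 + 1 = 5, so a 7-wide image with
// stride 1 and no padding yields (7 - 5 + 1) / 1 = 3 output elements.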

inline int32_t computeOutSizeTransposeConv(int32_t imageSize, int32_t filterSize, int32_t stride,
                                           int32_t paddingHead, int32_t paddingTail) {
    return imageSize * stride + filterSize - stride - paddingHead - paddingTail;
}
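
// Illustrative example (not part of the API): this inverts the forward computation
// above; a 3-wide input with a 3-wide filter, stride 2, and no padding yields
// 3 * 2 + 3 - 2 - 0 - 0 = 7 output elements, matching the forward example.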

__wur bool QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
                              int32_t* shift);

__wur bool QuantizeMultiplierSmallerThanOne(double double_multiplier, int32_t* quantized_multiplier,
                                            int32_t* right_shift);

// Same as QuantizeMultiplierSmallerThanOne but returns the left shift (i.e. the
// negated right shift), so that it has the same interface as the
// QuantizeMultiplierGreaterThanOne and QuantizeMultiplier functions.
__wur bool QuantizeMultiplierSmallerThanOneExp(double double_multiplier,
                                               int32_t* quantized_multiplier, int32_t* left_shift);

__wur bool QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t* quantized_multiplier,
                                            int* left_shift);

__wur bool GetQuantizedConvolutionMultipler(const Shape& inputShape, const Shape& filterShape,
                                            const Shape& biasShape, const Shape& outputShape,
                                            double* multiplier);

void CalculateActivationRangeUint8(int32_t activation, const Shape& outputShape, int32_t* act_min,
                                   int32_t* act_max);

void CalculateActivationRangeInt8(int32_t activation, const Shape& outputShape, int32_t* act_min,
                                  int32_t* act_max);

void CalculateActivationRangeFloat(int32_t activation, float* activation_min,
                                   float* activation_max);

int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);

void calculateExplicitPaddingImpl(int32_t in_size, int32_t stride, int32_t dilation_factor,
                                  int32_t filter_size, int32_t padding_implicit,
                                  bool isTransposeConv, int32_t* padding_head,
                                  int32_t* padding_tail);

inline void calculateExplicitPadding(int32_t in_size, int32_t stride, int32_t dilation_factor,
                                     int32_t filter_size, int32_t padding_implicit,
                                     int32_t* padding_head, int32_t* padding_tail) {
    calculateExplicitPaddingImpl(in_size, stride, dilation_factor, filter_size, padding_implicit,
                                 /*isTransposeConv=*/false, padding_head, padding_tail);
}

inline void calculateExplicitPadding(int32_t in_size, int32_t stride, int32_t filter_size,
                                     int32_t padding_implicit, int32_t* padding_head,
                                     int32_t* padding_tail) {
    calculateExplicitPadding(in_size, stride, 1, filter_size, padding_implicit, padding_head,
                             padding_tail);
}

inline void calculateExplicitPaddingTransposeConv(int32_t in_size, int32_t stride,
                                                  int32_t filter_size, int32_t padding_implicit,
                                                  int32_t* padding_head, int32_t* padding_tail) {
    calculateExplicitPaddingImpl(in_size, stride, /*dilation_factor=*/1, filter_size,
                                 padding_implicit, /*isTransposeConv=*/true, padding_head,
                                 padding_tail);
}

inline PaddingScheme getPaddingScheme(int32_t inWidth, int32_t inHeight, int32_t strideWidth,
                                      int32_t strideHeight, int32_t filterWidth,
                                      int32_t filterHeight, int32_t paddingLeft,
                                      int32_t paddingRight, int32_t paddingTop,
                                      int32_t paddingBottom) {
    if (paddingLeft == 0 && paddingRight == 0 && paddingTop == 0 && paddingBottom == 0) {
        return kPaddingValid;
    }

    int32_t expectedPaddingLeft, expectedPaddingRight;
    int32_t expectedPaddingTop, expectedPaddingBottom;

    calculateExplicitPadding(inWidth, strideWidth, filterWidth, kPaddingSame, &expectedPaddingLeft,
                             &expectedPaddingRight);
    calculateExplicitPadding(inHeight, strideHeight, filterHeight, kPaddingSame,
                             &expectedPaddingTop, &expectedPaddingBottom);
    if (expectedPaddingLeft == paddingLeft && expectedPaddingRight == paddingRight &&
        expectedPaddingTop == paddingTop && expectedPaddingBottom == paddingBottom) {
        return kPaddingSame;
    } else {
        return kPaddingUnknown;
    }
}

// Reverses the order of the bits in the mask to match the order expected by the kernel.
inline int ReverseMaskBits(int mask, int num_dimensions) {
    int out = 0;
    for (int dim = 0; dim < num_dimensions; dim++) {
        out <<= 1;
        out += (mask & 1);
        mask >>= 1;
    }
    return out;
}
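
// Illustrative example (not part of the API): ReverseMaskBits(0b001, 3) == 0b100,
// since the bit for the first dimension becomes the most significant of the three.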

// Compute the positive remainder.
inline int32_t PositiveRemainder(int32_t dividend, int32_t divisor) {
    return (divisor + (dividend % divisor)) % divisor;
}
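
// Illustrative example (not part of the API): PositiveRemainder(-1, 5) == 4, whereas
// C++'s built-in % would give -1 % 5 == -1.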

// Compute clamped index.
inline int32_t ClampedIndex(int32_t index, int dim, bool pos_stride) {
    return pos_stride
                   ? (index >= dim ? dim
                                   : PositiveRemainder(std::min(std::max(index, -dim), dim), dim))
                   : (index < -dim
                              ? -1
                              : PositiveRemainder(std::min(std::max(index, -dim), dim - 1), dim));
}
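
// Illustrative examples (not part of the API), for dim == 4:
//   ClampedIndex(10, 4, true) == 4     (clamped to the end for a positive stride)
//   ClampedIndex(-1, 4, true) == 3     (negative indices wrap around)
//   ClampedIndex(-10, 4, false) == -1  (clamped to one before the start for a negative stride)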

// Broadcasts the two input shapes against one another and puts the result into the
// output shape. Returns true on success and false on error.
bool calculateBroadcastedShape(const Shape& in1, const Shape& in2, Shape* out);

// Dequantizes a value and quantizes it back using the new scale and offset.
template <typename T>
T requantize(T value, const Shape& oldShape, const Shape& newShape);
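
// Conceptually (a sketch, not necessarily the exact implementation): the real value
// represented by a quantized value q with scale s and zero point z is s * (q - z),
// so requantizing to a new scale s' and zero point z' amounts to
//   q' = round(s * (q - z) / s') + z'
// saturated to the range of T.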

// Preparation functions for the corresponding ops.
bool floorPrepare(const Shape& input, Shape* output);

bool depthwiseConvPrepare(const Shape& input, const Shape& filter, const Shape& bias,
                          int32_t padding_left, int32_t padding_right, int32_t padding_top,
                          int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
                          int32_t depth_multiplier, int32_t dilation_width_factor,
                          int32_t dilation_height_factor, Shape* output);

bool genericActivationPrepare(const Shape& input, Shape* output);

bool reshapePrepare(const Shape& input, const int32_t* targetDims, const int32_t targetDimsSize,
                    Shape* output);

bool depthToSpacePrepare(const Shape& input, int32_t blockSize, Shape* output);

bool spaceToDepthPrepare(const Shape& input, int32_t blockSize, Shape* output);

bool embeddingLookupPrepare(const Shape& valueShape, const Shape& lookupShape, Shape* outputShape);

bool hashtableLookupPrepare(const Shape& lookupShape, const Shape& keyShape,
                            const Shape& valueShape, Shape* outputShape, Shape* hitShape);

bool padPrepare(const Shape& input, const int32_t* paddingsData, const Shape& paddingsShape,
                Shape* output);

bool batchToSpacePrepare(const Shape& input, const int32_t* blockSizeData,
                         const Shape& blockSizeShape, Shape* output);

bool spaceToBatchPrepare(const Shape& input, const int32_t* blockSizeData,
                         const Shape& blockSizeShape, const int32_t* paddingsData,
                         const Shape& paddingsShape, Shape* output);

bool meanPrepare(const Shape& input, const int32_t* axisData, const Shape& axisShape, bool keepDims,
                 Shape* output);

bool argMinMaxPrepare(const Shape& input, int32_t axis, Shape* output);

bool splitPrepare(const Shape& input, int32_t axis, int32_t numOutputs, std::vector<Shape>* output);

bool groupedConvPrepare(const Shape& input, const Shape& filter, const Shape& bias,
                        int32_t padding_left, int32_t padding_right, int32_t padding_top,
                        int32_t padding_bottom, int32_t stride_width, int32_t stride_height,
                        int32_t numGroups, Shape* output);

// Transposes the first two dimensions.
template <typename T>
inline bool transposeFirstTwoDimensions(const T* buffer, const Shape& shape, T* transposedBuffer) {
    const int numDims = getNumberOfDimensions(shape);
    NN_RET_CHECK(numDims >= 2);
    const int firstDim = getSizeOfDimension(shape, 0);
    const int secondDim = getSizeOfDimension(shape, 1);
    int blockSize = 1;
    for (int i = 2; i < numDims; ++i) {
        blockSize *= getSizeOfDimension(shape, i);
    }

    for (int i = 0; i < firstDim; ++i) {
        for (int j = 0; j < secondDim; ++j) {
            for (int k = 0; k < blockSize; ++k) {
                transposedBuffer[(j * firstDim + i) * blockSize + k] =
                        buffer[(i * secondDim + j) * blockSize + k];
            }
        }
    }
    return true;
}
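
// Illustrative example (not part of the API): transposing a 2 x 3 buffer
// {1, 2, 3, 4, 5, 6} produces a 3 x 2 buffer {1, 4, 2, 5, 3, 6}.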

inline bool transposeFirstTwoDimensions(const Shape& shape, Shape* transposedShape) {
    NN_RET_CHECK(getNumberOfDimensions(shape) >= 2);
    *transposedShape = shape;
    transposedShape->dimensions[0] = shape.dimensions[1];
    transposedShape->dimensions[1] = shape.dimensions[0];
    return true;
}

// Given two 3-dimensional tensors, merges them along the third dimension. The
// merged tensor's third dimension size is the sum of the third dimension sizes
// of the two inputs.
template <typename T>
inline bool mergeThirdDimension(const T* bufferA, const std::vector<uint32_t>& dimsA,
                                const T* bufferB, const std::vector<uint32_t>& dimsB, T* merged) {
    NN_RET_CHECK_EQ(dimsA.size(), 3u);
    NN_RET_CHECK_EQ(dimsB.size(), 3u);

    NN_RET_CHECK_EQ(dimsA[0], dimsB[0]);
    NN_RET_CHECK_EQ(dimsA[1], dimsB[1]);

    for (unsigned int i = 0; i < dimsA[0]; ++i) {
        for (unsigned int j = 0; j < dimsA[1]; ++j) {
            for (unsigned int k = 0; k < dimsA[2]; ++k) {
                merged[(i * dimsA[1] + j) * (dimsA[2] + dimsB[2]) + k] =
                        bufferA[(i * dimsA[1] + j) * dimsA[2] + k];
            }
            for (unsigned int k = 0; k < dimsB[2]; ++k) {
                merged[(i * dimsA[1] + j) * (dimsA[2] + dimsB[2]) + dimsA[2] + k] =
                        bufferB[(i * dimsB[1] + j) * dimsB[2] + k];
            }
        }
    }
    return true;
}
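
// Illustrative example (not part of the API): merging A with dimensions {1, 2, 2} and
// data {a0, a1, a2, a3} with B with dimensions {1, 2, 1} and data {b0, b1} produces a
// {1, 2, 3} tensor with data {a0, a1, b0, a2, a3, b1}.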

template <typename T>
inline T saturateCast(int32_t val);

template <>
inline uint8_t saturateCast<uint8_t>(int32_t val) {
    return static_cast<uint8_t>(std::max(0, std::min(255, val)));
}

template <>
inline int8_t saturateCast<int8_t>(int32_t val) {
    return static_cast<int8_t>(std::max(-128, std::min(127, val)));
}

}  // namespace nn
}  // namespace android

#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_OPERATIONS_UTILS_H