/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OperationsUtils.h"
#define LOG_TAG "Operations"

#include "HalInterfaces.h"
#include "IndexedShapeWrapper.h"
#include "OperationResolver.h"
#include "Tracing.h"

#include <algorithm>
#include <cmath>

namespace android {
namespace nn {
namespace quantize {

constexpr uint32_t kNumInputs = 1;
constexpr uint32_t kInputTensor = 0;

constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

using namespace hal;

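// Converts floating-point input to unsigned 8-bit output using the output
// shape's scale and zero-point offset, clamping each value to [0, 255].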
template <typename T>
bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {
    NNTRACE_COMP("quantizeToQuant8");
    uint32_t size = getNumberOfElements(outputShape);
    for (uint32_t i = 0; i < size; ++i) {
        outputData[i] = static_cast<uint8_t>(std::max<float>(
                0.0f, std::min<float>(255.0f, outputShape.offset + std::round(inputData[i] /
                                                                              outputShape.scale))));
    }
    return true;
}

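// Signed variant of quantizeToQuant8: produces int8 output clamped to
// [-128, 127].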
template <typename T>
bool quantizeToQuant8Signed(const T* inputData, int8_t* outputData, const Shape& outputShape) {
    NNTRACE_COMP("quantizeToQuant8Signed");
    uint32_t size = getNumberOfElements(outputShape);
    for (uint32_t i = 0; i < size; ++i) {
        outputData[i] = static_cast<int8_t>(std::max<float>(
                -128.0f,
                std::min<float>(127.0f, outputShape.offset +
                                                std::round(inputData[i] / outputShape.scale))));
    }
    return true;
}

}  // namespace

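// Validates operand counts and types: the input must be a float16 or float32
// tensor and the output an asymmetric quant8 tensor. Signed quant8 output
// requires HAL version 1.3; unsigned output requires 1.2.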
bool validate(const IOperationValidationContext* context) {
    NN_RET_CHECK_EQ(context->getNumInputs(), kNumInputs);
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);

    const OperandType inputType = context->getInputType(kInputTensor);
    const OperandType outputType = context->getOutputType(kOutputTensor);

    NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
                 inputType == OperandType::TENSOR_FLOAT32)
            << "Unsupported input operand type for QUANTIZE op: " << toString(inputType);
    NN_RET_CHECK(outputType == OperandType::TENSOR_QUANT8_ASYMM ||
                 outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
            << "Unsupported output operand type for QUANTIZE op: " << toString(outputType);
    if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
        return validateHalVersion(context, HalVersion::V1_3);
    } else {
        return validateHalVersion(context, HalVersion::V1_2);
    }
}

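// Shapes the output tensor to match the input's dimensions; the output's
// quantization parameters are left untouched.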
bool prepare(IOperationExecutionContext* context) {
    const Shape& input = context->getInputShape(kInputTensor);
    Shape output = context->getOutputShape(kOutputTensor);
    output.dimensions = input.dimensions;
    return context->setOutputShape(kOutputTensor, output);
}

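// Dispatches to the quantization helper matching the input/output operand
// type pair; any other combination fails the NN_RET_CHECK at the end.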
bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;

    const OperandType inputType = context->getInputType(kInputTensor);
    const OperandType outputType = context->getOutputType(kOutputTensor);
    if (inputType == OperandType::TENSOR_FLOAT32) {
        if (outputType == OperandType::TENSOR_QUANT8_ASYMM) {
            return quantizeToQuant8<float>(context->getInputBuffer<float>(kInputTensor),
                                           context->getOutputBuffer<uint8_t>(kOutputTensor),
                                           context->getOutputShape(kOutputTensor));
        } else if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            return quantizeToQuant8Signed<float>(context->getInputBuffer<float>(kInputTensor),
                                                 context->getOutputBuffer<int8_t>(kOutputTensor),
                                                 context->getOutputShape(kOutputTensor));
        }
    } else if (inputType == OperandType::TENSOR_FLOAT16) {
        if (outputType == OperandType::TENSOR_QUANT8_ASYMM) {
            return quantizeToQuant8<_Float16>(context->getInputBuffer<_Float16>(kInputTensor),
                                              context->getOutputBuffer<uint8_t>(kOutputTensor),
                                              context->getOutputShape(kOutputTensor));
        } else if (outputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
            return quantizeToQuant8Signed<_Float16>(context->getInputBuffer<_Float16>(kInputTensor),
                                                    context->getOutputBuffer<int8_t>(kOutputTensor),
                                                    context->getOutputShape(kOutputTensor));
        }
    }
    NN_RET_CHECK_FAIL() << "Unsupported tensor types combination for QUANTIZE op. (input type: "
                        << toString(inputType)
                        << " output type: " << toString(context->getOutputType(kOutputTensor))
                        << ")";
}

}  // namespace quantize

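// Registers the QUANTIZE operation; zero-sized inputs are permitted and
// short-circuited in execute() above.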
NN_REGISTER_OPERATION(QUANTIZE, "QUANTIZE", quantize::validate, quantize::prepare,
                      quantize::execute, .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android