/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Types.h"
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
#include "support/MemorySupport.h"

#include <utility> // std::move

namespace arm_compute
{
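// Quantizes down an int32 GEMMLowp result to QASYMM8 using a fixed-point multiplier, a shift and an
// offset applied after the shift; the result is clamped to the [min, max] range before being written.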
void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                    int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                                                                    int min, int max)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
}

void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                    int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                                                                    int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_multiplier = result_fixedpoint_multiplier;
    info.gemmlowp_shift      = result_shift;
    info.gemmlowp_offset     = result_offset_after_shift;
    info.gemmlowp_min_bound  = min;
    info.gemmlowp_max_bound  = max;
    info.output_data_type    = DataType::QASYMM8;
    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
    k->configure(compile_context, input, bias, output, &info);
    _kernel = std::move(k);
}

Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
                                                                     int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_min_bound = min;
    info.gemmlowp_max_bound = max;
    info.output_data_type   = DataType::QASYMM8;
    return CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::validate(input, bias, output, &info);
}

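// Same fixed-point requantization as above, but producing a signed 8-bit (QASYMM8_SIGNED) output.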
void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                   int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                                                                   int min, int max)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
}

void CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                   int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                                                                   int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_multiplier = result_fixedpoint_multiplier;
    info.gemmlowp_shift      = result_shift;
    info.gemmlowp_offset     = result_offset_after_shift;
    info.gemmlowp_min_bound  = min;
    info.gemmlowp_max_bound  = max;
    info.output_data_type    = DataType::QASYMM8_SIGNED;
    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
    k->configure(compile_context, input, bias, output, &info);
    _kernel = std::move(k);
}

Status CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
                                                                    int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_min_bound = min;
    info.gemmlowp_max_bound = max;
    info.output_data_type   = DataType::QASYMM8_SIGNED;
    return CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::validate(input, bias, output, &info);
}

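// Fixed-point requantization to 16-bit symmetric (QSYMM16) output; no offset is applied after the shift.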
void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                    int result_fixedpoint_multiplier, int result_shift,
                                                                    int min, int max)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, result_fixedpoint_multiplier, result_shift, min, max);
}

void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
                                                                    int result_fixedpoint_multiplier, int result_shift,
                                                                    int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_multiplier = result_fixedpoint_multiplier;
    info.gemmlowp_shift      = result_shift;
    info.gemmlowp_min_bound  = min;
    info.gemmlowp_max_bound  = max;
    info.output_data_type    = DataType::QSYMM16;
    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
    k->configure(compile_context, input, bias, output, &info);
    _kernel = std::move(k);
}

Status CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
                                                                     int min, int max)
{
    GEMMLowpOutputStageInfo info{};
    info.gemmlowp_min_bound = min;
    info.gemmlowp_max_bound = max;
    info.output_data_type   = DataType::QSYMM16;
    return CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::validate(input, bias, output, &info);
}

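// Generic output stage: the quantize-down kernel is chosen at configure time from the
// GEMMLowpOutputStageType carried in the GEMMLowpOutputStageInfo descriptor.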
void CLGEMMLowpOutputStage::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, bias, output, info);
}

void CLGEMMLowpOutputStage::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

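    // Instantiate and configure the kernel that matches the requested output stage type.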
    switch(info.type)
    {
        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
        {
            auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel>();
            k->configure(compile_context, input, bias, output, &info);
            _kernel = std::move(k);
            break;
        }
        case GEMMLowpOutputStageType::QUANTIZE_DOWN:
        {
            auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleKernel>();
            k->configure(compile_context, input, bias, output, &info);
            _kernel = std::move(k);
            break;
        }
        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
        {
            auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel>();
            k->configure(compile_context, input, bias, output, &info);
            _kernel = std::move(k);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("Unsupported GEMMLowpOutputStage type.");
    }
}

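// Validation mirrors configure(): check that the output is a supported quantized type, then
// defer to the validate() of the kernel selected by info.type.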
Status CLGEMMLowpOutputStage::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16);

    switch(info.type)
    {
        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT:
            return CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::validate(input, bias, output, &info);
        case GEMMLowpOutputStageType::QUANTIZE_DOWN:
            return CLGEMMLowpQuantizeDownInt32ScaleKernel::validate(input, bias, output, &info);
        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
            return CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(input, bias, output, &info);
        default:
            return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported GEMMLowpOutputStage type.");
    }
}
} // namespace arm_compute