/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/gemm/native/CLGEMMNativeKernelConfiguration.h"
#include "src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/runtime/CL/gemm/CLGEMMKernelSelection.h"
#include "support/MemorySupport.h"

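// Minimal usage sketch, assuming ICLTensor objects a (QASYMM8 LHS), b (QASYMM8 RHS
// weights) and dst (S32 accumulator output) have already been initialised and
// allocated; with a default GEMMInfo the raw GEMMLowp result is written to dst and
// the output stage is left to a separate function:
//
//     CLGEMMLowpMatrixMultiplyCore gemmlowp;
//     gemmlowp.configure(&a, &b, nullptr, &dst, GEMMInfo());
//     gemmlowp.run(); // weights are reshaped/reduced once in prepare(), then reused
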
namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::cl_gemm;

namespace
{
inline bool is_gemm_reshaped(unsigned int m, unsigned int n, unsigned int k, DataType data_type, bool reshape_b_only_on_first_run)
{
    std::unique_ptr<ICLGEMMKernelSelection> gemm_kernel = CLGEMMKernelSelectionFactory::create(CLScheduler::get().target());
    ARM_COMPUTE_ERROR_ON_NULLPTR(gemm_kernel.get());

    CLGEMMKernelSelectionParams params;
    params.m               = m;
    params.n               = n;
    params.k               = k;
    params.is_rhs_constant = reshape_b_only_on_first_run;
    params.data_type       = data_type;

    switch(gemm_kernel->select_kernel(params))
    {
        case CLGEMMKernelType::NATIVE:
            return false;
        case CLGEMMKernelType::RESHAPED_ONLY_RHS:
            return true;
        default:
            ARM_COMPUTE_ERROR("Not supported gemmlowp kernel!");
    }
}
} // namespace

CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _weights_to_qasymm8(support::cpp14::make_unique<CLDepthConvertLayerKernel>()),
      _mm_native_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
      _mm_reshaped_only_rhs_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
      _mtx_b_reshape_kernel(support::cpp14::make_unique<CLGEMMReshapeRHSMatrixKernel>()),
      _mtx_a_reduction_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixAReductionKernel>()),
      _mtx_b_reduction_kernel(support::cpp14::make_unique<CLGEMMLowpMatrixBReductionKernel>()),
      _offset_contribution_kernel(support::cpp14::make_unique<CLGEMMLowpOffsetContributionKernel>()),
      _offset_contribution_output_stage_kernel(support::cpp14::make_unique<CLGEMMLowpOffsetContributionOutputStageKernel>()),
      _qasymm8_weights(),
      _vector_sum_col(),
      _vector_sum_row(),
      _tmp_b(),
      _mm_result_s32(),
      _gemm_output_stage_multipliers(),
      _gemm_output_stage_shifts(),
      _matrix_a(nullptr),
      _original_b(nullptr),
      _output(nullptr),
      _a_offset(0),
      _b_offset(0),
      _is_gemm_reshaped(true),
      _reshape_b_only_on_first_run(false),
      _is_prepared(false),
      _run_output_stage(false),
      _convert_to_qasymm8(false),
      _run_offset_contribution(false)
{
}

CLGEMMLowpMatrixMultiplyCore::~CLGEMMLowpMatrixMultiplyCore() = default;

void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), a, b, c, output, gemm_info);
}

void CLGEMMLowpMatrixMultiplyCore::configure(const CLCompileContext &compile_context, const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), c != nullptr ? c->info() : nullptr, output->info(), gemm_info));

    _is_prepared                 = false;
    _original_b                  = b;
    _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
    _a_offset                    = a->info()->quantization_info().uniform().offset;
    _matrix_a                    = a;
    _output                      = output;

    _convert_to_qasymm8 = is_data_type_quantized_per_channel(b->info()->data_type()) && is_data_type_quantized_symmetric(b->info()->data_type())
                          && a->info()->data_type() == DataType::QASYMM8;
    _b_offset = _convert_to_qasymm8 ? -128 : b->info()->quantization_info().uniform().offset;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    // Set the target for the kernels
    _mm_native_kernel->set_target(gpu_target);
    _mm_reshaped_only_rhs_kernel->set_target(gpu_target);

    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Arguments used by GEMMReshapeInfo
    // If we pass matrix A and matrix B reshaped to the matrix multiply kernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height
    // to GEMMReshapeInfo so that the kernel knows how the matrices have been reshaped
    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1);
    const unsigned int n                       = b->info()->dimension(0);
    const unsigned int k                       = a->info()->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->info()->dimension(3) : a->info()->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    // Check if we need to reshape the matrix A and matrix B
    _is_gemm_reshaped = is_gemm_reshaped(m, n, k, a->info()->data_type(), _reshape_b_only_on_first_run);

    if(_convert_to_qasymm8)
    {
        // Set data type for converted weights
        TensorInfo weights_info(*b->info());
        weights_info.set_data_type(DataType::QASYMM8);
        _qasymm8_weights.allocator()->init(weights_info);
        _weights_to_qasymm8->configure(compile_context, b, &_qasymm8_weights, ConvertPolicy::WRAP, 0);
    }

    const ICLTensor *matrix_b = _convert_to_qasymm8 ? &_qasymm8_weights : b;
    if(_is_gemm_reshaped)
    {
        matrix_b = &_tmp_b;

        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_tmp_b);
        }

        // Pick up the GEMM configuration
        // Whether the data type is DataType::QASYMM8 or DataType::QASYMM8_SIGNED doesn't matter here, since it only affects the shape configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Configure reshape RHS kernel
        _mtx_b_reshape_kernel->configure(compile_context, _convert_to_qasymm8 ? &_qasymm8_weights : b, &_tmp_b, rhs_info);
    }

    // Using default reduction info
    const GEMMLowpReductionKernelInfo reduction_info {};

    // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0)
    {
        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);
        _vector_sum_col.allocator()->init(info_vector_sum_col);
        if(!_reshape_b_only_on_first_run)
        {
            _memory_group.manage(&_vector_sum_col);
        }

        // Configure matrix B reduction kernel
        _mtx_b_reduction_kernel->configure(compile_context, _convert_to_qasymm8 ? &_qasymm8_weights : b, &_vector_sum_col, reduction_info);
    }

    // Initialize matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);
        _vector_sum_row.allocator()->init(info_vector_sum_row);
        _memory_group.manage(&_vector_sum_row);

        // Configure matrix A reduction kernel
        _mtx_a_reduction_kernel->configure(compile_context, a, &_vector_sum_row, reduction_info);
    }

    GEMMKernelInfo gemm_kernel_info;
    gemm_kernel_info.m                       = m;
    gemm_kernel_info.n                       = n;
    gemm_kernel_info.k                       = k;
    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
    gemm_kernel_info.lhs_info                = lhs_info;
    gemm_kernel_info.rhs_info                = rhs_info;
    gemm_kernel_info.a_offset                = _a_offset;
    gemm_kernel_info.b_offset                = _b_offset;
    // If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        // Configure offset contribution kernel
        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;

        _gemm_output_stage_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
        _gemm_output_stage_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
        gemmlowp_output_stage.output_data_type        = _matrix_a->info()->data_type();

        gemm_kernel_info.output_stage = gemmlowp_output_stage;

        if(_is_gemm_reshaped && gemmlowp_output_stage.type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
        {
            // Configure and tune matrix multiply kernel with fused output stage
            _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, output, gemm_kernel_info, _a_offset == 0 ? nullptr : &_vector_sum_col,
                                                    _b_offset == 0 ? nullptr : &_vector_sum_row, c, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
        }
        else
        {
            _run_output_stage = true;

            _memory_group.manage(&_mm_result_s32);

            if(_is_gemm_reshaped)
            {
                _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, &_mm_result_s32, gemm_kernel_info);
            }
            else
            {
                // Pick up the GEMM configuration
                std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

                // Configure matrix multiply kernel
                _mm_native_kernel->configure(compile_context, _matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));

                _offset_contribution_output_stage_kernel->configure(compile_context, &_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output,
                                                                    a->info()->dimension(0),
                                                                    _a_offset, _b_offset, gemmlowp_output_stage, &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
                _mm_result_s32.allocator()->allocate();
            }
        }

        _gemm_output_stage_multipliers.allocator()->allocate();
        _gemm_output_stage_shifts.allocator()->allocate();
        // Compute GEMM output multipliers and shifts for output stage
        _gemm_output_stage_multipliers.map();
        _gemm_output_stage_shifts.map();
        std::memcpy(_gemm_output_stage_multipliers.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.data(), num_filters * sizeof(int32_t));
        std::memcpy(_gemm_output_stage_shifts.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_shifts.data(), num_filters * sizeof(int32_t));
        _gemm_output_stage_multipliers.unmap();
        _gemm_output_stage_shifts.unmap();
    }
    else
    {
        _run_offset_contribution = true;
        if(_is_gemm_reshaped)
        {
            // Configure and tune matrix multiply kernel
            _mm_reshaped_only_rhs_kernel->configure(compile_context, _matrix_a, matrix_b, output, gemm_kernel_info);
        }
        else
        {
            // Pick up the GEMM configuration
            std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

            // Configure matrix multiply kernel
            _mm_native_kernel->configure(compile_context, _matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
        }

        // Configure offset contribution kernel
        _offset_contribution_kernel->configure(compile_context, output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, a->info()->dimension(0), _a_offset,
                                               _b_offset);
    }

    // Allocate tensors
    if(_is_gemm_reshaped)
    {
        if(!_reshape_b_only_on_first_run)
        {
            _tmp_b.allocator()->allocate();
        }
    }

    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        _vector_sum_col.allocator()->allocate();
    }

    if(_b_offset != 0)
    {
        _vector_sum_row.allocator()->allocate();
    }
}

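// Sketch of a GEMMInfo that exercises the fused output-stage path in configure() above,
// reusing the tensor names from the earlier sketch (dst_qasymm8 is an assumed QASYMM8
// output tensor). The field names follow GEMMLowpOutputStageInfo; the multiplier, shift
// and bound values, as well as the set_gemmlowp_output_stage() setter, are illustrative
// assumptions rather than values taken from this file:
//
//     GEMMLowpOutputStageInfo output_stage{};
//     output_stage.type                 = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
//     output_stage.gemmlowp_offset      = 0;
//     output_stage.gemmlowp_multiplier  = 1073741824; // example fixed-point multiplier
//     output_stage.gemmlowp_shift       = 1;          // example right shift
//     output_stage.gemmlowp_multipliers = { output_stage.gemmlowp_multiplier };
//     output_stage.gemmlowp_shifts      = { output_stage.gemmlowp_shift };
//     output_stage.gemmlowp_min_bound   = 0;
//     output_stage.gemmlowp_max_bound   = 255;
//
//     GEMMInfo gemm_info{};
//     gemm_info.set_gemmlowp_output_stage(output_stage); // assumed setter
//     gemmlowp.configure(&a, &b, nullptr, &dst_qasymm8, gemm_info);
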
Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
    ARM_COMPUTE_RETURN_ERROR_ON(a->data_type() == DataType::QASYMM8 && b->data_type() == DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON(a->data_type() == DataType::QASYMM8_SIGNED && b->data_type() == DataType::QASYMM8);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");

    int32_t a_offset = a->quantization_info().uniform().offset;
    int32_t b_offset = b->quantization_info().uniform().offset;

    const ITensorInfo *matrix_a_info = a;

    TensorInfo        tmp_b_info{};
    GEMMRHSMatrixInfo rhs_info;
    GEMMLHSMatrixInfo lhs_info;

    // Get the GPU target
    const GPUTarget gpu_target = CLScheduler::get().target();

    bool               reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
    const unsigned int m                       = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
    const unsigned int n                       = b->dimension(0);
    const unsigned int k                       = a->dimension(0);
    const unsigned int batch_size              = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2);
    const int          depth_output_gemm3d     = gemm_info.depth_output_gemm3d();

    bool reshape_matrix_b = is_gemm_reshaped(m, n, k, a->data_type(), gemm_info.reshape_b_only_on_first_run());

    const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);

    bool convert_to_qasymm8 = is_data_type_quantized_per_channel(b->data_type()) && is_data_type_quantized_symmetric(b->data_type())
                              && is_data_type_quantized_asymmetric(a->data_type());
    TensorInfo weights_info(*b);
    if(convert_to_qasymm8)
    {
        b_offset = -128;
        weights_info.set_data_type(DataType::QASYMM8);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConvertLayerKernel::validate(b, &weights_info, ConvertPolicy::WRAP, 0));
    }
    const ITensorInfo *matrix_b_info = &weights_info;
    if(reshape_matrix_b)
    {
        matrix_b_info = &tmp_b_info;

        // Pick up the GEMM configuration
        std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

        // Validate reshape RHS kernel
        auto_init_if_empty(tmp_b_info, weights_info.clone()->set_tensor_shape(compute_rhs_reshaped_shape(weights_info, rhs_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(&weights_info, &tmp_b_info, rhs_info));
    }

    TensorInfo info_vector_sum_col{};
    TensorInfo info_vector_sum_row{};

    const GEMMLowpReductionKernelInfo reduction_info;
    // Validate matrix B reduction kernel only if a_offset is not equal to 0
    if(a_offset != 0)
    {
        info_vector_sum_col = TensorInfo(compute_reductionA_shape(weights_info), 1, DataType::S32);

        // Validate matrix B reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(&weights_info, &info_vector_sum_col, reduction_info));
    }

    // Validate matrix A reduction kernel only if b_offset is not equal to 0
    if(b_offset != 0)
    {
        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);

        // Validate matrix A reduction kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row, reduction_info));
    }

    GEMMKernelInfo gemm_kernel_info;
    gemm_kernel_info.m                       = m;
    gemm_kernel_info.n                       = n;
    gemm_kernel_info.k                       = k;
    gemm_kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
    gemm_kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d;
    gemm_kernel_info.lhs_info                = lhs_info;
    gemm_kernel_info.rhs_info                = rhs_info;
    gemm_kernel_info.a_offset                = a_offset;
    gemm_kernel_info.b_offset                = b_offset;
    if(gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
    {
        const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;

        const TensorInfo gemm_output_stage_multipliers_shifts_info(TensorInfo(TensorShape(num_filters), 1, DataType::S32));

        GEMMLowpOutputStageInfo gemmlowp_output_stage = gemm_info.gemmlowp_output_stage();
        gemmlowp_output_stage.output_data_type        = a->data_type();

        gemm_kernel_info.output_stage = gemmlowp_output_stage;
        if(reshape_matrix_b && gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
        {
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info,
                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                                c,
                                                                                                &gemm_output_stage_multipliers_shifts_info,
                                                                                                &gemm_output_stage_multipliers_shifts_info));
        }
        else
        {
            TensorInfo mm_result_s32_info{};

            if(reshape_matrix_b)
            {
                // Output tensor auto initialization if not yet initialized
                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));

                // Validate matrix multiply
                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, gemm_kernel_info));
            }
            else
            {
                // Output tensor auto initialization if not yet initialized
                auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, false, reshape_info)).set_data_type(DataType::S32));

                // Pick up the GEMM configuration
                std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

                // Validate matrix multiply
                ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
            }

            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
                                                                                                a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                                b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                                c,
                                                                                                output,
                                                                                                a_offset, b_offset,
                                                                                                gemmlowp_output_stage,
                                                                                                &gemm_output_stage_multipliers_shifts_info,
                                                                                                &gemm_output_stage_multipliers_shifts_info));
        }
    }
    else
    {
        if(reshape_matrix_b)
        {
            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, gemm_kernel_info));
        }
        else
        {
            // Pick up the GEMM configuration
            std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);

            // Validate matrix multiply
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyNativeKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
        }

        if(output->total_size() != 0)
        {
            // Validate offset contribution kernel
            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
                                                                                     a_offset == 0 ? nullptr : &info_vector_sum_col,
                                                                                     b_offset == 0 ? nullptr : &info_vector_sum_row,
                                                                                     c,
                                                                                     a_offset, b_offset));
        }
    }

    return Status{};
}

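// validate() can also be called without backing CL buffers, using TensorInfo
// descriptors only. A sketch with assumed shapes (dimension 0 is K for the LHS and
// N for the RHS) and assumed quantization parameters:
//
//     const TensorInfo a_info(TensorShape(64U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));
//     const TensorInfo b_info(TensorShape(16U, 64U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 3));
//     const TensorInfo dst_info(TensorShape(16U, 32U), 1, DataType::S32);
//     const Status     status = CLGEMMLowpMatrixMultiplyCore::validate(&a_info, &b_info, nullptr, &dst_info, GEMMInfo());
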
void CLGEMMLowpMatrixMultiplyCore::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_gemm_reshaped)
    {
        if(!_reshape_b_only_on_first_run)
        {
            // Run reshape matrix B
            CLScheduler::get().enqueue(*_mtx_b_reshape_kernel, false);
        }
    }

    // Run matrix B reduction kernel only if _a_offset is not equal to 0
    if(_a_offset != 0 && !_reshape_b_only_on_first_run)
    {
        CLScheduler::get().enqueue(*_mtx_b_reduction_kernel, false);
    }

    // Run matrix A reduction kernel only if _b_offset is not equal to 0
    if(_b_offset != 0)
    {
        CLScheduler::get().enqueue(*_mtx_a_reduction_kernel, false);
    }

    // Run matrix multiply
    if(_is_gemm_reshaped)
    {
        CLScheduler::get().enqueue(*_mm_reshaped_only_rhs_kernel, false);
    }
    else
    {
        CLScheduler::get().enqueue(*_mm_native_kernel, false);
    }
    if(_run_output_stage)
    {
        // Run offset contribution/output stage kernel
        CLScheduler::get().enqueue(*_offset_contribution_output_stage_kernel, true);
    }
    if(_run_offset_contribution)
    {
        // Run offset contribution kernel
        CLScheduler::get().enqueue(*_offset_contribution_kernel, true);
    }
}

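// With reshape_b_only_on_first_run enabled (the GEMMInfo default), prepare() below
// reshapes matrix B (and computes its column sums where needed) once; subsequent
// run() calls only need fresh LHS data. A sketch of the assumed steady-state loop:
//
//     for(int i = 0; i < num_inputs; ++i) // hypothetical outer loop
//     {
//         // refresh the contents of tensor a for this iteration
//         gemmlowp.run(); // matrix B reshape/reduction is skipped after the first call
//     }
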
void CLGEMMLowpMatrixMultiplyCore::prepare()
{
    if(!_is_prepared)
    {
        if(_convert_to_qasymm8)
        {
            _qasymm8_weights.allocator()->allocate();
            CLScheduler::get().enqueue(*_weights_to_qasymm8, false);
        }

        if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
        {
            ARM_COMPUTE_ERROR_ON(!_original_b->is_used());

            // Run reshape kernel and mark original weights tensor as unused
            _tmp_b.allocator()->allocate();
            CLScheduler::get().enqueue(*_mtx_b_reshape_kernel, false);
            _original_b->mark_as_unused();
        }

        // Run matrix B reduction kernel only if _a_offset is not equal to 0
        if(_a_offset != 0 && _reshape_b_only_on_first_run)
        {
            _vector_sum_col.allocator()->allocate();
            CLScheduler::get().enqueue(*_mtx_b_reduction_kernel, false);
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
} // namespace arm_compute