/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLCol2ImKernel.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "support/Cast.h"
#include "support/MemorySupport.h"

#include <cmath>
#include <memory>
#include <set> // std::set is used for the fused-activation whitelist below
#include <tuple>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;
using namespace arm_compute::utils::cast;

CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
    : _weights_reshape_kernel(support::cpp14::make_unique<CLWeightsReshapeKernel>())
{
}

CLConvolutionLayerReshapeWeights::~CLConvolutionLayerReshapeWeights() = default;

void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), weights, biases, output, num_groups);
}

void CLConvolutionLayerReshapeWeights::configure(const CLCompileContext &compile_context, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, unsigned int num_groups)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayerReshapeWeights::validate(weights->info(),
                                                                          (biases != nullptr) ? biases->info() : nullptr,
                                                                          output->info(),
                                                                          num_groups));

    const bool       append_biases = (biases != nullptr) && !is_data_type_quantized_asymmetric(weights->info()->data_type());
    const ICLTensor *biases_to_use = (append_biases) ? biases : nullptr;

    _weights_reshape_kernel->configure(compile_context, weights, biases_to_use, output, num_groups);

    output->info()->set_quantization_info(weights->info()->quantization_info());
}

Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    if(biases != nullptr)
    {
        const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
        ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if((output != nullptr) && (output->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
        ARM_COMPUTE_RETURN_ON_ERROR(CLWeightsReshapeKernel::validate(weights, biases, output, num_groups));
    }

    return Status{};
}

void CLConvolutionLayerReshapeWeights::run()
{
    CLScheduler::get().enqueue(*_weights_reshape_kernel);
}

CLGEMMConvolutionLayer::CLGEMMConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _reshape_weights(), _reshape_weights_managed(), _im2col_kernel(support::cpp14::make_unique<CLIm2ColKernel>()),
      _mm_gemm(memory_manager, weights_manager), _mm_gemmlowp(memory_manager), _col2im_kernel(support::cpp14::make_unique<CLCol2ImKernel>()), _activationlayer_function(), _original_weights(nullptr),
      _im2col_output(), _weights_reshaped(), _gemm_output(), _skip_im2col(false), _skip_col2im(false), _is_quantized(false), _fuse_activation(true), _is_prepared(false)
{
}

CLGEMMConvolutionLayer::~CLGEMMConvolutionLayer() = default;

void CLGEMMConvolutionLayer::configure_mm(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                          const GEMMLowpOutputStageInfo &gemmlowp_output_stage,
                                          int gemm_3d_depth, const ActivationLayerInfo &act_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
    ARM_COMPUTE_ERROR_THROW_ON(validate_mm(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), gemmlowp_output_stage, gemm_3d_depth, _skip_im2col, act_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         _skip_im2col,          // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
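        // The asymmetric scheme maps real = scale * (quantized - offset); passing the offsets
        // negated here gives the GEMMLowp core the sign convention its offset-contribution
        // stage expects when accumulating the cross terms of the quantized dot product.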
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo, as input and weights could be used in other convolution layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply function
        _mm_gemm.configure(compile_context, input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

Status CLGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                           const GEMMLowpOutputStageInfo &gemmlowp_output_stage, int gemm_3d_depth, bool skip_im2col, const ActivationLayerInfo &act_info)
{
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const GEMMInfo &gemm_info = GEMMInfo(false,                 // is_a_reshaped
                                         false,                 // is_b_reshaped
                                         true,                  // reshape_b_only_on_first_run
                                         gemm_3d_depth,         // depth_output_gemm3d
                                         skip_im2col,           // reinterpret_input_as_3d
                                         false,                 // retain_internal_weights
                                         gemmlowp_output_stage, // gemmlowp_output_stage
                                         false,                 // fp_mixed_precision
                                         true,                  // broadcast_bias
                                         act_info);             // activation_info

    if(is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        std::unique_ptr<ITensorInfo> input_qa   = input->clone();
        std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
        input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Perform validation step on GEMMLowp
        return CLGEMMLowpMatrixMultiplyCore::validate(input_qa.get(), weights_qa.get(), biases, output, gemm_info);
    }
    else
    {
        // Perform validation step on Matrix multiply function
        return CLGEMM::validate(input, weights, biases, output, 1.0f, 1.0f, gemm_info);
    }
}

void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, weights_info, dilation, act_info, num_groups);
}

void CLGEMMConvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                       const PadStrideInfo &conv_info,
                                       const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMConvolutionLayer::validate(input->info(),
                                                                weights->info(),
                                                                biases != nullptr ? biases->info() : nullptr,
                                                                output->info(),
                                                                conv_info,
                                                                weights_info,
                                                                dilation,
                                                                act_info,
                                                                num_groups));

    const DataType   data_type   = input->info()->data_type();
    const DataLayout data_layout = input->info()->data_layout();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->info()->dimension(idx_width);
    const unsigned int kernel_height = weights->info()->dimension(idx_height);
    const unsigned int num_kernels   = weights->info()->dimension(idx_kernels);

    const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();

    _is_prepared      = weights_info.retain_internal_weights();
    _original_weights = weights;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
    _skip_im2col      = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    _skip_col2im      = data_layout == DataLayout::NHWC;
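    // Rationale for the skip flags: a 1x1, stride-1 convolution on NHWC data is already a plain
    // matrix multiply over the channel dimension, so no im2col transform is needed; and in NHWC
    // the GEMM output already matches the output tensor layout, so col2im is only needed for NCHW.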

    // Only for quantized types are there a few cases where we cannot fuse the activation function in GEMM
    _fuse_activation = true;

    // Set the GPU target for im2col and col2im
    _im2col_kernel->set_target(CLScheduler::get().target());
    _col2im_kernel->set_target(CLScheduler::get().target());

    const ICLTensor *gemm_input_to_use  = input;
    ICLTensor       *gemm_output_to_use = output;

    // Get parameters from conv_info
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(stride_x, stride_y) = conv_info.stride();

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;
    std::tie(conv_w, conv_h) = scaled_dimensions(input->info()->dimension(idx_width),
                                                 input->info()->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ICLTensor *biases_to_use = biases;
    bool             append_bias   = false;

    ICLTensor *weights_to_use = &_weights_reshaped;
    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be used with the NCHW data layout
        // Since a utility function to reshape the biases is missing, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, biases, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, biases, &_weights_reshaped, num_groups);
        }
    }
    else
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed.configure(compile_context, weights, nullptr, num_groups);
            weights_to_use = utils::cast::polymorphic_downcast<ICLTensor *>(_weights_manager->acquire(weights, &_reshape_weights_managed));
        }
        else
        {
            _reshape_weights.configure(compile_context, weights, nullptr, &_weights_reshaped, num_groups);
        }
    }

    // Create tensor to store im2col reshaped inputs
    if(!_skip_im2col)
    {
        _memory_group.manage(&_im2col_output);

        // Configure and tune im2col. im2col output shape is auto-initialized
        _im2col_kernel->configure(compile_context, input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation, num_groups);

        // Set quantization info
        _im2col_output.info()->set_quantization_info(input->info()->quantization_info());
        CLScheduler::get().tune_kernel_static(*_im2col_kernel);

        // Update GEMM input
        gemm_input_to_use = &_im2col_output;
    }

    // Create GEMM output tensor
    if(!_skip_col2im)
    {
        TensorShape shape_gemm;

        // If we cannot skip col2im it means we run im2col as well
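        // The GEMM output is therefore a 2D matrix per group/batch: one row per output spatial
        // position (conv_w * conv_h rows) and one column per kernel in the group (num_kernels / num_groups)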
        shape_gemm = _im2col_output.info()->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        TensorInfo info_gemm(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->info()->quantization_info()).set_data_layout(input->info()->data_layout());
        _gemm_output.allocator()->init(info_gemm);
        _memory_group.manage(&_gemm_output);

        // Update GEMM output
        gemm_output_to_use = &_gemm_output;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type            = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset = 0;

    // Configure output stage for quantized case
    if(_is_quantized)
    {
        const auto         output_quant_info        = (output->info()->total_size() == 0) ? iq_info : oq_info;
        const bool         is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
        const unsigned int num_filters              = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input->info(),
                                                               weights->info(),
                                                               output->info(),
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];
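
        // Each (multiplier, shift) pair encodes the real rescale factor
        // input_scale * weights_scale / output_scale as a fixed-point value, roughly
        // multiplier * 2^(-shift), which the output stage applies to the int32 accumulators
        // before adding the output offset and clamping to the activation bounds.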

        PixelValue min_val{};
        PixelValue max_val{};
        std::tie(min_val, max_val) = get_min_max(output->info()->data_type());

        auto min_activation = min_val.get<int32_t>();
        auto max_activation = max_val.get<int32_t>();

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                _fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // Configure and tune GEMM
    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
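    // (with gemm_3d_depth == conv_h, the conv_w * conv_h GEMM rows are written as conv_h height
    // slices of conv_w rows each, matching the NHWC output directly, so no separate reshape runs)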
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    configure_mm(compile_context, gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, act_info);

    if(!_skip_im2col)
    {
        _im2col_output.allocator()->allocate();
    }

    if(!_skip_col2im)
    {
        // Configure and tune Col2Im
        _col2im_kernel->configure(compile_context, gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups);
        CLScheduler::get().tune_kernel_static(*_col2im_kernel);

        _gemm_output.allocator()->allocate();
    }

    ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(idx_width) != conv_w) || (output->info()->dimension(idx_height) != conv_h),
                             "Output shape does not match the expected one");

    if(!_fuse_activation)
    {
        _activationlayer_function.configure(compile_context, output, nullptr, act_info);
    }

    ARM_COMPUTE_UNUSED(weights_info);
}

Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, unsigned int num_groups)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());

    if(!is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    }
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
    ARM_COMPUTE_RETURN_ERROR_ON(((input->dimension(2) / weights->dimension(2)) != num_groups) && (input->data_layout() == DataLayout::NCHW));

    const DataLayout data_layout = input->data_layout();
    const DataType   data_type   = input->data_type();
    const int        idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const int        idx_height  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const int        idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
    const int        idx_kernels = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    const unsigned int kernel_width  = weights->dimension(idx_width);
    const unsigned int kernel_height = weights->dimension(idx_height);
    const unsigned int num_kernels   = weights->dimension(idx_kernels);

    TensorInfo         im2col_reshaped_info{};
    TensorInfo         info_gemm{};
    TensorInfo         weights_reshaped_info{};
    const ITensorInfo *gemm_input_to_use  = input;
    const ITensorInfo *gemm_output_to_use = output;
    const ITensorInfo *weights_to_use     = weights;
    const bool         is_quantized       = is_data_type_quantized_asymmetric(data_type);
    const bool         skip_im2col        = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
    const bool         skip_col2im        = data_layout == DataLayout::NHWC;
    bool               fuse_activation    = true;

    ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);

    // Validate biases
    if(biases != nullptr)
    {
        if(is_quantized)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
        }
        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
    }

    if(act_info.enabled())
    {
        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
    }

    // Get convolved dimensions
    unsigned int conv_w = 0;
    unsigned int conv_h = 0;

    std::tie(conv_w, conv_h) = scaled_dimensions(input->dimension(idx_width),
                                                 input->dimension(idx_height),
                                                 kernel_width,
                                                 kernel_height,
                                                 conv_info,
                                                 dilation);

    unsigned int mat_weights_cols = num_kernels / num_groups;

    const ITensorInfo *biases_to_use = biases;
    bool               append_bias   = false;

    if(num_groups != 1 && biases != nullptr)
    {
        // num_groups != 1 can only be used with the NCHW data layout
        // Since a utility function to reshape the biases is missing, we append the biases to the weights tensor
        biases_to_use = nullptr;
        append_bias   = true;

        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, biases, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, true, num_groups), 1, data_type);
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLConvolutionLayerReshapeWeights::validate(weights, nullptr, nullptr, num_groups));
        weights_reshaped_info = TensorInfo(compute_weights_reshaped_shape(*weights, false, num_groups), 1, data_type);
    }

    weights_to_use = &weights_reshaped_info;

    if(!skip_im2col)
    {
        const Size2D kernel_dims(kernel_width, kernel_height);

        // Output tensor auto initialization if not yet initialized
        TensorShape expected_output_shape = compute_im2col_conv_shape(input, kernel_dims, conv_info, append_bias, dilation, num_groups == 1, num_groups);

        auto_init_if_empty(im2col_reshaped_info, input->clone()->set_tensor_shape(expected_output_shape));

        ARM_COMPUTE_RETURN_ON_ERROR(CLIm2ColKernel::validate(input, &im2col_reshaped_info, kernel_dims, conv_info, append_bias, dilation, num_groups));
        gemm_input_to_use = &im2col_reshaped_info;
    }

    // Create GEMM output tensor
    if(!skip_col2im)
    {
        TensorShape shape_gemm;

        shape_gemm = gemm_input_to_use->tensor_shape();
        shape_gemm.set(0, mat_weights_cols);
        shape_gemm.set(1, conv_w * conv_h);

        info_gemm = TensorInfo(shape_gemm, 1, data_type);
        info_gemm.set_quantization_info(output->quantization_info()).set_data_layout(input->data_layout());
        gemm_output_to_use = &info_gemm;
    }

    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    gemmlowp_output_stage.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset          = 0;
    gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;

    if(is_quantized)
    {
        const UniformQuantizationInfo iq_info           = input->quantization_info().uniform();
        const UniformQuantizationInfo oq_info           = output->quantization_info().uniform();
        const auto                    output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
        const unsigned int            num_filters       = (is_quantized_per_channel) ? num_kernels : 1;

        gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
        gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
        quantization::compute_quantized_multipliers_and_shifts(input,
                                                               weights,
                                                               output,
                                                               idx_kernels,
                                                               gemmlowp_output_stage.gemmlowp_multipliers.data(),
                                                               gemmlowp_output_stage.gemmlowp_shifts.data());
        gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
        gemmlowp_output_stage.gemmlowp_shift      = gemmlowp_output_stage.gemmlowp_shifts[0];

        int min_activation = 0;
        int max_activation = 0;

        const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                                                                   ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
                                                                                 };

        if(act_info.enabled())
        {
            if(supported_acts.count(act_info.activation()) != 0)
            {
                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
            }
            else
            {
                fuse_activation = false;
            }
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset    = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
        gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
    }

    // In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
    const unsigned int gemm_3d_depth = (data_layout == DataLayout::NHWC) ? conv_h : 0;

    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemm_input_to_use, weights_to_use, biases_to_use, gemm_output_to_use, gemmlowp_output_stage, gemm_3d_depth, skip_im2col, act_info));

    // Validate Col2Im
    if(!skip_col2im)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLCol2ImKernel::validate(gemm_output_to_use, output, Size2D(conv_w, conv_h), num_groups));
    }

    // Validate Activation Layer
    if(!fuse_activation)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
    }

    return Status{};
}

void CLGEMMConvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Run im2col
    if(!_skip_im2col)
    {
        CLScheduler::get().enqueue(*_im2col_kernel);
    }

    // Run CLGEMM or CLGEMMLowpMatrixMultiplyCore
    if(_is_quantized)
    {
        // Run gemmlowp
        _mm_gemmlowp.run();
    }
    else
    {
        // Run gemm
        _mm_gemm.run();
    }

    // Reshape output matrix
    if(!_skip_col2im)
    {
        CLScheduler::get().enqueue(*_col2im_kernel, false);
    }

    // Run the Activation Layer if we could not fuse it in GEMM
    if(!_fuse_activation)
    {
        _activationlayer_function.run();
    }
}

void CLGEMMConvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
        {
            _weights_manager->run(_original_weights, &_reshape_weights_managed);
        }
        else
        {
            // Run weights reshaping and mark original weights tensor as unused
            _weights_reshaped.allocator()->allocate();
            _reshape_weights.run();
            _original_weights->mark_as_unused();
        }

        // Prepare GEMM
        _is_quantized ? _mm_gemmlowp.prepare() : _mm_gemm.prepare();
        if(!_weights_reshaped.is_used())
        {
            _weights_reshaped.allocator()->free();
        }

        CLScheduler::get().queue().finish();
        _is_prepared = true;
    }
}
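
// Minimal usage sketch (illustrative only; assumes the CLScheduler has been initialized and the
// tensors below have been configured and allocated with valid, matching shapes elsewhere):
//
//   CLGEMMConvolutionLayer conv;
//   conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0));
//   conv.run(); // run() calls prepare() on first use, reshaping the weights once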
} // namespace arm_compute