/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
#include "src/core/NEON/kernels/NEFlattenLayerKernel.h"
#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/NEON/kernels/NETransposeKernel.h"

#include "support/MemorySupport.h"

#include <algorithm>
#include <cmath>

namespace arm_compute
{
using namespace arm_compute::misc::shape_calculator;

namespace
{
// Get the min/max bound of a quantized asymmetric output tensor, taking any fused activation into account
std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
{
    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_min_max(data_type);
    const UniformQuantizationInfo q_unif = q_info.uniform();

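    // A fused activation narrows the representable output range: RELU raises the lower bound to the
    // quantization offset (the quantized zero point), while the bounded variants quantize the float
    // limits a (and, for LU_BOUNDED_RELU, b) through the output quantization info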
    if(act_info.enabled())
    {
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
                type_min = PixelValue(q_unif.offset);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                type_min = PixelValue(q_unif.offset);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                type_min = PixelValue(act_info.b(), data_type, q_info);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not supported.");
                break;
        }
    }

    return std::make_pair(type_min, type_max);
}

Status get_gemmlowp_output_stage_info(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const ActivationLayerInfo &act,
                                      GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
{
    const auto                    data_type = input->data_type();
    const QuantizationInfo        oq_info   = output->quantization_info();
    const UniformQuantizationInfo iq_unif   = input->quantization_info().uniform();
    const UniformQuantizationInfo wq_unif   = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_unif   = oq_info.uniform();

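    // The effective rescale factor that maps the int32 accumulators back to the output quantized domain
    // is M = (scale_in * scale_weights) / scale_out. It is decomposed into a fixed-point multiplier and
    // a shift so the output stage can run in integer arithmetic only; illustratively, M = 0.25 would map
    // to a Q0.31 multiplier of 2^30 with a right shift of 1 (0.25 = 0.5 * 2^-1)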
    float   multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
    int32_t output_multiplier;
    int32_t output_shift;

    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);

    gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
    gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
    gemmlowp_output_stage_info.gemmlowp_offset     = oq_unif.offset;
    gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage_info.gemmlowp_min_bound  = type_min.get<int32_t>();
    gemmlowp_output_stage_info.gemmlowp_max_bound  = type_max.get<int32_t>();

    return Status{};
}

Status validate_mm(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const ActivationLayerInfo &act)
{
    if(is_data_type_quantized_asymmetric(input->data_type()))
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info(input->quantization_info().uniform().scale, -input->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(input, weights, output, act, gemmlowp_output_stage_info));

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_quantization_info(input_quantization_info),
                                                                           &weights->clone()->set_quantization_info(weights_quantization_info),
                                                                           biases,
                                                                           output,
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(input, weights, biases, output, 1.f, 1.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run */)));
    }

    return Status{};
}
} // namespace

void NEFullyConnectedLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
    auto k = arm_compute::support::cpp14::make_unique<NETransposeKernel>();
    k->configure(input, output);
    _kernel = std::move(k);
}

Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, const ITensorInfo *output)
{
    return NETransposeKernel::validate(input, output);
}

NEFullyConnectedLayer::~NEFullyConnectedLayer() = default;

NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(std::move(memory_manager)), _weights_manager(weights_manager), _flatten_kernel(), _convert_weights(), _convert_weights_managed(), _reshape_weights_function(),
      _reshape_weights_managed_function(), _mm_gemm(nullptr, weights_manager), _mm_gemmlowp(nullptr, weights_manager), _flatten_output(), _converted_weights_output(), _reshape_weights_output(),
      _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false), _is_quantized_asymmetric(false), _is_prepared(false)
{
}

void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    if(_is_quantized_asymmetric)
    {
        // Since we need negative offsets for computing the matrix multiplication, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo input_quantization_info   = input->info()->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

        input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
        weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function and output stage for asymmetric quantized types
        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        const Status            status = get_gemmlowp_output_stage_info(input->info(), weights->info(), output->info(), act, gemmlowp_output_stage_info);
        ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_activation_info(act);
        _mm_gemmlowp.configure(input, weights, biases, output, gemm_info);

        // Restore the original QuantizationInfo as input and weights could be used in other fully connected layers
        input->info()->set_quantization_info(input_quantization_info);
        weights->info()->set_quantization_info(weights_quantization_info);
    }
    else
    {
        // Configure matrix multiply kernel
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_activation_info(act);
        _mm_gemm.configure(input, weights, biases, output, 1.f, 1.0f, gemm_info);
    }
}

void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
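    // e.g. a [W, H, C, batches] convolution output is flattened into a [W * H * C, batches] tensor
    // that can be fed directly to the matrix multiplication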

    // Initialize output tensor for flatten
    TensorShape shape_flatten = compute_flatten_shape(input->info());
    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));

    // Configure flatten kernel
    _memory_group.manage(&_flatten_output);

    _flatten_kernel = arm_compute::support::cpp14::make_unique<NEFlattenLayerKernel>();
    _flatten_kernel->configure(input, &_flatten_output);

    // Configure matrix multiply kernel
    configure_mm(&_flatten_output, weights, biases, output, act);

    // Allocate the output tensor for flatten once all the configure methods have been called
    _flatten_output.allocator()->allocate();
}

void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(input, weights, biases, output, act);
}

void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                                      FullyConnectedLayerInfo fc_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedLayer::validate(input->info(),
                                                               weights->info(),
                                                               biases != nullptr ? biases->info() : nullptr,
                                                               output->info(),
                                                               fc_info));

    _are_weights_converted   = true;
    _are_weights_reshaped    = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv        = true;
    _is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->info()->data_type());
    _original_weights        = weights;

    if(_weights_manager)
    {
        _weights_manager->manage(weights);
    }

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensor *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
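    // With batches, the layer follows a convolution when the input dimensions from index 3 onwards
    // (the batch dimensions) match the output dimensions from index 1 onwards; in that case
    // dimensions 0-2 still describe the W/H/C volume produced by the convolution and must be flattened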
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                                                                  input->info()->tensor_shape().cend(),
                                                                                  output->info()->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = input->info()->num_dimensions() > 1;
    }

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights))
        {
            _reshape_weights_managed_function.configure(weights);
            weights_to_use = _weights_manager->acquire(weights, &_reshape_weights_managed_function);
        }
        else
        {
            // Reshape the weights
            _reshape_weights_function.configure(weights, &_reshape_weights_output);
            weights_to_use = &_reshape_weights_output;
        }
    }

    // Convert weights if needed
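    // (i.e. the layer follows a convolution and the runtime data layout differs from the layout the
    // weights were trained in, e.g. NCHW-trained weights running on an NHWC input, so the weight rows
    // have to be re-ordered to match the flattened input)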
    if(_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
    {
        if(_weights_manager && _weights_manager->are_weights_managed(weights_to_use))
        {
            _convert_weights_managed.configure(weights_to_use,
                                               input->info()->tensor_shape(),
                                               fc_info.weights_trained_layout);
            weights_to_use = _weights_manager->acquire(weights, &_convert_weights_managed);
        }
        else
        {
            // Convert weights
            _convert_weights.configure(weights_to_use,
                                       &_converted_weights_output,
                                       input->info()->tensor_shape(),
                                       fc_info.weights_trained_layout);

            weights_to_use = &_converted_weights_output;
        }
        _are_weights_converted = false;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(input, weights_to_use, biases, output, fc_info.activation_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(input, weights_to_use, biases, output, fc_info.activation_info);
    }

    _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                                       FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(biases != nullptr && biases->num_dimensions() > 1);

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

    const ITensorInfo &flatten_input     = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *input_to_use   = input;
    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = output->dimension(1) > 1;

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->tensor_shape().cbegin() + 3,
                                                                                 input->tensor_shape().cend(),
                                                                                 output->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = input->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             input->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
        input_to_use = &flatten_input;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(input_to_use, weights_to_use, biases, output, fc_info.activation_info));

    return Status{};
}

void NEFullyConnectedLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        NEScheduler::get().schedule(_flatten_kernel.get(), Window::DimY);
    }

    // Run matrix multiply
    if(_is_quantized_asymmetric)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }
}

void NEFullyConnectedLayer::prepare()
{
    if(!_is_prepared)
    {
        if(!_weights_manager)
        {
            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
        }

        auto release_unused = [](Tensor * w)
        {
            if(!w->is_used())
            {
                w->allocator()->free();
            }
        };

        // Pointer to current weights
        const ITensor *cur_weights = _original_weights;

        // Reshape of the weights (happens only once)
        if(!_are_weights_reshaped)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(_original_weights))
            {
                cur_weights = _weights_manager->run(cur_weights, &_reshape_weights_managed_function);
            }
            else
            {
                // Reshape of the weights (happens only once)
                if(!_are_weights_reshaped)
                {
                    // Run reshape weights kernel and mark weights as unused
                    _reshape_weights_output.allocator()->allocate();
                    _reshape_weights_function.run();
                }
                cur_weights->mark_as_unused();
                cur_weights = &_reshape_weights_output;
            }
            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            if(_weights_manager && _weights_manager->are_weights_managed(cur_weights))
            {
                _weights_manager->run(cur_weights, &_convert_weights_managed);
            }
            else
            {
                _converted_weights_output.allocator()->allocate();
                _convert_weights.run();
                cur_weights->mark_as_unused();
            }

            _are_weights_converted = true;
        }

        // Release reshaped weights if unused
        release_unused(&_reshape_weights_output);

        // Prepare GEMM and release unused weights
        if(!_is_quantized_asymmetric)
        {
            _mm_gemm.prepare();
        }

        // Release converted weights if unused
        release_unused(&_reshape_weights_output);
        release_unused(&_converted_weights_output);

        _is_prepared = true;
    }
}
} // namespace arm_compute