/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "support/MemorySupport.h"

#include <cmath>

using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{
namespace
{
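// Translate the generic GEMMInfo into the metadata consumed by NEGEMMAssemblyDispatch
// (assembly method, 3D reinterpretation flags and the fused activation)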
AsmGemmInfo init_assembly_metadata(const GEMMInfo &info)
{
    AsmGemmInfo asm_info;
    asm_info.method                  = AsmConvMethod::Im2Col;
    asm_info.reinterpret_input_as_3d = info.reinterpret_input_as_3d();
    asm_info.depth_output_gemm3d     = info.depth_output_gemm3d();
    asm_info.activation_info         = info.activation_info();

    return asm_info;
}
} // namespace

NEGEMM::NEGEMM(std::shared_ptr<IMemoryManager> memory_manager, IWeightsManager *weights_manager)
    : _memory_group(memory_manager), _weights_manager(weights_manager), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _asm_glue(memory_manager, weights_manager), _ma_kernel(),
      _alpha_scale_func(nullptr), _add_bias(), _activation_func(), _tmp_a(), _tmp_b(), _tmp_d(), _original_b(nullptr), _run_vector_matrix_multiplication(false), _run_alpha_scale(false),
      _run_addition(false), _run_bias_addition(false), _run_activation(false), _reshape_b_only_on_first_run(false), _is_prepared(false)
{
}

NEGEMM::~NEGEMM() = default;

void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMM::validate(a->info(), b->info(), (c != nullptr) ? c->info() : nullptr, d->info(), alpha, beta, gemm_info));

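    // When B is reshaped only on the first run, C (if provided) acts as a bias added after the matrix
    // multiplication; otherwise C is accumulated as beta * C by the matrix addition kernel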
    const AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
    const bool        is_c_bias     = gemm_info.reshape_b_only_on_first_run();
    bool              run_optimised = bool(NEGEMMAssemblyDispatch::validate(a->info(), b->info(), (is_c_bias && c != nullptr) ? c->info() : nullptr, d->info(), asm_info));

    // Check if we need to reshape the matrix B only on the first run
    _is_prepared                      = false;
    _reshape_b_only_on_first_run      = gemm_info.reshape_b_only_on_first_run();
    _run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
    _original_b                       = b;
    _run_alpha_scale                  = alpha != 1.f;
    _run_bias_addition                = c != nullptr && gemm_info.reshape_b_only_on_first_run();
    _run_addition                     = beta != 0 && c != nullptr && !gemm_info.reshape_b_only_on_first_run();
    _run_activation                   = gemm_info.activation_info().enabled() && (!run_optimised || (run_optimised && !NEGEMMAssemblyDispatch::is_activation_supported(gemm_info.activation_info())));

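    // Two execution paths: the optimised assembly dispatch when it supports this configuration,
    // otherwise the NEON matrix multiply kernel (with optional interleaving of A and 1xW transposition of B)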
    if(run_optimised)
    {
        const ITensor *c_to_use = is_c_bias ? c : nullptr;
        _asm_glue.configure(a, b, c_to_use, d, asm_info);
        ARM_COMPUTE_ERROR_ON(!_asm_glue.is_configured());

        // Scale product by alpha
        if(_run_alpha_scale)
        {
            _alpha_scale_func.configure(d, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, alpha, 0.f));
        }
    }
    else
    {
        // Pick output tensor in case bias addition should be performed
        ITensor *gemm_output_to_use = d;
        if(_run_bias_addition)
        {
            gemm_output_to_use = &_tmp_d;
            _memory_group.manage(&_tmp_d);
        }

        _mm_kernel = arm_compute::support::cpp14::make_unique<NEGEMMMatrixMultiplyKernel>();

        // Select between GEMV and GEMM
        if(_run_vector_matrix_multiplication)
        {
            // Configure the matrix multiply kernel
            _mm_kernel->configure(a, b, gemm_output_to_use, alpha, false);
        }
        else
        {
            TensorShape shape_tmp_a = a->info()->tensor_shape();
            TensorShape shape_tmp_b = b->info()->tensor_shape();

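            // Interleaved A: blocks of 4 rows are packed side by side, so the width becomes K * 4
            // and the height becomes ceil(M / 4)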
            shape_tmp_a.set(0, a->info()->dimension(0) * 4);
            shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.0f));

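            // Transposed B (1xW): the width becomes K * W and the height ceil(N / W), where W is the
            // number of elements spanning 16 bytes (e.g. 4 for F32)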
            const unsigned int transpose_w = 16 / data_size_from_type(b->info()->data_type());
            shape_tmp_b.set(0, b->info()->dimension(1) * transpose_w);
            shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / static_cast<float>(transpose_w)));

            TensorInfo info_a = a->info()->clone()->set_tensor_shape(shape_tmp_a).set_is_resizable(true);
            TensorInfo info_b = b->info()->clone()->set_tensor_shape(shape_tmp_b).set_is_resizable(true);

            _tmp_a.allocator()->init(info_a);
            _tmp_b.allocator()->init(info_b);

            // Manage intermediate buffers
            _memory_group.manage(&_tmp_a);
            if(!_reshape_b_only_on_first_run)
            {
                _memory_group.manage(&_tmp_b);
            }

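            // GEMM dimensions: M rows of A/output, N columns of B/output, K reduction dimension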
            int m = a->info()->dimension(1);
            int n = b->info()->dimension(0);
            int k = a->info()->dimension(0);

            // Configure interleave kernel
            _interleave_kernel = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
            _interleave_kernel->configure(a, &_tmp_a);

            // Configure transpose kernel
            _transpose_kernel = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
            _transpose_kernel->configure(b, &_tmp_b);

            // Configure matrix multiplication kernel
            _mm_kernel->configure(&_tmp_a, &_tmp_b, gemm_output_to_use, alpha, true, GEMMReshapeInfo(m, n, k));

            // Allocate once all the configure methods have been called
            _tmp_a.allocator()->allocate();
            if(!_reshape_b_only_on_first_run)
            {
                _tmp_b.allocator()->allocate();
            }
        }

        if(_run_bias_addition)
        {
            _add_bias.configure(gemm_output_to_use, c, d, ConvertPolicy::SATURATE);
            _tmp_d.allocator()->allocate();
        }
    }

    // Configure matrix addition kernel
    if(_run_addition)
    {
        _ma_kernel = arm_compute::support::cpp14::make_unique<NEGEMMMatrixAdditionKernel>();
        _ma_kernel->configure(c, d, beta);
    }

    // Configure activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if(_run_activation)
    {
        _activation_func.configure(d, nullptr, activation);
    }
}

Status NEGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info)
{
    ARM_COMPUTE_UNUSED(alpha);
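    // Mirror configure(): C acts as a post-GEMM bias only when B is reshaped on the first run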
    const bool is_c_bias = gemm_info.reshape_b_only_on_first_run();

    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(a);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::BFLOAT16, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(0) != b->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
    if(a->data_type() != DataType::BFLOAT16)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, output);
    }

    if(c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.depth_output_gemm3d() != 0);
        ARM_COMPUTE_RETURN_ERROR_ON(gemm_info.reinterpret_input_as_3d());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(c, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->dimension(1) != c->dimension(1), "The C matrix must have the same number of rows as the matrix A");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(b->dimension(0) != c->dimension(0), "The C matrix must have the same number of columns as the matrix B");
    }

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(b->dimension(0) != output->dimension(0));
        if(gemm_info.depth_output_gemm3d() != 0)
        {
            if(gemm_info.reinterpret_input_as_3d())
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(2) != output->dimension(2));
            }
            else
            {
                ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1) * output->dimension(2));
            }
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON(a->dimension(1) != output->dimension(1));
        }
    }

    // Check if we need to run the optimized assembly kernel
    AsmGemmInfo asm_info      = init_assembly_metadata(gemm_info);
    const bool  run_optimised = bool(NEGEMMAssemblyDispatch::validate(a, b, is_c_bias ? c : nullptr, output, asm_info));

    if(!run_optimised)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.reinterpret_input_as_3d(), "NEGEMM cannot reinterpret the input tensor as 3D");
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.depth_output_gemm3d() != 0, "NEGEMM cannot reinterpret the output tensor as 3D");

        // Check if the first input tensor is a vector.
        const bool run_vector_matrix_multiplication = a->dimension(1) < 2;
        // Check if we need to reshape the matrix A and matrix B
        const bool run_interleave_transpose = !run_vector_matrix_multiplication && !(gemm_info.reshape_b_only_on_first_run());

        // Arguments used by GEMMReshapeInfo
        // If we pass the matrix A and matrix B reshaped to NEGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to GEMMReshapeInfo
        // in order to know how the matrices have been reshaped
        const int m                         = a->dimension(1);
        const int n                         = b->dimension(0);
        const int k                         = a->dimension(0);
        int       mult_transpose1xW_width   = 1;
        int       mult_interleave4x4_height = 1;

        const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, gemm_info.depth_output_gemm3d());

        const ITensorInfo *matrix_a_info = a;
        const ITensorInfo *matrix_b_info = b;

        TensorInfo tmp_a_info{};
        TensorInfo tmp_b_info{};
        TensorInfo tmp_output_info = *output->clone();

        if(run_interleave_transpose)
        {
            matrix_a_info = &tmp_a_info;
            matrix_b_info = &tmp_b_info;

            // Validate interleave kernel
            auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMInterleave4x4Kernel::validate(a, &tmp_a_info));

            // Validate transpose kernel
            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
            ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMTranspose1xWKernel::validate(b, &tmp_b_info));
        }

        // Validate matrix multiply
        auto_init_if_empty(tmp_output_info, matrix_a_info->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, run_interleave_transpose, reshape_info)));
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixMultiplyKernel::validate(matrix_a_info, matrix_b_info, &tmp_output_info, alpha, run_interleave_transpose, reshape_info));

        if(c != nullptr && gemm_info.reshape_b_only_on_first_run())
        {
            ARM_COMPUTE_RETURN_ON_ERROR(NEArithmeticAddition::validate(&tmp_output_info, c, output, ConvertPolicy::SATURATE));
        }
    }

    // Validate matrix addition kernel
    if(beta != 0 && c != nullptr && !is_c_bias)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAdditionKernel::validate(c, output, beta));
    }

    // Validate activation
    const ActivationLayerInfo &activation = gemm_info.activation_info();
    if(activation.enabled())
    {
        ARM_COMPUTE_RETURN_ON_ERROR(NEActivationLayer::validate(output, nullptr, activation));
    }

    return Status{};
}

void NEGEMM::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_asm_glue.is_configured())
    {
        _asm_glue.run();
        if(_run_alpha_scale)
        {
            _alpha_scale_func.run();
        }
    }
    else
    {
        if(!_run_vector_matrix_multiplication)
        {
            // Run interleave kernel
            NEScheduler::get().schedule(_interleave_kernel.get(), Window::DimY);

            if(!_reshape_b_only_on_first_run)
            {
                // Run transpose kernel
                NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
            }
        }

        NEScheduler::get().schedule(_mm_kernel.get(), _run_vector_matrix_multiplication ? Window::DimX : Window::DimY);

        // Run bias addition kernel
        if(_run_bias_addition)
        {
            _add_bias.run();
        }
    }

    // Run matrix addition kernel
    if(_run_addition)
    {
        NEScheduler::get().schedule(_ma_kernel.get(), Window::DimY);
    }

    // Run activation function
    if(_run_activation)
    {
        _activation_func.run();
    }
}

void NEGEMM::prepare()
{
    if(!_is_prepared)
    {
        const bool original_b_managed_by_weights_manager = _weights_manager && _weights_manager->are_weights_managed(_original_b);
        if(_asm_glue.is_configured())
        {
            if(!original_b_managed_by_weights_manager)
            {
                ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
            }

            _asm_glue.prepare();
            if(!original_b_managed_by_weights_manager)
            {
                _original_b->mark_as_unused();
            }
        }
        else if(_reshape_b_only_on_first_run && !_run_vector_matrix_multiplication && !_asm_glue.is_configured())
        {
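            // Reshape B once here and keep the transposed copy for all subsequent runs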
            if(!original_b_managed_by_weights_manager)
            {
                ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
            }

            _tmp_b.allocator()->allocate();
            NEScheduler::get().schedule(_transpose_kernel.get(), Window::DimY);
            if(!original_b_managed_by_weights_manager)
            {
                _original_b->mark_as_unused();
            }
        }

        _is_prepared = true;
    }
}
} // namespace arm_compute