• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2017-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
25 #error "This example needs to be built with -DARM_COMPUTE_CL"
26 #endif /* ARM_COMPUTE_CL */
27 
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/core/Utils.h"
30 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
31 #include "arm_compute/runtime/CL/CLScheduler.h"
32 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
33 #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
34 #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
35 #include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
36 #include "src/core/CL/kernels/CLFillBorderKernel.h"
37 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
38 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
39 #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
40 #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
41 #include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
42 #include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
43 #include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
44 #include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
45 #include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
46 #include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
47 #include "src/core/CL/kernels/CLIm2ColKernel.h"
48 #include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
49 #include "tests/AssetsLibrary.h"
50 #include "tests/CL/CLAccessor.h"
51 #include "tests/Globals.h"
52 #include "tests/IAccessor.h"
53 #include "tests/SimpleTensor.h"
54 #include "tests/validation/Validation.h"
55 #include "tests/validation/reference/GEMM.h"
56 #include "tests/validation/reference/GEMMLowp.h"
57 
58 #include "utils/TypePrinter.h"
59 #include "utils/Utils.h"
60 #include "utils/command_line/CommandLineOptions.h"
61 #include "utils/command_line/CommandLineParser.h"
62 
63 #include "ValidateExample.h"
64 
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <random>
#include <set>
#include <vector>
66 
67 using namespace arm_compute;
68 using namespace utils;
69 using namespace arm_compute::test;
70 using namespace arm_compute::test::validation;
71 
72 constexpr float                     abs_tolerance_f32(0.0001f); /**< F32 Absolute tolerance value for comparing reference's output against implementation's output for
73                                                                * floating point data types in case using relative tolerance fails because of small values */
74 RelativeTolerance<float>            tolerance_f32(0.001f);      /**< F32 Tolerance value for comparing reference's output against implementation's output for floating point data types */
75 RelativeTolerance<half_float::half> tolerance_f16(half(0.2));   /**< F16 Tolerance value for comparing reference's output against implementation's output for floating point data types */
76 constexpr float                     tolerance_num_f16 = 0.02f;  /**< F16 Tolerance number */
77 
78 namespace
79 {
80 class GEMMCommandLineOptions final
81 {
82 public:
GEMMCommandLineOptions(CommandLineParser & parser)83     explicit GEMMCommandLineOptions(CommandLineParser &parser) noexcept
84         : help(parser.add_option<ToggleOption>("help")),
85           add_bias(parser.add_option<ToggleOption>("add_bias")),
86           M(parser.add_option<SimpleOption<int>>("m", 7)),
87           N(parser.add_option<SimpleOption<int>>("n", 3)),
88           K(parser.add_option<SimpleOption<int>>("k", 5)),
89           B(parser.add_option<SimpleOption<int>>("b", 1)),
90           alpha(parser.add_option<SimpleOption<float>>("alpha", 1.f)),
91           beta(parser.add_option<SimpleOption<float>>("beta", 0.f)),
92           offset_src0(parser.add_option<SimpleOption<int>>("offset_i0", 10)),
93           offset_src1(parser.add_option<SimpleOption<int>>("offset_i1", 10)),
94           offset_dst(parser.add_option<SimpleOption<int>>("offset_o", 10)),
95           scale_src0(parser.add_option<SimpleOption<float>>("scale_i0", 1.f / 255)),
96           scale_src1(parser.add_option<SimpleOption<float>>("scale_i1", 1.f / 255)),
97           scale_dst(parser.add_option<SimpleOption<float>>("scale_o", 1.f / 255)),
98           data_type()
99     {
100         // Setup data type
101         const std::set<arm_compute::DataType> supported_data_types
102         {
103             DataType::F16,
104             DataType::F32,
105             DataType::QASYMM8,
106         };
107         data_type = parser.add_option<EnumOption<DataType>>("type", supported_data_types, DataType::F32);
108 
109         // Setup help strings
110         help->set_help("Show this help message");
111         add_bias->set_help("Add bias to the GEMM. Used when running in QASYMM8");
112         M->set_help("M value");
113         N->set_help("N value");
114         K->set_help("K value");
115         B->set_help("B value - number of batches");
116         alpha->set_help("Alpha value");
117         beta->set_help("Beta value");
118         offset_src0->set_help("Offset of first input. Used when running in QASYMM8");
119         offset_src1->set_help("Offset of second input. Used when running in QASYMM8");
120         offset_dst->set_help("Offset of output. Used when running in QASYMM8");
121         scale_src0->set_help("Scale of first input. Used when running in QASYMM8");
122         scale_src1->set_help("Scale of second input. Used when running in QASYMM8");
123         scale_dst->set_help("Scale of output. Used when running in QASYMM8");
124         data_type->set_help("Data type to use");
125     }
126     /** Prevent instances of this class from being copied (As this class contains pointers) */
127     GEMMCommandLineOptions(const GEMMCommandLineOptions &) = delete;
128     /** Prevent instances of this class from being copied (As this class contains pointers) */
129     GEMMCommandLineOptions &operator=(const GEMMCommandLineOptions &) = delete;
130     /** Allow instances of this class to be moved */
131     GEMMCommandLineOptions(GEMMCommandLineOptions &&) noexcept(true) = default;
132     /** Allow instances of this class to be moved */
133     GEMMCommandLineOptions &operator=(GEMMCommandLineOptions &&) noexcept(true) = default;
134     /** Default destructor */
135     ~GEMMCommandLineOptions() = default;
136 
137 public:
138     ToggleOption                      *help;
139     ToggleOption                      *add_bias;
140     SimpleOption<int>                 *M;
141     SimpleOption<int>                 *N;
142     SimpleOption<int>                 *K;
143     SimpleOption<int>                 *B;
144     SimpleOption<float>               *alpha;
145     SimpleOption<float>               *beta;
146     SimpleOption<int>                 *offset_src0;
147     SimpleOption<int>                 *offset_src1;
148     SimpleOption<int>                 *offset_dst;
149     SimpleOption<float>               *scale_src0;
150     SimpleOption<float>               *scale_src1;
151     SimpleOption<float>               *scale_dst;
152     EnumOption<arm_compute::DataType> *data_type;
153 };
154 } // namespace
155 
156 class CLGEMMValidateExample : public ValidateExample
157 {
158 public:
do_setup(int argc,char ** argv)159     bool do_setup(int argc, char **argv) override
160     {
161         CLScheduler::get().default_init();
162 
163         // Parse options
164         CommandLineParser      parser;
165         GEMMCommandLineOptions gemm_options(parser);
166         parser.parse(argc, argv);
167 
168         // Print help
169         const bool print_help = gemm_options.help->is_set() ? gemm_options.help->value() : false;
170         if(print_help)
171         {
172             parser.print_help(argv[0]);
173             return false;
174         }
175 
176         // Consume parameters
177         consume_params(gemm_options);
178         print_parameters_internal();
179 
180         const bool is_quantized = is_data_type_quantized(data_type);
181 
182         // Calculate re-quantization parameters
183         if(is_quantized)
184         {
185             float multiplier = scale_src0 * scale_src1 / scale_dst;
186             quantization::calculate_quantized_multiplier(multiplier, &dst_multiplier, &dst_shift);
187         }
188 
189         // Initialize GEMM inputs/outputs
190         src0.allocator()->init(TensorInfo(TensorShape(K, M, B), 1, data_type));
191         src1.allocator()->init(TensorInfo(TensorShape(N, K, B), 1, data_type));
192         src2.allocator()->init(TensorInfo(TensorShape(N, M, B), 1, data_type));
193         init_sgemm_output(dst, src0, src1, data_type);
194 
195         // Configure function
196         if(is_quantized)
197         {
198             src0.info()->set_quantization_info(QuantizationInfo(scale_src0, offset_src0));
199             src1.info()->set_quantization_info(QuantizationInfo(scale_src1, offset_src1));
200             dst.info()->set_quantization_info(QuantizationInfo(scale_dst, offset_dst));
201             biases.allocator()->init(TensorInfo(TensorShape(N), 1, DataType::S32));
202             init_sgemm_output(tmp_dst, src0, src1, DataType::S32);
203 
204             // Configure GEMMlowp matrix multiply function
205             mm_gemmlowp.configure(&src0, &src1, nullptr, &tmp_dst);
206 
207             // Configure GEMMlowp output stage
208             mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, dst_multiplier, dst_shift, offset_dst);
209             tmp_dst.allocator()->allocate();
210             biases.allocator()->allocate();
211             fill(CLAccessor(biases), 3);
212         }
213         else
214         {
215             // Configure matrix multiply function
216             mm_gemm.configure(&src0, &src1, &src2, &dst, alpha, beta);
217         }
218 
219         // Allocate all the tensors
220         src0.allocator()->allocate();
221         src1.allocator()->allocate();
222         dst.allocator()->allocate();
223         src2.allocator()->allocate();
224 
225         fill(CLAccessor(src0), 0);
226         fill(CLAccessor(src1), 1);
227         fill(CLAccessor(src2), 2);
228 
229         return true;
230     }
231 
print_parameters_internal()232     void print_parameters_internal()
233     {
234         std::cout << "Datatype : " << string_from_data_type(data_type) << "\n";
235         std::cout << "M : " << support::cpp11::to_string(M) << "\n";
236         std::cout << "N : " << support::cpp11::to_string(N) << "\n";
237         std::cout << "K : " << support::cpp11::to_string(K) << "\n";
238         std::cout << "B : " << support::cpp11::to_string(B) << "\n";
239         if(data_type == DataType::QASYMM8)
240         {
241             std::cout << "Scale_Src0 : " << support::cpp11::to_string(scale_src0) << "\n";
242             std::cout << "Offset_Src0 : " << support::cpp11::to_string(offset_src0) << "\n";
243             std::cout << "Scale_Scr1 : " << support::cpp11::to_string(scale_src1) << "\n";
244             std::cout << "Offset_Src1 : " << support::cpp11::to_string(offset_src1) << "\n";
245             std::cout << "Scale_Dst : " << support::cpp11::to_string(scale_dst) << "\n";
246             std::cout << "Offset_Dst : " << support::cpp11::to_string(offset_dst) << "\n";
247             std::cout << "Bias : " << support::cpp11::to_string(add_bias) << "\n";
248         }
249         else
250         {
251             std::cout << "Alpha : " << support::cpp11::to_string(alpha) << "\n";
252             std::cout << "Beta : " << support::cpp11::to_string(beta) << "\n";
253         }
254     }
255 
do_validate()256     void do_validate() override
257     {
258         switch(data_type)
259         {
260             case DataType::F16:
261             {
262                 SimpleTensor<half> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
263                 SimpleTensor<half> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
264                 SimpleTensor<half> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
265 
266                 fill(ref_src0, 0);
267                 fill(ref_src1, 1);
268                 fill(ref_src2, 2);
269 
270                 SimpleTensor<half> ref_dst = reference::gemm<half>(ref_src0, ref_src1, ref_src2, alpha, beta);
271                 validate(CLAccessor(dst), ref_dst, tolerance_f16, tolerance_num_f16);
272                 break;
273             }
274             case DataType::F32:
275             {
276                 SimpleTensor<float> ref_src0 = { TensorShape(K, M, B), data_type, 1 };
277                 SimpleTensor<float> ref_src1 = { TensorShape(N, K, B), data_type, 1 };
278                 SimpleTensor<float> ref_src2 = { TensorShape(N, M, B), data_type, 1 };
279 
280                 fill(ref_src0, 0);
281                 fill(ref_src1, 1);
282                 fill(ref_src2, 2);
283 
284                 SimpleTensor<float> ref_dst = reference::gemm<float>(ref_src0, ref_src1, ref_src2, alpha, beta);
285                 validate(CLAccessor(dst), ref_dst, tolerance_f32, 0.f, abs_tolerance_f32);
286                 break;
287             }
288             case DataType::QASYMM8:
289             {
290                 SimpleTensor<uint8_t> ref_src0{ TensorShape(K, M, B), data_type, 1 };
291                 SimpleTensor<uint8_t> ref_src1{ TensorShape(N, K, B), data_type, 1 };
292                 SimpleTensor<uint8_t> ref_dst;
293 
294                 // Fill reference
295                 fill(ref_src0, 0);
296                 fill(ref_src1, 1);
297 
298                 SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, TensorShape(N, M, B), offset_src0, offset_src1);
299 
300                 const std::vector<int32_t> dst_multiplier_vec = { dst_multiplier };
301                 const std::vector<int32_t> dst_shift_vec      = { dst_shift };
302 
303                 if(add_bias)
304                 {
305                     SimpleTensor<int32_t> biases{ TensorShape(N), DataType::S32, 1 };
306                     // Fill bias
307                     fill(biases, 3);
308                     ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, biases, dst_multiplier_vec, dst_shift_vec, offset_dst);
309                 }
310                 else
311                 {
312                     ref_dst = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, uint8_t>(ref_tmp_dst, dst_multiplier_vec, dst_shift_vec, offset_dst);
313                 }
314                 validate(CLAccessor(dst), ref_dst);
315                 break;
316             }
317             default:
318                 break;
319         }
320     }
do_run()321     void do_run() override
322     {
323         // Execute the function
324         if(data_type == DataType::QASYMM8)
325         {
326             // Run gemmlowp
327             mm_gemmlowp.run();
328             // Run output stage
329             mm_gemmlowp_output_stage.run();
330         }
331         else
332         {
333             // Run gemm
334             mm_gemm.run();
335         }
336 
337         // Make sure all the OpenCL jobs are done executing:
338         CLScheduler::get().sync();
339     }
340 
341 private:
342     template <typename U>
fill(U && tensor,int i)343     void fill(U &&tensor, int i)
344     {
345         switch(tensor.data_type())
346         {
347             case DataType::F16:
348             case DataType::F32:
349             {
350                 std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
351                 library->fill(tensor, distribution, i);
352                 break;
353             }
354             case DataType::S32:
355             case DataType::QASYMM8:
356             {
357                 std::uniform_int_distribution<> distribution(-6000, 6000);
358                 library->fill(tensor, distribution, i);
359                 break;
360             }
361             default:
362                 library->fill_tensor_uniform(tensor, i);
363         }
364     }
365 
consume_params(const GEMMCommandLineOptions & opts)366     void consume_params(const GEMMCommandLineOptions &opts)
367     {
368         ARM_COMPUTE_ERROR_ON(opts.M->value() <= 0);
369         ARM_COMPUTE_ERROR_ON(opts.N->value() <= 0);
370         ARM_COMPUTE_ERROR_ON(opts.K->value() <= 0);
371         ARM_COMPUTE_ERROR_ON(opts.B->value() <= 0);
372         M           = opts.M->value();
373         N           = opts.N->value();
374         K           = opts.K->value();
375         B           = opts.B->value();
376         alpha       = opts.alpha->value();
377         beta        = opts.beta->value();
378         offset_src0 = opts.offset_src0->value();
379         offset_src1 = opts.offset_src1->value();
380         offset_dst  = opts.offset_dst->value();
381         scale_src0  = opts.scale_src0->value();
382         scale_src1  = opts.scale_src1->value();
383         scale_dst   = opts.scale_dst->value();
384         add_bias    = opts.add_bias->is_set() ? opts.add_bias->value() : true;
385         data_type   = opts.data_type->value();
386     }
387 
388     CLTensor src0{}, src1{}, src2{}, dst{};
389     CLTensor tmp_dst{}, biases{};
390 
391     CLGEMM                                              mm_gemm{};
392     CLGEMMLowpMatrixMultiplyCore                        mm_gemmlowp{};
393     CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint mm_gemmlowp_output_stage{};
394 
395     size_t   M{ 7 }, N{ 3 }, K{ 5 }, B{ 1 };
396     DataType data_type{ DataType::F32 };
397     float    alpha{ 1.0 }, beta{ 0.0 };
398     int      offset_src0{ 10 }, offset_src1{ 10 }, offset_dst{ 10 };
399     float    scale_src0{ 1.0f / 255 }, scale_src1{ 1.0f / 255 }, scale_dst{ 1.0f / 255 };
400     int32_t  dst_multiplier{ 0 }, dst_shift{ 0 };
401     bool     add_bias{ true };
402 };
403 
404 /** Main program for gemm test
405  *
406  * @param[in] argc Number of arguments
407  * @param[in] argv Arguments
408  *
409  */
main(int argc,char ** argv)410 int main(int argc, char **argv)
411 {
412     return utils::run_example<CLGEMMValidateExample>(argc, argv);
413 }
414