/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
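/** Gemm tuner example for the OpenCL GEMM "reshaped" strategy: the lhs matrix is reshaped with
 * ClGemmReshapeLhsMatrixKernel and then multiplied against an already-reshaped rhs matrix by
 * ClGemmMatrixMultiplyReshapedKernel. The gemm shape and the tunable reshape/block parameters
 * are taken from the command line.
 */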
#ifndef ARM_COMPUTE_CL /* Needed by Utils.cpp to handle OpenCL exceptions properly */
#error "This example needs to be built with -DARM_COMPUTE_CL"
#endif /* ARM_COMPUTE_CL */

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTuner.h"
#include "examples/gemm_tuner/CommonGemmExampleOptions.h"
#include "examples/gemm_tuner/GemmTunerHelpers.h"
#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h"
#include "src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
#include "tests/CL/Helper.h"
#include "utils/Utils.h"
#include "utils/command_line/CommandLineOptions.h"
#include "utils/command_line/CommandLineParser.h"

#include <cstdlib>

using namespace arm_compute;
using namespace arm_compute::opencl::kernels;
using namespace utils;
using namespace arm_compute::misc::shape_calculator;
using namespace gemm_tuner;

namespace
{
/** Structure holding all tunable gemm configs specific to this example/strategy */
struct GemmConfigs
{
    size_t m0{ 4 };                        /**< Number of rows processed by the matrix multiplication */
    size_t n0{ 4 };                        /**< Number of columns processed by the matrix multiplication */
    size_t k0{ 4 };                        /**< Number of partial accumulations performed by the matrix multiplication */
    size_t v0{ 1 };                        /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
    size_t h0{ 1 };                        /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
    bool   interleave_lhs{ true };         /**< Interleave lhs matrix */
    bool   transpose_lhs{ true };          /**< Transpose lhs matrix. */
    bool   interleave_rhs{ true };         /**< Interleave rhs matrix */
    bool   transpose_rhs{ true };          /**< Transpose rhs matrix. */
    bool   export_to_cl_image_rhs{ true }; /**< Export rhs matrix to cl_image. */
};

/** Formatted output of the GemmConfigs type
 *
 * @param[out] os      Output stream.
 * @param[in]  configs Tunable configurations to output
 *
 * @return Modified output stream.
 */
::std::ostream &operator<<(::std::ostream &os, const GemmConfigs &configs)
{
    std::string false_str = std::string("false");
    std::string true_str  = std::string("true");

    os << "m0 : " << configs.m0 << std::endl;
    os << "n0 : " << configs.n0 << std::endl;
    os << "k0 : " << configs.k0 << std::endl;
    os << "v0 : " << configs.v0 << std::endl;
    os << "h0 : " << configs.h0 << std::endl;
    os << "interleave_lhs : " << (configs.interleave_lhs ? true_str : false_str) << std::endl;
    os << "transpose_lhs : " << (configs.transpose_lhs ? true_str : false_str) << std::endl;
    os << "interleave_rhs : " << (configs.interleave_rhs ? true_str : false_str) << std::endl;
    os << "transpose_rhs : " << (configs.transpose_rhs ? true_str : false_str) << std::endl;
    os << "export_to_cl_image_rhs : " << (configs.export_to_cl_image_rhs ? true_str : false_str) << std::endl;
    return os;
}

/** Command line options for gemm configs */
class GemmConfigOptions
{
public:
    /** Constructor
     *
     * @param[in,out] parser A parser on which "parse()" hasn't been called yet.
     */
    GemmConfigOptions(CommandLineParser &parser)
        : m0(parser.add_positional_option<SimpleOption<size_t>>("m0", 4)),
          n0(parser.add_positional_option<SimpleOption<size_t>>("n0", 4)),
          k0(parser.add_positional_option<SimpleOption<size_t>>("k0", 4)),
          v0(parser.add_positional_option<SimpleOption<size_t>>("v0", 1)),
          h0(parser.add_positional_option<SimpleOption<size_t>>("h0", 1)),
          interleave_lhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_lhs", 1)),
          interleave_rhs(parser.add_positional_option<SimpleOption<size_t>>("interleave_rhs", 1)),
          transpose_rhs(parser.add_positional_option<SimpleOption<size_t>>("transpose_rhs", 1)),
          export_to_cl_image_rhs(parser.add_positional_option<SimpleOption<size_t>>("export_to_cl_image_rhs", 1))
    {
        m0->set_help("Number of rows processed by the matrix multiplication");
        n0->set_help("Number of columns processed by the matrix multiplication");
        k0->set_help("Number of partial accumulations performed by the matrix multiplication");
        v0->set_help("Number of vertical blocks of size (m0xk0) stored on the same output row");
        h0->set_help("Number of horizontal blocks of size (k0xn0) stored on the same output row");
        interleave_lhs->set_help("Interleave lhs matrix (1) / Do not interleave lhs matrix (0)");
        interleave_rhs->set_help("Interleave rhs matrix (1) / Do not interleave rhs matrix (0)");
        // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
        // transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
        // 2 variants (both transposed and neither transposed)
        transpose_rhs->set_help("Transpose rhs matrix but not lhs matrix (1) / Do not transpose rhs matrix but do transpose lhs matrix (0)");
        export_to_cl_image_rhs->set_help("Export rhs matrix to cl_image (1) / Do not export rhs matrix to cl_image (0)");
    }
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    GemmConfigOptions(const GemmConfigOptions &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    GemmConfigOptions &operator=(const GemmConfigOptions &) = delete;
    /** Allow instances of this class to be moved */
    GemmConfigOptions(GemmConfigOptions &&) = default;
    /** Allow instances of this class to be moved */
    GemmConfigOptions &operator=(GemmConfigOptions &&) = default;
    /** Default destructor */
    ~GemmConfigOptions() = default;

    SimpleOption<size_t> *m0;             /**< Number of rows processed by the matrix multiplication option */
    SimpleOption<size_t> *n0;             /**< Number of columns processed by the matrix multiplication option */
    SimpleOption<size_t> *k0;             /**< Number of partial accumulations performed by the matrix multiplication option */
    SimpleOption<size_t> *v0;             /**< Number of vertical blocks of size (m0xk0) stored on the same output row option */
    SimpleOption<size_t> *h0;             /**< Number of horizontal blocks of size (k0xn0) stored on the same output row option */
    SimpleOption<size_t> *interleave_lhs; /**< Interleave lhs matrix option (1 enable; 0 disable) */
    SimpleOption<size_t> *interleave_rhs; /**< Interleave rhs matrix option (1 enable; 0 disable) */
    // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
    // transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
    // 2 variants (both transposed and neither transposed)
    SimpleOption<size_t> *transpose_rhs;          /**< Transpose rhs matrix option (1 enable; 0 disable). Also set the lhs matrix transpose option to the opposite. */
    SimpleOption<size_t> *export_to_cl_image_rhs; /**< Export rhs matrix to cl_image option (1 enable; 0 disable). */
};

/** Consumes the gemm configuration options and creates a structure containing all information
 *
 * @param[in] options Options to consume
 *
 * @return Structure containing the gemm configurations
 */
GemmConfigs consume_gemm_configs(const GemmConfigOptions &options)
{
    GemmConfigs configs;
    configs.m0             = options.m0->value();
    configs.n0             = options.n0->value();
    configs.k0             = options.k0->value();
    configs.v0             = options.v0->value();
    configs.h0             = options.h0->value();
    configs.interleave_lhs = options.interleave_lhs->value() != 0;
    // FIXME: Currently we only support 2 variants of the gemm reshaped kernels in which transpose_lhs and
    // transpose_rhs are the opposites of each other. In the future we may extend the kernels to include the other
    // 2 variants (both transposed and neither transposed)
    configs.transpose_lhs          = options.transpose_rhs->value() == 0;
    configs.interleave_rhs         = options.interleave_rhs->value() != 0;
    configs.transpose_rhs          = options.transpose_rhs->value() != 0;
    configs.export_to_cl_image_rhs = options.export_to_cl_image_rhs->value() != 0;
    return configs;
}

} // namespace

// Create function for ClGemmReshapeLhsMatrixKernel
using CLGEMMReshapeLHSMatrix = test::CLSynthetizeOperator<ClGemmReshapeLhsMatrixKernel>;
// Create function for ClGemmMatrixMultiplyReshapedKernel
using CLGEMMMatrixMultiplyReshaped = test::CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedKernel>;

class CLGEMMMatrixMultiplyReshapedExample : public Example
{
public:
    bool do_setup(int argc, char **argv) override
    {
        // Default parameters
        const float               alpha    = 1.0f;
        const float               beta     = 0.0f;
        const ActivationLayerInfo act_info = ActivationLayerInfo();
        CommonGemmExampleParams   params;
        GemmConfigs               configs;

        // Set up command line parser and options
        CommandLineParser        parser;
        CommonGemmExampleOptions param_options(parser);
        GemmConfigOptions        config_options(parser);

        // Parse command line options
        parser.parse(argc, argv);
        if(param_options.help->is_set() && param_options.help->value())
        {
            // Print help message
            parser.print_help(argv[0]);
            return false;
        }
        if(!parser.validate())
        {
            // Invalid arguments. Use default parameters and configs
            std::cerr << "Invalid arguments." << std::endl;
            parser.print_help(argv[0]);
            std::cerr << "Falling back to default parameters and configs" << std::endl;
        }
        else
        {
            // Get parameters and configs from command-line options
            params  = consume_common_gemm_example_parameters(param_options);
            configs = consume_gemm_configs(config_options);
        }

        // Print gemm parameters and configurations
        std::cout << "Gemm parameters:" << std::endl;
        std::cout << params << std::endl;
        std::cout << "Gemm configurations:" << std::endl;
        std::cout << configs << std::endl;

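        // Set up the CLTuner with the requested tuning mode and use it to initialise the
        // CLScheduler, so that the kernels configured below are tuned by the CLTuner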
        tuner.set_tuner_mode(params.tuner_mode);

        CLScheduler::get().default_init(&tuner);

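        // Initialise the input tensors. TensorShape is given as (width, height, batch):
        // lhs has K columns and M rows, rhs has N columns and K rows, and bias is a single
        // row of N elements, each with B batches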
        lhs.allocator()->init(TensorInfo(TensorShape(params.K, params.M, params.B), 1, params.data_type));
        rhs.allocator()->init(TensorInfo(TensorShape(params.N, params.K, params.B), 1, params.data_type));
        bias.allocator()->init(TensorInfo(TensorShape(params.N, 1, params.B), 1, params.data_type));

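        // Describe how the lhs matrix is blocked and reshaped: blocks of size m0 x k0,
        // v0 vertical blocks stored on the same output row, optional interleaving/transposition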
        GEMMLHSMatrixInfo lhs_info;
        lhs_info.m0         = configs.m0;
        lhs_info.k0         = configs.k0;
        lhs_info.v0         = configs.v0;
        lhs_info.interleave = configs.interleave_lhs;
        lhs_info.transpose  = configs.transpose_lhs;

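        // Describe how the rhs matrix is blocked and reshaped: blocks of size k0 x n0,
        // h0 horizontal blocks stored on the same output row, optional interleaving/transposition,
        // and optionally export the reshaped rhs to a cl_image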
        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0                 = configs.n0;
        rhs_info.k0                 = configs.k0;
        rhs_info.h0                 = configs.h0;
        rhs_info.interleave         = configs.interleave_rhs;
        rhs_info.transpose          = configs.transpose_rhs;
        rhs_info.export_to_cl_image = configs.export_to_cl_image_rhs;

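        // Describe the overall GEMM: problem dimensions (M, N, K), how the output is interpreted,
        // bias broadcasting and the fused activation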
        GEMMKernelInfo kernel_info;
        kernel_info.m                       = params.M;
        kernel_info.n                       = params.N;
        kernel_info.k                       = params.K;
        kernel_info.depth_output_gemm3d     = 0;
        kernel_info.reinterpret_input_as_3d = false;
        kernel_info.broadcast_bias          = true;
        kernel_info.activation_info         = act_info;

        // Initialise lhs_reshaped tensor info
        lhs_reshaped.allocator()->init(TensorInfo(compute_lhs_reshaped_shape(*lhs.info(), lhs_info), 1, params.data_type));

        // Initialise rhs_reshaped tensor info
        rhs_reshaped.allocator()->init(TensorInfo(compute_rhs_reshaped_shape(*rhs.info(), rhs_info), 1, params.data_type));

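        // When exporting the rhs to a cl_image, adjust the padding of the reshaped rhs
        // accordingly; bail out if the device does not support cl_image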
        if(rhs_info.export_to_cl_image)
        {
            if(!examples::gemm_tuner_helpers::update_padding_for_cl_image(rhs_reshaped.info()))
            {
                std::cerr << "cl_image is not supported on the device, disable export_to_cl_image" << std::endl;
                return false;
            }
        }

        // Validate arguments
        Status status{};
        status = reshape_lhs.validate(lhs.info(), lhs_reshaped.info(), lhs_info, kernel_info.reinterpret_input_as_3d);
        if(!status)
        {
            // Unsupported arguments
            std::cerr << "Unsupported arguments." << std::endl;
            std::cerr << "Check documentation for supported/unsupported combinations" << std::endl;
            return false;
        }

        status = gemm.validate(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
        if(!status)
        {
            // Unsupported arguments
            std::cerr << "Unsupported arguments." << std::endl;
            std::cerr << "Check documentation for supported/unsupported combinations" << std::endl;
            return false;
        }

        // Configure reshape lhs function
        reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);

        // Configure function
        gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);

        // Allocate tensors
        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        lhs_reshaped.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        return true;
    }
    void do_run() override
    {
        // Execute the functions
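        // Bind the tensors to the operators' argument slots (ACL_SRC* / ACL_DST) via ITensorPack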
        ITensorPack reshape_lhs_pack({ { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } });
        reshape_lhs.run(reshape_lhs_pack);

        ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
                                { ACL_SRC_1, &rhs_reshaped },
                                { ACL_SRC_2, &bias },
                                { ACL_DST, &dst } });
        gemm.run(gemm_pack);

        // Make sure all the OpenCL jobs are done executing:
        CLScheduler::get().sync();
    }

    void do_teardown() override
    {
    }

private:
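    // Tensors, tuner and operator wrappers kept alive between do_setup() and do_run()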
    CLTensor lhs{};
    CLTensor rhs{};
    CLTensor lhs_reshaped{};
    CLTensor rhs_reshaped{};
    CLTensor bias{};
    CLTensor dst{};
    CLTuner  tuner{};
    CLGEMMReshapeLHSMatrix       reshape_lhs{};
    CLGEMMMatrixMultiplyReshaped gemm{};
};

/** Main program for gemm reshaped test
 *
 * @param[in] argc Number of arguments
 * @param[in] argv Arguments ( [optional] M, [optional] N, [optional] K, [optional] B, [optional] m0, [optional] n0, [optional] k0, [optional] v0, [optional] h0, [optional] interleave_lhs, [optional] interleave_rhs, [optional] transpose_rhs, [optional] export_to_cl_image )
 */
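// Example invocation, following the positional argument order documented above
// (the binary name and the values shown are purely illustrative):
//   ./cl_gemm_reshaped 1024 1024 256 1 4 4 4 1 1 1 1 1 0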
int main(int argc, char **argv)
{
    return run_example<CLGEMMMatrixMultiplyReshapedExample>(argc, argv);
}