1 /*
2 * Copyright (c) 2016-2022 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24 #ifndef ARM_COMPUTE_ICLKERNEL_H
25 #define ARM_COMPUTE_ICLKERNEL_H
26
27 #include "arm_compute/core/CL/CLKernelLibrary.h"
28 #include "arm_compute/core/CL/CLTypes.h"
29 #include "arm_compute/core/CL/OpenCL.h"
30 #include "arm_compute/core/GPUTarget.h"
31 #include "arm_compute/core/IKernel.h"
32 #include "arm_compute/core/Validate.h"
33 #include "arm_compute/core/experimental/Types.h"
34 #include "arm_compute/runtime/CL/CLTuningParams.h"
35
36 #include "src/core/CL/DefaultLWSHeuristics.h"
37
38 #include <string>
39
40 namespace arm_compute
41 {
42 namespace
43 {
is_same_lws(cl::NDRange lws0,cl::NDRange lws1)44 bool is_same_lws(cl::NDRange lws0, cl::NDRange lws1)
45 {
46 if(lws0.dimensions() != lws1.dimensions())
47 {
48 return false;
49 }
50
51 for(size_t i = 0; i < lws0.dimensions(); ++i)
52 {
53 if(lws0.get()[i] != lws1.get()[i])
54 {
55 return false;
56 }
57 }
58
59 return true;
60 }
61 } // namespace
62 template <typename T>
63 class ICLArray;
64 class ICLTensor;
65 class Window;
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
private:
    /** Returns the number of arguments enqueued per array object.
     *
     * @return The number of arguments enqueued per array object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_array()
    {
        // Arrays use the same kernel-argument layout as tensors of the same rank.
        return num_arguments_per_tensor<dimension_size>();
    }
    /** Returns the number of arguments enqueued per tensor object.
     *
     * @return The number of arguments enqueued per tensor object.
     */
    template <unsigned int dimension_size>
    constexpr static unsigned int num_arguments_per_tensor()
    {
        // Buffer + offset_first_element, plus (stride, step) per dimension.
        return 2 + 2 * dimension_size;
    }

    /** Compute the default local workgroup size for the given execution window.
     *
     * Uses the kernel type and the global work size derived from @p window.
     *
     * @param[in] window Execution window to derive the global work size from.
     *
     * @return Default local workgroup size for this kernel type.
     */
    cl::NDRange default_lws_tune(const Window &window)
    {
        return get_default_lws_for_type(_type, gws_from_window(window));
    }

    using IKernel::configure; //Prevent children from calling IKernel::configure() directly
protected:
    /** Configure the kernel's window and local workgroup size hint.
     *
     * @param[in] window    The maximum window which will be returned by window()
     * @param[in] lws_hint  Local-Workgroup-Size to use.
     * @param[in] wbsm_hint (Optional) Workgroup-Batch-Size-Modifier to use.
     */
    void configure_internal(const Window &window, cl::NDRange lws_hint, cl_int wbsm_hint = 0)
    {
        configure_internal(window, CLTuningParams(lws_hint, wbsm_hint));
    }

    /** Configure the kernel's window and tuning parameters hints.
     *
     * @param[in] window             The maximum window which will be returned by window()
     * @param[in] tuning_params_hint (Optional) Tuning parameters to use.
     */
    void configure_internal(const Window &window, CLTuningParams tuning_params_hint = CLTuningParams(CLKernelLibrary::get().default_ndrange(), 0))
    {
        _tuning_params_hint = tuning_params_hint;

        // If the caller left the LWS at the library default, replace it with the
        // per-kernel-type heuristic derived from the execution window.
        if(is_same_lws(_tuning_params_hint.get_lws(), CLKernelLibrary::get().default_ndrange()))
        {
            _tuning_params_hint.set_lws(default_lws_tune(window));
        }

        IKernel::configure(window);
    }

public:
    /** Constructor */
    ICLKernel()
        : _kernel(nullptr), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0), _type(CLKernelType::UNKNOWN), _tuning_params_hint()
    {
    }
    /** Returns a reference to the OpenCL kernel of this object.
     *
     * @return A reference to the OpenCL kernel of this object.
     */
    cl::Kernel &kernel()
    {
        return _kernel;
    }
    /** Returns the CL kernel type
     *
     * @return The CL kernel type
     */
    CLKernelType type() const
    {
        return _type;
    }
    /** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T>
    void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
    {
        add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<1>(idx, tensor, window);
    }
    /** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_1D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_1D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<2>(idx, tensor, window);
    }
    /** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx if the condition is true.
     *
     * @param[in]     cond   Condition to check
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_2D_tensor_argument_if(bool cond, unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        if(cond)
        {
            add_2D_tensor_argument(idx, tensor, window);
        }
    }
    /** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<3>(idx, tensor, window);
    }
    /** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<4>(idx, tensor, window);
    }
    /** Add the passed 5D tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    void add_5D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
    {
        add_tensor_argument<5>(idx, tensor, window);
    }

    /** Add the passed NHW 3D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHW 3D Tensor object.
     *
     * @return The number of arguments enqueued per NHW 3D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_3d_tensor_nhw()
    {
        // Must match the argument layout of add_3d_tensor_nhw_argument().
        constexpr unsigned int no_args_per_3d_tensor_nhw = 7u;
        return no_args_per_3d_tensor_nhw;
    }

    /** Add the passed NHWC 4D tensor's parameters to the object's kernel's arguments by passing strides, dimensions and the offset to the first valid element in bytes.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     */
    void add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor);

    /** Returns the number of arguments enqueued per NHWC 4D Tensor object.
     *
     * @return The number of arguments enqueued per NHWC 4D Tensor object.
     */
    constexpr static unsigned int num_arguments_per_4d_tensor_nhwc()
    {
        // Must match the argument layout of add_4d_tensor_nhwc_argument().
        constexpr unsigned int no_args_per_4d_tensor_nhwc = 9u;
        return no_args_per_4d_tensor_nhwc;
    }

    /** Returns the number of arguments enqueued per 1D array object.
     *
     * @return The number of arguments enqueued per 1D array object.
     */
    constexpr static unsigned int num_arguments_per_1D_array()
    {
        return num_arguments_per_array<1>();
    }
    /** Returns the number of arguments enqueued per 1D tensor object.
     *
     * @return The number of arguments enqueued per 1D tensor object.
     */
    constexpr static unsigned int num_arguments_per_1D_tensor()
    {
        return num_arguments_per_tensor<1>();
    }
    /** Returns the number of arguments enqueued per 2D tensor object.
     *
     * @return The number of arguments enqueued per 2D tensor object.
     */
    constexpr static unsigned int num_arguments_per_2D_tensor()
    {
        return num_arguments_per_tensor<2>();
    }
    /** Returns the number of arguments enqueued per 3D tensor object.
     *
     * @return The number of arguments enqueued per 3D tensor object.
     */
    constexpr static unsigned int num_arguments_per_3D_tensor()
    {
        return num_arguments_per_tensor<3>();
    }
    /** Returns the number of arguments enqueued per 4D tensor object.
     *
     * @return The number of arguments enqueued per 4D tensor object.
     */
    constexpr static unsigned int num_arguments_per_4D_tensor()
    {
        return num_arguments_per_tensor<4>();
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @note The default implementation is a no-op; kernels using the legacy tensor interface override this.
     *
     * @param[in]     window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue  Command queue on which to enqueue the kernel.
     */
    virtual void run(const Window &window, cl::CommandQueue &queue)
    {
        ARM_COMPUTE_UNUSED(window, queue);
    }
    /** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
     *
     * @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
     *
     * @note The default implementation is a no-op; operator-style kernels override this.
     *
     * @param[in]     tensors A vector containing the tensors to operate on.
     * @param[in]     window  Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     * @param[in,out] queue   Command queue on which to enqueue the kernel.
     */
    virtual void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
    {
        ARM_COMPUTE_UNUSED(tensors, window, queue);
    }
    /** Add the passed parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx   Index at which to start adding the arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     value Value to set as an argument of the object's kernel.
     */
    template <typename T>
    void add_argument(unsigned int &idx, T value)
    {
        _kernel.setArg(idx++, value);
    }

    /** Set the Local-Workgroup-Size hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] lws_hint Local-Workgroup-Size to use
     */
    void set_lws_hint(const cl::NDRange &lws_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // lws_hint will be overwritten by configure()
        _tuning_params_hint.set_lws(lws_hint);
    }

    /** Return the Local-Workgroup-Size hint
     *
     * @return Current lws hint
     */
    cl::NDRange lws_hint() const
    {
        return _tuning_params_hint.get_lws();
    }

    /** Set the workgroup batch size modifier hint
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @param[in] wbsm_hint workgroup batch size modifier value
     */
    void set_wbsm_hint(const cl_int &wbsm_hint)
    {
        ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); // wbsm_hint will be overwritten by configure()
        _tuning_params_hint.set_wbsm(wbsm_hint);
    }

    /** Return the workgroup batch size modifier hint
     *
     * @return Current wbsm hint
     */
    cl_int wbsm_hint() const
    {
        return _tuning_params_hint.get_wbsm();
    }

    /** Get the configuration ID
     *
     * @note The configuration ID can be used by the caller to distinguish different calls of the same OpenCL kernel
     *       In particular, this method can be used by CLScheduler to keep track of the best LWS for each configuration of the same kernel.
     *       The configuration ID should be provided only for the kernels potentially affected by the LWS geometry
     *
     * @note This method should be called after the configuration of the kernel
     *
     * @return configuration id string
     */
    const std::string &config_id() const
    {
        return _config_id;
    }

    /** Set the targeted GPU architecture
     *
     * @param[in] target The targeted GPU architecture
     */
    void set_target(GPUTarget target)
    {
        _target = target;
    }

    /** Set the targeted GPU architecture according to the CL device
     *
     * @param[in] device A CL device
     */
    void set_target(cl::Device &device);

    /** Get the targeted GPU architecture
     *
     * @return The targeted GPU architecture.
     */
    GPUTarget get_target() const
    {
        return _target;
    }

    /** Get the maximum workgroup size for the device the CLKernelLibrary uses.
     *
     * @return The maximum workgroup size value.
     */
    size_t get_max_workgroup_size();
    /** Get the global work size given an execution window
     *
     * @param[in] window Execution window
     *
     * @return Global work size of the given execution window
     */
    static cl::NDRange gws_from_window(const Window &window);

private:
    /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx            Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     array          Array to set as an argument of the object's kernel.
     * @param[in]     strides        @ref Strides object containing stride of each dimension in bytes.
     * @param[in]     num_dimensions Number of dimensions of the @p array.
     * @param[in]     window         Window the kernel will be executed on.
     */
    template <typename T, unsigned int dimension_size>
    void add_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
    /** Add the passed tensor's parameters to the object's kernel's arguments starting from the index idx.
     *
     * @param[in,out] idx    Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
     * @param[in]     tensor Tensor to set as an argument of the object's kernel.
     * @param[in]     window Window the kernel will be executed on.
     */
    template <unsigned int dimension_size>
    void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);

protected:
    cl::Kernel  _kernel;             /**< OpenCL kernel to run */
    GPUTarget   _target;             /**< The targeted GPU */
    std::string _config_id;          /**< Configuration ID */
    size_t      _max_workgroup_size; /**< The maximum workgroup size for this kernel */
    CLKernelType _type;              /**< The CL kernel type */
private:
    CLTuningParams _tuning_params_hint; /**< Tuning parameters hint for the OpenCL kernel */
};
469
470 /** Add the kernel to the command queue with the given window.
471 *
472 * @note Depending on the size of the window, this might translate into several jobs being enqueued.
473 *
474 * @note If kernel->kernel() is empty then the function will return without adding anything to the queue.
475 *
476 * @param[in,out] queue OpenCL command queue.
477 * @param[in] kernel Kernel to enqueue
478 * @param[in] window Window the kernel has to process.
479 * @param[in] lws_hint (Optional) Local workgroup size requested. Default is based on the device target.
480 * @param[in] use_dummy_work_items (Optional) Use dummy work items in order to have two dimensional power of two NDRange. Default is false
481 * Note: it is kernel responsibility to check if the work-item is out-of-range
482 *
483 * @note If any dimension of the lws is greater than the global workgroup size then no lws will be passed.
484 */
485 void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint = CLKernelLibrary::get().default_ndrange(), bool use_dummy_work_items = false);
486
487 /** Add the passed array's parameters to the object's kernel's arguments starting from the index idx.
488 *
489 * @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
490 * @param[in] array Array to set as an argument of the object's kernel.
491 * @param[in] strides @ref Strides object containing stride of each dimension in bytes.
492 * @param[in] num_dimensions Number of dimensions of the @p array.
493 * @param[in] window Window the kernel will be executed on.
494 */
495 template <typename T, unsigned int dimension_size>
add_array_argument(unsigned & idx,const ICLArray<T> * array,const Strides & strides,unsigned int num_dimensions,const Window & window)496 void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
497 {
498 ARM_COMPUTE_ERROR_ON(array == nullptr);
499
500 // Calculate offset to the start of the window
501 unsigned int offset_first_element = 0;
502
503 for(unsigned int n = 0; n < num_dimensions; ++n)
504 {
505 offset_first_element += window[n].start() * strides[n];
506 }
507
508 unsigned int idx_start = idx;
509 _kernel.setArg(idx++, array->cl_buffer());
510
511 for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
512 {
513 _kernel.setArg<cl_uint>(idx++, strides[dimension]);
514 _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
515 }
516
517 _kernel.setArg<cl_uint>(idx++, offset_first_element);
518
519 ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_array<dimension_size>() != idx,
520 "add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
521 ARM_COMPUTE_UNUSED(idx_start);
522 }
523 }
524 #endif /*ARM_COMPUTE_ICLKERNEL_H */
525