/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/ICLKernel.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "src/core/helpers/Utils.h"

#include <cstddef>

void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint, bool use_dummy_work_items)
{
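    // Nothing to enqueue if the kernel has not been configured (the underlying cl_kernel is null)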
    if(kernel.kernel()() == nullptr)
    {
        return;
    }

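    // OpenCL NDRanges are at most 3-dimensional, so every window dimension beyond Z must collapse to a single iteration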
    for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(window[i].step() == 0);
        // Make sure that dimensions > Z are 1
        ARM_COMPUTE_ERROR_ON((i >= 3) && ((window[i].end() - window[i].start()) != 1));
    }

    cl::NDRange gws = ICLKernel::gws_from_window(window);

    // Check for empty NDRange
    if(gws.dimensions() == 0)
    {
        return;
    }

    // Round the x and y global work sizes up to the next power of two: the extra "dummy"
    // work-items let more local work-group sizes divide the global size evenly, and the
    // kernel is expected to discard the out-of-range work-items
    if(use_dummy_work_items)
    {
        gws.get()[0] = get_next_power_two(gws[0]);
        gws.get()[1] = get_next_power_two(gws[1]);
    }

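    // If the hinted local work size exceeds the kernel's maximum work-group size, fall back to
    // cl::NullRange and let the OpenCL runtime choose the local size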
    cl::NDRange valid_lws;
    if(lws_hint[0] * lws_hint[1] * lws_hint[2] > kernel.get_max_workgroup_size())
    {
        valid_lws = cl::NullRange;
    }
    else
    {
        valid_lws = lws_hint;
    }

    cl::NDRange lws = cl::NullRange;

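    // Only adopt the validated hint if it does not exceed the global work size in any dimension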
    if((valid_lws[0] <= gws[0]) && (valid_lws[1] <= gws[1]) && (valid_lws[2] <= gws[2]))
    {
        lws = valid_lws;
    }

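    // Pass the workgroup batch size modifier (wbsm) hint to the kernel when the Arm extension is available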
    if(CLKernelLibrary::get().is_wbsm_supported())
    {
        set_wbsm(kernel.kernel(), kernel.wbsm_hint());
    }
    queue.enqueueNDRangeKernel(kernel.kernel(), cl::NullRange, gws, lws);
}

namespace arm_compute
{
template <unsigned int dimension_size>
void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, const Window &window)
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);

    const ITensorInfo *info    = tensor->info();
    const Strides     &strides = info->strides_in_bytes();

    // Calculate offset to the start of the window
    unsigned int offset_first_element = info->offset_first_element_in_bytes();

    for(unsigned int n = 0; n < info->num_dimensions(); ++n)
    {
        offset_first_element += (window.is_broadcasted(n) ? 0 : window[n].start()) * strides[n];
    }

    unsigned int idx_start = idx;
    _kernel.setArg(idx++, tensor->cl_buffer());

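    // For each dimension pass the byte stride and the stride scaled by the window step;
    // broadcast dimensions get zero so every work-item reads the same element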
    for(unsigned int d = 0; d < dimension_size; ++d)
    {
        _kernel.setArg<cl_uint>(idx++, window.is_broadcasted(d) ? 0 : strides[d]);
        _kernel.setArg<cl_uint>(idx++, window.is_broadcasted(d) ? 0 : (strides[d] * window[d].step()));
    }

    _kernel.setArg<cl_uint>(idx++, offset_first_element);

    ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
                                 "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
    ARM_COMPUTE_UNUSED(idx_start);
}

void ICLKernel::add_3d_tensor_nhw_argument(unsigned int &idx, const ICLTensor *tensor)
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);

    const ITensorInfo *info = tensor->info();
    ARM_COMPUTE_ERROR_ON(info == nullptr);
    const Strides &strides = info->strides_in_bytes();

    // Tensor pointer
    _kernel.setArg(idx++, tensor->cl_buffer());

    // Add stride_y, stride_z
    _kernel.setArg<cl_uint>(idx++, strides[1]);
    _kernel.setArg<cl_uint>(idx++, strides[2]);

    // Tensor dimensions
    _kernel.setArg<cl_uint>(idx++, info->dimension(0));
    _kernel.setArg<cl_uint>(idx++, info->dimension(1));
    _kernel.setArg<cl_uint>(idx++, info->dimension(2));

    // Offset of first element
    unsigned int offset_first_element = info->offset_first_element_in_bytes();
    _kernel.setArg<cl_uint>(idx++, offset_first_element);
}

void ICLKernel::add_4d_tensor_nhwc_argument(unsigned int &idx, const ICLTensor *tensor)
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);

    const ITensorInfo *info = tensor->info();
    ARM_COMPUTE_ERROR_ON(info == nullptr);
    const Strides &strides = info->strides_in_bytes();

    // Tensor pointer
    _kernel.setArg(idx++, tensor->cl_buffer());

    // Add stride_y, stride_z and stride_w
    _kernel.setArg<cl_uint>(idx++, strides[1]);
    _kernel.setArg<cl_uint>(idx++, strides[2]);
    _kernel.setArg<cl_uint>(idx++, strides[3]);

    // Tensor dimensions
    _kernel.setArg<cl_uint>(idx++, info->dimension(0));
    _kernel.setArg<cl_uint>(idx++, info->dimension(1));
    _kernel.setArg<cl_uint>(idx++, info->dimension(2));
    _kernel.setArg<cl_uint>(idx++, info->dimension(3));

    // Offset of first element
    unsigned int offset_first_element = info->offset_first_element_in_bytes();
    _kernel.setArg<cl_uint>(idx++, offset_first_element);
}

#ifndef DOXYGEN_SKIP_THIS
template void ICLKernel::add_tensor_argument<1>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<2>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<3>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<4>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<5>(unsigned &idx, const ICLTensor *tensor, const Window &window);
#endif /* DOXYGEN_SKIP_THIS */

void ICLKernel::set_target(cl::Device &device)
{
    _target = get_target_from_device(device);
}

size_t ICLKernel::get_max_workgroup_size()
{
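    // Query the maximum work-group size for this kernel once and cache the result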
    if(_max_workgroup_size == 0)
    {
        _max_workgroup_size = CLKernelLibrary::get().max_local_workgroup_size(_kernel);
    }
    return _max_workgroup_size;
}

cl::NDRange ICLKernel::gws_from_window(const Window &window)
{
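    // A window with no iterations along x or y produces an empty NDRange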
    if((window.x().end() - window.x().start()) == 0 || (window.y().end() - window.y().start()) == 0)
    {
        return cl::NullRange;
    }

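    // Launch one work-item per window step along each of the first three dimensions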
    cl::NDRange gws((window.x().end() - window.x().start()) / window.x().step(),
                    (window.y().end() - window.y().start()) / window.y().step(),
                    (window.z().end() - window.z().start()) / window.z().step());

    return gws;
}
} // namespace arm_compute