/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/ICLKernel.h"

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "src/core/helpers/Utils.h"

#include <cstddef>

using namespace arm_compute;

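// Add the kernel to the command queue: the global work size is derived from
// the execution window, while the local work size hint is applied only if it
// is valid for both the device and the resulting global work size.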
void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, const cl::NDRange &lws_hint, bool use_dummy_work_items)
{
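    // Nothing to enqueue if the underlying OpenCL kernel has not been created yet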
    if(kernel.kernel()() == nullptr)
    {
        return;
    }

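    // Sanity-check the execution window before deriving an NDRange from it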
    for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(window[i].step() == 0);
        // Make sure that dimensions > Z are 1
        ARM_COMPUTE_ERROR_ON((i >= 3) && ((window[i].end() - window[i].start()) != 1));
    }

    cl::NDRange gws = ICLKernel::gws_from_window(window);

    // Check for empty NDRange
    if(gws.dimensions() == 0)
    {
        return;
    }

    // Use dummy work-items: pad the first two global dimensions up to the next
    // power of two; the extra work-items are expected to perform no real work
    if(use_dummy_work_items)
    {
        gws.get()[0] = get_next_power_two(gws[0]);
        gws.get()[1] = get_next_power_two(gws[1]);
    }

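    // Discard the local work size hint if it exceeds the maximum work-group size the kernel can use on this device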
    cl::NDRange valid_lws;
    if(lws_hint[0] * lws_hint[1] * lws_hint[2] > kernel.get_max_workgroup_size())
    {
        valid_lws = cl::NullRange;
    }
    else
    {
        valid_lws = lws_hint;
    }

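    // Apply the hint only if it does not exceed the global work size in any
    // dimension; otherwise let the OpenCL runtime pick the local work size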
    cl::NDRange lws = cl::NullRange;

    if((valid_lws[0] <= gws[0]) && (valid_lws[1] <= gws[1]) && (valid_lws[2] <= gws[2]))
    {
        lws = valid_lws;
    }

    queue.enqueueNDRangeKernel(kernel.kernel(), cl::NullRange, gws, lws);
}

template <unsigned int dimension_size>
void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, const Window &window)
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);

    const ITensorInfo *info    = tensor->info();
    const Strides     &strides = info->strides_in_bytes();

    // Calculate offset to the start of the window
    unsigned int offset_first_element = info->offset_first_element_in_bytes();

    for(unsigned int n = 0; n < info->num_dimensions(); ++n)
    {
        offset_first_element += (window.is_broadcasted(n) ? 0 : window[n].start()) * strides[n];
    }

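    // Argument layout per tensor: the buffer itself, then for each dimension
    // its stride in bytes and that stride multiplied by the window step, and
    // finally the byte offset of the first element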
    unsigned int idx_start = idx;
    _kernel.setArg(idx++, tensor->cl_buffer());

    for(unsigned int d = 0; d < dimension_size; ++d)
    {
        _kernel.setArg<cl_uint>(idx++, strides[d]);
        _kernel.setArg<cl_uint>(idx++, strides[d] * window[d].step());
    }

    _kernel.setArg<cl_uint>(idx++, offset_first_element);

    ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
                                 "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
    ARM_COMPUTE_UNUSED(idx_start);
}

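// Explicit instantiations for 1D to 4D tensor arguments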
#ifndef DOXYGEN_SKIP_THIS
template void ICLKernel::add_tensor_argument<1>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<2>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<3>(unsigned &idx, const ICLTensor *tensor, const Window &window);
template void ICLKernel::add_tensor_argument<4>(unsigned &idx, const ICLTensor *tensor, const Window &window);
#endif /* DOXYGEN_SKIP_THIS */

void ICLKernel::set_target(cl::Device &device)
{
    _target = get_target_from_device(device);
}

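// Lazily query and cache the maximum work-group size supported for this kernel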
size_t ICLKernel::get_max_workgroup_size()
{
    if(_max_workgroup_size == 0)
    {
        _max_workgroup_size = CLKernelLibrary::get().max_local_workgroup_size(_kernel);
    }
    return _max_workgroup_size;
}

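// Derive the global work size from an execution window: one work-item per
// window iteration in X, Y and Z, or a null range if the window is empty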
cl::NDRange ICLKernel::gws_from_window(const Window &window)
{
    if((window.x().end() - window.x().start()) == 0 || (window.y().end() - window.y().start()) == 0)
    {
        return cl::NullRange;
    }

    cl::NDRange gws((window.x().end() - window.x().start()) / window.x().step(),
                    (window.y().end() - window.y().start()) / window.y().step(),
                    (window.z().end() - window.z().start()) / window.z().step());

    return gws;
}