/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/CLScheduler.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/runtime/CL/CLTuner.h"
#include "src/core/CL/ICLKernel.h"

namespace arm_compute
{
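// Re-reads the context from the CLKernelLibrary so the scheduler's cached copy stays in sync, then returns it.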
cl::Context &CLScheduler::context()
{
    ARM_COMPUTE_ERROR_ON(!_is_initialised);
    _context = CLKernelLibrary::get().context();
    return _context;
}

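// Returns the command queue kernels are enqueued on; only valid once the scheduler has been initialised.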
cl::CommandQueue &CLScheduler::queue()
{
    ARM_COMPUTE_ERROR_ON(!_is_initialised);
    return _queue;
}

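// Trivial accessors for the cached GPU target and the GEMM heuristics handle.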
GPUTarget CLScheduler::target() const
{
    return _target;
}

CLGEMMHeuristicsHandle *CLScheduler::gemm_heuristics() const
{
    return _gemm_heuristics;
}

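// Replaces the command queue, e.g. with one created with CL_QUEUE_PROFILING_ENABLE when profiling kernels.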
void CLScheduler::set_queue(cl::CommandQueue queue)
{
    _queue = std::move(queue);
}

void CLScheduler::set_target(GPUTarget target)
{
    _target = target;
}

void CLScheduler::set_tuner(ICLTuner *tuner)
{
    _cl_tuner = tuner;
}

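// Blocks the host until every command enqueued on the queue has completed (clFinish).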
void CLScheduler::sync()
{
    _queue.finish();
}

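// Enqueues a marker and returns its event, which completes once all previously enqueued
// commands have finished; unlike sync(), this does not block the host.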
cl::Event CLScheduler::enqueue_sync_event()
{
    cl::Event event;
    _queue.enqueueMarker(&event);
    return event;
}

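// Runs the tuner's static tuning pass on the kernel if a tuner has been set; a no-op otherwise.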
void CLScheduler::tune_kernel_static(ICLKernel &kernel)
{
    if(_cl_tuner != nullptr)
    {
        _cl_tuner->tune_kernel_static(kernel);
    }
}

bool CLScheduler::is_initialised() const
{
    return _is_initialised;
}

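// Guards the one-time loading of the OpenCL symbols performed in get().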
std::once_flag CLScheduler::_initialize_symbols;

CLScheduler::CLScheduler()
    : _context(), _queue(), _target(GPUTarget::MIDGARD), _is_initialised(false), _cl_tuner(nullptr), _gemm_heuristics(nullptr), _backend_type(CLBackendType::Native), _job_chaining_enabled(false),
      _job_chaining_size(), _job_chaining_count(0)
{
}

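// Returns the singleton scheduler, loading the OpenCL symbols exactly once on
// first use. A minimal usage sketch (the kernel object is illustrative):
//
//     CLScheduler::get().default_init();
//     CLScheduler::get().enqueue(kernel); // kernel: a configured ICLKernel
//     CLScheduler::get().sync();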
CLScheduler &CLScheduler::get()
{
    std::call_once(_initialize_symbols, opencl_is_available);
    static CLScheduler scheduler;
    return scheduler;
}

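// Initialises the scheduler and the CLKernelLibrary with a caller-provided context and device; a no-op if already initialised.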
void CLScheduler::default_init_with_context(cl::Device &device, cl::Context &ctx, ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_h)
{
    if(!_is_initialised)
    {
        const std::string cl_kernels_folder("./cl_kernels/");
        cl::CommandQueue  queue = cl::CommandQueue(ctx, device);
        CLKernelLibrary::get().init(cl_kernels_folder, ctx, device);
        init(ctx, queue, device, cl_tuner, gemm_h);
        _cl_tuner = cl_tuner;
    }
}

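// Creates an OpenCL context and device for the requested backend, then initialises both the
// CLKernelLibrary and the scheduler. The tuner and GEMM heuristics are set unconditionally,
// so they can be updated on an already-initialised scheduler.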
void CLScheduler::default_init(ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_h, CLBackendType cl_backend_type)
{
    if(!_is_initialised)
    {
        cl::Context ctx;
        cl::Device  dev;
        cl_int      err;
        std::tie(ctx, dev, err) = create_opencl_context_and_device(cl_backend_type);
        ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Failed to create OpenCL context");
        cl::CommandQueue queue = cl::CommandQueue(ctx, dev);
        CLKernelLibrary::get().init("./cl_kernels/", ctx, dev);
        init(ctx, queue, dev, cl_tuner, gemm_h);
    }

    // Set CL tuner and GEMM heuristics
    _cl_tuner        = cl_tuner;
    _gemm_heuristics = gemm_h;
}

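// Drops the initialised flag and runs default_init() again, creating a fresh context and queue.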
void CLScheduler::default_reinit(ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_h, CLBackendType cl_backend_type)
{
    _is_initialised = false;

    default_init(cl_tuner, gemm_h, cl_backend_type);
}

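// Stores the new context and propagates it to the CLKernelLibrary so kernels are built against the same context.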
void CLScheduler::set_context(cl::Context context)
{
    _context = std::move(context);
    CLKernelLibrary::get().set_context(_context);
}

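// Initialises the scheduler from caller-provided objects; note that CLKernelLibrary::get().init()
// must be called separately (default_init() takes care of both).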
void CLScheduler::init(cl::Context context, cl::CommandQueue queue, const cl::Device &device, ICLTuner *cl_tuner, CLGEMMHeuristicsHandle *gemm_h, CLBackendType cl_backend_type)
{
    set_context(std::move(context));
    _queue           = std::move(queue);
    _target          = get_target_from_device(device);
    _is_initialised  = true;
    _cl_tuner        = cl_tuner;
    _gemm_heuristics = gemm_h;
    _backend_type    = cl_backend_type;
}

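// Shared implementation of enqueue()/enqueue_op(): optionally tunes the kernel, runs it (injecting
// the tensor pack when a non-empty one is provided), and flushes the queue according to the
// flush/job-chaining policy.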
void CLScheduler::enqueue_common(ICLKernel &kernel, ITensorPack &tensors, bool flush)
{
    ARM_COMPUTE_ERROR_ON_MSG(!_is_initialised,
                             "The CLScheduler is not initialised yet! Please call the CLScheduler::get().default_init(), \
                             or CLScheduler::get().init() and CLKernelLibrary::get().init() functions before running functions!");

    const bool inject_memory = !tensors.empty();

    // Tune the kernel if the CLTuner has been provided
    if(_cl_tuner != nullptr)
    {
        inject_memory ? _cl_tuner->tune_kernel_dynamic(kernel, tensors) : _cl_tuner->tune_kernel_dynamic(kernel);
    }

    // Run kernel
    inject_memory ? kernel.run_op(tensors, kernel.window(), _queue) : kernel.run(kernel.window(), _queue);
    if(_job_chaining_enabled)
    {
        ++_job_chaining_count;
    }

    flush_queue(flush);
}

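// With job chaining enabled, flushes only after _job_chaining_size kernels have been enqueued;
// otherwise honours the caller's flush request.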
void CLScheduler::flush_queue(bool flush)
{
    if(_job_chaining_enabled)
    {
        if(_job_chaining_count >= _job_chaining_size)
        {
            _job_chaining_count = 0;
            _queue.flush();
        }
    }
    else if(flush)
    {
        _queue.flush();
    }
}

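// Enqueues a kernel that carries its own tensors (no memory injection).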
void CLScheduler::enqueue(ICLKernel &kernel, bool flush)
{
    ITensorPack pack;
    enqueue_common(kernel, pack, flush);
}

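// Enqueues a kernel with the given tensor pack injected at run time.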
void CLScheduler::enqueue_op(ICLKernel &kernel, ITensorPack &tensors, bool flush)
{
    enqueue_common(kernel, tensors, flush);
}

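// Enables job chaining: instead of flushing per kernel, the queue is flushed once every
// job_chaining_size enqueued kernels to reduce flush overhead.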
void CLScheduler::enable_job_chaining(int job_chaining_size)
{
    _job_chaining_enabled = true;
    _job_chaining_size    = job_chaining_size;
}
} // namespace arm_compute