/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CPP/CPPScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
#include "src/runtime/CPUUtils.h"
#include "support/MemorySupport.h"
#include "support/Mutex.h"

#include <atomic>
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <sched.h> // Assumed needed here for cpu_set_t / CPU_SET / sched_setaffinity; may also arrive transitively
#include <system_error>
#include <thread>

namespace arm_compute
{
namespace
{
class ThreadFeeder
{
public:
    /** Constructor
     *
     * @param[in] start First value that will be returned by the feeder
     * @param[in] end   End condition (the last value returned by get_next() will be end - 1)
     */
    explicit ThreadFeeder(unsigned int start = 0, unsigned int end = 0)
        : _atomic_counter(start), _end(end)
    {
    }
    /** Return the next element in the range if there is one.
     *
     * @param[out] next Will contain the next element if there is one.
     *
     * @return False if the end of the range has been reached and next wasn't set.
     */
    bool get_next(unsigned int &next)
    {
        next = atomic_fetch_add_explicit(&_atomic_counter, 1u, std::memory_order_relaxed);
        return next < _end;
    }

private:
    std::atomic_uint   _atomic_counter;
    const unsigned int _end;
};
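
/* Illustrative sketch (not part of the original source): how a ThreadFeeder
 * doles out a range of workload indices. A feeder built as ThreadFeeder(4, 10)
 * returns 4, 5, ..., 9 exactly once across all threads that poll it:
 *
 *   ThreadFeeder feeder(4, 10);
 *   unsigned int idx = 0;
 *   while(feeder.get_next(idx))
 *   {
 *       // each value in [4, 10) is observed by exactly one caller
 *   }
 */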

/** Execute workloads[info.thread_id] first, then call the feeder to get the index of the next workload to run.
 *
 * Will run workloads until the feeder reaches the end of its range.
 *
 * @param[in]     workloads The array of workloads
 * @param[in,out] feeder    The feeder indicating which workload to execute next.
 * @param[in]     info      Threading and CPU info.
 */
void process_workloads(std::vector<IScheduler::Workload> &workloads, ThreadFeeder &feeder, const ThreadInfo &info)
{
    unsigned int workload_index = info.thread_id;
    do
    {
        ARM_COMPUTE_ERROR_ON(workload_index >= workloads.size());
        workloads[workload_index](info);
    }
    while(feeder.get_next(workload_index));
}
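
// Illustrative note (not in the original): with 4 threads and 10 workloads, the
// thread with thread_id == 2 runs workloads[2] first; the shared feeder then
// hands out the remaining indices dynamically, so faster threads pick up more
// of the tail of the range.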

void set_thread_affinity(int core_id)
{
    if(core_id < 0)
    {
        return;
    }

    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(core_id, &set);
    ARM_COMPUTE_EXIT_ON_MSG(sched_setaffinity(0, sizeof(set), &set),
                            "Error setting thread affinity");
}
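
// Illustrative note (not in the original): sched_setaffinity() with pid 0 pins
// the *calling* thread, which is why each worker invokes set_thread_affinity()
// on itself at the top of worker_thread() below.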

class Thread final
{
public:
    /** Start a new thread
     *
     * The thread will be pinned to the given core id if the value is non-negative.
     *
     * @param[in] core_pin Core id to pin the thread on. If negative, no thread pinning will take place.
     */
    explicit Thread(int core_pin = -1);

    Thread(const Thread &) = delete;
    Thread &operator=(const Thread &) = delete;
    Thread(Thread &&)                 = delete;
    Thread &operator=(Thread &&) = delete;

    /** Destructor. Make the thread join. */
    ~Thread();

    /** Request the worker thread to start executing workloads.
     *
     * The thread will start by executing workloads[info.thread_id] and will then call the feeder to
     * get the index of the following workload to run.
     *
     * @note This function will return as soon as the workloads have been sent to the worker thread.
     * wait() needs to be called to ensure the execution is complete.
     */
    void start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info);

    /** Wait for the current kernel execution to complete. */
    void wait();

    /** Function run by the worker thread. */
    void worker_thread();

private:
    std::thread                        _thread{};
    ThreadInfo                         _info{};
    std::vector<IScheduler::Workload> *_workloads{ nullptr };
    ThreadFeeder                      *_feeder{ nullptr };
    std::mutex                         _m{};
    std::condition_variable            _cv{};
    bool                               _wait_for_work{ false };
    bool                               _job_complete{ true };
    std::exception_ptr                 _current_exception{ nullptr };
    int                                _core_pin{ -1 };
};
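
/* Illustrative sketch (not part of the original source): the intended
 * start()/wait() pairing, mirroring what run_workloads() does further down.
 * Exceptions thrown inside a workload are captured by the worker and rethrown
 * from wait():
 *
 *   Thread worker;                           // spawns the thread, unpinned
 *   worker.start(&workloads, feeder, info);  // returns immediately
 *   worker.wait();                           // blocks; rethrows worker exceptions
 */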

Thread::Thread(int core_pin)
    : _core_pin(core_pin)
{
    _thread = std::thread(&Thread::worker_thread, this);
}

Thread::~Thread()
{
    // Make sure worker thread has ended
    if(_thread.joinable())
    {
        ThreadFeeder feeder;
        // A null workload list is the sentinel that tells worker_thread() to return
        start(nullptr, feeder, ThreadInfo());
        _thread.join();
    }
}

void Thread::start(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info)
{
    _workloads = workloads;
    _feeder    = &feeder;
    _info      = info;
    {
        std::lock_guard<std::mutex> lock(_m);
        _wait_for_work = true;
        _job_complete  = false;
    }
    // Notify after releasing the lock so the woken worker doesn't immediately block on _m
    _cv.notify_one();
}

void Thread::wait()
{
    {
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [&] { return _job_complete; });
    }

    if(_current_exception)
    {
        std::rethrow_exception(_current_exception);
    }
}

void Thread::worker_thread()
{
    set_thread_affinity(_core_pin);

    while(true)
    {
        std::unique_lock<std::mutex> lock(_m);
        // The predicate guards against spurious wakeups: only proceed once start() has published work
        _cv.wait(lock, [&] { return _wait_for_work; });
        _wait_for_work = false;

        _current_exception = nullptr;

        // Time to exit
        if(_workloads == nullptr)
        {
            return;
        }

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        try
        {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
            process_workloads(*_workloads, *_feeder, _info);

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        }
        catch(...)
        {
            _current_exception = std::current_exception();
        }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        _job_complete = true;
        lock.unlock();
        _cv.notify_one();
    }
}
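
/* Handshake summary (illustrative, not in the original): start() sets
 * _wait_for_work under _m and notifies; the worker wakes, runs the workloads,
 * then sets _job_complete under _m and notifies back; wait() blocks until
 * _job_complete is true. Passing a null workload list through start() is the
 * exit signal used by the destructor.
 */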
} // namespace

struct CPPScheduler::Impl final
{
    explicit Impl(unsigned int thread_hint)
        : _num_threads(thread_hint), _threads(_num_threads - 1)
    {
    }
    void set_num_threads(unsigned int num_threads, unsigned int thread_hint)
    {
        _num_threads = num_threads == 0 ? thread_hint : num_threads;
        // The calling thread processes a share of the workloads itself, so only N - 1 workers are kept
        _threads.resize(_num_threads - 1);
    }
    void set_num_threads_with_affinity(unsigned int num_threads, unsigned int thread_hint, BindFunc func)
    {
        _num_threads = num_threads == 0 ? thread_hint : num_threads;

        // Set affinity on main thread
        set_thread_affinity(func(0, thread_hint));

        // Set affinity on worker threads
        _threads.clear();
        for(auto i = 1U; i < _num_threads; ++i)
        {
            _threads.emplace_back(func(i, thread_hint));
        }
    }
    unsigned int num_threads() const
    {
        return _num_threads;
    }

    void run_workloads(std::vector<IScheduler::Workload> &workloads);

    unsigned int       _num_threads;
    std::list<Thread>  _threads;
    arm_compute::Mutex _run_workloads_mutex{};
};

/*
 * This singleton has been deprecated and will be removed in the next release
 */
CPPScheduler &CPPScheduler::get()
{
    static CPPScheduler scheduler;
    return scheduler;
}

CPPScheduler::CPPScheduler()
    : _impl(support::cpp14::make_unique<Impl>(num_threads_hint()))
{
}

CPPScheduler::~CPPScheduler() = default;

void CPPScheduler::set_num_threads(unsigned int num_threads)
{
    // No changes in the number of threads while current workloads are running
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    _impl->set_num_threads(num_threads, num_threads_hint());
}

void CPPScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
{
    // No changes in the number of threads while current workloads are running
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    _impl->set_num_threads_with_affinity(num_threads, num_threads_hint(), func);
}
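
/* Illustrative sketch (not part of the original source): judging from the calls
 * above, a BindFunc is invoked as func(thread_index, thread_hint) and returns a
 * core id (negative means "don't pin"). A simple round-robin binding could look
 * like:
 *
 *   auto round_robin = [](int thread_index, int num_cores) {
 *       return thread_index % num_cores;
 *   };
 *   CPPScheduler::get().set_num_threads_with_affinity(4, round_robin);
 */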

unsigned int CPPScheduler::num_threads() const
{
    return _impl->num_threads();
}

#ifndef DOXYGEN_SKIP_THIS
void CPPScheduler::run_workloads(std::vector<IScheduler::Workload> &workloads)
{
    // Mutex to ensure other threads won't interfere with the setup of the current thread's workloads:
    // other threads' workloads will be scheduled after the current thread's workloads have finished.
    // This is not ideal, because different threads' workloads won't run in parallel, but at least they
    // won't interfere with each other or deadlock.
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    const unsigned int num_threads = std::min(_impl->num_threads(), static_cast<unsigned int>(workloads.size()));
    if(num_threads < 1)
    {
        return;
    }
    // Each thread executes workloads[thread_id] first, so the feeder starts handing out indices at num_threads
    ThreadFeeder feeder(num_threads, workloads.size());
    ThreadInfo   info;
    info.cpu_info    = &_cpu_info;
    info.num_threads = num_threads;
    unsigned int t         = 0;
    auto         thread_it = _impl->_threads.begin();
    for(; t < num_threads - 1; ++t, ++thread_it)
    {
        info.thread_id = t;
        thread_it->start(&workloads, feeder, info);
    }

    // The calling thread acts as the last worker
    info.thread_id = t;
    process_workloads(workloads, feeder, info);
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    try
    {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        for(auto &thread : _impl->_threads)
        {
            thread.wait();
        }
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    }
    catch(const std::system_error &e)
    {
        std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
    }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
}
#endif /* DOXYGEN_SKIP_THIS */
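
/* Worked example for run_workloads() above (illustrative, not in the original):
 * with 10 workloads and 4 threads, the three worker Threads are started with
 * thread ids 0..2 and run workloads[0..2], the caller runs workloads[3], and
 * the ThreadFeeder then distributes indices 4..9 dynamically to whichever
 * thread finishes first.
 */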

void CPPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors)
{
    schedule_common(kernel, hints, tensors);
}

void CPPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
{
    ITensorPack tensors;
    schedule_common(kernel, hints, tensors);
}
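
/* Illustrative sketch (not part of the original source): typical client code
 * hands a configured kernel to the scheduler and picks the window dimension to
 * split across threads (Window::DimY here is just an example):
 *
 *   CPPScheduler::get().schedule(&kernel, IScheduler::Hints(Window::DimY));
 */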
} // namespace arm_compute