//===-- llvm/Support/ThreadPool.cpp - A ThreadPool implementation -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//

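// Example usage (illustrative sketch only; ThreadPool::async is the templated
// entry point declared in llvm/Support/ThreadPool.h and forwards to the
// asyncImpl() definitions below):
//
//   ThreadPool Pool(4);
//   auto Future = Pool.async([] { /* do some work */ });
//   Future.wait(); // Wait for this particular task.
//   Pool.wait();   // Or block until every queued task has completed.
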
#include "llvm/Support/ThreadPool.h"

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#if LLVM_ENABLE_THREADS

// Default to std::thread::hardware_concurrency
ThreadPool::ThreadPool() : ThreadPool(std::thread::hardware_concurrency()) {}

ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0), EnableFlag(true) {
  // Create ThreadCount threads that will loop forever, waiting on
  // QueueCondition for tasks to be queued or for the Pool to be destroyed.
  Threads.reserve(ThreadCount);
  for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
    Threads.emplace_back([&] {
      while (true) {
        PackagedTaskTy Task;
        {
          std::unique_lock<std::mutex> LockGuard(QueueLock);
          // Wait for tasks to be pushed in the queue
          QueueCondition.wait(LockGuard,
                              [&] { return !EnableFlag || !Tasks.empty(); });
          // Exit condition
          if (!EnableFlag && Tasks.empty())
            return;
          // Yeah, we have a task, grab it and release the lock on the queue

          // We first need to signal that we are active before popping the queue
          // in order for wait() to properly detect that even if the queue is
          // empty, there is still a task in flight.
          {
            ++ActiveThreads;
            std::unique_lock<std::mutex> LockGuard(CompletionLock);
          }
          Task = std::move(Tasks.front());
          Tasks.pop();
        }
        // Run the task we just grabbed
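        // Note: PackagedTaskTy is std::packaged_task<void()> except on MSVC,
        // where the header falls back to std::packaged_task<bool(bool)>
        // (working around MSVC 2013's handling of packaged_task<void()>),
        // hence the dummy boolean argument below.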
#ifndef _MSC_VER
        Task();
#else
        Task(/* unused */ false);
#endif

        {
          // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
          std::unique_lock<std::mutex> LockGuard(CompletionLock);
          --ActiveThreads;
        }

        // Notify task completion, in case someone waits on ThreadPool::wait()
        CompletionCondition.notify_all();
      }
    });
  }
}

void ThreadPool::wait() {
  // Wait for all threads to complete and the queue to be empty
  std::unique_lock<std::mutex> LockGuard(CompletionLock);
  CompletionCondition.wait(LockGuard,
                           [&] { return Tasks.empty() && !ActiveThreads; });
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
  // Wrap the Task in a packaged_task to return a future object.
  PackagedTaskTy PackagedTask(std::move(Task));
  auto Future = PackagedTask.get_future();
  {
    // Lock the queue and push the new task
    std::unique_lock<std::mutex> LockGuard(QueueLock);

    // Don't allow enqueueing after disabling the pool
    assert(EnableFlag && "Queuing a task during ThreadPool destruction");

    Tasks.push(std::move(PackagedTask));
  }
  QueueCondition.notify_one();
  return Future.share();
}

// The destructor joins all threads, waiting for completion.
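// Note that a worker thread only exits once EnableFlag is false *and* the
// queue is empty, so any tasks still queued when the destructor runs are
// executed before the threads are joined.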
ThreadPool::~ThreadPool() {
  {
    std::unique_lock<std::mutex> LockGuard(QueueLock);
    EnableFlag = false;
  }
  QueueCondition.notify_all();
  for (auto &Worker : Threads)
    Worker.join();
}

#else // LLVM_ENABLE_THREADS Disabled

ThreadPool::ThreadPool() : ThreadPool(0) {}

// No threads are launched; issue a warning if ThreadCount is not 0
ThreadPool::ThreadPool(unsigned ThreadCount)
    : ActiveThreads(0) {
  if (ThreadCount) {
    errs() << "Warning: requested a ThreadPool with " << ThreadCount
           << " threads, but LLVM_ENABLE_THREADS has been turned off\n";
  }
}

void ThreadPool::wait() {
  // Sequential implementation running the tasks
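  // Each queued task is a wrapper that calls get() on a deferred future (see
  // asyncImpl below), so the user's function actually runs here, on the
  // calling thread.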
  while (!Tasks.empty()) {
    auto Task = std::move(Tasks.front());
    Tasks.pop();
#ifndef _MSC_VER
    Task();
#else
    Task(/* unused */ false);
#endif
  }
}

std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
#ifndef _MSC_VER
  // Get a Future with launch::deferred execution using std::async
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
  // Wrap the future so that both ThreadPool::wait() can operate and the
  // returned future can be sync'ed on.
  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
#else
  auto Future =
      std::async(std::launch::deferred, std::move(Task), false).share();
  PackagedTaskTy PackagedTask(
      [Future](bool) -> bool { Future.get(); return false; });
#endif
  Tasks.push(std::move(PackagedTask));
  return Future;
}

ThreadPool::~ThreadPool() {
  wait();
}

#endif