/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/cpp/thread_manager/thread_manager.h"

#include <climits>
#include <mutex>

#include <grpc/support/log.h>
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/exec_ctx.h"

namespace grpc {

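// Creates and immediately starts the underlying thread; the new thread
// executes Run().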
ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr)
    : thd_mgr_(thd_mgr) {
  // Make thread creation exclusive with respect to its join happening in
  // ~WorkerThread().
  thd_ = grpc_core::Thread(
      "grpcpp_sync_server",
      [](void* th) { static_cast<ThreadManager::WorkerThread*>(th)->Run(); },
      this);
  thd_.Start();
}

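// Thread body: run the manager's main work loop, then report completion so
// the thread can later be joined and deleted.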
void ThreadManager::WorkerThread::Run() {
  thd_mgr_->MainWorkLoop();
  thd_mgr_->MarkAsCompleted(this);
}

ThreadManager::WorkerThread::~WorkerThread() {
  // Don't join until the thread is fully constructed.
  thd_.Join();
}

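// A max_pollers value of -1 means the number of polling threads is
// effectively unbounded (INT_MAX).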
ThreadManager::ThreadManager(const char* name,
                             grpc_resource_quota* resource_quota,
                             int min_pollers, int max_pollers)
    : shutdown_(false),
      num_pollers_(0),
      min_pollers_(min_pollers),
      max_pollers_(max_pollers == -1 ? INT_MAX : max_pollers),
      num_threads_(0),
      max_active_threads_sofar_(0) {
  resource_user_ = grpc_resource_user_create(resource_quota, name);
}

ThreadManager::~ThreadManager() {
  {
    std::lock_guard<std::mutex> lock(mu_);
    GPR_ASSERT(num_threads_ == 0);
  }

  grpc_core::ExecCtx exec_ctx;  // grpc_resource_user_unref needs an exec_ctx
  grpc_resource_user_unref(resource_user_);
  CleanupCompletedThreads();
}

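// Blocks until all worker threads have finished (num_threads_ drops to zero).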
void ThreadManager::Wait() {
  std::unique_lock<std::mutex> lock(mu_);
  while (num_threads_ != 0) {
    shutdown_cv_.wait(lock);
  }
}

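// Only sets the shutdown flag; worker threads notice it in MainWorkLoop() and
// wind down on their own.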
void ThreadManager::Shutdown() {
  std::lock_guard<std::mutex> lock(mu_);
  shutdown_ = true;
}

bool ThreadManager::IsShutdown() {
  std::lock_guard<std::mutex> lock(mu_);
  return shutdown_;
}

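// Returns the largest number of threads that have been active at the same
// time since Initialize().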
int ThreadManager::GetMaxActiveThreadsSoFar() {
  std::lock_guard<std::mutex> list_lock(list_mu_);
  return max_active_threads_sofar_;
}

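// Called by a worker thread when it is done: queue the thread for cleanup,
// decrement the live-thread count, and return its quota to the resource user.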
void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
  {
    std::lock_guard<std::mutex> list_lock(list_mu_);
    completed_threads_.push_back(thd);
  }

  {
    std::lock_guard<std::mutex> lock(mu_);
    num_threads_--;
    if (num_threads_ == 0) {
      shutdown_cv_.notify_one();
    }
  }

  // Give a thread back to the resource quota
  grpc_resource_user_free_threads(resource_user_, 1);
}

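// Deletes worker threads that have already finished; deleting a WorkerThread
// joins its underlying thread.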
void ThreadManager::CleanupCompletedThreads() {
  std::list<WorkerThread*> completed_threads;
  {
    // swap out the completed threads list: allows other threads to clean up
    // more quickly
    std::unique_lock<std::mutex> lock(list_mu_);
    completed_threads.swap(completed_threads_);
  }
  for (auto thd : completed_threads) delete thd;
}

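// Reserves quota for min_pollers_ threads and starts them; aborts if the
// resource quota cannot supply even the minimum.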
void ThreadManager::Initialize() {
  if (!grpc_resource_user_allocate_threads(resource_user_, min_pollers_)) {
    gpr_log(GPR_ERROR,
            "No thread quota available to even create the minimum required "
            "polling threads (i.e %d). Unable to start the thread manager",
            min_pollers_);
    abort();
  }

  {
    std::unique_lock<std::mutex> lock(mu_);
    num_pollers_ = min_pollers_;
    num_threads_ = min_pollers_;
    max_active_threads_sofar_ = min_pollers_;
  }

  for (int i = 0; i < min_pollers_; i++) {
    new WorkerThread(this);
  }
}

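// The loop each worker thread runs: poll for work, adjust the poller count,
// do the application work, then decide whether to keep polling or exit.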
void ThreadManager::MainWorkLoop() {
  while (true) {
    void* tag;
    bool ok;
    WorkStatus work_status = PollForWork(&tag, &ok);

    std::unique_lock<std::mutex> lock(mu_);
    // Reduce the number of pollers by 1 and check what happened with the poll
    num_pollers_--;
    bool done = false;
    switch (work_status) {
      case TIMEOUT:
        // If we timed out and we have more pollers than we need (or we are
        // shutdown), finish this thread
        if (shutdown_ || num_pollers_ > max_pollers_) done = true;
        break;
      case SHUTDOWN:
        // If the thread manager is shutdown, finish this thread
        done = true;
        break;
      case WORK_FOUND:
        // If we got work and there are now insufficient pollers and there is
        // quota available to create a new thread, start a new poller thread
        bool resource_exhausted = false;
        if (!shutdown_ && num_pollers_ < min_pollers_) {
          if (grpc_resource_user_allocate_threads(resource_user_, 1)) {
            // We can allocate a new poller thread
            num_pollers_++;
            num_threads_++;
            if (num_threads_ > max_active_threads_sofar_) {
              max_active_threads_sofar_ = num_threads_;
            }
            // Drop the lock before spawning the thread to avoid contention
            lock.unlock();
            new WorkerThread(this);
          } else if (num_pollers_ > 0) {
            // There is still at least one thread polling, so we can go on
            // even though we are below the number of pollers we would like to
            // have (min_pollers_)
            lock.unlock();
          } else {
            // There are no pollers to spare and we couldn't allocate a new
            // thread, so resources are exhausted!
            lock.unlock();
            resource_exhausted = true;
          }
        } else {
          // There are a sufficient number of pollers available, so we can do
          // the work and continue polling with our existing poller threads
          lock.unlock();
        }
        // The lock is always released at this point - do the application work
        // or return resource exhausted if there is new work but we couldn't
        // get a thread in which to do it.
        DoWork(tag, ok, !resource_exhausted);
        // Take the lock again to check post conditions
        lock.lock();
        // If we're shutdown, we should finish at this point.
        if (shutdown_) done = true;
        break;
    }
    // If we decided to finish the thread, break out of the while loop
    if (done) break;

    // Otherwise go back to polling as long as it doesn't exceed max_pollers_
    //
    // **WARNING**:
    // There is a possibility of threads thrashing here (i.e. more thread
    // shutdowns and creations than in the ideal case). This happens if
    // max_pollers_ is small and the rate of incoming requests is also small.
    // In such scenarios we can possibly configure max_pollers_ to a higher
    // value and/or increase the cq timeout.
    //
    // However, not doing this check here and unconditionally incrementing
    // num_pollers_ (and hoping that the system will eventually settle down)
    // has far worse consequences, i.e. a huge number of threads getting
    // created to the point of thread exhaustion. For example: if the incoming
    // request rate is very high, all the polling threads will return very
    // quickly from PollForWork() with WORK_FOUND. They all briefly decrement
    // the num_pollers_ counter, thereby possibly - and briefly - making it go
    // below min_pollers_; this will most likely result in the creation of a
    // new poller since num_pollers_ dipped below min_pollers_.
    //
    // Now, if we didn't do the max_pollers_ check here, all these threads
    // would go back to doing PollForWork() and the whole cycle would repeat
    // (with a new thread being added in each cycle). Once the total number of
    // threads in the system crosses a certain threshold (around ~1500), there
    // is heavy contention on mutexes (the mu_ here or the mutexes in gRPC
    // core like the pollset mutex) that makes DoWork() take longer to finish,
    // thereby causing new poller threads to be created even faster. This
    // results in a thread avalanche.
    if (num_pollers_ < max_pollers_) {
      num_pollers_++;
    } else {
      break;
    }
  }

  // This thread is exiting. Do some cleanup work, i.e. delete already
  // completed worker threads.
  CleanupCompletedThreads();

  // If we are here, either the ThreadManager is shutting down or it already
  // has enough threads.
}

}  // namespace grpc