/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/executor.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"

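// Maximum number of closures that may sit queued on a single executor thread
// before Enqueue() treats the backlog as a hint to spawn another thread (see
// Executor::Enqueue below).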
#define MAX_DEPTH 2

#define EXECUTOR_TRACE(format, ...)                       \
  do {                                                    \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) {        \
      gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
    }                                                     \
  } while (0)

#define EXECUTOR_TRACE0(str)                       \
  do {                                             \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) { \
      gpr_log(GPR_INFO, "EXECUTOR " str);          \
    }                                              \
  } while (0)
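
// Both macros above log only when the "executor" tracer is enabled
// (typically via the GRPC_TRACE=executor environment variable).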

namespace grpc_core {
namespace {

GPR_TLS_DECL(g_this_thread_state);

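// Global executor instances, indexed by ExecutorType; created by InitAll()
// and destroyed by ShutdownAll().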
Executor* executors[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)];

void default_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, true /* is_short */);
}

void default_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, false /* is_short */);
}

void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, true /* is_short */);
}

void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, false /* is_short */);
}

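// Dispatch table used by Executor::Run(), indexed by
// [executor_type][job_type].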
using EnqueueFunc = void (*)(grpc_closure* closure, grpc_error* error);

const EnqueueFunc
    executor_enqueue_fns_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
                         [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] =
                             {{default_enqueue_short, default_enqueue_long},
                              {resolver_enqueue_short, resolver_enqueue_long}};

}  // namespace

TraceFlag executor_trace(false, "executor");

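// The thread pool is capped at twice the number of CPU cores (minimum one);
// threads beyond the first are only spawned on demand by Enqueue().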
Executor::Executor(const char* name) : name_(name) {
  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  gpr_atm_rel_store(&num_threads_, 0);
  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}

void Executor::Init() { SetThreading(true); }

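// Runs every closure in 'list' and returns the number executed; ThreadMain()
// subtracts this count from the owning thread's depth counter.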
size_t Executor::RunClosures(const char* executor_name,
                             grpc_closure_list list) {
  size_t n = 0;

  // In the executor, the ExecCtx for the thread is declared in the executor
  // thread itself, but this is the point where we could start seeing
  // application-level callbacks. No need to create a new ExecCtx, though,
  // since there already is one and it is flushed (but not destructed) in this
  // function itself. The ApplicationCallbackExecCtx will have its callbacks
  // invoked on its destruction, which will be after completing any closures in
  // the executor's closure list (which were explicitly scheduled onto the
  // executor).
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx(
      GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  grpc_closure* c = list.head;
  while (c != nullptr) {
    grpc_closure* next = c->next_data.next;
    grpc_error* error = c->error_data.error;
#ifndef NDEBUG
    EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
                   c->file_created, c->line_created);
    c->scheduled = false;
#else
    EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
    c->cb(c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_core::ExecCtx::Get()->Flush();
  }

  return n;
}

bool Executor::IsThreaded() const {
  return gpr_atm_acq_load(&num_threads_) > 0;
}

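// SetThreading(true) starts a single thread; Enqueue() grows the pool on
// demand up to max_threads_. SetThreading(false) signals every thread to shut
// down, joins them, and drains any closures still queued.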
void Executor::SetThreading(bool threading) {
  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);

  if (threading) {
    if (curr_num_threads > 0) {
      EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
      return;
    }

    GPR_ASSERT(num_threads_ == 0);
    gpr_atm_rel_store(&num_threads_, 1);
    thd_state_ = static_cast<ThreadState*>(
        gpr_zalloc(sizeof(ThreadState) * max_threads_));

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_init(&thd_state_[i].mu);
      gpr_cv_init(&thd_state_[i].cv);
      thd_state_[i].id = i;
      thd_state_[i].name = name_;
      thd_state_[i].thd = grpc_core::Thread();
      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
    }

    thd_state_[0].thd =
        grpc_core::Thread(name_, &Executor::ThreadMain, &thd_state_[0]);
    thd_state_[0].thd.Start();
  } else {  // !threading
    if (curr_num_threads == 0) {
      EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
      return;
    }

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_lock(&thd_state_[i].mu);
      thd_state_[i].shutdown = true;
      gpr_cv_signal(&thd_state_[i].cv);
      gpr_mu_unlock(&thd_state_[i].mu);
    }

    /* Ensure no thread is adding a new thread. Once this is past, no thread
     * will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&adding_thread_lock_);
    gpr_spinlock_unlock(&adding_thread_lock_);

    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
    for (gpr_atm i = 0; i < curr_num_threads; i++) {
      thd_state_[i].thd.Join();
      EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
                     i + 1, curr_num_threads);
    }

    gpr_atm_rel_store(&num_threads_, 0);
    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_destroy(&thd_state_[i].mu);
      gpr_cv_destroy(&thd_state_[i].cv);
      RunClosures(thd_state_[i].name, thd_state_[i].elems);
    }

    gpr_free(thd_state_);

    // grpc_iomgr_shutdown_background_closure() will close all the registered
    // fds in the background poller and wait for all pending closures to
    // finish. Thus, never call Executor::SetThreading(false) in the middle of
    // an application.
    // TODO(guantaol): create another method to finish all the pending closures
    // registered in the background poller by grpc_core::Executor.
    grpc_iomgr_shutdown_background_closure();
  }

  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}

void Executor::Shutdown() { SetThreading(false); }

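// Worker loop: each executor thread parks on its ThreadState's condition
// variable and drains its closure list in batches via RunClosures().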
void Executor::ThreadMain(void* arg) {
  ThreadState* ts = static_cast<ThreadState*>(arg);
  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  size_t subtract_depth = 0;
  for (;;) {
    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
                   ts->name, ts->id, subtract_depth);

    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    // Wait for closures to be enqueued or for the executor to be shut down
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }

    if (ts->shutdown) {
      EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
      gpr_mu_unlock(&ts->mu);
      break;
    }

    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
    grpc_closure_list closures = ts->elems;
    ts->elems = GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);

    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);

    grpc_core::ExecCtx::Get()->InvalidateNow();
    subtract_depth = RunClosures(ts->name, closures);
  }

  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(nullptr));
}

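// Scheduling policy: pick a thread state (the caller's own, if the caller is
// itself an executor thread), skip any thread that already has a long job
// queued, and fall back to spawning a new thread or retrying the push when
// every queue is busy.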
void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
                       bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
  }

  do {
    retry_push = false;
    size_t cur_thread_count =
        static_cast<size_t>(gpr_atm_acq_load(&num_threads_));

    // If the number of threads is zero (i.e. the executor is either not
    // threaded or already shut down), queue the closure on the exec context
    // itself
    if (cur_thread_count == 0) {
#ifndef NDEBUG
      EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
                     closure->file_created, closure->line_created);
#else
      EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                               closure, error);
      return;
    }

    if (grpc_iomgr_add_closure_to_background_poller(closure, error)) {
      return;
    }

    ThreadState* ts =
        reinterpret_cast<ThreadState*>(gpr_tls_get(&g_this_thread_state));
    if (ts == nullptr) {
      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
                                        cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
    }

    ThreadState* orig_ts = ts;
    bool try_new_thread = false;

    for (;;) {
#ifndef NDEBUG
      EXECUTOR_TRACE(
          "(%s) try to schedule %p (%s) (created %s:%d) to thread "
          "%" PRIdPTR,
          name_, closure, is_short ? "short" : "long", closure->file_created,
          closure->line_created, ts->id);
#else
      EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
                     closure, is_short ? "short" : "long", ts->id);
#endif

      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // If there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation). Spin through queues and try again
        gpr_mu_unlock(&ts->mu);
        size_t idx = ts->id;
        ts = &thd_state_[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          // We cycled through all the threads. Retry the enqueue by creating
          // a new thread
          //
          // TODO (sreek): There is a potential issue here. We are
          // unconditionally setting try_new_thread to true here. What if the
          // executor is shut down OR cur_thread_count is already equal to
          // max_threads?
          // (Fortunately, this is not an issue yet (as of July 2018) because
          // there is only one instance of a long job in gRPC and hence we
          // will not hit this code path)
          retry_push = true;
          try_new_thread = true;
          break;
        }

        continue;  // Try the next thread-state
      }

      // == Found the thread state (i.e. thread) to enqueue this closure! ==

      // Also, if this thread has been waiting for closures, wake it up.
      // - If grpc_closure_list_empty() is true and the Executor is not
      //   shutdown, it means that the thread must be waiting in ThreadMain()
      // - Note that gpr_cv_signal() won't immediately wake up the thread. That
      //   happens after we release the mutex &ts->mu a few lines below
      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
        gpr_cv_signal(&ts->cv);
      }

      grpc_closure_list_append(&ts->elems, closure, error);

      // If we have already queued more than MAX_DEPTH closures on this
      // thread, use that as a hint to create more threads
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < max_threads_ && !ts->shutdown;

      ts->queued_long_job = !is_short;

      gpr_mu_unlock(&ts->mu);
      break;
    }

    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
      cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
      if (cur_thread_count < max_threads_) {
        // Increment num_threads (safe to do a store instead of a CAS because
        // we always increment num_threads under the 'adding_thread_lock')
        gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);

        thd_state_[cur_thread_count].thd = grpc_core::Thread(
            name_, &Executor::ThreadMain, &thd_state_[cur_thread_count]);
        thd_state_[cur_thread_count].thd.Start();
      }
      gpr_spinlock_unlock(&adding_thread_lock_);
    }

    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
    }
  } while (retry_push);
}

// Executor::InitAll() and Executor::ShutdownAll() are called on the
// grpc_init() and grpc_shutdown() code paths, which are protected by a
// global mutex, so it is safe to assume that these functions are thread-safe.
void Executor::InitAll() {
  EXECUTOR_TRACE0("Executor::InitAll() enter");

  // Return if Executor::InitAll() was already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
      new Executor("default-executor");
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
      new Executor("resolver-executor");

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();

  EXECUTOR_TRACE0("Executor::InitAll() done");
}

void Executor::Run(grpc_closure* closure, grpc_error* error,
                   ExecutorType executor_type, ExecutorJobType job_type) {
  executor_enqueue_fns_[static_cast<size_t>(executor_type)]
                       [static_cast<size_t>(job_type)](closure, error);
}
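
// A minimal usage sketch (hypothetical callback, for illustration only;
// assumes the four-argument GRPC_CLOSURE_INIT form used in this code base):
//
//   static void on_done(void* arg, grpc_error* error) { /* ... */ }
//
//   grpc_closure done;
//   GRPC_CLOSURE_INIT(&done, on_done, /*cb_arg=*/nullptr,
//                     grpc_schedule_on_exec_ctx);
//   grpc_core::Executor::Run(&done, GRPC_ERROR_NONE,
//                            grpc_core::ExecutorType::DEFAULT,
//                            grpc_core::ExecutorJobType::SHORT);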

void Executor::ShutdownAll() {
  EXECUTOR_TRACE0("Executor::ShutdownAll() enter");

  // Return if Executor::ShutdownAll() was already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] == nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] ==
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Shutdown();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Shutdown();

  // Delete the executor objects.
  //
  // NOTE: It is important to call Shutdown() on all executors before calling
  // delete, because it is possible for one executor (that is not shut down
  // yet) to call Enqueue() on a different executor which is already shut
  // down. This is legal; in such cases the Enqueue() operation effectively
  // "fails" and enqueues that closure on the calling thread's exec_ctx.
  //
  // By ensuring that all executors are shut down first, we are also ensuring
  // that no thread is active across all executors.

  delete executors[static_cast<size_t>(ExecutorType::DEFAULT)];
  delete executors[static_cast<size_t>(ExecutorType::RESOLVER)];
  executors[static_cast<size_t>(ExecutorType::DEFAULT)] = nullptr;
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] = nullptr;

  EXECUTOR_TRACE0("Executor::ShutdownAll() done");
}

bool Executor::IsThreaded(ExecutorType executor_type) {
  GPR_ASSERT(executor_type < ExecutorType::NUM_EXECUTORS);
  return executors[static_cast<size_t>(executor_type)]->IsThreaded();
}

bool Executor::IsThreadedDefault() {
  return Executor::IsThreaded(ExecutorType::DEFAULT);
}

void Executor::SetThreadingAll(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
  for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
       i++) {
    executors[i]->SetThreading(enable);
  }
}

void Executor::SetThreadingDefault(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
}

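// Initializes the thread-local slot used by Enqueue() to detect whether the
// calling thread is itself an executor thread; called once during gRPC
// global initialization.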
void grpc_executor_global_init() { gpr_tls_init(&g_this_thread_state); }

}  // namespace grpc_core