/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/executor.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"

#define MAX_DEPTH 2

#define EXECUTOR_TRACE(format, ...)                       \
  do {                                                    \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) {        \
      gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
    }                                                     \
  } while (0)

#define EXECUTOR_TRACE0(str)                       \
  do {                                             \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) { \
      gpr_log(GPR_INFO, "EXECUTOR " str);          \
    }                                              \
  } while (0)
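
// Illustrative note (not part of the build): "executor" is a standard gRPC
// TraceFlag, so the macros above only log when that tracer is enabled at
// runtime, typically via environment variables before the process starts:
//
//   GRPC_TRACE=executor GRPC_VERBOSITY=info ./my_grpc_app
//
// (`my_grpc_app` is a hypothetical binary.) A trace point then expands to a
// guarded gpr_log(GPR_INFO, ...) call, as in:
//
//   EXECUTOR_TRACE("(%s) run %p", executor_name, c);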

namespace grpc_core {
namespace {

GPR_TLS_DECL(g_this_thread_state);

Executor* executors[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)];

void default_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, true /* is_short */);
}

void default_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, false /* is_short */);
}

void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, true /* is_short */);
}

void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, false /* is_short */);
}

using EnqueueFunc = void (*)(grpc_closure* closure, grpc_error* error);

const EnqueueFunc
    executor_enqueue_fns_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
                         [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] =
                             {{default_enqueue_short, default_enqueue_long},
                              {resolver_enqueue_short, resolver_enqueue_long}};
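
// Illustrative sketch (comment only): the table above is indexed first by
// executor type and then by job type, so a lookup such as
//
//   executor_enqueue_fns_[static_cast<size_t>(ExecutorType::RESOLVER)]
//                        [static_cast<size_t>(ExecutorJobType::SHORT)]
//
// yields resolver_enqueue_short. This is exactly the dispatch performed by
// Executor::Run() further below.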

}  // namespace

TraceFlag executor_trace(false, "executor");

Executor::Executor(const char* name) : name_(name) {
  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  gpr_atm_rel_store(&num_threads_, 0);
  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}
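
// Worked example (illustrative numbers): with gpr_cpu_num_cores() == 8 the
// constructor above sets max_threads_ = GPR_MAX(1, 16) == 16; the GPR_MAX
// clamp guarantees at least one executor thread even if core detection ever
// reported 0.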

void Executor::Init() { SetThreading(true); }

size_t Executor::RunClosures(const char* executor_name,
                             grpc_closure_list list) {
  size_t n = 0;

  // In the executor, the ExecCtx for the thread is declared in the executor
  // thread itself, but this is the point where we could start seeing
  // application-level callbacks. No need to create a new ExecCtx, though,
  // since there already is one and it is flushed (but not destructed) in this
  // function itself. The ApplicationCallbackExecCtx will have its callbacks
  // invoked on its destruction, which will be after completing any closures in
  // the executor's closure list (which were explicitly scheduled onto the
  // executor).
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx(
      GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  grpc_closure* c = list.head;
  while (c != nullptr) {
    grpc_closure* next = c->next_data.next;
    grpc_error* error = c->error_data.error;
#ifndef NDEBUG
    EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
                   c->file_created, c->line_created);
    c->scheduled = false;
#else
    EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
    c->cb(c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_core::ExecCtx::Get()->Flush();
  }

  return n;
}

bool Executor::IsThreaded() const {
  return gpr_atm_acq_load(&num_threads_) > 0;
}

void Executor::SetThreading(bool threading) {
  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);

  if (threading) {
    if (curr_num_threads > 0) {
      EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
      return;
    }

    GPR_ASSERT(num_threads_ == 0);
    gpr_atm_rel_store(&num_threads_, 1);
    thd_state_ = static_cast<ThreadState*>(
        gpr_zalloc(sizeof(ThreadState) * max_threads_));

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_init(&thd_state_[i].mu);
      gpr_cv_init(&thd_state_[i].cv);
      thd_state_[i].id = i;
      thd_state_[i].name = name_;
      thd_state_[i].thd = grpc_core::Thread();
      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
    }

    thd_state_[0].thd =
        grpc_core::Thread(name_, &Executor::ThreadMain, &thd_state_[0]);
    thd_state_[0].thd.Start();
  } else {  // !threading
    if (curr_num_threads == 0) {
      EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
      return;
    }

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_lock(&thd_state_[i].mu);
      thd_state_[i].shutdown = true;
      gpr_cv_signal(&thd_state_[i].cv);
      gpr_mu_unlock(&thd_state_[i].mu);
    }

    /* Ensure no thread is adding a new thread. Once this is past, then no
     * thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&adding_thread_lock_);
    gpr_spinlock_unlock(&adding_thread_lock_);

    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
    for (gpr_atm i = 0; i < curr_num_threads; i++) {
      thd_state_[i].thd.Join();
      EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
                     i + 1, curr_num_threads);
    }

    gpr_atm_rel_store(&num_threads_, 0);
    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_destroy(&thd_state_[i].mu);
      gpr_cv_destroy(&thd_state_[i].cv);
      RunClosures(thd_state_[i].name, thd_state_[i].elems);
    }

    gpr_free(thd_state_);

    // grpc_iomgr_shutdown_background_closure() will close all the registered
    // fds in the background poller and wait for all pending closures to
    // finish. Thus, never call Executor::SetThreading(false) in the middle of
    // an application.
    // TODO(guantaol): create another method to finish all the pending closures
    // registered in the background poller by grpc_core::Executor.
    grpc_iomgr_shutdown_background_closure();
  }

  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}

void Executor::Shutdown() { SetThreading(false); }

void Executor::ThreadMain(void* arg) {
  ThreadState* ts = static_cast<ThreadState*>(arg);
  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  size_t subtract_depth = 0;
  for (;;) {
    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
                   ts->name, ts->id, subtract_depth);

    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    // Wait for closures to be enqueued or for the executor to be shut down
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }

    if (ts->shutdown) {
      EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
      gpr_mu_unlock(&ts->mu);
      break;
    }

    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
    grpc_closure_list closures = ts->elems;
    ts->elems = GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);

    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);

    grpc_core::ExecCtx::Get()->InvalidateNow();
    subtract_depth = RunClosures(ts->name, closures);
  }

  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(nullptr));
}
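
// Worked example (illustrative numbers): if a worker wakes up holding
// ts->depth == 7 and its previous RunClosures() call executed 5 closures,
// the `ts->depth -= subtract_depth` step above leaves depth == 2, i.e. the
// number of closures still queued but not yet executed on this thread.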

void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
                       bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
  }

  do {
    retry_push = false;
    size_t cur_thread_count =
        static_cast<size_t>(gpr_atm_acq_load(&num_threads_));

    // If the number of threads is zero (i.e. the executor is either not
    // threaded or already shut down), then queue the closure on the exec
    // context itself
    if (cur_thread_count == 0) {
#ifndef NDEBUG
      EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
                     closure->file_created, closure->line_created);
#else
      EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                               closure, error);
      return;
    }

    if (grpc_iomgr_add_closure_to_background_poller(closure, error)) {
      return;
    }

    ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
    if (ts == nullptr) {
      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
                                        cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
    }

    ThreadState* orig_ts = ts;
    bool try_new_thread = false;

    for (;;) {
#ifndef NDEBUG
      EXECUTOR_TRACE(
          "(%s) try to schedule %p (%s) (created %s:%d) to thread "
          "%" PRIdPTR,
          name_, closure, is_short ? "short" : "long", closure->file_created,
          closure->line_created, ts->id);
#else
      EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
                     closure, is_short ? "short" : "long", ts->id);
#endif

      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // If there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation). Spin through queues and try again
        gpr_mu_unlock(&ts->mu);
        size_t idx = ts->id;
        ts = &thd_state_[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          // We cycled through all the threads. Retry the enqueue by creating
          // a new thread
          //
          // TODO (sreek): There is a potential issue here. We are
          // unconditionally setting try_new_thread to true here. What if the
          // executor is shut down OR cur_thread_count is already equal to
          // max_threads?
          // (Fortunately, this is not an issue yet (as of July 2018) because
          // there is only one instance of a long job in gRPC and hence we
          // will not hit this code path)
          retry_push = true;
          try_new_thread = true;
          break;
        }

        continue;  // Try the next thread-state
      }

      // == Found the thread state (i.e. thread) to enqueue this closure! ==

      // Also, if this thread has been waiting for closures, wake it up.
      // - If grpc_closure_list_empty() is true and the Executor is not
      //   shut down, it means that the thread must be waiting in ThreadMain()
      // - Note that gpr_cv_signal() won't immediately wake up the thread. That
      //   happens after we release the mutex &ts->mu a few lines below
      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
        gpr_cv_signal(&ts->cv);
      }

      grpc_closure_list_append(&ts->elems, closure, error);

      // If we have already queued more than MAX_DEPTH closures on this
      // thread, use that as a hint to create more threads
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < max_threads_ && !ts->shutdown;

      ts->queued_long_job = !is_short;

      gpr_mu_unlock(&ts->mu);
      break;
    }

    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
      cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
      if (cur_thread_count < max_threads_) {
        // Increment num_threads (safe to do a store instead of a cas because
        // we always increment num_threads under the 'adding_thread_lock')
        gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);

        thd_state_[cur_thread_count].thd = grpc_core::Thread(
            name_, &Executor::ThreadMain, &thd_state_[cur_thread_count]);
        thd_state_[cur_thread_count].thd.Start();
      }
      gpr_spinlock_unlock(&adding_thread_lock_);
    }

    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
    }
  } while (retry_push);
}
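
// Illustrative sketch (comment only): the queued_long_job check above means a
// thread state holding a long job accepts no further work. If a hypothetical
// closure c1 is enqueued as a LONG job on thread state 0, a later closure c2
// that hashes to the same state spins on to state 1 instead:
//
//   Executor::Run(c1, GRPC_ERROR_NONE, ExecutorType::DEFAULT,
//                 ExecutorJobType::LONG);
//   Executor::Run(c2, GRPC_ERROR_NONE, ExecutorType::DEFAULT,
//                 ExecutorJobType::SHORT);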

// Executor::InitAll() and Executor::ShutdownAll() are called in the
// grpc_init() and grpc_shutdown() code paths, which are protected by a
// global mutex, so it is okay to assume that these functions are thread-safe.
void Executor::InitAll() {
  EXECUTOR_TRACE0("Executor::InitAll() enter");

  // Return if Executor::InitAll() was already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
      new Executor("default-executor");
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
      new Executor("resolver-executor");

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();

  EXECUTOR_TRACE0("Executor::InitAll() done");
}
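
// Illustrative sketch: applications reach InitAll()/ShutdownAll() only
// indirectly, through the library lifecycle:
//
//   grpc_init();      // the init path ends up calling Executor::InitAll()
//   /* ... use gRPC ... */
//   grpc_shutdown();  // the shutdown path calls Executor::ShutdownAll()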

void Executor::Run(grpc_closure* closure, grpc_error* error,
                   ExecutorType executor_type, ExecutorJobType job_type) {
  executor_enqueue_fns_[static_cast<size_t>(executor_type)]
                       [static_cast<size_t>(job_type)](closure, error);
}
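
// Example usage (a minimal sketch; `on_resolved` is a hypothetical callback
// and the closure setup mirrors common call sites in this codebase):
//
//   static void on_resolved(void* arg, grpc_error* error) { /* ... */ }
//
//   grpc_closure closure;
//   GRPC_CLOSURE_INIT(&closure, on_resolved, /*arg=*/nullptr,
//                     grpc_schedule_on_exec_ctx);
//   grpc_core::Executor::Run(&closure, GRPC_ERROR_NONE,
//                            grpc_core::ExecutorType::RESOLVER,
//                            grpc_core::ExecutorJobType::SHORT);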

void Executor::ShutdownAll() {
  EXECUTOR_TRACE0("Executor::ShutdownAll() enter");

  // Return if Executor::ShutdownAll() was already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] == nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] ==
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Shutdown();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Shutdown();

  // Delete the executor objects.
  //
  // NOTE: It is important to call Shutdown() on all executors first before
  // calling delete because it is possible for one executor (that is not
  // shut down yet) to call Enqueue() on a different executor which is already
  // shut down. This is legal and in such cases, the Enqueue() operation
  // effectively "fails" and enqueues that closure on the calling thread's
  // exec_ctx.
  //
  // By ensuring that all executors are shut down first, we are also ensuring
  // that no thread is active across all executors.

  delete executors[static_cast<size_t>(ExecutorType::DEFAULT)];
  delete executors[static_cast<size_t>(ExecutorType::RESOLVER)];
  executors[static_cast<size_t>(ExecutorType::DEFAULT)] = nullptr;
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] = nullptr;

  EXECUTOR_TRACE0("Executor::ShutdownAll() done");
}

bool Executor::IsThreaded(ExecutorType executor_type) {
  GPR_ASSERT(executor_type < ExecutorType::NUM_EXECUTORS);
  return executors[static_cast<size_t>(executor_type)]->IsThreaded();
}

bool Executor::IsThreadedDefault() {
  return Executor::IsThreaded(ExecutorType::DEFAULT);
}

void Executor::SetThreadingAll(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
  for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
       i++) {
    executors[i]->SetThreading(enable);
  }
}

void Executor::SetThreadingDefault(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
}

void grpc_executor_global_init() { gpr_tls_init(&g_this_thread_state); }

}  // namespace grpc_core