1 /*
2 *
3 * Copyright 2015-2016 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18 #include <grpc/support/port_platform.h>
19
20 #include "src/core/lib/surface/completion_queue.h"
21
22 #include <inttypes.h>
23 #include <stdio.h>
24 #include <string.h>
25
26 #include <vector>
27
28 #include "absl/strings/str_format.h"
29 #include "absl/strings/str_join.h"
30
31 #include <grpc/support/alloc.h>
32 #include <grpc/support/atm.h>
33 #include <grpc/support/log.h>
34 #include <grpc/support/string_util.h>
35 #include <grpc/support/time.h>
36
37 #include "src/core/lib/debug/stats.h"
38 #include "src/core/lib/gpr/spinlock.h"
39 #include "src/core/lib/gpr/string.h"
40 #include "src/core/lib/gpr/tls.h"
41 #include "src/core/lib/gprpp/atomic.h"
42 #include "src/core/lib/iomgr/executor.h"
43 #include "src/core/lib/iomgr/pollset.h"
44 #include "src/core/lib/iomgr/timer.h"
45 #include "src/core/lib/profiling/timers.h"
46 #include "src/core/lib/surface/api_trace.h"
47 #include "src/core/lib/surface/call.h"
48 #include "src/core/lib/surface/event_string.h"
49
50 grpc_core::TraceFlag grpc_trace_operation_failures(false, "op_failure");
51 grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags(false, "pending_tags");
52 grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount(false, "cq_refcount");
53
54 namespace {
55
56 // Specifies a cq thread local cache.
57 // The first event that occurs on a thread
58 // with a cq cache will go into that cache, and
59 // will only be returned on the thread that initialized the cache.
60 // NOTE: Only one event will ever be cached.
61 GPR_TLS_DECL(g_cached_event);
62 GPR_TLS_DECL(g_cached_cq);
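// Assumed usage pattern (a sketch, not part of the original comments): a
// thread calls grpc_completion_queue_thread_local_cache_init(cq) before the
// completion is produced; cq_end_op_for_next then stashes the first matching
// completion in this thread-local slot instead of pushing it onto the event
// queue, and grpc_completion_queue_thread_local_cache_flush(cq, &tag, &ok)
// later retrieves it on the same thread.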
63
64 struct plucker {
65 grpc_pollset_worker** worker;
66 void* tag;
67 };
68 struct cq_poller_vtable {
69 bool can_get_pollset;
70 bool can_listen;
71 size_t (*size)(void);
72 void (*init)(grpc_pollset* pollset, gpr_mu** mu);
73 grpc_error* (*kick)(grpc_pollset* pollset,
74 grpc_pollset_worker* specific_worker);
75 grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker,
76 grpc_millis deadline);
77 void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
78 void (*destroy)(grpc_pollset* pollset);
79 };
80 typedef struct non_polling_worker {
81 gpr_cv cv;
82 bool kicked;
83 struct non_polling_worker* next;
84 struct non_polling_worker* prev;
85 } non_polling_worker;
86
87 struct non_polling_poller {
88 gpr_mu mu;
89 bool kicked_without_poller;
90 non_polling_worker* root;
91 grpc_closure* shutdown;
92 };
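// In the non-polling poller below, each waiting thread is represented by a
// non_polling_worker linked into a circular doubly-linked list rooted at
// 'root'. A worker blocks on its own condition variable until it is kicked,
// the poller is shut down, or its deadline expires.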
93 size_t non_polling_poller_size(void) { return sizeof(non_polling_poller); }
94
95 void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) {
96 non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
97 gpr_mu_init(&npp->mu);
98 *mu = &npp->mu;
99 }
100
101 void non_polling_poller_destroy(grpc_pollset* pollset) {
102 non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
103 gpr_mu_destroy(&npp->mu);
104 }
105
106 grpc_error* non_polling_poller_work(grpc_pollset* pollset,
107 grpc_pollset_worker** worker,
108 grpc_millis deadline) {
109 non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
110 if (npp->shutdown) return GRPC_ERROR_NONE;
111 if (npp->kicked_without_poller) {
112 npp->kicked_without_poller = false;
113 return GRPC_ERROR_NONE;
114 }
115 non_polling_worker w;
116 gpr_cv_init(&w.cv);
117 if (worker != nullptr) *worker = reinterpret_cast<grpc_pollset_worker*>(&w);
118 if (npp->root == nullptr) {
119 npp->root = w.next = w.prev = &w;
120 } else {
121 w.next = npp->root;
122 w.prev = w.next->prev;
123 w.next->prev = w.prev->next = &w;
124 }
125 w.kicked = false;
126 gpr_timespec deadline_ts =
127 grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC);
128 while (!npp->shutdown && !w.kicked &&
129 !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts)) {
130 }
131 grpc_core::ExecCtx::Get()->InvalidateNow();
132 if (&w == npp->root) {
133 npp->root = w.next;
134 if (&w == npp->root) {
135 if (npp->shutdown) {
136 grpc_core::ExecCtx::Run(DEBUG_LOCATION, npp->shutdown, GRPC_ERROR_NONE);
137 }
138 npp->root = nullptr;
139 }
140 }
141 w.next->prev = w.prev;
142 w.prev->next = w.next;
143 gpr_cv_destroy(&w.cv);
144 if (worker != nullptr) *worker = nullptr;
145 return GRPC_ERROR_NONE;
146 }
147
148 grpc_error* non_polling_poller_kick(grpc_pollset* pollset,
149 grpc_pollset_worker* specific_worker) {
150 non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
151 if (specific_worker == nullptr) {
152 specific_worker = reinterpret_cast<grpc_pollset_worker*>(p->root);
153 }
154 if (specific_worker != nullptr) {
155 non_polling_worker* w =
156 reinterpret_cast<non_polling_worker*>(specific_worker);
157 if (!w->kicked) {
158 w->kicked = true;
159 gpr_cv_signal(&w->cv);
160 }
161 } else {
162 p->kicked_without_poller = true;
163 }
164 return GRPC_ERROR_NONE;
165 }
166
167 void non_polling_poller_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
168 non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
169 GPR_ASSERT(closure != nullptr);
170 p->shutdown = closure;
171 if (p->root == nullptr) {
172 grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_NONE);
173 } else {
174 non_polling_worker* w = p->root;
175 do {
176 gpr_cv_signal(&w->cv);
177 w = w->next;
178 } while (w != p->root);
179 }
180 }
181
182 const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
183 /* GRPC_CQ_DEFAULT_POLLING */
184 {true, true, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
185 grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
186 /* GRPC_CQ_NON_LISTENING */
187 {true, false, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
188 grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
189 /* GRPC_CQ_NON_POLLING */
190 {false, false, non_polling_poller_size, non_polling_poller_init,
191 non_polling_poller_kick, non_polling_poller_work,
192 non_polling_poller_shutdown, non_polling_poller_destroy},
193 };
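// The array above is indexed by grpc_cq_polling_type (see
// grpc_completion_queue_create_internal). GRPC_CQ_DEFAULT_POLLING uses a real
// pollset and reports can_listen == true; GRPC_CQ_NON_LISTENING also polls but
// reports can_listen == false (see grpc_cq_can_listen); GRPC_CQ_NON_POLLING
// never polls and relies on the condition-variable based poller above.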
194
195 } // namespace
196
197 struct cq_vtable {
198 grpc_cq_completion_type cq_completion_type;
199 size_t data_size;
200 void (*init)(void* data,
201 grpc_experimental_completion_queue_functor* shutdown_callback);
202 void (*shutdown)(grpc_completion_queue* cq);
203 void (*destroy)(void* data);
204 bool (*begin_op)(grpc_completion_queue* cq, void* tag);
205 void (*end_op)(grpc_completion_queue* cq, void* tag, grpc_error* error,
206 void (*done)(void* done_arg, grpc_cq_completion* storage),
207 void* done_arg, grpc_cq_completion* storage, bool internal);
208 grpc_event (*next)(grpc_completion_queue* cq, gpr_timespec deadline,
209 void* reserved);
210 grpc_event (*pluck)(grpc_completion_queue* cq, void* tag,
211 gpr_timespec deadline, void* reserved);
212 };
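// One cq_vtable exists per grpc_cq_completion_type (see g_cq_vtable below);
// the public entry points grpc_cq_begin_op, grpc_cq_end_op,
// grpc_completion_queue_next and grpc_completion_queue_pluck simply dispatch
// through it.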
213
214 namespace {
215
216 /* Queue that holds the cq_completion_events. Internally uses
217  * MultiProducerSingleConsumerQueue (a lock-free multi-producer,
218  * single-consumer queue). It uses a queue_lock to support multiple consumers.
219  * Only used in completion queues whose completion_type is GRPC_CQ_NEXT */
220 class CqEventQueue {
221 public:
222 CqEventQueue() = default;
223 ~CqEventQueue() = default;
224
225 /* Note: The counter is not incremented/decremented atomically with push/pop.
226 * The count is only eventually consistent */
227 intptr_t num_items() const {
228 return num_queue_items_.Load(grpc_core::MemoryOrder::RELAXED);
229 }
230
231 bool Push(grpc_cq_completion* c);
232 grpc_cq_completion* Pop();
233
234 private:
235 /* Spinlock to serialize consumers, i.e. Pop() operations */
236 gpr_spinlock queue_lock_ = GPR_SPINLOCK_INITIALIZER;
237
238 grpc_core::MultiProducerSingleConsumerQueue queue_;
239
240 /* A lazy counter of number of items in the queue. This is NOT atomically
241 incremented/decremented along with push/pop operations and hence is only
242 eventually consistent */
243 grpc_core::Atomic<intptr_t> num_queue_items_{0};
244 };
245
246 struct cq_next_data {
247 ~cq_next_data() {
248 GPR_ASSERT(queue.num_items() == 0);
249 #ifndef NDEBUG
250 if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
251 gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
252 }
253 #endif
254 }
255
256 /** Completed events for completion-queues of type GRPC_CQ_NEXT */
257 CqEventQueue queue;
258
259 /** Counter of how many things have ever been queued on this completion
260     queue; useful for checking the queue without taking a lock */
261 grpc_core::Atomic<intptr_t> things_queued_ever{0};
262
263 /** Number of outstanding events (+1 if not shut down)
264 Initial count is dropped by grpc_completion_queue_shutdown */
265 grpc_core::Atomic<intptr_t> pending_events{1};
266
267 /** 0 initially. 1 once we initiated shutdown */
268 bool shutdown_called = false;
269 };
270
271 struct cq_pluck_data {
272 cq_pluck_data() {
273 completed_tail = &completed_head;
274 completed_head.next = reinterpret_cast<uintptr_t>(completed_tail);
275 }
276
277 ~cq_pluck_data() {
278 GPR_ASSERT(completed_head.next ==
279 reinterpret_cast<uintptr_t>(&completed_head));
280 #ifndef NDEBUG
281 if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
282 gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
283 }
284 #endif
285 }
286
287 /** Completed events for completion-queues of type GRPC_CQ_PLUCK */
288 grpc_cq_completion completed_head;
289 grpc_cq_completion* completed_tail;
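/* The completed events form an intrusive, circular singly-linked list threaded
   through grpc_cq_completion::next. The low bit of 'next' stores the success
   flag of the event; the remaining bits hold the pointer to the next
   completion (see cq_end_op_for_pluck and cq_pluck). */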
290
291 /** Number of pending events (+1 if we're not shut down).
292     Initial count is dropped by grpc_completion_queue_shutdown. */
293 grpc_core::Atomic<intptr_t> pending_events{1};
294
295 /** Counter of how many things have ever been queued on this completion
296     queue; useful for checking the queue without taking a lock */
297 grpc_core::Atomic<intptr_t> things_queued_ever{0};
298
299 /** False initially; set to true once shutdown has completed */
300 /* TODO: (sreek) This is not needed since (shutdown == true) if and only if
301  * (pending_events == 0). So consider removing this in the future and using
302  * pending_events instead */
303 grpc_core::Atomic<bool> shutdown{false};
304
305 /** 0 initially. 1 once we initiated shutdown */
306 bool shutdown_called = false;
307
308 int num_pluckers = 0;
309 plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
310 };
311
312 struct cq_callback_data {
313 explicit cq_callback_data(
314 grpc_experimental_completion_queue_functor* shutdown_callback)
315 : shutdown_callback(shutdown_callback) {}
316
317 ~cq_callback_data() {
318 #ifndef NDEBUG
319 if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
320 gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
321 }
322 #endif
323 }
324
325 /** No actual completed events queue, unlike other types */
326
327 /** Number of pending events (+1 if we're not shut down).
328     Initial count is dropped by grpc_completion_queue_shutdown. */
329 grpc_core::Atomic<intptr_t> pending_events{1};
330
331 /** 0 initially. 1 once we initiated shutdown */
332 bool shutdown_called = false;
333
334 /** A callback that gets invoked when the CQ completes shutdown */
335 grpc_experimental_completion_queue_functor* shutdown_callback;
336 };
337
338 } // namespace
339
340 /* Completion queue structure */
341 struct grpc_completion_queue {
342 /** Once owning_refs drops to zero, we will destroy the cq */
343 grpc_core::RefCount owning_refs;
344
345 gpr_mu* mu;
346
347 const cq_vtable* vtable;
348 const cq_poller_vtable* poller_vtable;
349
350 #ifndef NDEBUG
351 void** outstanding_tags;
352 size_t outstanding_tag_count;
353 size_t outstanding_tag_capacity;
354 #endif
355
356 grpc_closure pollset_shutdown_done;
357 int num_polls;
358 };
359
360 /* Forward declarations */
361 static void cq_finish_shutdown_next(grpc_completion_queue* cq);
362 static void cq_finish_shutdown_pluck(grpc_completion_queue* cq);
363 static void cq_finish_shutdown_callback(grpc_completion_queue* cq);
364 static void cq_shutdown_next(grpc_completion_queue* cq);
365 static void cq_shutdown_pluck(grpc_completion_queue* cq);
366 static void cq_shutdown_callback(grpc_completion_queue* cq);
367
368 static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag);
369 static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag);
370 static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag);
371
372 // A cq_end_op function is called when an operation on a given CQ with
373 // a given tag has completed. The storage argument is a reference to the
374 // space reserved for this completion as it is placed into the corresponding
375 // queue. The done argument is a callback that will be invoked when it is
376 // safe to free up that storage. The storage MUST NOT be freed until the
377 // done callback is invoked.
378 static void cq_end_op_for_next(
379 grpc_completion_queue* cq, void* tag, grpc_error* error,
380 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
381 grpc_cq_completion* storage, bool internal);
382
383 static void cq_end_op_for_pluck(
384 grpc_completion_queue* cq, void* tag, grpc_error* error,
385 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
386 grpc_cq_completion* storage, bool internal);
387
388 static void cq_end_op_for_callback(
389 grpc_completion_queue* cq, void* tag, grpc_error* error,
390 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
391 grpc_cq_completion* storage, bool internal);
392
393 static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
394 void* reserved);
395
396 static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
397 gpr_timespec deadline, void* reserved);
398
399 // Note that cq_init_next and cq_init_pluck do not use the shutdown_callback
400 static void cq_init_next(
401 void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
402 static void cq_init_pluck(
403 void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
404 static void cq_init_callback(
405 void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
406 static void cq_destroy_next(void* data);
407 static void cq_destroy_pluck(void* data);
408 static void cq_destroy_callback(void* data);
409
410 /* Completion queue vtables based on the completion-type */
411 static const cq_vtable g_cq_vtable[] = {
412 /* GRPC_CQ_NEXT */
413 {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
414 cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next,
415 nullptr},
416 /* GRPC_CQ_PLUCK */
417 {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
418 cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, nullptr,
419 cq_pluck},
420 /* GRPC_CQ_CALLBACK */
421 {GRPC_CQ_CALLBACK, sizeof(cq_callback_data), cq_init_callback,
422 cq_shutdown_callback, cq_destroy_callback, cq_begin_op_for_callback,
423 cq_end_op_for_callback, nullptr, nullptr},
424 };
425
426 #define DATA_FROM_CQ(cq) ((void*)((cq) + 1))
427 #define POLLSET_FROM_CQ(cq) \
428 ((grpc_pollset*)((cq)->vtable->data_size + (char*)DATA_FROM_CQ(cq)))
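/* A completion queue is allocated as one contiguous block: the
   grpc_completion_queue struct itself, followed by the per-completion-type
   data (cq_next_data / cq_pluck_data / cq_callback_data), followed by the
   pollset (see the gpr_zalloc in grpc_completion_queue_create_internal).
   The two macros above recover pointers into that block. */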
429
430 grpc_core::TraceFlag grpc_cq_pluck_trace(false, "queue_pluck");
431
432 #define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event) \
433 do { \
434 if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) && \
435 (GRPC_TRACE_FLAG_ENABLED(grpc_cq_pluck_trace) || \
436 (event)->type != GRPC_QUEUE_TIMEOUT)) { \
437 gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq, \
438 grpc_event_string(event).c_str()); \
439 } \
440 } while (0)
441
442 static void on_pollset_shutdown_done(void* arg, grpc_error* error);
443
444 void grpc_cq_global_init() {
445 gpr_tls_init(&g_cached_event);
446 gpr_tls_init(&g_cached_cq);
447 }
448
449 void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
450 if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
451 nullptr) {
452 gpr_tls_set(&g_cached_event, (intptr_t)0);
453 gpr_tls_set(&g_cached_cq, (intptr_t)cq);
454 }
455 }
456
457 int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
458 void** tag, int* ok) {
459 grpc_cq_completion* storage =
460 reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event));
461 int ret = 0;
462 if (storage != nullptr && reinterpret_cast<grpc_completion_queue*>(
463 gpr_tls_get(&g_cached_cq)) == cq) {
464 *tag = storage->tag;
465 grpc_core::ExecCtx exec_ctx;
466 *ok = (storage->next & static_cast<uintptr_t>(1)) == 1;
467 storage->done(storage->done_arg, storage);
468 ret = 1;
469 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
470 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
471 GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
472 gpr_mu_lock(cq->mu);
473 cq_finish_shutdown_next(cq);
474 gpr_mu_unlock(cq->mu);
475 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
476 }
477 }
478 gpr_tls_set(&g_cached_event, (intptr_t)0);
479 gpr_tls_set(&g_cached_cq, (intptr_t)0);
480
481 return ret;
482 }
483
484 bool CqEventQueue::Push(grpc_cq_completion* c) {
485 queue_.Push(
486 reinterpret_cast<grpc_core::MultiProducerSingleConsumerQueue::Node*>(c));
487 return num_queue_items_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED) == 0;
488 }
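/* Note: Push() reports whether the queue was empty before this push; callers
   such as cq_end_op_for_next use that to decide whether a poller needs to be
   kicked for the newly added completion. */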
489
490 grpc_cq_completion* CqEventQueue::Pop() {
491 grpc_cq_completion* c = nullptr;
492
493 if (gpr_spinlock_trylock(&queue_lock_)) {
494 GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES();
495
496 bool is_empty = false;
497 c = reinterpret_cast<grpc_cq_completion*>(queue_.PopAndCheckEnd(&is_empty));
498 gpr_spinlock_unlock(&queue_lock_);
499
500 if (c == nullptr && !is_empty) {
501 GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES();
502 }
503 } else {
504 GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES();
505 }
506
507 if (c) {
508 num_queue_items_.FetchSub(1, grpc_core::MemoryOrder::RELAXED);
509 }
510
511 return c;
512 }
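/* Note: Pop() uses a trylock, so a concurrent consumer can cause it to return
   nullptr even though the queue is non-empty. Callers treat this as a
   transient failure and retry (see the num_items() > 0 checks in cq_next). */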
513
514 grpc_completion_queue* grpc_completion_queue_create_internal(
515 grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
516 grpc_experimental_completion_queue_functor* shutdown_callback) {
517 GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0);
518
519 grpc_completion_queue* cq;
520
521 GRPC_API_TRACE(
522 "grpc_completion_queue_create_internal(completion_type=%d, "
523 "polling_type=%d)",
524 2, (completion_type, polling_type));
525
526 const cq_vtable* vtable = &g_cq_vtable[completion_type];
527 const cq_poller_vtable* poller_vtable =
528 &g_poller_vtable_by_poller_type[polling_type];
529
530 grpc_core::ExecCtx exec_ctx;
531 GRPC_STATS_INC_CQS_CREATED();
532
533 cq = static_cast<grpc_completion_queue*>(
534 gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
535 poller_vtable->size()));
536
537 cq->vtable = vtable;
538 cq->poller_vtable = poller_vtable;
539
540 /* One for destroy(), one for pollset_shutdown */
541 new (&cq->owning_refs) grpc_core::RefCount(2);
542
543 poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu);
544 vtable->init(DATA_FROM_CQ(cq), shutdown_callback);
545
546 GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq,
547 grpc_schedule_on_exec_ctx);
548 return cq;
549 }
550
551 static void cq_init_next(
552 void* data,
553 grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
554 new (data) cq_next_data();
555 }
556
557 static void cq_destroy_next(void* data) {
558 cq_next_data* cqd = static_cast<cq_next_data*>(data);
559 cqd->~cq_next_data();
560 }
561
562 static void cq_init_pluck(
563 void* data,
564 grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
565 new (data) cq_pluck_data();
566 }
567
568 static void cq_destroy_pluck(void* data) {
569 cq_pluck_data* cqd = static_cast<cq_pluck_data*>(data);
570 cqd->~cq_pluck_data();
571 }
572
573 static void cq_init_callback(
574 void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
575 new (data) cq_callback_data(shutdown_callback);
576 }
577
578 static void cq_destroy_callback(void* data) {
579 cq_callback_data* cqd = static_cast<cq_callback_data*>(data);
580 cqd->~cq_callback_data();
581 }
582
583 grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
584 return cq->vtable->cq_completion_type;
585 }
586
587 int grpc_get_cq_poll_num(grpc_completion_queue* cq) {
588 int cur_num_polls;
589 gpr_mu_lock(cq->mu);
590 cur_num_polls = cq->num_polls;
591 gpr_mu_unlock(cq->mu);
592 return cur_num_polls;
593 }
594
595 #ifndef NDEBUG
596 void grpc_cq_internal_ref(grpc_completion_queue* cq, const char* reason,
597 const char* file, int line) {
598 grpc_core::DebugLocation debug_location(file, line);
599 #else
600 void grpc_cq_internal_ref(grpc_completion_queue* cq) {
601 grpc_core::DebugLocation debug_location;
602 const char* reason = nullptr;
603 #endif
604 cq->owning_refs.Ref(debug_location, reason);
605 }
606
607 static void on_pollset_shutdown_done(void* arg, grpc_error* /*error*/) {
608 grpc_completion_queue* cq = static_cast<grpc_completion_queue*>(arg);
609 GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy");
610 }
611
612 #ifndef NDEBUG
613 void grpc_cq_internal_unref(grpc_completion_queue* cq, const char* reason,
614 const char* file, int line) {
615 grpc_core::DebugLocation debug_location(file, line);
616 #else
617 void grpc_cq_internal_unref(grpc_completion_queue* cq) {
618 grpc_core::DebugLocation debug_location;
619 const char* reason = nullptr;
620 #endif
621 if (GPR_UNLIKELY(cq->owning_refs.Unref(debug_location, reason))) {
622 cq->vtable->destroy(DATA_FROM_CQ(cq));
623 cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq));
624 #ifndef NDEBUG
625 gpr_free(cq->outstanding_tags);
626 #endif
627 gpr_free(cq);
628 }
629 }
630
631 #ifndef NDEBUG
632 static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {
633 int found = 0;
634 if (lock_cq) {
635 gpr_mu_lock(cq->mu);
636 }
637
638 for (int i = 0; i < static_cast<int>(cq->outstanding_tag_count); i++) {
639 if (cq->outstanding_tags[i] == tag) {
640 cq->outstanding_tag_count--;
641 GPR_SWAP(void*, cq->outstanding_tags[i],
642 cq->outstanding_tags[cq->outstanding_tag_count]);
643 found = 1;
644 break;
645 }
646 }
647
648 if (lock_cq) {
649 gpr_mu_unlock(cq->mu);
650 }
651
652 GPR_ASSERT(found);
653 }
654 #else
655 static void cq_check_tag(grpc_completion_queue* /*cq*/, void* /*tag*/,
656 bool /*lock_cq*/) {}
657 #endif
658
659 static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* /*tag*/) {
660 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
661 return cqd->pending_events.IncrementIfNonzero();
662 }
663
664 static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* /*tag*/) {
665 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
666 return cqd->pending_events.IncrementIfNonzero();
667 }
668
669 static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* /*tag*/) {
670 cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
671 return cqd->pending_events.IncrementIfNonzero();
672 }
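/* All three begin_op variants use IncrementIfNonzero: once pending_events has
   dropped to zero (i.e. shutdown has finished draining), no new operation may
   be started against the queue and begin_op returns false. */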
673
674 bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
675 #ifndef NDEBUG
676 gpr_mu_lock(cq->mu);
677 if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
678 cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
679 cq->outstanding_tags = static_cast<void**>(gpr_realloc(
680 cq->outstanding_tags,
681 sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity));
682 }
683 cq->outstanding_tags[cq->outstanding_tag_count++] = tag;
684 gpr_mu_unlock(cq->mu);
685 #endif
686 return cq->vtable->begin_op(cq, tag);
687 }
688
689 /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
690    completion type of GRPC_CQ_NEXT) */
692 static void cq_end_op_for_next(
693 grpc_completion_queue* cq, void* tag, grpc_error* error,
694 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
695 grpc_cq_completion* storage, bool /*internal*/) {
696 GPR_TIMER_SCOPE("cq_end_op_for_next", 0);
697
698 if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
699 (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
700 error != GRPC_ERROR_NONE)) {
701 const char* errmsg = grpc_error_string(error);
702 GRPC_API_TRACE(
703 "cq_end_op_for_next(cq=%p, tag=%p, error=%s, "
704 "done=%p, done_arg=%p, storage=%p)",
705 6, (cq, tag, errmsg, done, done_arg, storage));
706 if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
707 error != GRPC_ERROR_NONE) {
708 gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
709 }
710 }
711 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
712 int is_success = (error == GRPC_ERROR_NONE);
713
714 storage->tag = tag;
715 storage->done = done;
716 storage->done_arg = done_arg;
717 storage->next = static_cast<uintptr_t>(is_success);
718
719 cq_check_tag(cq, tag, true); /* Used in debug builds only */
720
721 if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
722 cq &&
723 reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event)) ==
724 nullptr) {
725 gpr_tls_set(&g_cached_event, (intptr_t)storage);
726 } else {
727 /* Add the completion to the queue */
728 bool is_first = cqd->queue.Push(storage);
729 cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
730 /* Since we do not hold the cq lock here, it is important to do an 'acquire'
731    load here (instead of a 'no_barrier' load) to match with the release store
732    (done via pending_events.FetchSub(1, ACQ_REL)) in cq_shutdown_next */
735 if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 1) {
736 /* Only kick if this is the first item queued */
737 if (is_first) {
738 gpr_mu_lock(cq->mu);
739 grpc_error* kick_error =
740 cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
741 gpr_mu_unlock(cq->mu);
742
743 if (kick_error != GRPC_ERROR_NONE) {
744 const char* msg = grpc_error_string(kick_error);
745 gpr_log(GPR_ERROR, "Kick failed: %s", msg);
746 GRPC_ERROR_UNREF(kick_error);
747 }
748 }
749 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) ==
750 1) {
751 GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
752 gpr_mu_lock(cq->mu);
753 cq_finish_shutdown_next(cq);
754 gpr_mu_unlock(cq->mu);
755 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
756 }
757 } else {
758 GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
759 cqd->pending_events.Store(0, grpc_core::MemoryOrder::RELEASE);
760 gpr_mu_lock(cq->mu);
761 cq_finish_shutdown_next(cq);
762 gpr_mu_unlock(cq->mu);
763 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
764 }
765 }
766
767 GRPC_ERROR_UNREF(error);
768 }
769
770 /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
771    completion type of GRPC_CQ_PLUCK) */
773 static void cq_end_op_for_pluck(
774 grpc_completion_queue* cq, void* tag, grpc_error* error,
775 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
776 grpc_cq_completion* storage, bool /*internal*/) {
777 GPR_TIMER_SCOPE("cq_end_op_for_pluck", 0);
778
779 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
780 int is_success = (error == GRPC_ERROR_NONE);
781
782 if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
783 (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
784 error != GRPC_ERROR_NONE)) {
785 const char* errmsg = grpc_error_string(error);
786 GRPC_API_TRACE(
787 "cq_end_op_for_pluck(cq=%p, tag=%p, error=%s, "
788 "done=%p, done_arg=%p, storage=%p)",
789 6, (cq, tag, errmsg, done, done_arg, storage));
790 if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
791 error != GRPC_ERROR_NONE) {
792 gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
793 }
794 }
795
796 storage->tag = tag;
797 storage->done = done;
798 storage->done_arg = done_arg;
799 storage->next = reinterpret_cast<uintptr_t>(&cqd->completed_head) |
800 static_cast<uintptr_t>(is_success);
801
802 gpr_mu_lock(cq->mu);
803 cq_check_tag(cq, tag, false); /* Used in debug builds only */
804
805 /* Add to the list of completions */
806 cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
807 cqd->completed_tail->next =
808 reinterpret_cast<uintptr_t>(storage) | (1u & cqd->completed_tail->next);
809 cqd->completed_tail = storage;
810
811 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
812 cq_finish_shutdown_pluck(cq);
813 gpr_mu_unlock(cq->mu);
814 } else {
815 grpc_pollset_worker* pluck_worker = nullptr;
816 for (int i = 0; i < cqd->num_pluckers; i++) {
817 if (cqd->pluckers[i].tag == tag) {
818 pluck_worker = *cqd->pluckers[i].worker;
819 break;
820 }
821 }
822
823 grpc_error* kick_error =
824 cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
825
826 gpr_mu_unlock(cq->mu);
827
828 if (kick_error != GRPC_ERROR_NONE) {
829 const char* msg = grpc_error_string(kick_error);
830 gpr_log(GPR_ERROR, "Kick failed: %s", msg);
831
832 GRPC_ERROR_UNREF(kick_error);
833 }
834 }
835
836 GRPC_ERROR_UNREF(error);
837 }
838
839 static void functor_callback(void* arg, grpc_error* error) {
840 auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(arg);
841 functor->functor_run(functor, error == GRPC_ERROR_NONE);
842 }
843
844 /* Complete an event on a completion queue of type GRPC_CQ_CALLBACK */
845 static void cq_end_op_for_callback(
846 grpc_completion_queue* cq, void* tag, grpc_error* error,
847 void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
848 grpc_cq_completion* storage, bool internal) {
849 GPR_TIMER_SCOPE("cq_end_op_for_callback", 0);
850
851 cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
852
853 if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
854 (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
855 error != GRPC_ERROR_NONE)) {
856 const char* errmsg = grpc_error_string(error);
857 GRPC_API_TRACE(
858 "cq_end_op_for_callback(cq=%p, tag=%p, error=%s, "
859 "done=%p, done_arg=%p, storage=%p)",
860 6, (cq, tag, errmsg, done, done_arg, storage));
861 if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
862 error != GRPC_ERROR_NONE) {
863 gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
864 }
865 }
866
867 // The callback-based CQ isn't really a queue at all and thus has no need
868 // for reserved storage. Invoke the done callback right away to release it.
869 done(done_arg, storage);
870
871 cq_check_tag(cq, tag, true); /* Used in debug builds only */
872
873 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
874 cq_finish_shutdown_callback(cq);
875 }
876
877 // If possible, schedule the callback onto an existing thread-local
878 // ApplicationCallbackExecCtx, which is a work queue. This is possible for:
879 // 1. The callback is internally-generated and there is an ACEC available
880 // 2. The callback is marked inlineable and there is an ACEC available
881 // 3. We are already running in a background poller thread (which always has
882 // an ACEC available at the base of the stack).
883 auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(tag);
884 if (((internal || functor->inlineable) &&
885 grpc_core::ApplicationCallbackExecCtx::Available()) ||
886 grpc_iomgr_is_any_background_poller_thread()) {
887 grpc_core::ApplicationCallbackExecCtx::Enqueue(functor,
888 (error == GRPC_ERROR_NONE));
889 GRPC_ERROR_UNREF(error);
890 return;
891 }
892
893 // Schedule the callback on a closure if not internal or triggered
894 // from a background poller thread.
895 grpc_core::Executor::Run(
896 GRPC_CLOSURE_CREATE(functor_callback, functor, nullptr), error);
897 }
898
899 void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
900 void (*done)(void* done_arg, grpc_cq_completion* storage),
901 void* done_arg, grpc_cq_completion* storage,
902 bool internal) {
903 cq->vtable->end_op(cq, tag, error, done, done_arg, storage, internal);
904 }
905
906 struct cq_is_finished_arg {
907 gpr_atm last_seen_things_queued_ever;
908 grpc_completion_queue* cq;
909 grpc_millis deadline;
910 grpc_cq_completion* stolen_completion;
911 void* tag; /* for pluck */
912 bool first_loop;
913 };
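// ExecCtxNext / ExecCtxPluck below override CheckReadyToFinish so that a
// thread blocked inside the pollset can notice a newly queued completion,
// "steal" it into stolen_completion, and return early instead of waiting for
// the full poll deadline.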
914 class ExecCtxNext : public grpc_core::ExecCtx {
915 public:
916 explicit ExecCtxNext(void* arg)
917 : ExecCtx(0), check_ready_to_finish_arg_(arg) {}
918
919 bool CheckReadyToFinish() override {
920 cq_is_finished_arg* a =
921 static_cast<cq_is_finished_arg*>(check_ready_to_finish_arg_);
922 grpc_completion_queue* cq = a->cq;
923 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
924 GPR_ASSERT(a->stolen_completion == nullptr);
925
926 intptr_t current_last_seen_things_queued_ever =
927 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
928
929 if (current_last_seen_things_queued_ever !=
930 a->last_seen_things_queued_ever) {
931 a->last_seen_things_queued_ever =
932 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
933
934 /* Pop a cq_completion from the queue. Returns NULL if the queue is empty;
935  * it might also return NULL in some cases even if the queue is not empty,
936  * but that is ok and doesn't affect correctness (it might affect the tail
937  * latencies a bit). */
939 a->stolen_completion = cqd->queue.Pop();
940 if (a->stolen_completion != nullptr) {
941 return true;
942 }
943 }
944 return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
945 }
946
947 private:
948 void* check_ready_to_finish_arg_;
949 };
950
951 #ifndef NDEBUG
952 static void dump_pending_tags(grpc_completion_queue* cq) {
953 if (!GRPC_TRACE_FLAG_ENABLED(grpc_trace_pending_tags)) return;
954 std::vector<std::string> parts;
955 parts.push_back("PENDING TAGS:");
956 gpr_mu_lock(cq->mu);
957 for (size_t i = 0; i < cq->outstanding_tag_count; i++) {
958 parts.push_back(absl::StrFormat(" %p", cq->outstanding_tags[i]));
959 }
960 gpr_mu_unlock(cq->mu);
961 gpr_log(GPR_DEBUG, "%s", absl::StrJoin(parts, "").c_str());
962 }
963 #else
964 static void dump_pending_tags(grpc_completion_queue* /*cq*/) {}
965 #endif
966
967 static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
968 void* reserved) {
969 GPR_TIMER_SCOPE("grpc_completion_queue_next", 0);
970
971 grpc_event ret;
972 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
973
974 GRPC_API_TRACE(
975 "grpc_completion_queue_next("
976 "cq=%p, "
977 "deadline=gpr_timespec { tv_sec: %" PRId64
978 ", tv_nsec: %d, clock_type: %d }, "
979 "reserved=%p)",
980 5,
981 (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
982 reserved));
983 GPR_ASSERT(!reserved);
984
985 dump_pending_tags(cq);
986
987 GRPC_CQ_INTERNAL_REF(cq, "next");
988
989 grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
990 cq_is_finished_arg is_finished_arg = {
991 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
992 cq,
993 deadline_millis,
994 nullptr,
995 nullptr,
996 true};
997 ExecCtxNext exec_ctx(&is_finished_arg);
998 for (;;) {
999 grpc_millis iteration_deadline = deadline_millis;
1000
1001 if (is_finished_arg.stolen_completion != nullptr) {
1002 grpc_cq_completion* c = is_finished_arg.stolen_completion;
1003 is_finished_arg.stolen_completion = nullptr;
1004 ret.type = GRPC_OP_COMPLETE;
1005 ret.success = c->next & 1u;
1006 ret.tag = c->tag;
1007 c->done(c->done_arg, c);
1008 break;
1009 }
1010
1011 grpc_cq_completion* c = cqd->queue.Pop();
1012
1013 if (c != nullptr) {
1014 ret.type = GRPC_OP_COMPLETE;
1015 ret.success = c->next & 1u;
1016 ret.tag = c->tag;
1017 c->done(c->done_arg, c);
1018 break;
1019 } else {
1020 /* If c == NULL it means either the queue is empty OR is in a transient
1021    inconsistent state. If it is the latter, we should do a 0-timeout poll
1022    so that the thread comes back quickly from poll to make a second
1023    attempt at popping. Not doing this can potentially deadlock this
1024    thread forever (if the deadline is infinity) */
1025 if (cqd->queue.num_items() > 0) {
1026 iteration_deadline = 0;
1027 }
1028 }
1029
1030 if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) == 0) {
1031 /* Before returning, check if the queue has any items left over (since
1032    MultiProducerSingleConsumerQueue::Pop() can sometimes return NULL
1033    even if the queue is not empty). If so, keep retrying but do not
1034    return GRPC_QUEUE_SHUTDOWN */
1035 if (cqd->queue.num_items() > 0) {
1036 /* Go to the beginning of the loop. No point doing a poll because
1037 (cq->shutdown == true) is only possible when there is no pending
1038 work (i.e. cq->pending_events == 0) and any outstanding completion
1039 events should have already been queued on this cq */
1040 continue;
1041 }
1042
1043 ret.type = GRPC_QUEUE_SHUTDOWN;
1044 ret.success = 0;
1045 break;
1046 }
1047
1048 if (!is_finished_arg.first_loop &&
1049 grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
1050 ret.type = GRPC_QUEUE_TIMEOUT;
1051 ret.success = 0;
1052 dump_pending_tags(cq);
1053 break;
1054 }
1055
1056 /* The main polling work happens in grpc_pollset_work */
1057 gpr_mu_lock(cq->mu);
1058 cq->num_polls++;
1059 grpc_error* err = cq->poller_vtable->work(POLLSET_FROM_CQ(cq), nullptr,
1060 iteration_deadline);
1061 gpr_mu_unlock(cq->mu);
1062
1063 if (err != GRPC_ERROR_NONE) {
1064 const char* msg = grpc_error_string(err);
1065 gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);
1066
1067 GRPC_ERROR_UNREF(err);
1068 ret.type = GRPC_QUEUE_TIMEOUT;
1069 ret.success = 0;
1070 dump_pending_tags(cq);
1071 break;
1072 }
1073 is_finished_arg.first_loop = false;
1074 }
1075
1076 if (cqd->queue.num_items() > 0 &&
1077 cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) > 0) {
1078 gpr_mu_lock(cq->mu);
1079 cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
1080 gpr_mu_unlock(cq->mu);
1081 }
1082
1083 GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
1084 GRPC_CQ_INTERNAL_UNREF(cq, "next");
1085
1086 GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);
1087
1088 return ret;
1089 }
1090
1091 /* Finishes the completion queue shutdown. This means that there are no more
1092 completion events / tags expected from the completion queue
1093 - Must be called under completion queue lock
1094 - Must be called only once in completion queue's lifetime
1095 - grpc_completion_queue_shutdown() MUST have been called before calling
1096 this function */
1097 static void cq_finish_shutdown_next(grpc_completion_queue* cq) {
1098 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
1099
1100 GPR_ASSERT(cqd->shutdown_called);
1101 GPR_ASSERT(cqd->pending_events.Load(grpc_core::MemoryOrder::RELAXED) == 0);
1102
1103 cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
1104 }
1105
1106 static void cq_shutdown_next(grpc_completion_queue* cq) {
1107 cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
1108
1109 /* Need an extra ref for cq here because:
1110 * We call cq_finish_shutdown_next() below, which would call pollset shutdown.
1111 * Pollset shutdown decrements the cq ref count which can potentially destroy
1112 * the cq (if that happens to be the last ref).
1113 * Creating an extra ref here prevents the cq from getting destroyed while
1114 * this function is still active */
1115 GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
1116 gpr_mu_lock(cq->mu);
1117 if (cqd->shutdown_called) {
1118 gpr_mu_unlock(cq->mu);
1119 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
1120 return;
1121 }
1122 cqd->shutdown_called = true;
1123 /* Doing acq/release FetchSub here to match with
1124 * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
1125 * on this counter without necessarily holding a lock on cq */
1126 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
1127 cq_finish_shutdown_next(cq);
1128 }
1129 gpr_mu_unlock(cq->mu);
1130 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
1131 }
1132
1133 grpc_event grpc_completion_queue_next(grpc_completion_queue* cq,
1134 gpr_timespec deadline, void* reserved) {
1135 return cq->vtable->next(cq, deadline, reserved);
1136 }
1137
1138 static int add_plucker(grpc_completion_queue* cq, void* tag,
1139 grpc_pollset_worker** worker) {
1140 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1141 if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
1142 return 0;
1143 }
1144 cqd->pluckers[cqd->num_pluckers].tag = tag;
1145 cqd->pluckers[cqd->num_pluckers].worker = worker;
1146 cqd->num_pluckers++;
1147 return 1;
1148 }
1149
1150 static void del_plucker(grpc_completion_queue* cq, void* tag,
1151 grpc_pollset_worker** worker) {
1152 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1153 for (int i = 0; i < cqd->num_pluckers; i++) {
1154 if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
1155 cqd->num_pluckers--;
1156 GPR_SWAP(plucker, cqd->pluckers[i], cqd->pluckers[cqd->num_pluckers]);
1157 return;
1158 }
1159 }
1160 GPR_UNREACHABLE_CODE(return );
1161 }
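/* add_plucker/del_plucker maintain a small, bounded registry (at most
   GRPC_MAX_COMPLETION_QUEUE_PLUCKERS entries) mapping a tag to the pollset
   worker currently plucking it, so cq_end_op_for_pluck can kick exactly the
   worker that is waiting for the completed tag. */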
1162
1163 class ExecCtxPluck : public grpc_core::ExecCtx {
1164 public:
1165 explicit ExecCtxPluck(void* arg)
1166 : ExecCtx(0), check_ready_to_finish_arg_(arg) {}
1167
1168 bool CheckReadyToFinish() override {
1169 cq_is_finished_arg* a =
1170 static_cast<cq_is_finished_arg*>(check_ready_to_finish_arg_);
1171 grpc_completion_queue* cq = a->cq;
1172 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1173
1174 GPR_ASSERT(a->stolen_completion == nullptr);
1175 gpr_atm current_last_seen_things_queued_ever =
1176 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
1177 if (current_last_seen_things_queued_ever !=
1178 a->last_seen_things_queued_ever) {
1179 gpr_mu_lock(cq->mu);
1180 a->last_seen_things_queued_ever =
1181 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
1182 grpc_cq_completion* c;
1183 grpc_cq_completion* prev = &cqd->completed_head;
1184 while ((c = reinterpret_cast<grpc_cq_completion*>(
1185 prev->next & ~static_cast<uintptr_t>(1))) !=
1186 &cqd->completed_head) {
1187 if (c->tag == a->tag) {
1188 prev->next = (prev->next & static_cast<uintptr_t>(1)) |
1189 (c->next & ~static_cast<uintptr_t>(1));
1190 if (c == cqd->completed_tail) {
1191 cqd->completed_tail = prev;
1192 }
1193 gpr_mu_unlock(cq->mu);
1194 a->stolen_completion = c;
1195 return true;
1196 }
1197 prev = c;
1198 }
1199 gpr_mu_unlock(cq->mu);
1200 }
1201 return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
1202 }
1203
1204 private:
1205 void* check_ready_to_finish_arg_;
1206 };
1207
1208 static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
1209 gpr_timespec deadline, void* reserved) {
1210 GPR_TIMER_SCOPE("grpc_completion_queue_pluck", 0);
1211
1212 grpc_event ret;
1213 grpc_cq_completion* c;
1214 grpc_cq_completion* prev;
1215 grpc_pollset_worker* worker = nullptr;
1216 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1217
1218 if (GRPC_TRACE_FLAG_ENABLED(grpc_cq_pluck_trace)) {
1219 GRPC_API_TRACE(
1220 "grpc_completion_queue_pluck("
1221 "cq=%p, tag=%p, "
1222 "deadline=gpr_timespec { tv_sec: %" PRId64
1223 ", tv_nsec: %d, clock_type: %d }, "
1224 "reserved=%p)",
1225 6,
1226 (cq, tag, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
1227 reserved));
1228 }
1229 GPR_ASSERT(!reserved);
1230
1231 dump_pending_tags(cq);
1232
1233 GRPC_CQ_INTERNAL_REF(cq, "pluck");
1234 gpr_mu_lock(cq->mu);
1235 grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
1236 cq_is_finished_arg is_finished_arg = {
1237 cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
1238 cq,
1239 deadline_millis,
1240 nullptr,
1241 tag,
1242 true};
1243 ExecCtxPluck exec_ctx(&is_finished_arg);
1244 for (;;) {
1245 if (is_finished_arg.stolen_completion != nullptr) {
1246 gpr_mu_unlock(cq->mu);
1247 c = is_finished_arg.stolen_completion;
1248 is_finished_arg.stolen_completion = nullptr;
1249 ret.type = GRPC_OP_COMPLETE;
1250 ret.success = c->next & 1u;
1251 ret.tag = c->tag;
1252 c->done(c->done_arg, c);
1253 break;
1254 }
1255 prev = &cqd->completed_head;
1256 while ((c = reinterpret_cast<grpc_cq_completion*>(
1257 prev->next & ~static_cast<uintptr_t>(1))) !=
1258 &cqd->completed_head) {
1259 if (c->tag == tag) {
1260 prev->next = (prev->next & static_cast<uintptr_t>(1)) |
1261 (c->next & ~static_cast<uintptr_t>(1));
1262 if (c == cqd->completed_tail) {
1263 cqd->completed_tail = prev;
1264 }
1265 gpr_mu_unlock(cq->mu);
1266 ret.type = GRPC_OP_COMPLETE;
1267 ret.success = c->next & 1u;
1268 ret.tag = c->tag;
1269 c->done(c->done_arg, c);
1270 goto done;
1271 }
1272 prev = c;
1273 }
1274 if (cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED)) {
1275 gpr_mu_unlock(cq->mu);
1276 ret.type = GRPC_QUEUE_SHUTDOWN;
1277 ret.success = 0;
1278 break;
1279 }
1280 if (!add_plucker(cq, tag, &worker)) {
1281 gpr_log(GPR_DEBUG,
1282 "Too many outstanding grpc_completion_queue_pluck calls: maximum "
1283 "is %d",
1284 GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
1285 gpr_mu_unlock(cq->mu);
1286 /* TODO(ctiller): should we use a different result here */
1287 ret.type = GRPC_QUEUE_TIMEOUT;
1288 ret.success = 0;
1289 dump_pending_tags(cq);
1290 break;
1291 }
1292 if (!is_finished_arg.first_loop &&
1293 grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
1294 del_plucker(cq, tag, &worker);
1295 gpr_mu_unlock(cq->mu);
1296 ret.type = GRPC_QUEUE_TIMEOUT;
1297 ret.success = 0;
1298 dump_pending_tags(cq);
1299 break;
1300 }
1301 cq->num_polls++;
1302 grpc_error* err =
1303 cq->poller_vtable->work(POLLSET_FROM_CQ(cq), &worker, deadline_millis);
1304 if (err != GRPC_ERROR_NONE) {
1305 del_plucker(cq, tag, &worker);
1306 gpr_mu_unlock(cq->mu);
1307 const char* msg = grpc_error_string(err);
1308 gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg);
1309
1310 GRPC_ERROR_UNREF(err);
1311 ret.type = GRPC_QUEUE_TIMEOUT;
1312 ret.success = 0;
1313 dump_pending_tags(cq);
1314 break;
1315 }
1316 is_finished_arg.first_loop = false;
1317 del_plucker(cq, tag, &worker);
1318 }
1319 done:
1320 GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
1321 GRPC_CQ_INTERNAL_UNREF(cq, "pluck");
1322
1323 GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);
1324
1325 return ret;
1326 }
1327
1328 grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
1329 gpr_timespec deadline, void* reserved) {
1330 return cq->vtable->pluck(cq, tag, deadline, reserved);
1331 }
1332
1333 static void cq_finish_shutdown_pluck(grpc_completion_queue* cq) {
1334 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1335
1336 GPR_ASSERT(cqd->shutdown_called);
1337 GPR_ASSERT(!cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED));
1338 cqd->shutdown.Store(true, grpc_core::MemoryOrder::RELAXED);
1339
1340 cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
1341 }
1342
1343 /* NOTE: This function is almost exactly identical to cq_shutdown_next() but
1344 * merging them is a bit tricky and probably not worth it */
1345 static void cq_shutdown_pluck(grpc_completion_queue* cq) {
1346 cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
1347
1348 /* Need an extra ref for cq here because:
1349 * We call cq_finish_shutdown_pluck() below, which would call pollset shutdown.
1350 * Pollset shutdown decrements the cq ref count which can potentially destroy
1351 * the cq (if that happens to be the last ref).
1352 * Creating an extra ref here prevents the cq from getting destroyed while
1353 * this function is still active */
1354 GRPC_CQ_INTERNAL_REF(cq, "shutting_down (pluck cq)");
1355 gpr_mu_lock(cq->mu);
1356 if (cqd->shutdown_called) {
1357 gpr_mu_unlock(cq->mu);
1358 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
1359 return;
1360 }
1361 cqd->shutdown_called = true;
1362 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
1363 cq_finish_shutdown_pluck(cq);
1364 }
1365 gpr_mu_unlock(cq->mu);
1366 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
1367 }
1368
1369 static void cq_finish_shutdown_callback(grpc_completion_queue* cq) {
1370 cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
1371 auto* callback = cqd->shutdown_callback;
1372
1373 GPR_ASSERT(cqd->shutdown_called);
1374
1375 cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
1376 if (grpc_iomgr_is_any_background_poller_thread()) {
1377 grpc_core::ApplicationCallbackExecCtx::Enqueue(callback, true);
1378 return;
1379 }
1380
1381 // Schedule the shutdown callback on the executor, since we are not running
1382 // in a background poller thread.
1383 grpc_core::Executor::Run(
1384 GRPC_CLOSURE_CREATE(functor_callback, callback, nullptr),
1385 GRPC_ERROR_NONE);
1386 }
1387
1388 static void cq_shutdown_callback(grpc_completion_queue* cq) {
1389 cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
1390
1391 /* Need an extra ref for cq here because:
1392 * We call cq_finish_shutdown_callback() below, which calls pollset shutdown.
1393 * Pollset shutdown decrements the cq ref count which can potentially destroy
1394 * the cq (if that happens to be the last ref).
1395 * Creating an extra ref here prevents the cq from getting destroyed while
1396 * this function is still active */
1397 GRPC_CQ_INTERNAL_REF(cq, "shutting_down (callback cq)");
1398 gpr_mu_lock(cq->mu);
1399 if (cqd->shutdown_called) {
1400 gpr_mu_unlock(cq->mu);
1401 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
1402 return;
1403 }
1404 cqd->shutdown_called = true;
1405 if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
1406 gpr_mu_unlock(cq->mu);
1407 cq_finish_shutdown_callback(cq);
1408 } else {
1409 gpr_mu_unlock(cq->mu);
1410 }
1411 GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
1412 }
1413
1414 /* Shutdown simply drops a ref that we reserved at creation time; if we drop
1415 to zero here, then enter shutdown mode and wake up any waiters */
1416 void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
1417 GPR_TIMER_SCOPE("grpc_completion_queue_shutdown", 0);
1418 grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1419 grpc_core::ExecCtx exec_ctx;
1420 GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
1421 cq->vtable->shutdown(cq);
1422 }
1423
1424 void grpc_completion_queue_destroy(grpc_completion_queue* cq) {
1425 GPR_TIMER_SCOPE("grpc_completion_queue_destroy", 0);
1426 GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq));
1427 grpc_completion_queue_shutdown(cq);
1428
1429 grpc_core::ExecCtx exec_ctx;
1430 GRPC_CQ_INTERNAL_UNREF(cq, "destroy");
1431 }
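/* A minimal lifecycle sketch (assuming the usual public factory functions such
   as grpc_completion_queue_create_for_next, which live outside this file):

     grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
     // ... associate operations with cq and drive them ...
     grpc_completion_queue_shutdown(cq);
     grpc_event ev;
     do {
       ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                       nullptr);
     } while (ev.type != GRPC_QUEUE_SHUTDOWN);
     grpc_completion_queue_destroy(cq);
*/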
1432
1433 grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cq) {
1434 return cq->poller_vtable->can_get_pollset ? POLLSET_FROM_CQ(cq) : nullptr;
1435 }
1436
1437 bool grpc_cq_can_listen(grpc_completion_queue* cq) {
1438 return cq->poller_vtable->can_listen;
1439 }
1440