/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL_CREATE1

#include "src/core/lib/iomgr/ev_epollex_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mutex_lock.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

// debug aid: create workers on the heap (allows asan to spot
// use-after-destruction)
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1

#define MAX_EPOLL_EVENTS 100
// TODO(juanlishen): We use a greater-than-one value here as a workaround fix to
// a keepalive ping timeout issue. We may want to revert
// https://github.com/grpc/grpc/pull/14943 once we figure out the root cause.
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 16
#define MAX_FDS_IN_CACHE 32

grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
                                                           "pollable_refcount");

/*******************************************************************************
 * pollable Declarations
 */

typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;

typedef struct pollable pollable;

typedef struct cached_fd {
  // Set to the grpc_fd's salt value. See the 'salt' variable in grpc_fd for
  // more details
  intptr_t salt;

  // The underlying fd
  int fd;

  // A recency time counter that helps to determine the LRU fd in the cache
  uint64_t last_used;
} cached_fd;

/// A pollable is something that can be polled: it has an epoll set to poll on,
/// and a wakeup fd for kicks
/// There are three broad types:
/// - PO_EMPTY - the empty pollable, used before file descriptors are added to
///              a pollset
/// - PO_FD - a pollable containing only one FD - used to optimize single-fd
///           pollsets (which are common with synchronous api usage)
/// - PO_MULTI - a pollable containing many fds
struct pollable {
  pollable_type type;  // immutable
  gpr_refcount refs;

  int epfd;
  grpc_wakeup_fd wakeup;

  // The following are relevant only for type PO_FD
  grpc_fd* owner_fd;       // Set to the owner_fd if the type is PO_FD
  gpr_mu owner_orphan_mu;  // Synchronizes access to owner_orphaned field
  bool owner_orphaned;     // Is the owner fd orphaned

  grpc_pollset_set* pollset_set;
  pollable* next;
  pollable* prev;

  gpr_mu mu;
  grpc_pollset_worker* root_worker;

  int event_cursor;
  int event_count;
  struct epoll_event events[MAX_EPOLL_EVENTS];

  // We may be calling pollable_add_fd() on the same (pollable, fd) multiple
  // times. To prevent pollable_add_fd() from making multiple sys calls to
  // epoll_ctl() to add the fd, we maintain a cache of what fds are already
  // present in the underlying epoll-set.
  //
  // Since this is not a correctness issue, we do not need to maintain all the
  // fds in the cache. Hence we just use an LRU cache of size 'MAX_FDS_IN_CACHE'
  //
  // NOTE: An ideal implementation of this should do the following:
  // 1) Add fds to the cache in pollable_add_fd() function (i.e whenever the fd
  //    is added to the pollable's epoll set)
  // 2) Remove the fd from the cache whenever the fd is removed from the
  //    underlying epoll set (i.e whenever fd_orphan() is called).
  //
  // Implementing (2) above (i.e removing fds from cache on fd_orphan) adds a
  // lot of complexity since an fd can be present in multiple pollables. So our
  // implementation ONLY DOES (1) and NOT (2).
  //
  // The cached_fd.salt variable helps here to maintain correctness (it serves
  // as an epoch that differentiates one grpc_fd from another even though both
  // of them may have the same fd number)
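  //
  // Example of why the salt matters: suppose fd number 7 is orphaned and
  // closed, and the kernel later reuses number 7 for a brand-new grpc_fd. A
  // stale cache entry {fd=7, salt=old} may still be present, but the new
  // grpc_fd carries a fresh salt, so the lookup in pollable_add_fd() misses
  // and we correctly issue a new epoll_ctl(EPOLL_CTL_ADD).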
  //
  // The following implements an LRU-eviction cache of fds in this pollable
  cached_fd fd_cache[MAX_FDS_IN_CACHE];
  int fd_cache_size;
  uint64_t fd_cache_counter;  // Recency timer tick counter
};

static const char* pollable_type_string(pollable_type t) {
  switch (t) {
    case PO_MULTI:
      return "pollset";
    case PO_FD:
      return "fd";
    case PO_EMPTY:
      return "empty";
  }
  return "<invalid>";
}

static char* pollable_desc(pollable* p) {
  char* out;
  gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type),
               p->epfd, p->wakeup.read_fd);
  return out;
}

/// Shared empty pollable - used by pollset to poll on until the first fd is
/// added
static pollable* g_empty_pollable;

static grpc_error* pollable_create(pollable_type type, pollable** p);
#ifdef NDEBUG
static pollable* pollable_ref(pollable* p);
static void pollable_unref(pollable* p);
#define POLLABLE_REF(p, r) pollable_ref(p)
#define POLLABLE_UNREF(p, r) pollable_unref(p)
#else
static pollable* pollable_ref(pollable* p, int line, const char* reason);
static void pollable_unref(pollable* p, int line, const char* reason);
#define POLLABLE_REF(p, r) pollable_ref((p), __LINE__, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), __LINE__, (r))
#endif

/*******************************************************************************
 * Fd Declarations
 */

// Monotonically increasing Epoch counter that is assigned to each grpc_fd. See
// the description of the 'salt' variable in 'grpc_fd' for more details
// TODO: (sreek/kpayson) gpr_atm is intptr_t which may not be wide enough on
// 32-bit systems. Change this to int64_t - at least on 32-bit systems
static gpr_atm g_fd_salt;

struct grpc_fd {
  int fd;

  // Since fd numbers can be reused (after old fds are closed), this serves as
  // an epoch that uniquely identifies this fd: the pair (salt, fd) is unique
  // until the salt counter (i.e. g_fd_salt) overflows
  intptr_t salt;

  // refst format:
  //   bit 0    : 1=Active / 0=Orphaned
  //   bits 1-n : refcount
  // Ref/Unref by two to avoid altering the orphaned bit
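  //
  // Worked example of this encoding: fd_create() stores refst = 1 (active, no
  // extra refs). Each REF_BY(fd, 2)/UNREF_BY(fd, 2) pair moves the count up
  // and down without touching bit 0. fd_orphan() does REF_BY(fd, 1), which
  // clears the active bit while still holding one reference (refst becomes
  // even), and the final UNREF_BY(fd, 2) then triggers fd_destroy().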
  gpr_atm refst;

  gpr_mu orphan_mu;

  gpr_mu pollable_mu;
  pollable* pollable_obj;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;

  struct grpc_fd* freelist_next;
  grpc_closure* on_done_closure;

  grpc_iomgr_object iomgr_object;

  // Do we need to track EPOLLERR events separately?
  bool track_err;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef struct {
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
} pwlink;

typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;

struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
#ifndef NDEBUG
  // debug aid: which thread started this worker
  pid_t originator;
#endif
  gpr_cv cv;
  grpc_pollset* pollset;
  pollable* pollable_obj;

  pwlink links[PWLINK_COUNT];
};

struct grpc_pollset {
  gpr_mu mu;
  gpr_atm worker_count;
  pollable* active_pollable;
  bool kicked_without_poller;
  grpc_closure* shutdown_closure;
  bool already_shutdown;
  grpc_pollset_worker* root_worker;
  int containing_pollset_set_count;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  gpr_refcount refs;
  gpr_mu mu;
  grpc_pollset_set* parent;

  size_t pollset_count;
  size_t pollset_capacity;
  grpc_pollset** pollsets;

  size_t fd_count;
  size_t fd_capacity;
  grpc_fd** fds;
};

/*******************************************************************************
 * Common helpers
 */

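// Fold |error| into |*composite| (creating the composite from |desc| on the
// first failure). Returns true iff |error| was GRPC_ERROR_NONE.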
static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                   int line) {
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG,
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd* fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

#ifndef NDEBUG
#define INVALIDATE_FD(fd) invalidate_fd(fd)
/* Since an fd is never really destroyed (i.e. gpr_free() is not called), it is
 * hard to catch cases where fd fields are accessed even after calling
 * fd_destroy(). The following invalidates fd fields to make catching such
 * errors easier */
static void invalidate_fd(grpc_fd* fd) {
  fd->fd = -1;
  fd->salt = -1;
  gpr_atm_no_barrier_store(&fd->refst, -1);
  memset(&fd->orphan_mu, -1, sizeof(fd->orphan_mu));
  memset(&fd->pollable_mu, -1, sizeof(fd->pollable_mu));
  fd->pollable_obj = nullptr;
  fd->on_done_closure = nullptr;
  memset(&fd->iomgr_object, -1, sizeof(fd->iomgr_object));
  fd->track_err = false;
}
#else
#define INVALIDATE_FD(fd)
#endif

/* Uninitialize and add to the freelist */
static void fd_destroy(void* arg, grpc_error* error) {
  grpc_fd* fd = static_cast<grpc_fd*>(arg);
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
  gpr_mu_destroy(&fd->pollable_mu);
  gpr_mu_destroy(&fd->orphan_mu);

  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();
  fd->error_closure->DestroyEvent();

  INVALIDATE_FD(fd);

  /* Add the fd to the freelist */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

#ifndef NDEBUG
static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                     int line) {
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG,
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
  }
#else
static void unref_by(grpc_fd* fd, int n) {
#endif
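  // The fetch_add returns the pre-decrement value; old == n means this call
  // dropped the last reference, so hand the fd to fd_destroy() on the
  // exec_ctx.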
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    GRPC_CLOSURE_SCHED(
        GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
        GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
    new_fd->error_closure.Init();
  }

  new_fd->fd = fd;
  new_fd->salt = gpr_atm_no_barrier_fetch_add(&g_fd_salt, 1);
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  gpr_mu_init(&new_fd->orphan_mu);
  gpr_mu_init(&new_fd->pollable_mu);
  new_fd->pollable_obj = nullptr;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  new_fd->error_closure->InitEvent();
  new_fd->freelist_next = nullptr;
  new_fd->on_done_closure = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  new_fd->track_err = track_err;
  return new_fd;
}

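// Bit 0 of refst is the 'active' flag: once the fd has been orphaned we
// report -1 so callers do not use a descriptor number the kernel may already
// have reused.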
static int fd_wrapped_fd(grpc_fd* fd) {
  int ret_fd = fd->fd;
  return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      const char* reason) {
  bool is_fd_closed = false;

  gpr_mu_lock(&fd->orphan_mu);

  // Get the fd->pollable_obj and set the owner_orphaned on that pollable to
  // true so that the pollable will no longer access its owner_fd field.
  gpr_mu_lock(&fd->pollable_mu);
  pollable* pollable_obj = fd->pollable_obj;
  gpr_mu_unlock(&fd->pollable_mu);

  if (pollable_obj) {
    gpr_mu_lock(&pollable_obj->owner_orphan_mu);
    pollable_obj->owner_orphaned = true;
  }

  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != nullptr) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  // TODO(sreek): handle fd removal (where is_fd_closed=false)
  if (!is_fd_closed) {
    GRPC_FD_TRACE("epoll_fd %p (%d) was orphaned but not closed.", fd, fd->fd);
  }

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);

  if (pollable_obj) {
    gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
  }

  gpr_mu_unlock(&fd->orphan_mu);

  UNREF_BY(fd, 2, reason); /* Drop the reference */
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (shutdown(fd->fd, SHUT_RDWR)) {
      if (errno != ENOTCONN) {
        gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                grpc_fd_wrapped_fd(fd), errno);
      }
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure->NotifyOn(closure);
}

/*******************************************************************************
 * Pollable Definitions
 */

static grpc_error* pollable_create(pollable_type type, pollable** p) {
  *p = nullptr;

  int epfd = epoll_create1(EPOLL_CLOEXEC);
  if (epfd == -1) {
    return GRPC_OS_ERROR(errno, "epoll_create1");
  }
  GRPC_FD_TRACE("Pollable_create: created epfd: %d (type: %d)", epfd, type);
  *p = static_cast<pollable*>(gpr_malloc(sizeof(**p)));
  grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
  if (err != GRPC_ERROR_NONE) {
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). wakeupfd_init error",
        epfd, type);
    close(epfd);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
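  // Tag bit 0 of data.ptr so that pollable_process_events() can tell events
  // on the wakeup fd apart from grpc_fd events (both pointer types are at
  // least 2-byte aligned, leaving bit 0 free).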
  ev.data.ptr = (void*)(1 | (intptr_t)&(*p)->wakeup);
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
    err = GRPC_OS_ERROR(errno, "epoll_ctl");
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). epoll_ctl error", epfd,
        type);
    close(epfd);
    grpc_wakeup_fd_destroy(&(*p)->wakeup);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }

  (*p)->type = type;
  gpr_ref_init(&(*p)->refs, 1);
  gpr_mu_init(&(*p)->mu);
  (*p)->epfd = epfd;
  (*p)->owner_fd = nullptr;
  gpr_mu_init(&(*p)->owner_orphan_mu);
  (*p)->owner_orphaned = false;
  (*p)->pollset_set = nullptr;
  (*p)->next = (*p)->prev = *p;
  (*p)->root_worker = nullptr;
  (*p)->event_cursor = 0;
  (*p)->event_count = 0;
  (*p)->fd_cache_size = 0;
  (*p)->fd_cache_counter = 0;
  return GRPC_ERROR_NONE;
}

#ifdef NDEBUG
static pollable* pollable_ref(pollable* p) {
#else
static pollable* pollable_ref(pollable* p, int line, const char* reason) {
  if (grpc_trace_pollable_refcount.enabled()) {
    int r = static_cast<int>(gpr_atm_no_barrier_load(&p->refs.count));
    gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
            "POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason);
  }
#endif
  gpr_ref(&p->refs);
  return p;
}

#ifdef NDEBUG
static void pollable_unref(pollable* p) {
#else
static void pollable_unref(pollable* p, int line, const char* reason) {
  if (p == nullptr) return;
  if (grpc_trace_pollable_refcount.enabled()) {
    int r = static_cast<int>(gpr_atm_no_barrier_load(&p->refs.count));
    gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
            "POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason);
  }
#endif
  if (p != nullptr && gpr_unref(&p->refs)) {
    GRPC_FD_TRACE("pollable_unref: Closing epfd: %d", p->epfd);
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
    gpr_mu_destroy(&p->owner_orphan_mu);
    gpr_free(p);
  }
}

static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollable_add_fd";
  const int epfd = p->epfd;
  gpr_mu_lock(&p->mu);
  p->fd_cache_counter++;

  // Handle overflow of our cache counter by resetting the recency counter on
  // all cache entries
  if (p->fd_cache_counter == 0) {
    for (int i = 0; i < p->fd_cache_size; i++) {
      p->fd_cache[i].last_used = 0;
    }
  }

  int lru_idx = 0;
  for (int i = 0; i < p->fd_cache_size; i++) {
    if (p->fd_cache[i].fd == fd->fd && p->fd_cache[i].salt == fd->salt) {
      GRPC_STATS_INC_POLLSET_FD_CACHE_HITS();
      p->fd_cache[i].last_used = p->fd_cache_counter;
      gpr_mu_unlock(&p->mu);
      return GRPC_ERROR_NONE;
    } else if (p->fd_cache[i].last_used < p->fd_cache[lru_idx].last_used) {
      lru_idx = i;
    }
  }

  // Add to cache
  if (p->fd_cache_size < MAX_FDS_IN_CACHE) {
    lru_idx = p->fd_cache_size;
    p->fd_cache_size++;
  }
  p->fd_cache[lru_idx].fd = fd->fd;
  p->fd_cache[lru_idx].salt = fd->salt;
  p->fd_cache[lru_idx].last_used = p->fd_cache_counter;
  gpr_mu_unlock(&p->mu);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
  }

  struct epoll_event ev_fd;
  ev_fd.events =
      static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
  /* Use the second least significant bit of ev_fd.data.ptr to store track_err
   * to avoid synchronization issues when accessing it after receiving an
   * event. Accessing fd would be a data race there because the fd might have
   * been returned to the free list at that point. */
  ev_fd.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(fd) |
                                           (fd->track_err ? 2 : 0));
  GRPC_STATS_INC_SYSCALL_EPOLL_CTL();
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
    switch (errno) {
      case EEXIST:
        break;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }

  return error;
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  return pollable_create(PO_EMPTY, &g_empty_pollable);
}

static void pollset_global_shutdown(void) {
  POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
            "rw=%p (target:NULL) cpsc=%d (target:0)",
            pollset, pollset->active_pollable, pollset->shutdown_closure,
            pollset->root_worker, pollset->containing_pollset_set_count);
  }
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->containing_pollset_set_count == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
    pollset->already_shutdown = true;
  }
}

/* pollset->mu must be held before calling this function,
 * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
 * held */
static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("kick_one_worker", 0);
  pollable* p = specific_worker->pollable_obj;
  grpc_core::MutexLock lock(&p->mu);
  GPR_ASSERT(specific_worker != nullptr);
  if (specific_worker->kicked) {
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
    }
    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
    return GRPC_ERROR_NONE;
  }
  if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
    }
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  }
  if (specific_worker == p->root_worker) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
    }
    specific_worker->kicked = true;
    grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
    return error;
  }
  if (specific_worker->initialized_cv) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
    }
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
  // we can get here during end_worker after removing specific_worker from the
  // pollable list but before removing it from the pollset list
  return GRPC_ERROR_NONE;
}

static grpc_error* pollset_kick(grpc_pollset* pollset,
                                grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("pollset_kick", 0);
  GRPC_STATS_INC_POLLSET_KICK();
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
            pollset, specific_worker,
            (void*)gpr_tls_get(&g_current_thread_pollset),
            (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
  }
  if (specific_worker == nullptr) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == nullptr) {
        if (grpc_polling_trace.enabled()) {
          gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
        }
        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        // We've been asked to kick a poller, but we haven't been told which one
        // ... any will do
        // We look at the pollset worker list because:
        // 1. the pollable list may include workers from other pollers, so we'd
        //    need to do an O(N) search
        // 2. we'd additionally need to take the pollable lock, which we've so
        //    far avoided
        // Now, we would prefer to wake a poller in cv_wait, and not in
        // epoll_wait (since the latter would imply the need to do an additional
        // wakeup)
        // We know that if a worker is at the root of a pollable, it's (likely)
        // also the root of a pollset, and we know that if a worker is NOT at
        // the root of a pollset, it's (likely) not at the root of a pollable,
        // so we take our chances and choose the SECOND worker enqueued against
        // the pollset as a worker that's likely to be in cv_wait
        return kick_one_worker(
            pollset->root_worker->links[PWLINK_POLLSET].next);
      }
    } else {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
      }
      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
      return GRPC_ERROR_NONE;
    }
  } else {
    return kick_one_worker(specific_worker);
  }
}

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  const char* err_desc = "pollset_kick_all";
  grpc_pollset_worker* w = pollset->root_worker;
  if (w != nullptr) {
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      append_error(&error, kick_one_worker(w), err_desc);
      w = w->links[PWLINK_POLLSET].next;
    } while (w != pollset->root_worker);
  }
  return error;
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  gpr_atm_no_barrier_store(&pollset->worker_count, 0);
  pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
  pollset->kicked_without_poller = false;
  pollset->shutdown_closure = nullptr;
  pollset->already_shutdown = false;
  pollset->root_worker = nullptr;
  pollset->containing_pollset_set_count = 0;
  *mu = &pollset->mu;
}

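// Convert a grpc_millis deadline into an epoll_wait()-style timeout in
// milliseconds, clamped to [0, INT_MAX]; -1 requests an infinite wait.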
static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX)
    return INT_MAX;
  else if (delta < 0)
    return 0;
  else
    return static_cast<int>(delta);
}

static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); }

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }

/* Get the pollable_obj attached to this fd. If none is attached, create a new
 * pollable object (of type PO_FD), attach it to the fd and return it
 *
 * Note that if a pollable object is already attached to the fd, it may be of
 * either PO_FD or PO_MULTI type */
static grpc_error* get_fd_pollable(grpc_fd* fd, pollable** p) {
  gpr_mu_lock(&fd->pollable_mu);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "get_fd_pollable";
  if (fd->pollable_obj == nullptr) {
    if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
                     err_desc)) {
      fd->pollable_obj->owner_fd = fd;
      if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
                        err_desc)) {
        POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
        fd->pollable_obj = nullptr;
      }
    }
  }
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(fd->pollable_obj != nullptr);
    *p = POLLABLE_REF(fd->pollable_obj, "pollset");
  } else {
    GPR_ASSERT(fd->pollable_obj == nullptr);
    *p = nullptr;
  }
  gpr_mu_unlock(&fd->pollable_mu);
  return error;
}

/* pollset->mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
}

static grpc_error* pollable_process_events(grpc_pollset* pollset,
                                           pollable* pollable_obj, bool drain) {
  GPR_TIMER_SCOPE("pollable_process_events", 0);
  static const char* err_desc = "pollset_process_events";
  // Use a simple heuristic to determine how many fd events to process
  // per loop iteration. (events/workers)
  int handle_count = 1;
  int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count);
  GPR_ASSERT(worker_count > 0);
  handle_count =
      (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count;
  if (handle_count == 0) {
    handle_count = 1;
  } else if (handle_count > MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) {
    handle_count = MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL;
  }
  grpc_error* error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < handle_count) &&
                  pollable_obj->event_cursor != pollable_obj->event_count;
       i++) {
    int n = pollable_obj->event_cursor++;
    struct epoll_event* ev = &pollable_obj->events[n];
    void* data_ptr = ev->data.ptr;
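    // Bit 0 set: this event came from the pollable's own wakeup fd (see the
    // tagging in pollable_create()), not from a grpc_fd.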
    if (1 & (intptr_t)data_ptr) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
      }
      append_error(&error,
                   grpc_wakeup_fd_consume_wakeup(
                       (grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) &
                                         (intptr_t)data_ptr)),
                   err_desc);
    } else {
      grpc_fd* fd =
          reinterpret_cast<grpc_fd*>(reinterpret_cast<intptr_t>(data_ptr) & ~2);
      bool track_err = reinterpret_cast<intptr_t>(data_ptr) & 2;
      bool cancel = (ev->events & EPOLLHUP) != 0;
      bool error = (ev->events & EPOLLERR) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      bool err_fallback = error && !track_err;

      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO,
                "PS:%p got fd %p: cancel=%d read=%d "
                "write=%d",
                pollset, fd, cancel, read_ev, write_ev);
      }
      if (error && !err_fallback) {
        fd_has_errors(fd);
      }
      if (read_ev || cancel || err_fallback) {
        fd_become_readable(fd);
      }
      if (write_ev || cancel || err_fallback) {
        fd_become_writable(fd);
      }
    }
  }

  return error;
}

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_pollset* pollset) {
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollable_epoll", 0);
  int timeout = poll_deadline_to_millis_timeout(deadline);

  if (grpc_polling_trace.enabled()) {
    char* desc = pollable_desc(p);
    gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
    gpr_free(desc);
  }

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
  }

  p->event_cursor = 0;
  p->event_count = r;

  return GRPC_ERROR_NONE;
}

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker** root_worker,
                          grpc_pollset_worker* worker, pwlinks link) {
  if (*root_worker == nullptr) {
    *root_worker = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root_worker;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}

/* The result of worker_remove(): WRR_NEW_ROOT is returned iff removing the
   worker changed the root of the list */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
                                          grpc_pollset_worker* worker,
                                          pwlinks link) {
  if (worker == *root_worker) {
    if (worker == worker->links[link].next) {
      *root_worker = nullptr;
      return WRR_EMPTIED;
    } else {
      *root_worker = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return WRR_NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return WRR_REMOVED;
  }
}

/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  bool do_poll =
      (pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable_obj =
      POLLABLE_REF(pollset->active_pollable, "pollset_worker");
  worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (!worker_insert(&worker->pollable_obj->root_worker, worker,
                     PWLINK_POLLABLE)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    gpr_mu_unlock(&pollset->mu);
    if (grpc_polling_trace.enabled() &&
        worker->pollable_obj->root_worker != worker) {
      gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable_obj, worker,
              poll_deadline_to_millis_timeout(deadline));
    }
    while (do_poll && worker->pollable_obj->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
        if (grpc_polling_trace.enabled()) {
          gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (grpc_polling_trace.enabled()) {
          gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (grpc_polling_trace.enabled() &&
                 worker->pollable_obj->root_worker != worker) {
        gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable_obj, worker);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  } else {
    gpr_mu_unlock(&pollset->mu);
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);

  return do_poll;
}

static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  gpr_mu_lock(&pollset->mu);
  gpr_mu_lock(&worker->pollable_obj->mu);
  switch (worker_remove(&worker->pollable_obj->root_worker, worker,
                        PWLINK_POLLABLE)) {
    case WRR_NEW_ROOT: {
      // wakeup new poller
      grpc_pollset_worker* new_root = worker->pollable_obj->root_worker;
      GPR_ASSERT(new_root->initialized_cv);
      gpr_cv_signal(&new_root->cv);
      break;
    }
    case WRR_EMPTIED:
      if (pollset->active_pollable != worker->pollable_obj) {
        // pollable no longer being polled: flush events
        pollable_process_events(pollset, worker->pollable_obj, true);
      }
      break;
    case WRR_REMOVED:
      break;
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);
  POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
  if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
      WRR_EMPTIED) {
    pollset_maybe_finish_shutdown(pollset);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1);
}

#ifndef NDEBUG
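// Debug aid: issue the raw syscall, since glibc did not historically ship a
// gettid() wrapper (one was only added in glibc 2.30).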
static long gettid(void) { return syscall(__NR_gettid); }
#endif

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* pollset,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  grpc_pollset_worker* worker =
      (grpc_pollset_worker*)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
#else
  grpc_pollset_worker worker;
#define WORKER_PTR (&worker)
#endif
#ifndef NDEBUG
  WORKER_PTR->originator = gettid();
#endif
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
            " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
            deadline, pollset->kicked_without_poller, pollset->active_pollable);
  }
  static const char* err_desc = "pollset_work";
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
  } else {
    if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
      gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
      gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
      if (WORKER_PTR->pollable_obj->event_cursor ==
          WORKER_PTR->pollable_obj->event_count) {
        append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline),
                     err_desc);
      }
      append_error(
          &error,
          pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
          err_desc);
      grpc_core::ExecCtx::Get()->Flush();
      gpr_tls_set(&g_current_thread_pollset, 0);
      gpr_tls_set(&g_current_thread_worker, 0);
    }
    end_worker(pollset, WORKER_PTR, worker_hdl);
  }
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  gpr_free(worker);
#endif
#undef WORKER_PTR
  return error;
}

static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
    grpc_pollset* pollset, grpc_fd* fd) {
  static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
  grpc_error* error = GRPC_ERROR_NONE;
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p add fd %p (%d); transition pollable from empty to fd",
            pollset, fd, fd->fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  append_error(&error, get_fd_pollable(fd, &pollset->active_pollable),
               err_desc);
  return error;
}

static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
    grpc_pollset* pollset, grpc_fd* and_add_fd) {
  static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
  grpc_error* error = GRPC_ERROR_NONE;
  if (grpc_polling_trace.enabled()) {
    gpr_log(
        GPR_INFO,
        "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
        pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
        pollset->active_pollable->owner_fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
                   err_desc)) {
    append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
                 err_desc);
    if (and_add_fd != nullptr) {
      append_error(&error,
                   pollable_add_fd(pollset->active_pollable, and_add_fd),
                   err_desc);
    }
  }
  return error;
}

/* expects pollsets locked, flag whether fd is locked or not */
static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      /* empty pollable --> single fd pollable */
      error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        error =
            pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      } else {
        /* fd --> multipoller */
        error =
            pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
      }
      gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      break;
    case PO_MULTI:
      error = pollable_add_fd(pollset->active_pollable, fd);
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
  } else {
    POLLABLE_UNREF(po_at_start, "pollset_add_fd");
  }
  return error;
}

static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
                                                   pollable** pollable_obj) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      POLLABLE_UNREF(pollset->active_pollable, "pollset");
      error = pollable_create(PO_MULTI, &pollset->active_pollable);
      /* Any workers currently polling on this pollset must now be woken up so
       * that they can pick up the new active_pollable */
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO,
                "PS:%p active pollable transition from empty to multi",
                pollset);
      }
      static const char* err_desc =
          "pollset_as_multipollable_locked: empty -> multi";
      append_error(&error, pollset_kick_all(pollset), err_desc);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        // Unlock before Unref'ing the pollable
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
        POLLABLE_UNREF(pollset->active_pollable, "pollset");
        error = pollable_create(PO_MULTI, &pollset->active_pollable);
      } else {
        error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
                                                                    nullptr);
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      }
      break;
    case PO_MULTI:
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    *pollable_obj = nullptr;
  } else {
    *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
    POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
  }
  return error;
}

static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_add_fd", 0);
  gpr_mu_lock(&pollset->mu);
  grpc_error* error = pollset_add_fd_locked(pollset, fd);
  gpr_mu_unlock(&pollset->mu);
  GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}

/*******************************************************************************
 * Pollset-set Definitions
 */

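// Walk up the parent chain with lock coupling and return the root ("adam")
// pollset_set, locked. Merged pollset_sets delegate their state to this root,
// so all mutations operate on it.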
static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
  gpr_mu_lock(&pss->mu);
  while (pss->parent != nullptr) {
    gpr_mu_unlock(&pss->mu);
    pss = pss->parent;
    gpr_mu_lock(&pss->mu);
  }
  return pss;
}

static grpc_pollset_set* pollset_set_create(void) {
  grpc_pollset_set* pss =
      static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pss)));
  gpr_mu_init(&pss->mu);
  gpr_ref_init(&pss->refs, 1);
  return pss;
}

static void pollset_set_unref(grpc_pollset_set* pss) {
  if (pss == nullptr) return;
  if (!gpr_unref(&pss->refs)) return;
  pollset_set_unref(pss->parent);
  gpr_mu_destroy(&pss->mu);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    gpr_mu_lock(&pss->pollsets[i]->mu);
    if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
      pollset_maybe_finish_shutdown(pss->pollsets[i]);
    }
    gpr_mu_unlock(&pss->pollsets[i]->mu);
  }
  for (size_t i = 0; i < pss->fd_count; i++) {
    UNREF_BY(pss->fds[i], 2, "pollset_set");
  }
  gpr_free(pss->pollsets);
  gpr_free(pss->fds);
  gpr_free(pss);
}

static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  pss = pss_lock_adam(pss);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
                 err_desc);
  }
  if (pss->fd_count == pss->fd_capacity) {
    pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
    pss->fds = static_cast<grpc_fd**>(
        gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds)));
  }
  REF_BY(fd, 2, "pollset_set");
  pss->fds[pss->fd_count++] = fd;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->fd_count; i++) {
    if (pss->fds[i] == fd) {
      UNREF_BY(fd, 2, "pollset_set");
      break;
    }
  }
  GPR_ASSERT(i != pss->fd_count);
  for (; i < pss->fd_count - 1; i++) {
    pss->fds[i] = pss->fds[i + 1];
  }
  pss->fd_count--;
  gpr_mu_unlock(&pss->mu);
}

static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->pollset_count; i++) {
    if (pss->pollsets[i] == ps) {
      break;
    }
  }
  GPR_ASSERT(i != pss->pollset_count);
  for (; i < pss->pollset_count - 1; i++) {
    pss->pollsets[i] = pss->pollsets[i + 1];
  }
  pss->pollset_count--;
  gpr_mu_unlock(&pss->mu);
  gpr_mu_lock(&ps->mu);
  if (0 == --ps->containing_pollset_set_count) {
    pollset_maybe_finish_shutdown(ps);
  }
  gpr_mu_unlock(&ps->mu);
}

// add all fds to pollables, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
                                       grpc_pollset** pollsets,
                                       size_t pollset_count,
                                       const char* err_desc, grpc_fd** out_fds,
                                       size_t* out_fd_count) {
  GPR_TIMER_SCOPE("add_fds_to_pollsets", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  for (size_t i = 0; i < fd_count; i++) {
    gpr_mu_lock(&fds[i]->orphan_mu);
    if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
      gpr_mu_unlock(&fds[i]->orphan_mu);
      UNREF_BY(fds[i], 2, "pollset_set");
    } else {
      for (size_t j = 0; j < pollset_count; j++) {
        append_error(&error,
                     pollable_add_fd(pollsets[j]->active_pollable, fds[i]),
                     err_desc);
      }
      gpr_mu_unlock(&fds[i]->orphan_mu);
      out_fds[(*out_fd_count)++] = fds[i];
    }
  }
  return error;
}

static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_pollset";
  pollable* pollable_obj = nullptr;
  gpr_mu_lock(&ps->mu);
  if (!GRPC_LOG_IF_ERROR(err_desc,
                         pollset_as_multipollable_locked(ps, &pollable_obj))) {
    GPR_ASSERT(pollable_obj == nullptr);
    gpr_mu_unlock(&ps->mu);
    return;
  }
  ps->containing_pollset_set_count++;
  gpr_mu_unlock(&ps->mu);
  pss = pss_lock_adam(pss);
  size_t initial_fd_count = pss->fd_count;
  pss->fd_count = 0;
  append_error(&error,
               add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc,
                                   pss->fds, &pss->fd_count),
               err_desc);
  if (pss->pollset_count == pss->pollset_capacity) {
    pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
    pss->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
        pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets)));
  }
  pss->pollsets[pss->pollset_count++] = ps;
  gpr_mu_unlock(&pss->mu);
  POLLABLE_UNREF(pollable_obj, "pollset_set");

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_add_pollset_set(grpc_pollset_set* a,
                                        grpc_pollset_set* b) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  for (;;) {
    if (a == b) {
      // pollset ancestors are the same: nothing to do
      return;
    }
    if (a > b) {
      GPR_SWAP(grpc_pollset_set*, a, b);
    }
    gpr_mu* a_mu = &a->mu;
    gpr_mu* b_mu = &b->mu;
    gpr_mu_lock(a_mu);
    gpr_mu_lock(b_mu);
    if (a->parent != nullptr) {
      a = a->parent;
    } else if (b->parent != nullptr) {
      b = b->parent;
    } else {
      break;  // exit loop, both pollsets locked
    }
    gpr_mu_unlock(a_mu);
    gpr_mu_unlock(b_mu);
  }
  // try to do the least copying possible
  // TODO(sreek): there's probably a better heuristic here
  const size_t a_size = a->fd_count + a->pollset_count;
  const size_t b_size = b->fd_count + b->pollset_count;
  if (b_size > a_size) {
    GPR_SWAP(grpc_pollset_set*, a, b);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
  }
  gpr_ref(&a->refs);
  b->parent = a;
  if (a->fd_capacity < a->fd_count + b->fd_count) {
    a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
    a->fds = static_cast<grpc_fd**>(
        gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds)));
  }
  size_t initial_a_fd_count = a->fd_count;
  a->fd_count = 0;
  append_error(
      &error,
      add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
                          b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
      err_desc);
  append_error(
      &error,
      add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
                          "merge_b2a", a->fds, &a->fd_count),
      err_desc);
  if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
    a->pollset_capacity =
        GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
    a->pollsets = static_cast<grpc_pollset**>(
        gpr_realloc(a->pollsets, a->pollset_capacity * sizeof(*a->pollsets)));
  }
  if (b->pollset_count > 0) {
    memcpy(a->pollsets + a->pollset_count, b->pollsets,
           b->pollset_count * sizeof(*b->pollsets));
  }
  a->pollset_count += b->pollset_count;
  gpr_free(b->fds);
  gpr_free(b->pollsets);
  b->fds = nullptr;
  b->pollsets = nullptr;
  b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
  gpr_mu_unlock(&a->mu);
  gpr_mu_unlock(&b->mu);
}

static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                        grpc_pollset_set* item) {}

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),
    true,

    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_notify_on_error,
    fd_become_readable,
    fd_become_writable,
    fd_has_errors,
    fd_is_shutdown,

    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    pollset_set_create,
    pollset_set_unref,  // destroy ==> unref 1 public ref
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,

    shutdown_engine,
};

const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool explicitly_requested) {
  if (!grpc_has_wakeup_fd()) {
    gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
    return nullptr;
  }

  if (!grpc_is_epollexclusive_available()) {
    gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
    return nullptr;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();
    return nullptr;
  }

  return &vtable;
}
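// Note: this initializer is invoked from the posix event-engine selection
// logic in ev_posix.cc; returning nullptr makes iomgr fall back to the next
// available engine (selection can be influenced via the GRPC_POLL_STRATEGY
// environment variable).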

#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#if defined(GRPC_POSIX_SOCKET_EV_EPOLLEX)
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
   epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool explicitly_requested) {
  return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLLEX) */

#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */