/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL_CREATE1

#include "src/core/lib/iomgr/ev_epollex_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <string>

#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"

#include <grpc/support/alloc.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

// debug aid: create workers on the heap (allows asan to spot
// use-after-destruction)
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1

#define MAX_EPOLL_EVENTS 100
#define MAX_FDS_IN_CACHE 32

grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(false,
                                                           "pollable_refcount");

/*******************************************************************************
 * pollable Declarations
 */

typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;

typedef struct pollable pollable;

/// A pollable is something that can be polled: it has an epoll set to poll on,
/// and a wakeup fd for kicks.
/// There are three broad types:
///  - PO_EMPTY - the empty pollable, used before file descriptors are added to
///               a pollset
///  - PO_FD - a pollable containing only one FD - used to optimize single-fd
///            pollsets (which are common with synchronous api usage)
///  - PO_MULTI - a pollable containing many fds
struct pollable {
  pollable_type type;  // immutable
  grpc_core::RefCount refs;

  int epfd;
  grpc_wakeup_fd wakeup;

  // The following are relevant only for type PO_FD
  grpc_fd* owner_fd;       // Set to the owner_fd if the type is PO_FD
  gpr_mu owner_orphan_mu;  // Synchronizes access to owner_orphaned field
  bool owner_orphaned;     // Is the owner fd orphaned

  grpc_pollset_set* pollset_set;
  pollable* next;
  pollable* prev;

  gpr_mu mu;
  grpc_pollset_worker* root_worker;

  int event_cursor;
  int event_count;
  struct epoll_event events[MAX_EPOLL_EVENTS];
};
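
// A sketch of the life of a pollset's active pollable (see
// pollset_add_fd_locked below); PO_MULTI is terminal -- a pollset never
// transitions back to PO_FD or PO_EMPTY:
//
//   PO_EMPTY --first fd added--> PO_FD --second fd added, or pollset
//   joins a pollset_set--> PO_MULTI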

static const char* pollable_type_string(pollable_type t) {
  switch (t) {
    case PO_MULTI:
      return "pollset";
    case PO_FD:
      return "fd";
    case PO_EMPTY:
      return "empty";
  }
  return "<invalid>";
}

static std::string pollable_desc(pollable* p) {
  return absl::StrFormat("type=%s epfd=%d wakeup=%d",
                         pollable_type_string(p->type), p->epfd,
                         p->wakeup.read_fd);
}

/// Shared empty pollable - used by pollset to poll on until the first fd is
/// added
static pollable* g_empty_pollable;

static grpc_error* pollable_create(pollable_type type, pollable** p);
static pollable* pollable_ref(pollable* p,
                              const grpc_core::DebugLocation& dbg_loc,
                              const char* reason) {
  p->refs.Ref(dbg_loc, reason);
  return p;
}
static void pollable_unref(pollable* p, const grpc_core::DebugLocation& dbg_loc,
                           const char* reason) {
  if (p == nullptr) return;
  if (GPR_UNLIKELY(p->refs.Unref(dbg_loc, reason))) {
    GRPC_FD_TRACE("pollable_unref: Closing epfd: %d", p->epfd);
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
    gpr_mu_destroy(&p->owner_orphan_mu);
    gpr_mu_destroy(&p->mu);
    gpr_free(p);
  }
}
#define POLLABLE_REF(p, r) pollable_ref((p), DEBUG_LOCATION, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), DEBUG_LOCATION, (r))

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  grpc_fd(int fd, const char* name, bool track_err)
      : fd(fd), track_err(track_err) {
    gpr_mu_init(&orphan_mu);
    gpr_mu_init(&pollable_mu);
    read_closure.InitEvent();
    write_closure.InitEvent();
    error_closure.InitEvent();

    std::string fd_name = absl::StrCat(name, " fd=", fd);
    grpc_iomgr_register_object(&iomgr_object, fd_name.c_str());
#ifndef NDEBUG
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name.c_str());
    }
#endif
  }

  // This is really the dtor, but the poller threads waking up from
  // epoll_wait() may access the (read|write|error)_closure after destruction.
  // Since the object will be added to the free pool, this behavior is
  // not going to cause issues, except spurious events if the FD is reused
  // while the race happens.
  void destroy() {
    grpc_iomgr_unregister_object(&iomgr_object);

    POLLABLE_UNREF(pollable_obj, "fd_pollable");

    // To clear out the allocations of pollset_fds, we need to swap its
    // contents with a newly-constructed (and soon to be destructed) local
    // variable of its same type. This is because InlinedVector::clear is _not_
    // guaranteed to actually free up allocations and this is important since
    // this object doesn't have a conventional destructor.
    absl::InlinedVector<int, 1> pollset_fds_tmp;
    pollset_fds_tmp.swap(pollset_fds);

    gpr_mu_destroy(&pollable_mu);
    gpr_mu_destroy(&orphan_mu);

    read_closure.DestroyEvent();
    write_closure.DestroyEvent();
    error_closure.DestroyEvent();

    invalidate();
  }

#ifndef NDEBUG
  /* Since an fd is never really destroyed (i.e. gpr_free() is not called),
   * it can lead to hard-to-debug cases where fd fields are accessed even
   * after calling fd_destroy(). The following invalidates fd fields to make
   * catching such errors easier. */
  void invalidate() {
    fd = -1;
    gpr_atm_no_barrier_store(&refst, -1);
    memset(&orphan_mu, -1, sizeof(orphan_mu));
    memset(&pollable_mu, -1, sizeof(pollable_mu));
    pollable_obj = nullptr;
    on_done_closure = nullptr;
    memset(&iomgr_object, -1, sizeof(iomgr_object));
    track_err = false;
  }
#else
  void invalidate() {}
#endif

  int fd;

  // refst format:
  //   bit 0    : 1=Active / 0=Orphaned
  //   bits 1-n : refcount
  // Ref/Unref by two to avoid altering the orphaned bit
  gpr_atm refst = 1;
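  // Worked example of the scheme above: a fresh fd starts at refst == 1
  // (active bit set, zero refs); REF_BY(fd, 2) -> 3 and UNREF_BY(fd, 2) -> 1
  // never touch bit 0. fd_orphan does REF_BY(fd, 1) followed by
  // UNREF_BY(fd, 2): a net -1 that clears the active bit while dropping a
  // ref, so an unreferenced orphaned fd hits 0 and is returned to the
  // freelist (see unref_by).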

  gpr_mu orphan_mu;

  // Protects pollable_obj and pollset_fds.
  gpr_mu pollable_mu;
  absl::InlinedVector<int, 1> pollset_fds;  // Used in PO_MULTI.
  pollable* pollable_obj = nullptr;         // Used in PO_FD.

  grpc_core::LockfreeEvent read_closure;
  grpc_core::LockfreeEvent write_closure;
  grpc_core::LockfreeEvent error_closure;

  struct grpc_fd* freelist_next = nullptr;
  grpc_closure* on_done_closure = nullptr;

  grpc_iomgr_object iomgr_object;

  // Do we need to track EPOLLERR events separately?
  bool track_err;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

struct pwlink {
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
};
typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;

struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
#ifndef NDEBUG
  // debug aid: which thread started this worker
  pid_t originator;
#endif
  gpr_cv cv;
  grpc_pollset* pollset;
  pollable* pollable_obj;

  pwlink links[PWLINK_COUNT];
};

struct grpc_pollset {
  gpr_mu mu;
  gpr_atm worker_count;
  gpr_atm active_pollable_type;
  pollable* active_pollable;
  bool kicked_without_poller;
  grpc_closure* shutdown_closure;
  bool already_shutdown;
  grpc_pollset_worker* root_worker;
  int containing_pollset_set_count;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  grpc_core::RefCount refs;
  gpr_mu mu;
  grpc_pollset_set* parent;

  size_t pollset_count;
  size_t pollset_capacity;
  grpc_pollset** pollsets;

  size_t fd_count;
  size_t fd_capacity;
  grpc_fd** fds;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}
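
// Typical usage (a sketch; step_one/step_two are hypothetical helpers
// returning grpc_error*): run every step, folding any failures into a single
// composite error created under one description, and check once at the end:
//
//   grpc_error* error = GRPC_ERROR_NONE;
//   append_error(&error, step_one(), "init");
//   append_error(&error, step_two(), "init");
//   if (error != GRPC_ERROR_NONE) { /* log or propagate */ }
//
// The boolean result ("did this step succeed?") additionally lets callers
// gate follow-up work, as get_fd_pollable does below.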

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc
 * performance but instead so that implementations with multiple threads in
 * (for example) epoll_wait deal with the race between pollset removal and
 * incoming poll notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at
 * least without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
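
/* A concrete instance of the race (a sketch): thread A orphans a grpc_fd and
 * the struct is recycled through the freelist for a new connection. Thread B,
 * already blocked in epoll_wait() when the orphan happened, then receives a
 * stale event whose data.ptr still refers to that struct. Because the memory
 * is never gpr_free()d, B's SetReady() lands on a live (reused) grpc_fd and
 * degrades into a spurious wakeup instead of a use-after-free. */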

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                   int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) \
  do {                        \
    ref_by(fd, n);            \
    (void)(reason);           \
  } while (0)
#define UNREF_BY(fd, n, reason) \
  do {                          \
    unref_by(fd, n);            \
    (void)(reason);             \
  } while (0)
static void ref_by(grpc_fd* fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

/* Uninitialize and add to the freelist */
static void fd_destroy(void* arg, grpc_error* /*error*/) {
  grpc_fd* fd = static_cast<grpc_fd*>(arg);
  fd->destroy();

  /* Add the fd to the freelist */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

#ifndef NDEBUG
static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                     int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
  }
#else
static void unref_by(grpc_fd* fd, int n) {
#endif
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    grpc_core::ExecCtx::Run(
        DEBUG_LOCATION,
        GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
        GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  // TODO(guantaol): We don't have a reasonable explanation about this
  // lock()/unlock() pattern. It can be a valid barrier if there is at most one
  // pending lock() at this point. Otherwise, there is still a possibility of
  // use-after-free race. Need to reason about the code and/or clean it up.
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
  }

  return new (new_fd) grpc_fd(fd, name, track_err);
}

static int fd_wrapped_fd(grpc_fd* fd) {
  int ret_fd = fd->fd;
  return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      const char* reason) {
  bool is_fd_closed = false;

  gpr_mu_lock(&fd->orphan_mu);

  // Get the fd->pollable_obj and set the owner_orphaned on that pollable to
  // true so that the pollable will no longer access its owner_fd field.
  gpr_mu_lock(&fd->pollable_mu);
  pollable* pollable_obj = fd->pollable_obj;

  if (pollable_obj) {
    gpr_mu_lock(&pollable_obj->owner_orphan_mu);
    pollable_obj->owner_orphaned = true;
  }

  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != nullptr) {
    // Remove the FD from all epoll sets before releasing it.
    // Otherwise, we will receive epoll events after we release the FD.
    epoll_event ev_fd;
    memset(&ev_fd, 0, sizeof(ev_fd));
    if (pollable_obj != nullptr) {  // For PO_FD.
      epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    }
    for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {  // For PO_MULTI.
      const int epfd = fd->pollset_fds[i];
      epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    }
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  // TODO(sreek): handle fd removal (where is_fd_closed=false)
  if (!is_fd_closed) {
    GRPC_FD_TRACE("epoll_fd %p (%d) was orphaned but not closed.", fd, fd->fd);
  }

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  grpc_core::ExecCtx::Run(DEBUG_LOCATION, fd->on_done_closure, GRPC_ERROR_NONE);

  if (pollable_obj) {
    gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
  }

  gpr_mu_unlock(&fd->pollable_mu);
  gpr_mu_unlock(&fd->orphan_mu);

  UNREF_BY(fd, 2, reason); /* Drop the reference */
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure.IsShutdown();
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
    if (shutdown(fd->fd, SHUT_RDWR)) {
      if (errno != ENOTCONN) {
        gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                grpc_fd_wrapped_fd(fd), errno);
      }
    }
    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure.NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure.NotifyOn(closure);
}

static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure.NotifyOn(closure);
}

static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLockForGprMu lock(&fd->pollable_mu);
  for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {
    if (fd->pollset_fds[i] == epfd) {
      return true;
    }
  }
  return false;
}

static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLockForGprMu lock(&fd->pollable_mu);
  fd->pollset_fds.push_back(epfd);
}

/*******************************************************************************
 * Pollable Definitions
 */

static grpc_error* pollable_create(pollable_type type, pollable** p) {
  *p = nullptr;

  int epfd = epoll_create1(EPOLL_CLOEXEC);
  if (epfd == -1) {
    return GRPC_OS_ERROR(errno, "epoll_create1");
  }
  GRPC_FD_TRACE("Pollable_create: created epfd: %d (type: %d)", epfd, type);
  *p = static_cast<pollable*>(gpr_malloc(sizeof(**p)));
  grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
  if (err != GRPC_ERROR_NONE) {
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). wakeupfd_init error",
        epfd, type);
    close(epfd);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  ev.data.ptr =
      reinterpret_cast<void*>(1 | reinterpret_cast<intptr_t>(&(*p)->wakeup));
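  // Bit 0 of data.ptr is set so that pollable_process_events can tell this
  // wakeup fd apart from a grpc_fd*; heap alignment guarantees the low bits
  // of a real pointer are zero.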
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
    err = GRPC_OS_ERROR(errno, "epoll_ctl");
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). epoll_ctl error", epfd,
        type);
    close(epfd);
    grpc_wakeup_fd_destroy(&(*p)->wakeup);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }

  (*p)->type = type;
  new (&(*p)->refs) grpc_core::RefCount(
      1, GRPC_TRACE_FLAG_ENABLED(grpc_trace_pollable_refcount)
             ? "pollable_refcount"
             : nullptr);
  gpr_mu_init(&(*p)->mu);
  (*p)->epfd = epfd;
  (*p)->owner_fd = nullptr;
  gpr_mu_init(&(*p)->owner_orphan_mu);
  (*p)->owner_orphaned = false;
  (*p)->pollset_set = nullptr;
  (*p)->next = (*p)->prev = *p;
  (*p)->root_worker = nullptr;
  (*p)->event_cursor = 0;
  (*p)->event_count = 0;
  return GRPC_ERROR_NONE;
}

static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollable_add_fd";
  const int epfd = p->epfd;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
  }

  struct epoll_event ev_fd;
  ev_fd.events =
      static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
  /* Use the second least significant bit of ev_fd.data.ptr to store track_err
   * to avoid synchronization issues when accessing it after receiving an
   * event. Accessing fd there would be a data race because the fd might have
   * been returned to the free list at that point. */
  ev_fd.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(fd) |
                                           (fd->track_err ? 2 : 0));
  GRPC_STATS_INC_SYSCALL_EPOLL_CTL();
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
    switch (errno) {
      case EEXIST:
        break;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }

  return error;
}
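
// A minimal sketch (illustrative only; not used by the engine) of the tag
// bits stored in epoll data.ptr and decoded in pollable_process_events:
// bit 0 marks a wakeup fd, bit 1 carries track_err, and allocation alignment
// keeps both bits clear in real pointers.
//
//   intptr_t tagged = reinterpret_cast<intptr_t>(fd) | (track_err ? 2 : 0);
//   bool is_wakeup_fd = (tagged & 1) != 0;  // always false for a grpc_fd
//   bool fd_track_err = (tagged & 2) != 0;
//   grpc_fd* decoded = reinterpret_cast<grpc_fd*>(tagged & ~2);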

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  return pollable_create(PO_EMPTY, &g_empty_pollable);
}

static void pollset_global_shutdown(void) {
  POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
            "rw=%p (target:NULL) cpsc=%d (target:0)",
            pollset, pollset->active_pollable, pollset->shutdown_closure,
            pollset->root_worker, pollset->containing_pollset_set_count);
  }
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->containing_pollset_set_count == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    grpc_core::ExecCtx::Run(DEBUG_LOCATION, pollset->shutdown_closure,
                            GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
    pollset->already_shutdown = true;
  }
}

/* pollset->mu must be held before calling this function,
 * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not
 * be held */
static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("kick_one_worker", 0);
  GPR_ASSERT(specific_worker != nullptr);
  pollable* p = specific_worker->pollable_obj;
  grpc_core::MutexLockForGprMu lock(&p->mu);
  if (specific_worker->kicked) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
    }
    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
    return GRPC_ERROR_NONE;
  }
  if (gpr_tls_get(&g_current_thread_worker) ==
      reinterpret_cast<intptr_t>(specific_worker)) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
    }
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  }
  if (specific_worker == p->root_worker) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
    }
    specific_worker->kicked = true;
    grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
    return error;
  }
  if (specific_worker->initialized_cv) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
    }
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
  // we can get here during end_worker after removing specific_worker from the
  // pollable list but before removing it from the pollset list
  return GRPC_ERROR_NONE;
}

static grpc_error* pollset_kick(grpc_pollset* pollset,
                                grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("pollset_kick", 0);
  GRPC_STATS_INC_POLLSET_KICK();
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p kick %p tls_pollset=%" PRIxPTR " tls_worker=%" PRIxPTR
            " pollset.root_worker=%p",
            pollset, specific_worker, gpr_tls_get(&g_current_thread_pollset),
            gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
  }
  if (specific_worker == nullptr) {
    if (gpr_tls_get(&g_current_thread_pollset) !=
        reinterpret_cast<intptr_t>(pollset)) {
      if (pollset->root_worker == nullptr) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
        }
        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        // We've been asked to kick a poller, but we haven't been told which
        // one ... any will do.
        // We look at the pollset worker list because:
        //  1. the pollable list may include workers from other pollers, so
        //     we'd need to do an O(N) search
        //  2. we'd additionally need to take the pollable lock, which we've
        //     so far avoided
        // Now, we would prefer to wake a poller in cv_wait, and not in
        // epoll_wait (since the latter would imply the need to do an
        // additional wakeup).
        // We know that if a worker is at the root of a pollable, it's
        // (likely) also the root of a pollset, and we know that if a worker
        // is NOT at the root of a pollset, it's (likely) not at the root of
        // a pollable, so we take our chances and choose the SECOND worker
        // enqueued against the pollset as a worker that's likely to be in
        // cv_wait.
        return kick_one_worker(
            pollset->root_worker->links[PWLINK_POLLSET].next);
      }
    } else {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
      }
      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
      return GRPC_ERROR_NONE;
    }
  } else {
    return kick_one_worker(specific_worker);
  }
}

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  const char* err_desc = "pollset_kick_all";
  grpc_pollset_worker* w = pollset->root_worker;
  if (w != nullptr) {
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      append_error(&error, kick_one_worker(w), err_desc);
      w = w->links[PWLINK_POLLSET].next;
    } while (w != pollset->root_worker);
  }
  return error;
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  gpr_atm_no_barrier_store(&pollset->worker_count, 0);
  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
  pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
  pollset->kicked_without_poller = false;
  pollset->shutdown_closure = nullptr;
  pollset->already_shutdown = false;
  pollset->root_worker = nullptr;
  pollset->containing_pollset_set_count = 0;
  *mu = &pollset->mu;
}

static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return static_cast<int>(delta);
  }
}
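// For example: with Now() == 1000, a deadline of 1250 becomes a 250ms epoll
// timeout, a deadline of 900 (already past) becomes 0 (poll without
// blocking), and GRPC_MILLIS_INF_FUTURE becomes -1 (block indefinitely).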

static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }

static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }

/* Get the pollable_obj attached to this fd. If none is attached, create a new
 * pollable object (of type PO_FD), attach it to the fd and return it.
 *
 * Note that if a pollable object is already attached to the fd, it may be of
 * either PO_FD or PO_MULTI type */
static grpc_error* get_fd_pollable(grpc_fd* fd, pollable** p) {
  gpr_mu_lock(&fd->pollable_mu);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "get_fd_pollable";
  if (fd->pollable_obj == nullptr) {
    if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
                     err_desc)) {
      fd->pollable_obj->owner_fd = fd;
      if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
                        err_desc)) {
        POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
        fd->pollable_obj = nullptr;
      }
    }
  }
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(fd->pollable_obj != nullptr);
    *p = POLLABLE_REF(fd->pollable_obj, "pollset");
  } else {
    GPR_ASSERT(fd->pollable_obj == nullptr);
    *p = nullptr;
  }
  gpr_mu_unlock(&fd->pollable_mu);
  return error;
}

/* pollset->mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
}

static grpc_error* pollable_process_events(grpc_pollset* pollset,
                                           pollable* pollable_obj,
                                           bool drain) {
  GPR_TIMER_SCOPE("pollable_process_events", 0);
  static const char* err_desc = "pollset_process_events";
  // Use a simple heuristic to determine how many fd events to process
  // per loop iteration: (pending events / active workers).
  int handle_count = 1;
  int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count);
  GPR_ASSERT(worker_count > 0);
  handle_count =
      (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count;
  if (handle_count == 0) {
    handle_count = 1;
  }
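  // e.g. 60 pending events being drained by 4 active workers => each worker
  // processes 15 events per wakeup, spreading the load across pollers.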
  grpc_error* error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < handle_count) &&
                  pollable_obj->event_cursor != pollable_obj->event_count;
       i++) {
    int n = pollable_obj->event_cursor++;
    struct epoll_event* ev = &pollable_obj->events[n];
    void* data_ptr = ev->data.ptr;
    if (1 & reinterpret_cast<intptr_t>(data_ptr)) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
      }
      append_error(
          &error,
          grpc_wakeup_fd_consume_wakeup(reinterpret_cast<grpc_wakeup_fd*>(
              ~static_cast<intptr_t>(1) &
              reinterpret_cast<intptr_t>(data_ptr))),
          err_desc);
    } else {
      grpc_fd* fd = reinterpret_cast<grpc_fd*>(
          reinterpret_cast<intptr_t>(data_ptr) & ~2);
      bool track_err = reinterpret_cast<intptr_t>(data_ptr) & 2;
      bool cancel = (ev->events & EPOLLHUP) != 0;
      // renamed from 'error' to avoid shadowing the grpc_error* above
      bool has_error = (ev->events & EPOLLERR) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      bool err_fallback = has_error && !track_err;

      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO,
                "PS:%p got fd %p: cancel=%d read=%d "
                "write=%d",
                pollset, fd, cancel, read_ev, write_ev);
      }
      if (has_error && !err_fallback) {
        fd_has_errors(fd);
      }
      if (read_ev || cancel || err_fallback) {
        fd_become_readable(fd);
      }
      if (write_ev || cancel || err_fallback) {
        fd_become_writable(fd);
      }
    }
  }

  return error;
}

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_pollset* pollset) {
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollable_epoll", 0);
  int timeout = poll_deadline_to_millis_timeout(deadline);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p,
            pollable_desc(p).c_str(), timeout);
  }

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
  }

  p->event_cursor = 0;
  p->event_count = r;

  return GRPC_ERROR_NONE;
}

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker** root_worker,
                          grpc_pollset_worker* worker, pwlinks link) {
  if (*root_worker == nullptr) {
    *root_worker = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root_worker;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}
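
// Note: worker_insert links new workers in just before the root, so the ring
// preserves arrival order (root, second, third, ...) and
// root->links[link].next is always the second-oldest worker -- exactly the
// one pollset_kick picks when guessing at a cv-waiting worker.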

/* worker_remove returns WRR_NEW_ROOT iff the root changed */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
                                          grpc_pollset_worker* worker,
                                          pwlinks link) {
  if (worker == *root_worker) {
    if (worker == worker->links[link].next) {
      *root_worker = nullptr;
      return WRR_EMPTIED;
    } else {
      *root_worker = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return WRR_NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return WRR_REMOVED;
  }
}

/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  bool do_poll =
      (pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable_obj =
      POLLABLE_REF(pollset->active_pollable, "pollset_worker");
  worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (!worker_insert(&worker->pollable_obj->root_worker, worker,
                     PWLINK_POLLABLE)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    gpr_mu_unlock(&pollset->mu);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
        worker->pollable_obj->root_worker != worker) {
      gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable_obj, worker,
              poll_deadline_to_millis_timeout(deadline));
    }
    while (do_poll && worker->pollable_obj->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
                 worker->pollable_obj->root_worker != worker) {
        gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable_obj, worker);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  } else {
    gpr_mu_unlock(&pollset->mu);
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);

  return do_poll;
}

static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** /*worker_hdl*/) {
  GPR_TIMER_SCOPE("end_worker", 0);
  gpr_mu_lock(&pollset->mu);
  gpr_mu_lock(&worker->pollable_obj->mu);
  switch (worker_remove(&worker->pollable_obj->root_worker, worker,
                        PWLINK_POLLABLE)) {
    case WRR_NEW_ROOT: {
      // wakeup new poller
      grpc_pollset_worker* new_root = worker->pollable_obj->root_worker;
      GPR_ASSERT(new_root->initialized_cv);
      gpr_cv_signal(&new_root->cv);
      break;
    }
    case WRR_EMPTIED:
      if (pollset->active_pollable != worker->pollable_obj) {
        // pollable no longer being polled: flush events
        pollable_process_events(pollset, worker->pollable_obj, true);
      }
      break;
    case WRR_REMOVED:
      break;
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);
  POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
  if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
      WRR_EMPTIED) {
    pollset_maybe_finish_shutdown(pollset);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1);
}

#ifndef NDEBUG
static long sys_gettid(void) { return syscall(__NR_gettid); }
#endif

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* pollset,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  grpc_pollset_worker* worker =
      (grpc_pollset_worker*)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
#else
  grpc_pollset_worker worker;
#define WORKER_PTR (&worker)
#endif
#ifndef NDEBUG
  WORKER_PTR->originator = sys_gettid();
#endif
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
            " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
            deadline, pollset->kicked_without_poller,
            pollset->active_pollable);
  }
  static const char* err_desc = "pollset_work";
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
  } else {
    if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
      gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
      gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
      if (WORKER_PTR->pollable_obj->event_cursor ==
          WORKER_PTR->pollable_obj->event_count) {
        append_error(&error,
                     pollable_epoll(WORKER_PTR->pollable_obj, deadline),
                     err_desc);
      }
      append_error(
          &error,
          pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
          err_desc);
      grpc_core::ExecCtx::Get()->Flush();
      gpr_tls_set(&g_current_thread_pollset, 0);
      gpr_tls_set(&g_current_thread_worker, 0);
    }
    end_worker(pollset, WORKER_PTR, worker_hdl);
  }
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  gpr_free(worker);
#endif
#undef WORKER_PTR
  return error;
}

static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
    grpc_pollset* pollset, grpc_fd* fd) {
  static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p add fd %p (%d); transition pollable from empty to fd",
            pollset, fd, fd->fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  append_error(&error, get_fd_pollable(fd, &pollset->active_pollable),
               err_desc);
  return error;
}

static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
    grpc_pollset* pollset, grpc_fd* and_add_fd) {
  static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(
        GPR_INFO,
        "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
        pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
        pollset->active_pollable->owner_fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  if (append_error(&error,
                   pollable_create(PO_MULTI, &pollset->active_pollable),
                   err_desc)) {
    append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
                 err_desc);
    if (and_add_fd != nullptr) {
      append_error(&error,
                   pollable_add_fd(pollset->active_pollable, and_add_fd),
                   err_desc);
    }
  }
  return error;
}

/* Expects pollset->mu to be locked by the caller */
static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      /* empty pollable --> single fd pollable */
      error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        error =
            pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      } else {
        /* fd --> multipoller */
        error =
            pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
      }
      gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      break;
    case PO_MULTI:
      error = pollable_add_fd(pollset->active_pollable, fd);
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
  } else {
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    POLLABLE_UNREF(po_at_start, "pollset_add_fd");
  }
  return error;
}

static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
                                                   pollable** pollable_obj) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      POLLABLE_UNREF(pollset->active_pollable, "pollset");
      error = pollable_create(PO_MULTI, &pollset->active_pollable);
      /* Any workers currently polling on this pollset must now be woken up
       * so that they can pick up the new active_pollable */
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO,
                "PS:%p active pollable transition from empty to multi",
                pollset);
      }
      static const char* err_desc =
          "pollset_as_multipollable_locked: empty -> multi";
      append_error(&error, pollset_kick_all(pollset), err_desc);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        // Unlock before Unref'ing the pollable
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
        POLLABLE_UNREF(pollset->active_pollable, "pollset");
        error = pollable_create(PO_MULTI, &pollset->active_pollable);
      } else {
        error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
                                                                    nullptr);
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      }
      break;
    case PO_MULTI:
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    *pollable_obj = nullptr;
  } else {
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
    POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
  }
  return error;
}

static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_add_fd", 0);

  // We never transition from PO_MULTI to other modes (i.e., PO_FD or
  // PO_EMPTY) and, thus, it is safe to simply store and check whether the FD
  // has already been added to the active pollable previously.
  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
      fd_has_pollset(fd, pollset)) {
    return;
  }

  grpc_core::MutexLockForGprMu lock(&pollset->mu);
  grpc_error* error = pollset_add_fd_locked(pollset, fd);

  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
    fd_add_pollset(fd, pollset);
  }

  GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}

/*******************************************************************************
 * Pollset-set Definitions
 */

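// Follow parent pointers to the root ("adam") of a pollset_set family tree,
// locking hand-over-hand along the way; on return the root's mu is held by
// the caller.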
static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
  gpr_mu_lock(&pss->mu);
  while (pss->parent != nullptr) {
    gpr_mu_unlock(&pss->mu);
    pss = pss->parent;
    gpr_mu_lock(&pss->mu);
  }
  return pss;
}

static grpc_pollset_set* pollset_set_create(void) {
  grpc_pollset_set* pss =
      static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pss)));
  gpr_mu_init(&pss->mu);
  new (&pss->refs) grpc_core::RefCount();
  return pss;
}

static void pollset_set_unref(grpc_pollset_set* pss) {
  if (pss == nullptr) return;
  if (GPR_LIKELY(!pss->refs.Unref())) return;
  pollset_set_unref(pss->parent);
  gpr_mu_destroy(&pss->mu);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    gpr_mu_lock(&pss->pollsets[i]->mu);
    if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
      pollset_maybe_finish_shutdown(pss->pollsets[i]);
    }
    gpr_mu_unlock(&pss->pollsets[i]->mu);
  }
  for (size_t i = 0; i < pss->fd_count; i++) {
    UNREF_BY(pss->fds[i], 2, "pollset_set");
  }
  gpr_free(pss->pollsets);
  gpr_free(pss->fds);
  gpr_free(pss);
}

static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  pss = pss_lock_adam(pss);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    append_error(&error,
                 pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
                 err_desc);
  }
  if (pss->fd_count == pss->fd_capacity) {
    pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
    pss->fds = static_cast<grpc_fd**>(
        gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds)));
  }
  REF_BY(fd, 2, "pollset_set");
  pss->fds[pss->fd_count++] = fd;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->fd_count; i++) {
    if (pss->fds[i] == fd) {
      UNREF_BY(fd, 2, "pollset_set");
      break;
    }
  }
  GPR_ASSERT(i != pss->fd_count);
  for (; i < pss->fd_count - 1; i++) {
    pss->fds[i] = pss->fds[i + 1];
  }
  pss->fd_count--;
  gpr_mu_unlock(&pss->mu);
}

static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->pollset_count; i++) {
    if (pss->pollsets[i] == ps) {
      break;
    }
  }
  GPR_ASSERT(i != pss->pollset_count);
  for (; i < pss->pollset_count - 1; i++) {
    pss->pollsets[i] = pss->pollsets[i + 1];
  }
  pss->pollset_count--;
  gpr_mu_unlock(&pss->mu);
  gpr_mu_lock(&ps->mu);
  if (0 == --ps->containing_pollset_set_count) {
    pollset_maybe_finish_shutdown(ps);
  }
  gpr_mu_unlock(&ps->mu);
}

// add all fds to pollsets, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
                                       grpc_pollset** pollsets,
                                       size_t pollset_count,
                                       const char* err_desc, grpc_fd** out_fds,
                                       size_t* out_fd_count) {
  GPR_TIMER_SCOPE("add_fds_to_pollsets", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  for (size_t i = 0; i < fd_count; i++) {
    gpr_mu_lock(&fds[i]->orphan_mu);
    if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
      gpr_mu_unlock(&fds[i]->orphan_mu);
      UNREF_BY(fds[i], 2, "pollset_set");
    } else {
      for (size_t j = 0; j < pollset_count; j++) {
        append_error(&error,
                     pollable_add_fd(pollsets[j]->active_pollable, fds[i]),
                     err_desc);
      }
      gpr_mu_unlock(&fds[i]->orphan_mu);
      out_fds[(*out_fd_count)++] = fds[i];
    }
  }
  return error;
}

static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_pollset";
  pollable* pollable_obj = nullptr;
  gpr_mu_lock(&ps->mu);
  if (!GRPC_LOG_IF_ERROR(err_desc,
                         pollset_as_multipollable_locked(ps, &pollable_obj))) {
    GPR_ASSERT(pollable_obj == nullptr);
    gpr_mu_unlock(&ps->mu);
    return;
  }
  ps->containing_pollset_set_count++;
  gpr_mu_unlock(&ps->mu);
  pss = pss_lock_adam(pss);
  size_t initial_fd_count = pss->fd_count;
  pss->fd_count = 0;
  append_error(&error,
               add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1,
                                   err_desc, pss->fds, &pss->fd_count),
               err_desc);
  if (pss->pollset_count == pss->pollset_capacity) {
    pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
    pss->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
        pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets)));
  }
  pss->pollsets[pss->pollset_count++] = ps;
  gpr_mu_unlock(&pss->mu);
  POLLABLE_UNREF(pollable_obj, "pollset_set");

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_add_pollset_set(grpc_pollset_set* a,
                                        grpc_pollset_set* b) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_pollset_set";
  for (;;) {
    if (a == b) {
      // pollset_set ancestors are the same: nothing to do
      return;
    }
    if (a > b) {
      GPR_SWAP(grpc_pollset_set*, a, b);
    }
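    // Locking in a consistent (address) order ensures that two concurrent
    // merges over the same sets cannot deadlock.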
    gpr_mu* a_mu = &a->mu;
    gpr_mu* b_mu = &b->mu;
    gpr_mu_lock(a_mu);
    gpr_mu_lock(b_mu);
    if (a->parent != nullptr) {
      a = a->parent;
    } else if (b->parent != nullptr) {
      b = b->parent;
    } else {
      break;  // exit loop, both pollset_sets locked
    }
    gpr_mu_unlock(a_mu);
    gpr_mu_unlock(b_mu);
  }
  // try to do the least copying possible
  // TODO(sreek): there's probably a better heuristic here
  const size_t a_size = a->fd_count + a->pollset_count;
  const size_t b_size = b->fd_count + b->pollset_count;
  if (b_size > a_size) {
    GPR_SWAP(grpc_pollset_set*, a, b);
  }
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
  }
  a->refs.Ref();
  b->parent = a;
  if (a->fd_capacity < a->fd_count + b->fd_count) {
    a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
    a->fds = static_cast<grpc_fd**>(
        gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds)));
  }
  size_t initial_a_fd_count = a->fd_count;
  a->fd_count = 0;
  append_error(
      &error,
      add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
                          b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
      err_desc);
  append_error(
      &error,
      add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
                          "merge_b2a", a->fds, &a->fd_count),
      err_desc);
  if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
    a->pollset_capacity =
        GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
    a->pollsets = static_cast<grpc_pollset**>(
        gpr_realloc(a->pollsets, a->pollset_capacity * sizeof(*a->pollsets)));
  }
  if (b->pollset_count > 0) {
    memcpy(a->pollsets + a->pollset_count, b->pollsets,
           b->pollset_count * sizeof(*b->pollsets));
  }
  a->pollset_count += b->pollset_count;
  gpr_free(b->fds);
  gpr_free(b->pollsets);
  b->fds = nullptr;
  b->pollsets = nullptr;
  b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
  gpr_mu_unlock(&a->mu);
  gpr_mu_unlock(&b->mu);
}

static void pollset_set_del_pollset_set(grpc_pollset_set* /*bag*/,
                                        grpc_pollset_set* /*item*/) {}

/*******************************************************************************
 * Event engine binding
 */

static bool is_any_background_poller_thread(void) { return false; }

static void shutdown_background_closure(void) {}

static bool add_closure_to_background_poller(grpc_closure* /*closure*/,
                                             grpc_error* /*error*/) {
  return false;
}

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),
    true,
    false,

    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_notify_on_error,
    fd_become_readable,
    fd_become_writable,
    fd_has_errors,
    fd_is_shutdown,

    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    pollset_set_create,
    pollset_set_unref,  // destroy ==> unref 1 public ref
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,

    is_any_background_poller_thread,
    shutdown_background_closure,
    shutdown_engine,
    add_closure_to_background_poller,
};

const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool /*explicitly_requested*/) {
  if (!grpc_has_wakeup_fd()) {
    gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
    return nullptr;
  }

  if (!grpc_is_epollexclusive_available()) {
    gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
    return nullptr;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();
    return nullptr;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */
#if defined(GRPC_POSIX_SOCKET_EV_EPOLLEX)
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
   epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool /*explicitly_requested*/) {
  return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLLEX) */

#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */