/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/combiner.h"

#include <assert.h>
#include <inttypes.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gprpp/mpscq.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/profiling/timers.h"

grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");

#define GRPC_COMBINER_TRACE(fn)          \
  do {                                   \
    if (grpc_combiner_trace.enabled()) { \
      fn;                                \
    }                                    \
  } while (0)

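// The low bit of lock->state flags that the combiner has not yet been
// orphaned (i.e. grpc_combiner_unref has not dropped the last ref); the
// remaining bits count the closures currently queued on the combiner.
// Each queued closure therefore adds STATE_ELEM_COUNT_LOW_BIT (2) to state,
// so state == 1 means "alive, idle" and state == 0 means "safe to destroy".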
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* closure,
                          grpc_error* error);
static void combiner_finally_exec(grpc_core::Combiner* lock,
                                  grpc_closure* closure, grpc_error* error);

static void offload(void* arg, grpc_error* error);

grpc_core::Combiner* grpc_combiner_create(void) {
  grpc_core::Combiner* lock = new grpc_core::Combiner();
  gpr_ref_init(&lock->refs, 1);
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  grpc_closure_list_init(&lock->final_list);
  GRPC_CLOSURE_INIT(&lock->offload, offload, lock, nullptr);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
  return lock;
}
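
// A minimal usage sketch, assuming the GRPC_COMBINER_UNREF convenience macro
// declared in combiner.h; my_cb/my_arg are hypothetical caller code:
//
//   grpc_core::Combiner* combiner = grpc_combiner_create();
//   // Closures passed to Run() execute serially, one at a time, even when
//   // Run() is called concurrently from many threads.
//   combiner->Run(GRPC_CLOSURE_CREATE(my_cb, my_arg, nullptr),
//                 GRPC_ERROR_NONE);
//   GRPC_COMBINER_UNREF(combiner, "done");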

static void really_destroy(grpc_core::Combiner* lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  delete lock;
}

static void start_destroy(grpc_core::Combiner* lock) {
  gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
  if (old_state == 1) {
    really_destroy(lock);
  }
}

#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)                                  \
  if (grpc_combiner_trace.enabled()) {                                      \
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,                             \
            "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op),         \
            gpr_atm_no_barrier_load(&lock->refs.count),                     \
            gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason);  \
  }
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif

void grpc_combiner_unref(grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
  if (gpr_unref(&lock->refs)) {
    start_destroy(lock);
  }
}

grpc_core::Combiner* grpc_combiner_ref(
    grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM(" REF", 1);
  gpr_ref(&lock->refs);
  return lock;
}

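// Each ExecCtx keeps an intrusive FIFO of the combiners that currently have
// work to run on this thread, threaded through next_combiner_on_this_exec_ctx
// and bounded by combiner_data()->active_combiner / ->last_combiner.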
static void push_last_on_exec_ctx(grpc_core::Combiner* lock) {
  lock->next_combiner_on_this_exec_ctx = nullptr;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
        grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  } else {
    grpc_core::ExecCtx::Get()
        ->combiner_data()
        ->last_combiner->next_combiner_on_this_exec_ctx = lock;
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

static void push_first_on_exec_ctx(grpc_core::Combiner* lock) {
  lock->next_combiner_on_this_exec_ctx =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
  if (lock->next_combiner_on_this_exec_ctx == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

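// Schedule a closure on the combiner. The fetch_add of
// STATE_ELEM_COUNT_LOW_BIT both counts the new item and, when the previous
// state was exactly STATE_UNORPHANED (alive but idle), tells this thread it
// has acquired the lock and must register the combiner with its ExecCtx.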
static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
                          grpc_error* error) {
  GPR_TIMER_SCOPE("combiner.execute", 0);
  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
  gpr_atm last =
      gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                              lock, cl, last));
  if (last == 1) {
    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED();
    GPR_TIMER_MARK("combiner.initiated", 0);
    gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
                             (gpr_atm)grpc_core::ExecCtx::Get());
    // first element on this list: add it to the list of combiner locks
    // executing within this exec_ctx
    push_last_on_exec_ctx(lock);
  } else {
    // there may be a race with setting here: if that happens, we may delay
    // offload for one or two actions, and that's fine
    gpr_atm initiator =
        gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
    if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
      gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
    }
  }
  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
  assert(cl->cb);
  cl->error_data.error = error;
  lock->queue.Push(cl->next_data.mpscq_node.get());
}

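// Advance this ExecCtx's combiner FIFO past the head combiner, clearing
// last_combiner when the list becomes empty.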
static void move_next() {
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
      grpc_core::ExecCtx::Get()
          ->combiner_data()
          ->active_combiner->next_combiner_on_this_exec_ctx;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr;
  }
}

static void offload(void* arg, grpc_error* /*error*/) {
  grpc_core::Combiner* lock = static_cast<grpc_core::Combiner*>(arg);
  push_last_on_exec_ctx(lock);
}

static void queue_offload(grpc_core::Combiner* lock) {
  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
  move_next();
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
  grpc_core::Executor::Run(&lock->offload, GRPC_ERROR_NONE);
}

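// Invoked from the ExecCtx flush loop while combiners remain queued on this
// ExecCtx. Runs at most one batch of work for the active combiner, then
// returns true if combiner work may remain, or false if this ExecCtx has no
// active combiner.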
bool grpc_combiner_continue_exec_ctx() {
  GPR_TIMER_SCOPE("combiner.continue_exec_ctx", 0);

  if (grpc_core::ExecCtx::Get() == nullptr) {
    return false;
  }

  GRPC_COMBINER_TRACE(
      gpr_log(GPR_INFO,
              "grpc_core::ExecCtx::Get() = %p "
              "grpc_core::ExecCtx::Get()->combiner_data() = %p",
              grpc_core::ExecCtx::Get(),
              grpc_core::ExecCtx::Get()->combiner_data()));

  grpc_core::Combiner* lock =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  if (lock == nullptr) {
    return false;
  }

  bool contended =
      gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;

  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_continue_exec_ctx "
                              "contended=%d "
                              "exec_ctx_ready_to_finish=%d "
                              "time_to_execute_final_list=%d",
                              lock, contended,
                              grpc_core::ExecCtx::Get()->IsReadyToFinish(),
                              lock->time_to_execute_final_list));

  // offload only if all the following conditions are true:
  // 1. the combiner is contended and has more than one closure to execute
  // 2. the current execution context needs to finish as soon as possible
  // 3. the current thread is not a worker for any background poller
  // 4. the DEFAULT executor is threaded
  if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() &&
      !grpc_iomgr_is_any_background_poller_thread() &&
      grpc_core::Executor::IsThreadedDefault()) {
    GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
    // this execution context wants to move on: schedule remaining work to be
    // picked up on the executor
    queue_offload(lock);
    return true;
  }

  if (!lock->time_to_execute_final_list ||
      // peek to see if something new has shown up, and execute that with
      // priority
      (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
    grpc_core::MultiProducerSingleConsumerQueue::Node* n = lock->queue.Pop();
    GRPC_COMBINER_TRACE(
        gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
    if (n == nullptr) {
      // queue is in an inconsistent state: use this as a cue that we should
      // go off and do something else for a while (and come back later)
      GPR_TIMER_MARK("delay_busy", 0);
      queue_offload(lock);
      return true;
    }
    GPR_TIMER_SCOPE("combiner.exec1", 0);
    grpc_closure* cl = reinterpret_cast<grpc_closure*>(n);
    grpc_error* cl_err = cl->error_data.error;
#ifndef NDEBUG
    cl->scheduled = false;
#endif
    cl->cb(cl->cb_arg, cl_err);
    GRPC_ERROR_UNREF(cl_err);
  } else {
    grpc_closure* c = lock->final_list.head;
    GPR_ASSERT(c != nullptr);
    grpc_closure_list_init(&lock->final_list);
    int loops = 0;
    while (c != nullptr) {
      GPR_TIMER_SCOPE("combiner.exec_1final", 0);
      GRPC_COMBINER_TRACE(
          gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
      grpc_closure* next = c->next_data.next;
      grpc_error* error = c->error_data.error;
#ifndef NDEBUG
      c->scheduled = false;
#endif
      c->cb(c->cb_arg, error);
      GRPC_ERROR_UNREF(error);
      c = next;
      loops++;
    }
  }

  GPR_TIMER_MARK("unref", 0);
  move_next();
  lock->time_to_execute_final_list = false;
  gpr_atm old_state =
      gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(
      gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count)  \
  (((orphaned) ? 0 : STATE_UNORPHANED) |     \
   ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
  // Depending on what the previous state was, we need to perform different
  // actions.
  switch (old_state) {
    default:
      // we have multiple queued work items: just continue executing them
      break;
    case OLD_STATE_WAS(false, 2):
    case OLD_STATE_WAS(true, 2):
      // we're down to one queued item: if it's the final list we should do
      // that
      if (!grpc_closure_list_empty(lock->final_list)) {
        lock->time_to_execute_final_list = true;
      }
      break;
    case OLD_STATE_WAS(false, 1):
      // had one count, one unorphaned --> unlocked unorphaned
      return true;
    case OLD_STATE_WAS(true, 1):
      // and one count, one orphaned --> unlocked and orphaned
      really_destroy(lock);
      return true;
    case OLD_STATE_WAS(false, 0):
    case OLD_STATE_WAS(true, 0):
      // these values are illegal - representing an already unlocked or
      // deleted lock
      GPR_UNREACHABLE_CODE(return true);
  }
  push_first_on_exec_ctx(lock);
  return true;
}

static void enqueue_finally(void* closure, grpc_error* error);

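// Run 'closure' at the end of the current combiner drain, while the lock is
// still held. If the calling thread is not currently executing inside this
// combiner, the request is forwarded to the lock via enqueue_finally; the
// combiner pointer rides along in error_data.scratch.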
static void combiner_finally_exec(grpc_core::Combiner* lock,
                                  grpc_closure* closure, grpc_error* error) {
  GPR_ASSERT(lock != nullptr);
  GPR_TIMER_SCOPE("combiner.execute_finally", 0);
  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock,
      closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
    GPR_TIMER_MARK("slowpath", 0);
    // Using error_data.scratch to store the combiner so that it can be
    // accessed in enqueue_finally.
    closure->error_data.scratch = reinterpret_cast<uintptr_t>(lock);
    lock->Run(GRPC_CLOSURE_CREATE(enqueue_finally, closure, nullptr), error);
    return;
  }

  if (grpc_closure_list_empty(lock->final_list)) {
    gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  }
  grpc_closure_list_append(&lock->final_list, closure, error);
}

static void enqueue_finally(void* closure, grpc_error* error) {
  grpc_closure* cl = static_cast<grpc_closure*>(closure);
  combiner_finally_exec(
      reinterpret_cast<grpc_core::Combiner*>(cl->error_data.scratch), cl,
      GRPC_ERROR_REF(error));
}

namespace grpc_core {
void Combiner::Run(grpc_closure* closure, grpc_error* error) {
  combiner_exec(this, closure, error);
}

void Combiner::FinallyRun(grpc_closure* closure, grpc_error* error) {
  combiner_finally_exec(this, closure, error);
}
}  // namespace grpc_core