1 /*
2 *
3 * Copyright 2015 gRPC authors.
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 *
17 */
18
19 #include <ruby/ruby.h>
20 #include <ruby/thread.h>
21
22 #include "rb_byte_buffer.h"
23 #include "rb_channel.h"
24 #include "rb_grpc_imports.generated.h"
25
26 #include <grpc/grpc.h>
27 #include <grpc/grpc_security.h>
28 #include <grpc/support/alloc.h>
29 #include <grpc/support/log.h>
30 #include <grpc/support/time.h>
31 #include "rb_call.h"
32 #include "rb_channel_args.h"
33 #include "rb_channel_credentials.h"
34 #include "rb_completion_queue.h"
35 #include "rb_grpc.h"
36 #include "rb_server.h"
37
38 /* id_channel is the name of the hidden ivar that preserves a reference to the
39 * channel on a call, so that calls are not GCed before their channel. */
40 static ID id_channel;
41
42 /* id_target is the name of the hidden ivar that preserves a reference to the
43 * target string used to create the call, preserved so that it does not get
44 * GCed before the channel */
45 static ID id_target;
46
47 /* id_insecure_channel is used to indicate that a channel is insecure */
48 static VALUE id_insecure_channel;
49
50 /* grpc_rb_cChannel is the ruby class that proxies grpc_channel. */
51 static VALUE grpc_rb_cChannel = Qnil;
52
53 /* Used during the conversion of a hash to channel args during channel setup */
54 static VALUE grpc_rb_cChannelArgs;
55
/* A channel node tracked by the background connectivity-polling thread.
 * The node can outlive the Ruby wrapper object, so in-flight watch ops
 * never touch freed memory. */
typedef struct bg_watched_channel {
  grpc_channel* channel;
  // these fields must only be accessed under global_connection_polling_mu
  struct bg_watched_channel* next;  // intrusive singly-linked list link
  int channel_destroyed;  // set once grpc_channel_destroy has been called
  int refcount;  // held by the Ruby wrapper and/or a pending watch op
} bg_watched_channel;
63
/* grpc_rb_channel wraps a grpc_channel. */
typedef struct grpc_rb_channel {
  /* Ruby credentials object, kept alive via grpc_rb_channel_mark */
  VALUE credentials;

  /* The actual channel (protected in a wrapper to tell when it's safe to
   * destroy); NULL after the channel has been closed */
  bg_watched_channel* bg_wrapped;
} grpc_rb_channel;
72
/* Distinguishes the background thread's self-re-arming watches from
 * user-initiated watch_connectivity_state calls. */
typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;
74
/* Tag attached to connectivity-watch events placed on channel_polling_cq;
 * interpreted by the polling loop according to op_type. */
typedef struct watch_state_op {
  watch_state_op_type op_type;
  // from event.success
  union {
    struct {
      int success;
      // has been called back due to a cq next call
      int called_back;
    } api_callback_args;
    struct {
      bg_watched_channel* bg;
    } continuous_watch_callback_args;
  } op;
} watch_state_op;
89
/* Head of the singly-linked list of all channels watched by the background
 * polling thread; guarded by global_connection_polling_mu. */
static bg_watched_channel* bg_watched_channel_list_head = NULL;

/* forward declarations of helpers defined later in this file */
static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg);
static void* wait_until_channel_polling_thread_started_no_gil(void*);
static void wait_until_channel_polling_thread_started_unblocking_func(void*);
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg);

/* Arguments marshalled through rb_thread_call_without_gvl during channel
 * initialization. */
typedef struct channel_init_try_register_stack {
  grpc_channel* channel;
  grpc_rb_channel* wrapper;
} channel_init_try_register_stack;

/* Completion queue drained by the background connectivity-polling thread. */
static grpc_completion_queue* channel_polling_cq;
/* Guards the polling state below and the watched-channel list above. */
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
/* set once polling is shut down (or failed to start); no new watches after */
static int abort_channel_polling = 0;
static int channel_polling_thread_started = 0;

static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel);
static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
static void run_poll_channels_loop_unblocking_func(void* arg);
115
// Needs to be called under global_connection_polling_mu
// Marks a WATCH_STATE_API op as finished, records its result, and wakes the
// thread blocked in wait_for_watch_state_op_complete_without_gvl.
static void grpc_rb_channel_watch_connection_state_op_complete(
    watch_state_op* op, int success) {
  GPR_ASSERT(!op->op.api_callback_args.called_back);
  op->op.api_callback_args.called_back = 1;
  op->op.api_callback_args.success = success;
  // wake up the watch API call that's waiting on this op
  gpr_cv_broadcast(&global_connection_polling_cv);
}
125
/* Avoids destroying a channel twice. */
/* Drops one reference to the wrapped channel: destroys the grpc_channel on
 * the first call, and unlinks/frees the node once the refcount hits zero. */
static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  bg->refcount--;
  if (bg->refcount == 0) {
    /* the polling thread holds no reference either, so reclaim the node */
    bg_watched_channel_list_free_and_remove(bg);
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}
140
channel_safe_destroy_without_gil(void * arg)141 static void* channel_safe_destroy_without_gil(void* arg) {
142 grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
143 return NULL;
144 }
145
146 /* Destroys Channel instances. */
grpc_rb_channel_free(void * p)147 static void grpc_rb_channel_free(void* p) {
148 grpc_rb_channel* ch = NULL;
149 if (p == NULL) {
150 return;
151 };
152 ch = (grpc_rb_channel*)p;
153
154 if (ch->bg_wrapped != NULL) {
155 /* assumption made here: it's ok to directly gpr_mu_lock the global
156 * connection polling mutex becuse we're in a finalizer,
157 * and we can count on this thread to not be interrupted or
158 * yield the gil. */
159 grpc_rb_channel_safe_destroy(ch->bg_wrapped);
160 ch->bg_wrapped = NULL;
161 }
162
163 xfree(p);
164 }
165
166 /* Protects the mark object from GC */
grpc_rb_channel_mark(void * p)167 static void grpc_rb_channel_mark(void* p) {
168 grpc_rb_channel* channel = NULL;
169 if (p == NULL) {
170 return;
171 }
172 channel = (grpc_rb_channel*)p;
173 if (channel->credentials != Qnil) {
174 rb_gc_mark(channel->credentials);
175 }
176 }
177
/* Ruby TypedData descriptor for Channel objects: wires the GC mark/free
 * hooks above; memsize is not reported. */
static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
                                                {grpc_rb_channel_mark,
                                                 grpc_rb_channel_free,
                                                 GRPC_RB_MEMSIZE_UNAVAILABLE,
                                                 {NULL, NULL}},
                                                NULL,
                                                NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
                                                RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
189
/* Allocates grpc_rb_channel instances. The real grpc_channel is attached
 * later, in the Ruby #initialize (grpc_rb_channel_init). */
static VALUE grpc_rb_channel_alloc(VALUE cls) {
  grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
  wrapper->bg_wrapped = NULL;
  wrapper->credentials = Qnil;
  return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
}
197
/*
  call-seq:
    insecure_channel = Channel:new("myhost:8080", {'arg1': 'value1'},
                                   :this_channel_is_insecure)
    creds = ...
    secure_channel = Channel:new("myhost:443", {'arg1': 'value1'}, creds)

  Creates channel instances. */
static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
  VALUE channel_args = Qnil;
  VALUE credentials = Qnil;
  VALUE target = Qnil;
  grpc_rb_channel* wrapper = NULL;
  grpc_channel* ch = NULL;
  grpc_channel_credentials* creds = NULL;
  char* target_chars = NULL;
  grpc_channel_args args;
  channel_init_try_register_stack stack;
  int stop_waiting_for_thread_start = 0;
  MEMZERO(&args, grpc_channel_args, 1);

  grpc_ruby_once_init();
  grpc_ruby_fork_guard();
  /* Block (GVL released) until the background polling thread is up, so the
   * new channel can be registered for connectivity watching below. */
  rb_thread_call_without_gvl(
      wait_until_channel_polling_thread_started_no_gil,
      &stop_waiting_for_thread_start,
      wait_until_channel_polling_thread_started_unblocking_func,
      &stop_waiting_for_thread_start);

  /* "3" == 3 mandatory args */
  rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  target_chars = StringValueCStr(target);
  grpc_rb_hash_convert_to_channel_args(channel_args, &args);
  if (TYPE(credentials) == T_SYMBOL) {
    if (id_insecure_channel != SYM2ID(credentials)) {
      /* NOTE(review): rb_raise longjmps out before args.args (allocated by
       * grpc_rb_hash_convert_to_channel_args) is freed below — confirm
       * whether this one-shot leak matters. */
      rb_raise(rb_eTypeError,
               "bad creds symbol, want :this_channel_is_insecure");
      return Qnil;
    }
    ch = grpc_insecure_channel_create(target_chars, &args, NULL);
  } else {
    /* keep the credentials VALUE alive via the wrapper (GC-marked) */
    wrapper->credentials = credentials;
    creds = grpc_rb_get_wrapped_channel_credentials(credentials);
    ch = grpc_secure_channel_create(creds, target_chars, &args, NULL);
  }

  GPR_ASSERT(ch);
  /* hand the channel to the background thread for continuous watching */
  stack.channel = ch;
  stack.wrapper = wrapper;
  rb_thread_call_without_gvl(
      channel_init_try_register_connection_polling_without_gil, &stack, NULL,
      NULL);

  if (args.args != NULL) {
    xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
  }
  /* NOTE(review): unreachable — GPR_ASSERT(ch) above aborts if ch is NULL */
  if (ch == NULL) {
    rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
             target_chars);
    return Qnil;
  }
  /* pin the target string so it is not GCed before the channel */
  rb_ivar_set(self, id_target, target);
  return self;
}
264
/* In/out arguments for get_state_without_gil. */
typedef struct get_state_stack {
  bg_watched_channel* bg;
  int try_to_connect;  // forwarded to grpc_channel_check_connectivity_state
  int out;             // resulting connectivity-state value
} get_state_stack;
270
/* Reads the channel's connectivity state with the GVL released.  Reports
 * GRPC_CHANNEL_SHUTDOWN when the underlying channel was already destroyed
 * instead of touching freed channel memory. */
static void* get_state_without_gil(void* arg) {
  get_state_stack* stack = (get_state_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(abort_channel_polling || channel_polling_thread_started);
  if (stack->bg->channel_destroyed) {
    stack->out = GRPC_CHANNEL_SHUTDOWN;
  } else {
    stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
                                                       stack->try_to_connect);
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}
286
/*
  call-seq:
    ch.connectivity_state       -> state
    ch.connectivity_state(true) -> state

  Indicates the current state of the channel, whose value is one of the
  constants defined in GRPC::Core::ConnectivityStates.

  It also tries to connect if the channel is idle in the second form. */
static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
                                                    VALUE self) {
  VALUE try_to_connect_param = Qfalse;
  grpc_rb_channel* wrapper = NULL;
  get_state_stack stack;

  /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
  rb_scan_args(argc, argv, "01", &try_to_connect_param);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }

  stack.bg = wrapper->bg_wrapped;
  stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
  /* release the GVL: the state check takes the global polling mutex */
  rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);

  return LONG2NUM(stack.out);
}
317
/* Arguments for wait_for_watch_state_op_complete_without_gvl. */
typedef struct watch_state_stack {
  bg_watched_channel* bg_wrapped;
  gpr_timespec deadline;   // absolute deadline for the watch
  int last_state;          // state the caller last observed
} watch_state_stack;
323
/* Registers a WATCH_STATE_API connectivity watch on the polling cq, then
 * blocks (GVL already released) until the polling thread completes the op.
 * Returns (void*)1 if the state changed before the deadline, (void*)0
 * otherwise or when the watch cannot be registered. */
static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
  watch_state_stack* stack = (watch_state_stack*)arg;
  watch_state_op* op = NULL;
  void* success = (void*)0;

  gpr_mu_lock(&global_connection_polling_mu);
  // it's unsafe to do a "watch" after "channel polling abort" because the cq
  // has been shut down.
  if (abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return (void*)0;
  }
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = WATCH_STATE_API;
  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                        stack->last_state, stack->deadline,
                                        channel_polling_cq, op);

  /* woken by grpc_rb_channel_watch_connection_state_op_complete */
  while (!op->op.api_callback_args.called_back) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (op->op.api_callback_args.success) {
    success = (void*)1;
  }
  /* this side owns the op once called_back is set */
  gpr_free(op);
  gpr_mu_unlock(&global_connection_polling_mu);

  return success;
}
/* Unblocking func for the wait above: Ruby is interrupting the thread, so
 * destroy the channel to force the pending watch to complete promptly. */
static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
  bg_watched_channel* bg = (bg_watched_channel*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}
363
364 /* Wait until the channel's connectivity state becomes different from
365 * "last_state", or "deadline" expires.
366 * Returns true if the channel's connectivity state becomes different
367 * from "last_state" within "deadline".
368 * Returns false if "deadline" expires before the channel's connectivity
369 * state changes from "last_state".
370 * */
grpc_rb_channel_watch_connectivity_state(VALUE self,VALUE last_state,VALUE deadline)371 static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
372 VALUE last_state,
373 VALUE deadline) {
374 grpc_rb_channel* wrapper = NULL;
375 watch_state_stack stack;
376 void* op_success = 0;
377
378 grpc_ruby_fork_guard();
379 TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
380
381 if (wrapper->bg_wrapped == NULL) {
382 rb_raise(rb_eRuntimeError, "closed!");
383 return Qnil;
384 }
385
386 if (!FIXNUM_P(last_state)) {
387 rb_raise(
388 rb_eTypeError,
389 "bad type for last_state. want a GRPC::Core::ChannelState constant");
390 return Qnil;
391 }
392
393 stack.bg_wrapped = wrapper->bg_wrapped;
394 stack.deadline = grpc_rb_time_timeval(deadline, 0),
395 stack.last_state = NUM2LONG(last_state);
396
397 op_success = rb_thread_call_without_gvl(
398 wait_for_watch_state_op_complete_without_gvl, &stack,
399 wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
400
401 return op_success ? Qtrue : Qfalse;
402 }
403
/* Create a call given a grpc_channel, in order to call method. The request
   is not sent until grpc_call_invoke is called. */
static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
                                         VALUE method, VALUE host,
                                         VALUE deadline) {
  VALUE res = Qnil;
  grpc_rb_channel* wrapper = NULL;
  grpc_call* call = NULL;
  grpc_call* parent_call = NULL;
  grpc_completion_queue* cq = NULL;
  int flags = GRPC_PROPAGATE_DEFAULTS;
  grpc_slice method_slice;
  grpc_slice host_slice;
  grpc_slice* host_slice_ptr = NULL;
  char* tmp_str = NULL;

  grpc_ruby_fork_guard();
  /* host, mask and parent are all optional (Qnil means "absent") */
  if (host != Qnil) {
    host_slice =
        grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
    host_slice_ptr = &host_slice;
  }
  if (mask != Qnil) {
    flags = NUM2UINT(mask);
  }
  if (parent != Qnil) {
    parent_call = grpc_rb_get_wrapped_call(parent);
  }

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }

  /* each call gets its own pluck-mode completion queue */
  cq = grpc_completion_queue_create_for_pluck(NULL);
  method_slice =
      grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
  call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
                                  flags, cq, method_slice, host_slice_ptr,
                                  grpc_rb_time_timeval(deadline,
                                                       /* absolute time */ 0),
                                  NULL);

  if (call == NULL) {
    /* NOTE(review): rb_raise longjmps, so tmp_str, method_slice, host_slice
     * and cq are leaked on this (rare) failure path — confirm acceptable. */
    tmp_str = grpc_slice_to_c_string(method_slice);
    rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
    return Qnil;
  }

  grpc_slice_unref(method_slice);
  if (host_slice_ptr != NULL) {
    grpc_slice_unref(host_slice);
  }

  /* ownership of call and cq transfers to the wrapped Call object */
  res = grpc_rb_wrap_call(call, cq);

  /* Make this channel an instance attribute of the call so that it is not GCed
   * before the call. */
  rb_ivar_set(res, id_channel, self);
  return res;
}
466
/* Closes the channel, calling its destroy method */
/* Note this is an API-level call; a wrapped channel's finalizer doesn't call
 * this */
static VALUE grpc_rb_channel_destroy(VALUE self) {
  grpc_rb_channel* wrapper = NULL;

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped != NULL) {
    /* drop this wrapper's reference with the GVL released (the destroy takes
     * the global polling mutex); subsequent method calls raise "closed!" */
    rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
                               wrapper->bg_wrapped, NULL, NULL);
    wrapper->bg_wrapped = NULL;
  }

  return Qnil;
}
482
483 /* Called to obtain the target that this channel accesses. */
grpc_rb_channel_get_target(VALUE self)484 static VALUE grpc_rb_channel_get_target(VALUE self) {
485 grpc_rb_channel* wrapper = NULL;
486 VALUE res = Qnil;
487 char* target = NULL;
488
489 TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
490 target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
491 res = rb_str_new2(target);
492 gpr_free(target);
493
494 return res;
495 }
496
497 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_lookup(bg_watched_channel * target)498 static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
499 bg_watched_channel* cur = bg_watched_channel_list_head;
500
501 while (cur != NULL) {
502 if (cur == target) {
503 return 1;
504 }
505 cur = cur->next;
506 }
507
508 return 0;
509 }
510
/* Needs to be called under global_connection_polling_mu */
/* Allocates a zeroed node for "channel", pushes it onto the head of the
 * watched-channel list, and returns it with refcount 1 (the Ruby wrapper's
 * reference). */
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel) {
  bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));

  watched->channel = channel;
  watched->next = bg_watched_channel_list_head;
  watched->refcount = 1;
  bg_watched_channel_list_head = watched;
  return watched;
}
522
523 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_free_and_remove(bg_watched_channel * target)524 static void bg_watched_channel_list_free_and_remove(
525 bg_watched_channel* target) {
526 bg_watched_channel* bg = NULL;
527
528 GPR_ASSERT(bg_watched_channel_list_lookup(target));
529 GPR_ASSERT(target->channel_destroyed && target->refcount == 0);
530 if (bg_watched_channel_list_head == target) {
531 bg_watched_channel_list_head = target->next;
532 gpr_free(target);
533 return;
534 }
535 bg = bg_watched_channel_list_head;
536 while (bg != NULL && bg->next != NULL) {
537 if (bg->next == target) {
538 bg->next = bg->next->next;
539 gpr_free(target);
540 return;
541 }
542 bg = bg->next;
543 }
544 GPR_ASSERT(0);
545 }
546
/* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
 * it onto the background thread for constant watches.
 * Runs with the GVL released (via rb_thread_call_without_gvl); arg is a
 * channel_init_try_register_stack. */
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg) {
  channel_init_try_register_stack* stack =
      (channel_init_try_register_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  stack->wrapper->bg_wrapped =
      bg_watched_channel_list_create_and_add(stack->channel);
  grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}
561
// Needs to be called under global_connection_polling_mu
// Arms (or re-arms) a CONTINUOUS_WATCH on "bg" unless the channel is shut
// down or polling has been aborted.  A pending watch holds one extra
// reference on "bg" so the node survives until the event is drained.
static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg) {
  grpc_connectivity_state conn_state;
  watch_state_op* op = NULL;

  GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);

  if (bg->refcount == 0) {
    /* the Ruby wrapper already released its reference; reclaim the node */
    GPR_ASSERT(bg->channel_destroyed);
    bg_watched_channel_list_free_and_remove(bg);
    return;
  }
  GPR_ASSERT(bg->refcount == 1);
  if (bg->channel_destroyed || abort_channel_polling) {
    return;
  }

  conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
  if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
    return;
  }
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  // prevent bg from being free'd by GC while background thread is watching it
  bg->refcount++;

  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = CONTINUOUS_WATCH;
  op->op.continuous_watch_callback_args.bg = bg;
  grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                        gpr_inf_future(GPR_CLOCK_REALTIME),
                                        channel_polling_cq, op);
}
595
// Note this loop breaks out with a single call of
// "run_poll_channels_loop_no_gil".
// This assumes that a ruby call to the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
// early and falls back to current behavior.
/* Body of the background polling thread: drains channel_polling_cq forever,
 * re-arming continuous watches and completing API watches, until the cq is
 * shut down by run_poll_channels_loop_unblocking_func. */
static void* run_poll_channels_loop_no_gil(void* arg) {
  grpc_event event;
  watch_state_op* op = NULL;
  bg_watched_channel* bg = NULL;
  (void)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(!channel_polling_thread_started);
  channel_polling_thread_started = 1;
  /* wake channel constructors waiting for the polling thread to start */
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);

  for (;;) {
    event = grpc_completion_queue_next(
        channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (event.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    gpr_mu_lock(&global_connection_polling_mu);
    if (event.type == GRPC_OP_COMPLETE) {
      op = (watch_state_op*)event.tag;
      if (op->op_type == CONTINUOUS_WATCH) {
        /* drop the reference held by the finished watch, then re-arm */
        bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
        bg->refcount--;
        grpc_rb_channel_try_register_connection_polling(bg);
        gpr_free(op);
      } else if (op->op_type == WATCH_STATE_API) {
        /* the op is freed by the waiting API thread, not here */
        grpc_rb_channel_watch_connection_state_op_complete(
            (watch_state_op*)event.tag, event.success);
      } else {
        GPR_ASSERT(0);
      }
    }
    gpr_mu_unlock(&global_connection_polling_mu);
  }
  grpc_completion_queue_destroy(channel_polling_cq);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
          "loop");
  return NULL;
}
644
// Notify the channel polling loop to cleanup and shutdown.
// Destroys every watched channel so outstanding watches complete, then shuts
// the polling cq down so run_poll_channels_loop_no_gil exits.  Idempotent.
static void run_poll_channels_loop_unblocking_func(void* arg) {
  bg_watched_channel* bg = NULL;
  (void)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
          "connection polling");
  // early out after first time through
  if (abort_channel_polling) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return;
  }
  abort_channel_polling = 1;

  // force pending watches to end by switching to shutdown state
  bg = bg_watched_channel_list_head;
  while (bg != NULL) {
    if (!bg->channel_destroyed) {
      grpc_channel_destroy(bg->channel);
      bg->channel_destroyed = 1;
    }
    bg = bg->next;
  }

  /* the polling loop exits when it sees GRPC_QUEUE_SHUTDOWN */
  grpc_completion_queue_shutdown(channel_polling_cq);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
          "connection polling");
}
678
// Poll channel connectivity states in background thread without the GIL.
// Entry point of the Ruby thread spawned by grpc_rb_channel_polling_thread_start.
static VALUE run_poll_channels_loop(VALUE arg) {
  (void)arg;
  gpr_log(
      GPR_DEBUG,
      "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
  /* the unblocking func runs when Ruby interrupts/kills this thread */
  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                             run_poll_channels_loop_unblocking_func, NULL);

  return Qnil;
}
690
/* Blocks (GVL released) until the polling thread has started, polling has
 * been aborted, or *stop_waiting is set by the unblocking func below. */
static void* wait_until_channel_polling_thread_started_no_gil(void* arg) {
  int* stop_waiting = (int*)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
  gpr_mu_lock(&global_connection_polling_mu);
  while (!channel_polling_thread_started && !abort_channel_polling &&
         !*stop_waiting) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}
704
/* Unblocking func for the wait above: sets the caller's stop flag and wakes
 * all waiters so the interrupted thread can return to Ruby. */
static void wait_until_channel_polling_thread_started_unblocking_func(
    void* arg) {
  int* stop_waiting = (int*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: interrupt wait for channel polling thread to start");
  *stop_waiting = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
}
715
/* Marks channel polling as aborted (run with the GVL released) and wakes any
 * thread waiting for the polling thread to start, so it fails fast. */
static void* set_abort_channel_polling_without_gil(void* arg) {
  (void)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  abort_channel_polling = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}
724
725 /* Temporary fix for
726 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
727 * Transports in idle channels can get destroyed. Normally c-core re-connects,
728 * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
729 * only calls c-core's "completion_queu_pluck" API.
730 * This uses a global background thread that calls
731 * "completion_queue_next" on registered "watch_channel_connectivity_state"
732 * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
733 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
734 */
grpc_rb_channel_polling_thread_start()735 void grpc_rb_channel_polling_thread_start() {
736 VALUE background_thread = Qnil;
737
738 GPR_ASSERT(!abort_channel_polling);
739 GPR_ASSERT(!channel_polling_thread_started);
740 GPR_ASSERT(channel_polling_cq == NULL);
741
742 gpr_mu_init(&global_connection_polling_mu);
743 gpr_cv_init(&global_connection_polling_cv);
744
745 channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
746 background_thread = rb_thread_create(run_poll_channels_loop, NULL);
747
748 if (!RTEST(background_thread)) {
749 gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
750 rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
751 NULL, NULL);
752 }
753 }
754
Init_grpc_propagate_masks()755 static void Init_grpc_propagate_masks() {
756 /* Constants representing call propagation masks in grpc.h */
757 VALUE grpc_rb_mPropagateMasks =
758 rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
759 rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
760 UINT2NUM(GRPC_PROPAGATE_DEADLINE));
761 rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
762 UINT2NUM(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
763 rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_TRACING_CONTEXT",
764 UINT2NUM(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
765 rb_define_const(grpc_rb_mPropagateMasks, "CANCELLATION",
766 UINT2NUM(GRPC_PROPAGATE_CANCELLATION));
767 rb_define_const(grpc_rb_mPropagateMasks, "DEFAULTS",
768 UINT2NUM(GRPC_PROPAGATE_DEFAULTS));
769 }
770
Init_grpc_connectivity_states()771 static void Init_grpc_connectivity_states() {
772 /* Constants representing call propagation masks in grpc.h */
773 VALUE grpc_rb_mConnectivityStates =
774 rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
775 rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
776 LONG2NUM(GRPC_CHANNEL_IDLE));
777 rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
778 LONG2NUM(GRPC_CHANNEL_CONNECTING));
779 rb_define_const(grpc_rb_mConnectivityStates, "READY",
780 LONG2NUM(GRPC_CHANNEL_READY));
781 rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
782 LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
783 rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
784 LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
785 }
786
/* Registers the GRPC::Core::Channel class, its methods, hidden ivar ids,
 * and related constant modules with the Ruby runtime. */
void Init_grpc_channel() {
  grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
  grpc_rb_cChannel =
      rb_define_class_under(grpc_rb_mGrpcCore, "Channel", rb_cObject);

  /* Allocates an object managed by the ruby runtime */
  rb_define_alloc_func(grpc_rb_cChannel, grpc_rb_channel_alloc);

  /* Provides a ruby constructor and support for dup/clone. */
  rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
  rb_define_method(grpc_rb_cChannel, "initialize_copy",
                   grpc_rb_cannot_init_copy, 1);

  /* Add ruby analogues of the Channel methods. */
  rb_define_method(grpc_rb_cChannel, "connectivity_state",
                   grpc_rb_channel_get_connectivity_state, -1);
  rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
                   grpc_rb_channel_watch_connectivity_state, 2);
  rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
                   5);
  rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
  rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
  rb_define_alias(grpc_rb_cChannel, "close", "destroy");

  /* ids of the hidden ivars used to pin objects against premature GC */
  id_channel = rb_intern("__channel");
  id_target = rb_intern("__target");
  /* symbol constants usable as keys in the channel-args hash */
  rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                  ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
  rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
                  ID2SYM(rb_intern(GRPC_ARG_ENABLE_CENSUS)));
  rb_define_const(grpc_rb_cChannel, "MAX_CONCURRENT_STREAMS",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_CONCURRENT_STREAMS)));
  rb_define_const(grpc_rb_cChannel, "MAX_MESSAGE_LENGTH",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)));
  id_insecure_channel = rb_intern("this_channel_is_insecure");
  Init_grpc_propagate_masks();
  Init_grpc_connectivity_states();
}
825
/* Gets the wrapped channel from the ruby wrapper */
/* NOTE(review): bg_wrapped is dereferenced without a NULL check, so this
 * crashes if v has already been closed — confirm callers guard against
 * closed channels. */
grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
  grpc_rb_channel* wrapper = NULL;
  TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  return wrapper->bg_wrapped->channel;
}
832