• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #include <ruby/ruby.h>
20 #include <ruby/thread.h>
21 
22 #include "rb_byte_buffer.h"
23 #include "rb_channel.h"
24 #include "rb_grpc_imports.generated.h"
25 
26 #include <grpc/grpc.h>
27 #include <grpc/grpc_security.h>
28 #include <grpc/support/alloc.h>
29 #include <grpc/support/log.h>
30 #include <grpc/support/time.h>
31 #include "rb_call.h"
32 #include "rb_channel_args.h"
33 #include "rb_channel_credentials.h"
34 #include "rb_completion_queue.h"
35 #include "rb_grpc.h"
36 #include "rb_server.h"
37 
/* id_channel is the name of the hidden ivar that preserves a reference to the
 * channel on a call, so that calls are not GCed before their channel.  */
static ID id_channel;

/* id_target is the name of the hidden ivar that preserves a reference to the
 * target string used to create the call, preserved so that it does not get
 * GCed before the channel */
static ID id_target;

/* id_insecure_channel is used to indicate that a channel is insecure */
static VALUE id_insecure_channel;

/* grpc_rb_cChannel is the ruby class that proxies grpc_channel. */
static VALUE grpc_rb_cChannel = Qnil;

/* Used during the conversion of a hash to channel args during channel setup */
static VALUE grpc_rb_cChannelArgs;

/* A grpc_channel wrapper tracked on a global list, shared between the
 * Ruby-side owner and the background connection-polling thread. */
typedef struct bg_watched_channel {
  grpc_channel* channel;
  // these fields must only be accessed under global_connection_polling_mu
  struct bg_watched_channel* next;
  int channel_destroyed;
  int refcount;
} bg_watched_channel;

/* grpc_rb_channel wraps a grpc_channel. */
typedef struct grpc_rb_channel {
  VALUE credentials;

  /* The actual channel (protected in a wrapper to tell when it's safe to
   * destroy) */
  bg_watched_channel* bg_wrapped;
} grpc_rb_channel;

/* Distinguishes the background thread's perpetual watches from one-shot
 * watch_connectivity_state API calls. */
typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;

/* Tag attached to connectivity-watch completion-queue events. */
typedef struct watch_state_op {
  watch_state_op_type op_type;
  // from event.success
  union {
    struct {
      int success;
      // has been called back due to a cq next call
      int called_back;
    } api_callback_args;
    struct {
      bg_watched_channel* bg;
    } continuous_watch_callback_args;
  } op;
} watch_state_op;

/* Head of the global watched-channel list; access only under
 * global_connection_polling_mu. */
static bg_watched_channel* bg_watched_channel_list_head = NULL;

static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg);
static void* wait_until_channel_polling_thread_started_no_gil(void*);
static void wait_until_channel_polling_thread_started_unblocking_func(void*);
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg);

/* Arguments marshalled across rb_thread_call_without_gvl during channel
 * construction. */
typedef struct channel_init_try_register_stack {
  grpc_channel* channel;
  grpc_rb_channel* wrapper;
} channel_init_try_register_stack;

/* Completion queue drained by the background polling thread. */
static grpc_completion_queue* channel_polling_cq;
/* Guards all the connection-polling state below and the
 * bg_watched_channel list. */
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
static int abort_channel_polling = 0;
static int channel_polling_thread_started = 0;

static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel);
static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
static void run_poll_channels_loop_unblocking_func(void* arg);
// Needs to be called under global_connection_polling_mu
// Completes a WATCH_STATE_API op: records the result and wakes the Ruby
// thread blocked in wait_for_watch_state_op_complete_without_gvl, which
// then frees the op.
static void grpc_rb_channel_watch_connection_state_op_complete(
    watch_state_op* op, int success) {
  GPR_ASSERT(!op->op.api_callback_args.called_back);
  op->op.api_callback_args.called_back = 1;
  op->op.api_callback_args.success = success;
  // wake up the watch API call that's waiting on this op
  gpr_cv_broadcast(&global_connection_polling_cv);
}
125 
/* Avoids destroying a channel twice. */
/* Drops one reference to the wrapper: destroys the grpc_channel on first
 * call, and frees the wrapper once both the Ruby owner and the polling
 * thread have released it. */
static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  bg->refcount--;
  if (bg->refcount == 0) {
    bg_watched_channel_list_free_and_remove(bg);
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}
140 
channel_safe_destroy_without_gil(void * arg)141 static void* channel_safe_destroy_without_gil(void* arg) {
142   grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
143   return NULL;
144 }
145 
grpc_rb_channel_free_internal(void * p)146 static void grpc_rb_channel_free_internal(void* p) {
147   grpc_rb_channel* ch = NULL;
148   if (p == NULL) {
149     return;
150   };
151   ch = (grpc_rb_channel*)p;
152   if (ch->bg_wrapped != NULL) {
153     /* assumption made here: it's ok to directly gpr_mu_lock the global
154      * connection polling mutex because we're in a finalizer,
155      * and we can count on this thread to not be interrupted or
156      * yield the gil. */
157     grpc_rb_channel_safe_destroy(ch->bg_wrapped);
158     ch->bg_wrapped = NULL;
159   }
160   xfree(p);
161 }
162 
/* Destroys Channel instances. */
/* GC free callback: releases the channel and balances the grpc_ruby_init()
 * performed in grpc_rb_channel_alloc. */
static void grpc_rb_channel_free(void* p) {
  grpc_rb_channel_free_internal(p);
  grpc_ruby_shutdown();
}
168 
169 /* Protects the mark object from GC */
grpc_rb_channel_mark(void * p)170 static void grpc_rb_channel_mark(void* p) {
171   grpc_rb_channel* channel = NULL;
172   if (p == NULL) {
173     return;
174   }
175   channel = (grpc_rb_channel*)p;
176   if (channel->credentials != Qnil) {
177     rb_gc_mark(channel->credentials);
178   }
179 }
180 
/* Ruby TypedData descriptor tying grpc_rb_channel instances to their GC
 * mark/free hooks. */
static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
                                                {grpc_rb_channel_mark,
                                                 grpc_rb_channel_free,
                                                 GRPC_RB_MEMSIZE_UNAVAILABLE,
                                                 {NULL, NULL}},
                                                NULL,
                                                NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
                                                RUBY_TYPED_FREE_IMMEDIATELY
#endif
};
192 
/* Allocates grpc_rb_channel instances. */
static VALUE grpc_rb_channel_alloc(VALUE cls) {
  /* Paired with the grpc_ruby_shutdown() in grpc_rb_channel_free. */
  grpc_ruby_init();
  grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
  wrapper->bg_wrapped = NULL;
  wrapper->credentials = Qnil;
  return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
}
201 
202 /*
203   call-seq:
204     insecure_channel = Channel:new("myhost:8080", {'arg1': 'value1'},
205                                    :this_channel_is_insecure)
206     creds = ...
207     secure_channel = Channel:new("myhost:443", {'arg1': 'value1'}, creds)
208 
209   Creates channel instances. */
grpc_rb_channel_init(int argc,VALUE * argv,VALUE self)210 static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
211   VALUE channel_args = Qnil;
212   VALUE credentials = Qnil;
213   VALUE target = Qnil;
214   grpc_rb_channel* wrapper = NULL;
215   grpc_channel* ch = NULL;
216   grpc_channel_credentials* creds = NULL;
217   char* target_chars = NULL;
218   grpc_channel_args args;
219   channel_init_try_register_stack stack;
220   int stop_waiting_for_thread_start = 0;
221   MEMZERO(&args, grpc_channel_args, 1);
222 
223   grpc_ruby_fork_guard();
224   rb_thread_call_without_gvl(
225       wait_until_channel_polling_thread_started_no_gil,
226       &stop_waiting_for_thread_start,
227       wait_until_channel_polling_thread_started_unblocking_func,
228       &stop_waiting_for_thread_start);
229 
230   /* "3" == 3 mandatory args */
231   rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
232 
233   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
234   target_chars = StringValueCStr(target);
235   grpc_rb_hash_convert_to_channel_args(channel_args, &args);
236   if (TYPE(credentials) == T_SYMBOL) {
237     if (id_insecure_channel != SYM2ID(credentials)) {
238       rb_raise(rb_eTypeError,
239                "bad creds symbol, want :this_channel_is_insecure");
240       return Qnil;
241     }
242     ch = grpc_insecure_channel_create(target_chars, &args, NULL);
243   } else {
244     wrapper->credentials = credentials;
245     creds = grpc_rb_get_wrapped_channel_credentials(credentials);
246     ch = grpc_secure_channel_create(creds, target_chars, &args, NULL);
247   }
248 
249   GPR_ASSERT(ch);
250   stack.channel = ch;
251   stack.wrapper = wrapper;
252   rb_thread_call_without_gvl(
253       channel_init_try_register_connection_polling_without_gil, &stack, NULL,
254       NULL);
255 
256   if (args.args != NULL) {
257     xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
258   }
259   if (ch == NULL) {
260     rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
261              target_chars);
262     return Qnil;
263   }
264   rb_ivar_set(self, id_target, target);
265   return self;
266 }
267 
/* Arguments/result marshalled across rb_thread_call_without_gvl for
 * connectivity_state queries. */
typedef struct get_state_stack {
  bg_watched_channel* bg;
  int try_to_connect;
  int out; /* receives a grpc_connectivity_state value */
} get_state_stack;
273 
/* Reads the channel's connectivity state without the GVL; reports
 * GRPC_CHANNEL_SHUTDOWN if the channel has already been destroyed. */
static void* get_state_without_gil(void* arg) {
  get_state_stack* stack = (get_state_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(abort_channel_polling || channel_polling_thread_started);
  if (stack->bg->channel_destroyed) {
    stack->out = GRPC_CHANNEL_SHUTDOWN;
  } else {
    stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
                                                       stack->try_to_connect);
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}
289 
290 /*
291   call-seq:
292     ch.connectivity_state       -> state
293     ch.connectivity_state(true) -> state
294 
295   Indicates the current state of the channel, whose value is one of the
296   constants defined in GRPC::Core::ConnectivityStates.
297 
298   It also tries to connect if the channel is idle in the second form. */
grpc_rb_channel_get_connectivity_state(int argc,VALUE * argv,VALUE self)299 static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
300                                                     VALUE self) {
301   VALUE try_to_connect_param = Qfalse;
302   grpc_rb_channel* wrapper = NULL;
303   get_state_stack stack;
304 
305   /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
306   rb_scan_args(argc, argv, "01", &try_to_connect_param);
307 
308   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
309   if (wrapper->bg_wrapped == NULL) {
310     rb_raise(rb_eRuntimeError, "closed!");
311     return Qnil;
312   }
313 
314   stack.bg = wrapper->bg_wrapped;
315   stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
316   rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);
317 
318   return LONG2NUM(stack.out);
319 }
320 
/* Arguments marshalled across rb_thread_call_without_gvl for the
 * watch_connectivity_state API. */
typedef struct watch_state_stack {
  bg_watched_channel* bg_wrapped;
  gpr_timespec deadline;
  int last_state;
} watch_state_stack;
326 
/* Registers a one-shot WATCH_STATE_API connectivity watch on the polling cq
 * and blocks (without the GVL) until the polling thread signals completion.
 * Returns (void*)1 on a state change, (void*)0 on deadline expiry, aborted
 * polling, or an already-destroyed channel. */
static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
  watch_state_stack* stack = (watch_state_stack*)arg;
  watch_state_op* op = NULL;
  void* success = (void*)0;

  gpr_mu_lock(&global_connection_polling_mu);
  // it's unsafe to do a "watch" after "channel polling abort" because the cq
  // has been shut down.
  if (abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return (void*)0;
  }
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = WATCH_STATE_API;
  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                        stack->last_state, stack->deadline,
                                        channel_polling_cq, op);

  /* The polling thread sets called_back and broadcasts the cv when the
   * watch completes (see run_poll_channels_loop_no_gil). */
  while (!op->op.api_callback_args.called_back) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (op->op.api_callback_args.success) {
    success = (void*)1;
  }
  gpr_free(op);
  gpr_mu_unlock(&global_connection_polling_mu);

  return success;
}
/* Unblocking func for the watch API wait above: destroying the channel
 * forces any pending connectivity watch on it to complete promptly. */
static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
  bg_watched_channel* bg = (bg_watched_channel*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}
366 
367 /* Wait until the channel's connectivity state becomes different from
368  * "last_state", or "deadline" expires.
369  * Returns true if the channel's connectivity state becomes different
370  * from "last_state" within "deadline".
371  * Returns false if "deadline" expires before the channel's connectivity
372  * state changes from "last_state".
373  * */
grpc_rb_channel_watch_connectivity_state(VALUE self,VALUE last_state,VALUE deadline)374 static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
375                                                       VALUE last_state,
376                                                       VALUE deadline) {
377   grpc_rb_channel* wrapper = NULL;
378   watch_state_stack stack;
379   void* op_success = 0;
380 
381   grpc_ruby_fork_guard();
382   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
383 
384   if (wrapper->bg_wrapped == NULL) {
385     rb_raise(rb_eRuntimeError, "closed!");
386     return Qnil;
387   }
388 
389   if (!FIXNUM_P(last_state)) {
390     rb_raise(
391         rb_eTypeError,
392         "bad type for last_state. want a GRPC::Core::ChannelState constant");
393     return Qnil;
394   }
395 
396   stack.bg_wrapped = wrapper->bg_wrapped;
397   stack.deadline = grpc_rb_time_timeval(deadline, 0),
398   stack.last_state = NUM2LONG(last_state);
399 
400   op_success = rb_thread_call_without_gvl(
401       wait_for_watch_state_op_complete_without_gvl, &stack,
402       wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
403 
404   return op_success ? Qtrue : Qfalse;
405 }
406 
407 /* Create a call given a grpc_channel, in order to call method. The request
408    is not sent until grpc_call_invoke is called. */
grpc_rb_channel_create_call(VALUE self,VALUE parent,VALUE mask,VALUE method,VALUE host,VALUE deadline)409 static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
410                                          VALUE method, VALUE host,
411                                          VALUE deadline) {
412   VALUE res = Qnil;
413   grpc_rb_channel* wrapper = NULL;
414   grpc_call* call = NULL;
415   grpc_call* parent_call = NULL;
416   grpc_completion_queue* cq = NULL;
417   int flags = GRPC_PROPAGATE_DEFAULTS;
418   grpc_slice method_slice;
419   grpc_slice host_slice;
420   grpc_slice* host_slice_ptr = NULL;
421   char* tmp_str = NULL;
422 
423   grpc_ruby_fork_guard();
424   if (host != Qnil) {
425     host_slice =
426         grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
427     host_slice_ptr = &host_slice;
428   }
429   if (mask != Qnil) {
430     flags = NUM2UINT(mask);
431   }
432   if (parent != Qnil) {
433     parent_call = grpc_rb_get_wrapped_call(parent);
434   }
435 
436   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
437   if (wrapper->bg_wrapped == NULL) {
438     rb_raise(rb_eRuntimeError, "closed!");
439     return Qnil;
440   }
441 
442   cq = grpc_completion_queue_create_for_pluck(NULL);
443   method_slice =
444       grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
445   call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
446                                   flags, cq, method_slice, host_slice_ptr,
447                                   grpc_rb_time_timeval(deadline,
448                                                        /* absolute time */ 0),
449                                   NULL);
450 
451   if (call == NULL) {
452     tmp_str = grpc_slice_to_c_string(method_slice);
453     rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
454     return Qnil;
455   }
456 
457   grpc_slice_unref(method_slice);
458   if (host_slice_ptr != NULL) {
459     grpc_slice_unref(host_slice);
460   }
461 
462   res = grpc_rb_wrap_call(call, cq);
463 
464   /* Make this channel an instance attribute of the call so that it is not GCed
465    * before the call. */
466   rb_ivar_set(res, id_channel, self);
467   return res;
468 }
469 
470 /* Closes the channel, calling it's destroy method */
471 /* Note this is an API-level call; a wrapped channel's finalizer doesn't call
472  * this */
grpc_rb_channel_destroy(VALUE self)473 static VALUE grpc_rb_channel_destroy(VALUE self) {
474   grpc_rb_channel* wrapper = NULL;
475 
476   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
477   if (wrapper->bg_wrapped != NULL) {
478     rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
479                                wrapper->bg_wrapped, NULL, NULL);
480     wrapper->bg_wrapped = NULL;
481   }
482 
483   return Qnil;
484 }
485 
486 /* Called to obtain the target that this channel accesses. */
grpc_rb_channel_get_target(VALUE self)487 static VALUE grpc_rb_channel_get_target(VALUE self) {
488   grpc_rb_channel* wrapper = NULL;
489   VALUE res = Qnil;
490   char* target = NULL;
491 
492   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
493   target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
494   res = rb_str_new2(target);
495   gpr_free(target);
496 
497   return res;
498 }
499 
500 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_lookup(bg_watched_channel * target)501 static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
502   bg_watched_channel* cur = bg_watched_channel_list_head;
503 
504   while (cur != NULL) {
505     if (cur == target) {
506       return 1;
507     }
508     cur = cur->next;
509   }
510 
511   return 0;
512 }
513 
514 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_create_and_add(grpc_channel * channel)515 static bg_watched_channel* bg_watched_channel_list_create_and_add(
516     grpc_channel* channel) {
517   bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));
518 
519   watched->channel = channel;
520   watched->next = bg_watched_channel_list_head;
521   watched->refcount = 1;
522   bg_watched_channel_list_head = watched;
523   return watched;
524 }
525 
/* Needs to be called under global_connection_polling_mu */
/* Unlinks "target" from the global watched-channel list and frees it.
 * Precondition: the channel has been destroyed and its refcount is 0. */
static void bg_watched_channel_list_free_and_remove(
    bg_watched_channel* target) {
  bg_watched_channel* bg = NULL;

  GPR_ASSERT(bg_watched_channel_list_lookup(target));
  GPR_ASSERT(target->channel_destroyed && target->refcount == 0);
  if (bg_watched_channel_list_head == target) {
    bg_watched_channel_list_head = target->next;
    gpr_free(target);
    return;
  }
  bg = bg_watched_channel_list_head;
  while (bg != NULL && bg->next != NULL) {
    if (bg->next == target) {
      bg->next = bg->next->next;
      gpr_free(target);
      return;
    }
    bg = bg->next;
  }
  /* Unreachable: the lookup assertion above guarantees membership. */
  GPR_ASSERT(0);
}
549 
/* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
 * it onto the background thread for constant watches. */
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg) {
  channel_init_try_register_stack* stack =
      (channel_init_try_register_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  /* Both the list insertion and the watch registration happen under one
   * lock acquisition so the polling thread never sees a half-set-up entry. */
  stack->wrapper->bg_wrapped =
      bg_watched_channel_list_create_and_add(stack->channel);
  grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}
564 
// Needs to be called under global_connection_poolling_mu
/* Starts (or re-arms) a CONTINUOUS_WATCH on "bg" so connectivity keeps
 * being driven for this channel. Takes an extra reference on "bg" that the
 * polling loop drops when the watch fires; no-ops if the channel is
 * destroyed, already shut down, or polling has been aborted. */
static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg) {
  grpc_connectivity_state conn_state;
  watch_state_op* op = NULL;

  GPR_ASSERT(channel_polling_thread_started || abort_channel_polling);

  if (bg->refcount == 0) {
    /* Last reference dropped while a watch was in flight; clean up now. */
    GPR_ASSERT(bg->channel_destroyed);
    bg_watched_channel_list_free_and_remove(bg);
    return;
  }
  GPR_ASSERT(bg->refcount == 1);
  if (bg->channel_destroyed || abort_channel_polling) {
    return;
  }

  conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
  if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
    return;
  }
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  // prevent bg from being free'd by GC while background thread is watching it
  bg->refcount++;

  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = CONTINUOUS_WATCH;
  op->op.continuous_watch_callback_args.bg = bg;
  grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                        gpr_inf_future(GPR_CLOCK_REALTIME),
                                        channel_polling_cq, op);
}
598 
// Note this loop breaks out with a single call of
// "run_poll_channels_loop_no_gil".
// This assumes that a ruby call the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
// early and falls back to current behavior.
/* Body of the background polling thread: drains channel_polling_cq,
 * re-arming continuous watches and completing one-shot watch API ops,
 * until the cq is shut down by run_poll_channels_loop_unblocking_func. */
static void* run_poll_channels_loop_no_gil(void* arg) {
  grpc_event event;
  watch_state_op* op = NULL;
  bg_watched_channel* bg = NULL;
  (void)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(!channel_polling_thread_started);
  channel_polling_thread_started = 1;
  /* Wake threads blocked in wait_until_channel_polling_thread_started_no_gil. */
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);

  for (;;) {
    event = grpc_completion_queue_next(
        channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (event.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    gpr_mu_lock(&global_connection_polling_mu);
    if (event.type == GRPC_OP_COMPLETE) {
      op = (watch_state_op*)event.tag;
      if (op->op_type == CONTINUOUS_WATCH) {
        /* Drop the reference held for this watch, then try to re-arm it. */
        bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
        bg->refcount--;
        grpc_rb_channel_try_register_connection_polling(bg);
        gpr_free(op);
      } else if (op->op_type == WATCH_STATE_API) {
        /* The waiting API thread frees the op after being signalled. */
        grpc_rb_channel_watch_connection_state_op_complete(
            (watch_state_op*)event.tag, event.success);
      } else {
        GPR_ASSERT(0);
      }
    }
    gpr_mu_unlock(&global_connection_polling_mu);
  }
  grpc_completion_queue_destroy(channel_polling_cq);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
          "loop");
  return NULL;
}
647 
// Notify the channel polling loop to cleanup and shutdown.
static void run_poll_channels_loop_unblocking_func(void* arg) {
  bg_watched_channel* bg = NULL;
  (void)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
          "connection polling");
  // early out after first time through
  if (abort_channel_polling) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return;
  }
  abort_channel_polling = 1;

  // force pending watches to end by switching to shutdown state
  bg = bg_watched_channel_list_head;
  while (bg != NULL) {
    if (!bg->channel_destroyed) {
      grpc_channel_destroy(bg->channel);
      bg->channel_destroyed = 1;
    }
    bg = bg->next;
  }

  /* Shutting down the cq makes grpc_completion_queue_next return
   * GRPC_QUEUE_SHUTDOWN, ending the polling loop. */
  grpc_completion_queue_shutdown(channel_polling_cq);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
          "connection polling");
}
681 
// Poll channel connectivity states in background thread without the GIL.
/* Ruby thread entry point (see rb_thread_create in
 * grpc_rb_channel_polling_thread_start). */
static VALUE run_poll_channels_loop(VALUE arg) {
  (void)arg;
  gpr_log(
      GPR_DEBUG,
      "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
  grpc_ruby_init();
  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                             run_poll_channels_loop_unblocking_func, NULL);
  grpc_ruby_shutdown();
  return Qnil;
}
694 
/* Blocks (without the GVL) until the polling thread signals that it has
 * started, polling has been aborted, or *stop_waiting is set by the
 * unblocking func below. */
static void* wait_until_channel_polling_thread_started_no_gil(void* arg) {
  int* stop_waiting = (int*)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
  gpr_mu_lock(&global_connection_polling_mu);
  while (!channel_polling_thread_started && !abort_channel_polling &&
         !*stop_waiting) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}
708 
/* Unblocking func: lets Ruby interrupt the wait above by setting its
 * stop_waiting flag and waking all cv waiters. */
static void wait_until_channel_polling_thread_started_unblocking_func(
    void* arg) {
  int* stop_waiting = (int*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: interrupt wait for channel polling thread to start");
  *stop_waiting = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
}
719 
/* Marks channel polling as aborted (used when the polling thread could not
 * be spawned) and wakes any waiters so they don't block forever. */
static void* set_abort_channel_polling_without_gil(void* arg) {
  (void)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  abort_channel_polling = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}
728 
729 /* Temporary fix for
730  * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
731  * Transports in idle channels can get destroyed. Normally c-core re-connects,
732  * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
733  * only calls c-core's "completion_queu_pluck" API.
734  * This uses a global background thread that calls
735  * "completion_queue_next" on registered "watch_channel_connectivity_state"
736  * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
737  * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
738  */
grpc_rb_channel_polling_thread_start()739 void grpc_rb_channel_polling_thread_start() {
740   VALUE background_thread = Qnil;
741 
742   GPR_ASSERT(!abort_channel_polling);
743   GPR_ASSERT(!channel_polling_thread_started);
744   GPR_ASSERT(channel_polling_cq == NULL);
745 
746   gpr_mu_init(&global_connection_polling_mu);
747   gpr_cv_init(&global_connection_polling_cv);
748 
749   channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
750   background_thread = rb_thread_create(run_poll_channels_loop, NULL);
751 
752   if (!RTEST(background_thread)) {
753     gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
754     rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
755                                NULL, NULL);
756   }
757 }
758 
Init_grpc_propagate_masks()759 static void Init_grpc_propagate_masks() {
760   /* Constants representing call propagation masks in grpc.h */
761   VALUE grpc_rb_mPropagateMasks =
762       rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
763   rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
764                   UINT2NUM(GRPC_PROPAGATE_DEADLINE));
765   rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
766                   UINT2NUM(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
767   rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_TRACING_CONTEXT",
768                   UINT2NUM(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
769   rb_define_const(grpc_rb_mPropagateMasks, "CANCELLATION",
770                   UINT2NUM(GRPC_PROPAGATE_CANCELLATION));
771   rb_define_const(grpc_rb_mPropagateMasks, "DEFAULTS",
772                   UINT2NUM(GRPC_PROPAGATE_DEFAULTS));
773 }
774 
Init_grpc_connectivity_states()775 static void Init_grpc_connectivity_states() {
776   /* Constants representing call propagation masks in grpc.h */
777   VALUE grpc_rb_mConnectivityStates =
778       rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
779   rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
780                   LONG2NUM(GRPC_CHANNEL_IDLE));
781   rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
782                   LONG2NUM(GRPC_CHANNEL_CONNECTING));
783   rb_define_const(grpc_rb_mConnectivityStates, "READY",
784                   LONG2NUM(GRPC_CHANNEL_READY));
785   rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
786                   LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
787   rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
788                   LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
789 }
790 
Init_grpc_channel()791 void Init_grpc_channel() {
792   grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
793   grpc_rb_cChannel =
794       rb_define_class_under(grpc_rb_mGrpcCore, "Channel", rb_cObject);
795 
796   /* Allocates an object managed by the ruby runtime */
797   rb_define_alloc_func(grpc_rb_cChannel, grpc_rb_channel_alloc);
798 
799   /* Provides a ruby constructor and support for dup/clone. */
800   rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
801   rb_define_method(grpc_rb_cChannel, "initialize_copy",
802                    grpc_rb_cannot_init_copy, 1);
803 
804   /* Add ruby analogues of the Channel methods. */
805   rb_define_method(grpc_rb_cChannel, "connectivity_state",
806                    grpc_rb_channel_get_connectivity_state, -1);
807   rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
808                    grpc_rb_channel_watch_connectivity_state, 2);
809   rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
810                    5);
811   rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
812   rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
813   rb_define_alias(grpc_rb_cChannel, "close", "destroy");
814 
815   id_channel = rb_intern("__channel");
816   id_target = rb_intern("__target");
817   rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
818                   ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
819   rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
820                   ID2SYM(rb_intern(GRPC_ARG_ENABLE_CENSUS)));
821   rb_define_const(grpc_rb_cChannel, "MAX_CONCURRENT_STREAMS",
822                   ID2SYM(rb_intern(GRPC_ARG_MAX_CONCURRENT_STREAMS)));
823   rb_define_const(grpc_rb_cChannel, "MAX_MESSAGE_LENGTH",
824                   ID2SYM(rb_intern(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)));
825   id_insecure_channel = rb_intern("this_channel_is_insecure");
826   Init_grpc_propagate_masks();
827   Init_grpc_connectivity_states();
828 }
829 
/* Gets the wrapped channel from the ruby wrapper */
/* NOTE(review): bg_wrapped is dereferenced without a NULL check, so this
 * crashes if the channel has been closed — callers must ensure the channel
 * is still open. */
grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
  grpc_rb_channel* wrapper = NULL;
  TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  return wrapper->bg_wrapped->channel;
}
836