• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #include <ruby/ruby.h>
20 
21 #include "rb_channel.h"
22 
23 #include <grpc/credentials.h>
24 #include <grpc/grpc.h>
25 #include <grpc/grpc_security.h>
26 #include <grpc/support/alloc.h>
27 #include <grpc/support/log.h>
28 #include <grpc/support/time.h>
29 #include <ruby/thread.h>
30 
31 #include "rb_byte_buffer.h"
32 #include "rb_call.h"
33 #include "rb_channel_args.h"
34 #include "rb_channel_credentials.h"
35 #include "rb_completion_queue.h"
36 #include "rb_grpc.h"
37 #include "rb_grpc_imports.generated.h"
38 #include "rb_server.h"
39 #include "rb_xds_channel_credentials.h"
40 
41 /* id_channel is the name of the hidden ivar that preserves a reference to the
42  * channel on a call, so that calls are not GCed before their channel.  */
43 static ID id_channel;
44 
45 /* id_target is the name of the hidden ivar that preserves a reference to the
46  * target string used to create the call, preserved so that it does not get
47  * GCed before the channel */
48 static ID id_target;
49 
50 /* hidden ivar that synchronizes post-fork channel re-creation */
51 static ID id_channel_recreation_mu;
52 
53 /* id_insecure_channel is used to indicate that a channel is insecure */
54 static VALUE id_insecure_channel;
55 
56 /* grpc_rb_cChannel is the ruby class that proxies grpc_channel. */
57 static VALUE grpc_rb_cChannel = Qnil;
58 
59 /* Used during the conversion of a hash to channel args during channel setup */
60 static VALUE grpc_rb_cChannelArgs;
61 
62 typedef struct bg_watched_channel {
63   grpc_channel* channel;
64   // these fields must only be accessed under global_connection_polling_mu
65   struct bg_watched_channel* next;
66   int channel_destroyed;
67   int refcount;
68 } bg_watched_channel;
69 
70 /* grpc_rb_channel wraps a grpc_channel. */
71 typedef struct grpc_rb_channel {
72   VALUE credentials;
73   grpc_channel_args args;
74   /* The actual channel (protected in a wrapper to tell when it's safe to
75    * destroy) */
76   bg_watched_channel* bg_wrapped;
77 } grpc_rb_channel;
78 
79 typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;
80 
81 typedef struct watch_state_op {
82   watch_state_op_type op_type;
83   // from event.success
84   union {
85     struct {
86       int success;
87       // has been called back due to a cq next call
88       int called_back;
89     } api_callback_args;
90     struct {
91       bg_watched_channel* bg;
92     } continuous_watch_callback_args;
93   } op;
94 } watch_state_op;
95 
96 static bg_watched_channel* bg_watched_channel_list_head = NULL;
97 
98 static void grpc_rb_channel_try_register_connection_polling(
99     bg_watched_channel* bg);
100 static void* channel_init_try_register_connection_polling_without_gil(
101     void* arg);
102 
103 typedef struct channel_init_try_register_stack {
104   grpc_channel* channel;
105   grpc_rb_channel* wrapper;
106 } channel_init_try_register_stack;
107 
108 static grpc_completion_queue* g_channel_polling_cq;
109 static gpr_mu global_connection_polling_mu;
110 static gpr_cv global_connection_polling_cv;
111 static int g_abort_channel_polling = 0;
112 static gpr_once g_once_init = GPR_ONCE_INIT;
113 static VALUE g_channel_polling_thread = Qnil;
114 
115 static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
116 static bg_watched_channel* bg_watched_channel_list_create_and_add(
117     grpc_channel* channel);
118 static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
119 static void run_poll_channels_loop_unblocking_func(void* arg);
120 static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg);
121 
122 // Needs to be called under global_connection_polling_mu
grpc_rb_channel_watch_connection_state_op_complete(watch_state_op * op,int success)123 static void grpc_rb_channel_watch_connection_state_op_complete(
124     watch_state_op* op, int success) {
125   GRPC_RUBY_ASSERT(!op->op.api_callback_args.called_back);
126   op->op.api_callback_args.called_back = 1;
127   op->op.api_callback_args.success = success;
128   // wake up the watch API call that's waiting on this op
129   gpr_cv_broadcast(&global_connection_polling_cv);
130 }
131 
132 /* Avoids destroying a channel twice. */
grpc_rb_channel_safe_destroy(bg_watched_channel * bg)133 static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
134   gpr_mu_lock(&global_connection_polling_mu);
135   GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
136   if (!bg->channel_destroyed) {
137     grpc_channel_destroy(bg->channel);
138     bg->channel_destroyed = 1;
139   }
140   bg->refcount--;
141   if (bg->refcount == 0) {
142     bg_watched_channel_list_free_and_remove(bg);
143   }
144   gpr_mu_unlock(&global_connection_polling_mu);
145 }
146 
channel_safe_destroy_without_gil(void * arg)147 static void* channel_safe_destroy_without_gil(void* arg) {
148   grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
149   return NULL;
150 }
151 
grpc_rb_channel_free_internal(void * p)152 static void grpc_rb_channel_free_internal(void* p) {
153   grpc_rb_channel* ch = NULL;
154   if (p == NULL) {
155     return;
156   };
157   ch = (grpc_rb_channel*)p;
158   if (ch->bg_wrapped != NULL) {
159     /* assumption made here: it's ok to directly gpr_mu_lock the global
160      * connection polling mutex because we're in a finalizer,
161      * and we can count on this thread to not be interrupted or
162      * yield the gil. */
163     grpc_rb_channel_safe_destroy(ch->bg_wrapped);
164     grpc_rb_channel_args_destroy(&ch->args);
165   }
166   xfree(p);
167 }
168 
169 /* Destroys Channel instances. */
grpc_rb_channel_free(void * p)170 static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }
171 
172 /* Protects the mark object from GC */
grpc_rb_channel_mark(void * p)173 static void grpc_rb_channel_mark(void* p) {
174   grpc_rb_channel* channel = NULL;
175   if (p == NULL) {
176     return;
177   }
178   channel = (grpc_rb_channel*)p;
179   if (channel->credentials != Qnil) {
180     rb_gc_mark(channel->credentials);
181   }
182 }
183 
184 static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
185                                                 {grpc_rb_channel_mark,
186                                                  grpc_rb_channel_free,
187                                                  GRPC_RB_MEMSIZE_UNAVAILABLE,
188                                                  {NULL, NULL}},
189                                                 NULL,
190                                                 NULL,
191 #ifdef RUBY_TYPED_FREE_IMMEDIATELY
192                                                 RUBY_TYPED_FREE_IMMEDIATELY
193 #endif
194 };
195 
196 /* Allocates grpc_rb_channel instances. */
grpc_rb_channel_alloc(VALUE cls)197 static VALUE grpc_rb_channel_alloc(VALUE cls) {
198   grpc_ruby_init();
199   grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
200   wrapper->bg_wrapped = NULL;
201   wrapper->credentials = Qnil;
202   MEMZERO(&wrapper->args, grpc_channel_args, 1);
203   return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
204 }
205 
206 /*
207   call-seq:
208     insecure_channel = Channel:new("myhost:8080", {'arg1': 'value1'},
209                                    :this_channel_is_insecure)
210     creds = ...
211     secure_channel = Channel:new("myhost:443", {'arg1': 'value1'}, creds)
212 
213   Creates channel instances. */
grpc_rb_channel_init(int argc,VALUE * argv,VALUE self)214 static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
215   VALUE channel_args = Qnil;
216   VALUE credentials = Qnil;
217   VALUE target = Qnil;
218   grpc_rb_channel* wrapper = NULL;
219   grpc_channel* ch = NULL;
220   grpc_channel_credentials* creds = NULL;
221   char* target_chars = NULL;
222   channel_init_try_register_stack stack;
223 
224   grpc_ruby_fork_guard();
225   /* "3" == 3 mandatory args */
226   rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);
227 
228   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
229   target_chars = StringValueCStr(target);
230   grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
231   if (TYPE(credentials) == T_SYMBOL) {
232     if (id_insecure_channel != SYM2ID(credentials)) {
233       rb_raise(rb_eTypeError,
234                "bad creds symbol, want :this_channel_is_insecure");
235       return Qnil;
236     }
237     grpc_channel_credentials* insecure_creds =
238         grpc_insecure_credentials_create();
239     ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
240     grpc_channel_credentials_release(insecure_creds);
241   } else {
242     wrapper->credentials = credentials;
243     if (grpc_rb_is_channel_credentials(credentials)) {
244       creds = grpc_rb_get_wrapped_channel_credentials(credentials);
245     } else if (grpc_rb_is_xds_channel_credentials(credentials)) {
246       creds = grpc_rb_get_wrapped_xds_channel_credentials(credentials);
247     } else {
248       rb_raise(rb_eTypeError,
249                "bad creds, want ChannelCredentials or XdsChannelCredentials");
250       return Qnil;
251     }
252     ch = grpc_channel_create(target_chars, creds, &wrapper->args);
253   }
254 
255   GRPC_RUBY_ASSERT(ch);
256   stack.channel = ch;
257   stack.wrapper = wrapper;
258   rb_thread_call_without_gvl(
259       channel_init_try_register_connection_polling_without_gil, &stack, NULL,
260       NULL);
261   if (ch == NULL) {
262     rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
263              target_chars);
264     return Qnil;
265   }
266   rb_ivar_set(self, id_target, target);
267   rb_ivar_set(self, id_channel_recreation_mu, rb_mutex_new());
268   return self;
269 }
270 
271 typedef struct get_state_stack {
272   bg_watched_channel* bg;
273   int try_to_connect;
274   int out;
275 } get_state_stack;
276 
get_state_without_gil(void * arg)277 static void* get_state_without_gil(void* arg) {
278   get_state_stack* stack = (get_state_stack*)arg;
279 
280   gpr_mu_lock(&global_connection_polling_mu);
281   if (stack->bg->channel_destroyed) {
282     stack->out = GRPC_CHANNEL_SHUTDOWN;
283   } else {
284     stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
285                                                        stack->try_to_connect);
286   }
287   gpr_mu_unlock(&global_connection_polling_mu);
288 
289   return NULL;
290 }
291 
292 /*
293   call-seq:
294     ch.connectivity_state       -> state
295     ch.connectivity_state(true) -> state
296 
297   Indicates the current state of the channel, whose value is one of the
298   constants defined in GRPC::Core::ConnectivityStates.
299 
300   It also tries to connect if the channel is idle in the second form. */
grpc_rb_channel_get_connectivity_state(int argc,VALUE * argv,VALUE self)301 static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
302                                                     VALUE self) {
303   VALUE try_to_connect_param = Qfalse;
304   grpc_rb_channel* wrapper = NULL;
305   get_state_stack stack;
306 
307   /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
308   rb_scan_args(argc, argv, "01", &try_to_connect_param);
309 
310   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
311   if (wrapper->bg_wrapped == NULL) {
312     rb_raise(rb_eRuntimeError, "closed!");
313     return Qnil;
314   }
315 
316   stack.bg = wrapper->bg_wrapped;
317   stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
318   rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);
319 
320   return LONG2NUM(stack.out);
321 }
322 
323 typedef struct watch_state_stack {
324   bg_watched_channel* bg_wrapped;
325   gpr_timespec deadline;
326   int last_state;
327 } watch_state_stack;
328 
wait_for_watch_state_op_complete_without_gvl(void * arg)329 static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
330   watch_state_stack* stack = (watch_state_stack*)arg;
331   watch_state_op* op = NULL;
332   void* success = (void*)0;
333 
334   gpr_mu_lock(&global_connection_polling_mu);
335   // it's unsafe to do a "watch" after "channel polling abort" because the cq
336   // has been shut down.
337   if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
338     gpr_mu_unlock(&global_connection_polling_mu);
339     return (void*)0;
340   }
341   op = gpr_zalloc(sizeof(watch_state_op));
342   op->op_type = WATCH_STATE_API;
343   grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
344                                         stack->last_state, stack->deadline,
345                                         g_channel_polling_cq, op);
346 
347   while (!op->op.api_callback_args.called_back) {
348     gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
349                 gpr_inf_future(GPR_CLOCK_REALTIME));
350   }
351   if (op->op.api_callback_args.success) {
352     success = (void*)1;
353   }
354   gpr_free(op);
355   gpr_mu_unlock(&global_connection_polling_mu);
356 
357   return success;
358 }
wait_for_watch_state_op_complete_unblocking_func(void * arg)359 static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
360   bg_watched_channel* bg = (bg_watched_channel*)arg;
361   gpr_mu_lock(&global_connection_polling_mu);
362   if (!bg->channel_destroyed) {
363     grpc_channel_destroy(bg->channel);
364     bg->channel_destroyed = 1;
365   }
366   gpr_mu_unlock(&global_connection_polling_mu);
367 }
368 
369 /* Wait until the channel's connectivity state becomes different from
370  * "last_state", or "deadline" expires.
371  * Returns true if the channel's connectivity state becomes different
372  * from "last_state" within "deadline".
373  * Returns false if "deadline" expires before the channel's connectivity
374  * state changes from "last_state".
375  * */
grpc_rb_channel_watch_connectivity_state(VALUE self,VALUE last_state,VALUE deadline)376 static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
377                                                       VALUE last_state,
378                                                       VALUE deadline) {
379   grpc_rb_channel* wrapper = NULL;
380   watch_state_stack stack;
381   void* op_success = 0;
382 
383   grpc_ruby_fork_guard();
384   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
385 
386   if (wrapper->bg_wrapped == NULL) {
387     rb_raise(rb_eRuntimeError, "closed!");
388     return Qnil;
389   }
390 
391   if (!FIXNUM_P(last_state)) {
392     rb_raise(
393         rb_eTypeError,
394         "bad type for last_state. want a GRPC::Core::ChannelState constant");
395     return Qnil;
396   }
397 
398   stack.bg_wrapped = wrapper->bg_wrapped;
399   stack.deadline = grpc_rb_time_timeval(deadline, 0),
400   stack.last_state = NUM2LONG(last_state);
401 
402   op_success = rb_thread_call_without_gvl(
403       wait_for_watch_state_op_complete_without_gvl, &stack,
404       wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);
405 
406   return op_success ? Qtrue : Qfalse;
407 }
408 
grpc_rb_channel_maybe_recreate_channel_after_fork(grpc_rb_channel * wrapper,VALUE target)409 static void grpc_rb_channel_maybe_recreate_channel_after_fork(
410     grpc_rb_channel* wrapper, VALUE target) {
411   // TODO(apolcyn): maybe check if fork support is enabled here.
412   // The only way we can get bg->channel_destroyed without bg itself being
413   // NULL is if we destroyed the channel during GRPC::prefork.
414   bg_watched_channel* bg = wrapper->bg_wrapped;
415   if (bg->channel_destroyed) {
416     // There must be one ref at this point, held by the ruby-level channel
417     // object, drop this one here.
418     GRPC_RUBY_ASSERT(bg->refcount == 1);
419     rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
420                                NULL);
421     // re-create C-core channel
422     const char* target_str = StringValueCStr(target);
423     grpc_channel* channel;
424     if (wrapper->credentials == Qnil) {
425       grpc_channel_credentials* insecure_creds =
426           grpc_insecure_credentials_create();
427       channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
428       grpc_channel_credentials_release(insecure_creds);
429     } else {
430       grpc_channel_credentials* creds;
431       if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
432         creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
433       } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
434         creds =
435             grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
436       } else {
437         rb_raise(rb_eTypeError,
438                  "failed to re-create channel after fork: bad creds, want "
439                  "ChannelCredentials or XdsChannelCredentials");
440         return;
441       }
442       channel = grpc_channel_create(target_str, creds, &wrapper->args);
443     }
444     // re-register with channel polling thread
445     channel_init_try_register_stack stack;
446     stack.channel = channel;
447     stack.wrapper = wrapper;
448     rb_thread_call_without_gvl(
449         channel_init_try_register_connection_polling_without_gil, &stack, NULL,
450         NULL);
451   }
452 }
453 
454 /* Create a call given a grpc_channel, in order to call method. The request
455    is not sent until grpc_call_invoke is called. */
grpc_rb_channel_create_call(VALUE self,VALUE parent,VALUE mask,VALUE method,VALUE host,VALUE deadline)456 static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
457                                          VALUE method, VALUE host,
458                                          VALUE deadline) {
459   VALUE res = Qnil;
460   grpc_rb_channel* wrapper = NULL;
461   grpc_call* call = NULL;
462   grpc_call* parent_call = NULL;
463   grpc_completion_queue* cq = NULL;
464   int flags = GRPC_PROPAGATE_DEFAULTS;
465   grpc_slice method_slice;
466   grpc_slice host_slice;
467   grpc_slice* host_slice_ptr = NULL;
468   char* tmp_str = NULL;
469 
470   grpc_ruby_fork_guard();
471   if (host != Qnil) {
472     host_slice =
473         grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
474     host_slice_ptr = &host_slice;
475   }
476   if (mask != Qnil) {
477     flags = NUM2UINT(mask);
478   }
479   if (parent != Qnil) {
480     parent_call = grpc_rb_get_wrapped_call(parent);
481   }
482 
483   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
484   if (wrapper->bg_wrapped == NULL) {
485     rb_raise(rb_eRuntimeError, "closed!");
486     return Qnil;
487   }
488   // TODO(apolcyn): only do this check if fork support is enabled
489   rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
490   grpc_rb_channel_maybe_recreate_channel_after_fork(
491       wrapper, rb_ivar_get(self, id_target));
492   rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));
493 
494   cq = grpc_completion_queue_create_for_pluck(NULL);
495   method_slice =
496       grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
497   call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
498                                   flags, cq, method_slice, host_slice_ptr,
499                                   grpc_rb_time_timeval(deadline,
500                                                        /* absolute time */ 0),
501                                   NULL);
502 
503   if (call == NULL) {
504     tmp_str = grpc_slice_to_c_string(method_slice);
505     rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
506     return Qnil;
507   }
508 
509   grpc_slice_unref(method_slice);
510   if (host_slice_ptr != NULL) {
511     grpc_slice_unref(host_slice);
512   }
513 
514   res = grpc_rb_wrap_call(call, cq);
515 
516   /* Make this channel an instance attribute of the call so that it is not GCed
517    * before the call. */
518   rb_ivar_set(res, id_channel, self);
519   return res;
520 }
521 
522 /* Closes the channel, calling it's destroy method */
523 /* Note this is an API-level call; a wrapped channel's finalizer doesn't call
524  * this */
grpc_rb_channel_destroy(VALUE self)525 static VALUE grpc_rb_channel_destroy(VALUE self) {
526   grpc_rb_channel* wrapper = NULL;
527 
528   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
529   if (wrapper->bg_wrapped != NULL) {
530     rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
531                                wrapper->bg_wrapped, NULL, NULL);
532     wrapper->bg_wrapped = NULL;
533   }
534 
535   return Qnil;
536 }
537 
538 /* Called to obtain the target that this channel accesses. */
grpc_rb_channel_get_target(VALUE self)539 static VALUE grpc_rb_channel_get_target(VALUE self) {
540   grpc_rb_channel* wrapper = NULL;
541   VALUE res = Qnil;
542   char* target = NULL;
543 
544   TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
545   target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
546   res = rb_str_new2(target);
547   gpr_free(target);
548 
549   return res;
550 }
551 
552 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_lookup(bg_watched_channel * target)553 static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
554   bg_watched_channel* cur = bg_watched_channel_list_head;
555 
556   while (cur != NULL) {
557     if (cur == target) {
558       return 1;
559     }
560     cur = cur->next;
561   }
562 
563   return 0;
564 }
565 
566 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_create_and_add(grpc_channel * channel)567 static bg_watched_channel* bg_watched_channel_list_create_and_add(
568     grpc_channel* channel) {
569   bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));
570 
571   watched->channel = channel;
572   watched->next = bg_watched_channel_list_head;
573   watched->refcount = 1;
574   bg_watched_channel_list_head = watched;
575   return watched;
576 }
577 
578 /* Needs to be called under global_connection_polling_mu */
bg_watched_channel_list_free_and_remove(bg_watched_channel * target)579 static void bg_watched_channel_list_free_and_remove(
580     bg_watched_channel* target) {
581   bg_watched_channel* bg = NULL;
582 
583   GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(target));
584   GRPC_RUBY_ASSERT(target->channel_destroyed && target->refcount == 0);
585   if (bg_watched_channel_list_head == target) {
586     bg_watched_channel_list_head = target->next;
587     gpr_free(target);
588     return;
589   }
590   bg = bg_watched_channel_list_head;
591   while (bg != NULL && bg->next != NULL) {
592     if (bg->next == target) {
593       bg->next = bg->next->next;
594       gpr_free(target);
595       return;
596     }
597     bg = bg->next;
598   }
599   GRPC_RUBY_ASSERT(0);
600 }
601 
602 /* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
603  * it onto the background thread for constant watches. */
channel_init_try_register_connection_polling_without_gil(void * arg)604 static void* channel_init_try_register_connection_polling_without_gil(
605     void* arg) {
606   channel_init_try_register_stack* stack =
607       (channel_init_try_register_stack*)arg;
608 
609   gpr_mu_lock(&global_connection_polling_mu);
610   stack->wrapper->bg_wrapped =
611       bg_watched_channel_list_create_and_add(stack->channel);
612   grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
613   gpr_mu_unlock(&global_connection_polling_mu);
614   return NULL;
615 }
616 
617 // Needs to be called under global_connection_poolling_mu
grpc_rb_channel_try_register_connection_polling(bg_watched_channel * bg)618 static void grpc_rb_channel_try_register_connection_polling(
619     bg_watched_channel* bg) {
620   grpc_connectivity_state conn_state;
621   watch_state_op* op = NULL;
622   if (bg->refcount == 0) {
623     GRPC_RUBY_ASSERT(bg->channel_destroyed);
624     bg_watched_channel_list_free_and_remove(bg);
625     return;
626   }
627   GRPC_RUBY_ASSERT(bg->refcount == 1);
628   if (bg->channel_destroyed || g_abort_channel_polling) {
629     return;
630   }
631   conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
632   if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
633     return;
634   }
635   GRPC_RUBY_ASSERT(bg_watched_channel_list_lookup(bg));
636   // prevent bg from being free'd by GC while background thread is watching it
637   bg->refcount++;
638   op = gpr_zalloc(sizeof(watch_state_op));
639   op->op_type = CONTINUOUS_WATCH;
640   op->op.continuous_watch_callback_args.bg = bg;
641   grpc_channel_watch_connectivity_state(bg->channel, conn_state,
642                                         gpr_inf_future(GPR_CLOCK_REALTIME),
643                                         g_channel_polling_cq, op);
644 }
645 
646 // Note this loop breaks out with a single call of
647 // "run_poll_channels_loop_no_gil".
648 // This assumes that a ruby call the unblocking func
649 // indicates process shutdown.
650 // In the worst case, this stops polling channel connectivity
651 // early and falls back to current behavior.
run_poll_channels_loop_no_gil(void * arg)652 static void* run_poll_channels_loop_no_gil(void* arg) {
653   grpc_event event;
654   watch_state_op* op = NULL;
655   bg_watched_channel* bg = NULL;
656   (void)arg;
657   grpc_absl_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");
658 
659   gpr_mu_lock(&global_connection_polling_mu);
660   gpr_cv_broadcast(&global_connection_polling_cv);
661   gpr_mu_unlock(&global_connection_polling_mu);
662 
663   for (;;) {
664     event = grpc_completion_queue_next(
665         g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
666     if (event.type == GRPC_QUEUE_SHUTDOWN) {
667       break;
668     }
669     gpr_mu_lock(&global_connection_polling_mu);
670     if (event.type == GRPC_OP_COMPLETE) {
671       op = (watch_state_op*)event.tag;
672       if (op->op_type == CONTINUOUS_WATCH) {
673         bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
674         bg->refcount--;
675         grpc_rb_channel_try_register_connection_polling(bg);
676         gpr_free(op);
677       } else if (op->op_type == WATCH_STATE_API) {
678         grpc_rb_channel_watch_connection_state_op_complete(
679             (watch_state_op*)event.tag, event.success);
680       } else {
681         GRPC_RUBY_ASSERT(0);
682       }
683     }
684     gpr_mu_unlock(&global_connection_polling_mu);
685   }
686   grpc_completion_queue_destroy(g_channel_polling_cq);
687   grpc_absl_log(
688       GPR_DEBUG,
689       "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
690       "loop");
691   return NULL;
692 }
693 
run_poll_channels_loop_unblocking_func(void * arg)694 static void run_poll_channels_loop_unblocking_func(void* arg) {
695   run_poll_channels_loop_unblocking_func_wrapper(arg);
696 }
697 
698 // Notify the channel polling loop to cleanup and shutdown.
run_poll_channels_loop_unblocking_func_wrapper(void * arg)699 static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg) {
700   bg_watched_channel* bg = NULL;
701   (void)arg;
702 
703   gpr_mu_lock(&global_connection_polling_mu);
704   grpc_absl_log(
705       GPR_DEBUG,
706       "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
707       "connection polling");
708   // early out after first time through
709   if (g_abort_channel_polling) {
710     gpr_mu_unlock(&global_connection_polling_mu);
711     return NULL;
712   }
713   g_abort_channel_polling = 1;
714 
715   // force pending watches to end by switching to shutdown state
716   bg = bg_watched_channel_list_head;
717   while (bg != NULL) {
718     if (!bg->channel_destroyed) {
719       grpc_channel_destroy(bg->channel);
720       bg->channel_destroyed = 1;
721     }
722     bg = bg->next;
723   }
724 
725   grpc_absl_log_int(
726       GPR_DEBUG,
727       "GRPC_RUBY: cq shutdown on global polling cq. pid: ", getpid());
728   grpc_completion_queue_shutdown(g_channel_polling_cq);
729   gpr_cv_broadcast(&global_connection_polling_cv);
730   gpr_mu_unlock(&global_connection_polling_mu);
731   grpc_absl_log(
732       GPR_DEBUG,
733       "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
734       "connection polling");
735   return NULL;
736 }
737 
738 // Poll channel connectivity states in background thread without the GIL.
run_poll_channels_loop(void * arg)739 static VALUE run_poll_channels_loop(void* arg) {
740   (void)arg;
741   grpc_absl_log(
742       GPR_DEBUG,
743       "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
744   rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
745                              run_poll_channels_loop_unblocking_func, NULL);
746   return Qnil;
747 }
748 
set_abort_channel_polling_without_gil(void * arg)749 static void* set_abort_channel_polling_without_gil(void* arg) {
750   (void)arg;
751   gpr_mu_lock(&global_connection_polling_mu);
752   g_abort_channel_polling = 1;
753   gpr_cv_broadcast(&global_connection_polling_cv);
754   gpr_mu_unlock(&global_connection_polling_mu);
755   return NULL;
756 }
757 
do_basic_init()758 static void do_basic_init() {
759   gpr_mu_init(&global_connection_polling_mu);
760   gpr_cv_init(&global_connection_polling_cv);
761 }
762 
763 /* Temporary fix for
764  * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
765  * Transports in idle channels can get destroyed. Normally c-core re-connects,
766  * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
767  * only calls c-core's "completion_queu_pluck" API.
768  * This uses a global background thread that calls
769  * "completion_queue_next" on registered "watch_channel_connectivity_state"
770  * calls - so that c-core can reconnect if needed, when there aren't any RPC's.
771  * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
772  */
grpc_rb_channel_polling_thread_start()773 void grpc_rb_channel_polling_thread_start() {
774   gpr_once_init(&g_once_init, do_basic_init);
775   GRPC_RUBY_ASSERT(!RTEST(g_channel_polling_thread));
776   GRPC_RUBY_ASSERT(!g_abort_channel_polling);
777   GRPC_RUBY_ASSERT(g_channel_polling_cq == NULL);
778 
779   g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
780   g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);
781 
782   if (!RTEST(g_channel_polling_thread)) {
783     grpc_absl_log(GPR_ERROR,
784                   "GRPC_RUBY: failed to spawn channel polling thread");
785     rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
786                                NULL, NULL);
787     return;
788   }
789 }
790 
grpc_rb_channel_polling_thread_stop()791 void grpc_rb_channel_polling_thread_stop() {
792   if (!RTEST(g_channel_polling_thread)) {
793     grpc_absl_log(
794         GPR_ERROR,
795         "GRPC_RUBY: channel polling thread stop: thread was not started");
796     return;
797   }
798   rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func_wrapper,
799                              NULL, NULL, NULL);
800   rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
801   // state associated with the channel polling thread is destroyed, reset so
802   // we can start again later
803   g_channel_polling_thread = Qnil;
804   g_abort_channel_polling = false;
805   g_channel_polling_cq = NULL;
806 }
807 
Init_grpc_propagate_masks()808 static void Init_grpc_propagate_masks() {
809   /* Constants representing call propagation masks in grpc.h */
810   VALUE grpc_rb_mPropagateMasks =
811       rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
812   rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
813                   UINT2NUM(GRPC_PROPAGATE_DEADLINE));
814   rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
815                   UINT2NUM(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
816   rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_TRACING_CONTEXT",
817                   UINT2NUM(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
818   rb_define_const(grpc_rb_mPropagateMasks, "CANCELLATION",
819                   UINT2NUM(GRPC_PROPAGATE_CANCELLATION));
820   rb_define_const(grpc_rb_mPropagateMasks, "DEFAULTS",
821                   UINT2NUM(GRPC_PROPAGATE_DEFAULTS));
822 }
823 
Init_grpc_connectivity_states()824 static void Init_grpc_connectivity_states() {
825   /* Constants representing call propagation masks in grpc.h */
826   VALUE grpc_rb_mConnectivityStates =
827       rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
828   rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
829                   LONG2NUM(GRPC_CHANNEL_IDLE));
830   rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
831                   LONG2NUM(GRPC_CHANNEL_CONNECTING));
832   rb_define_const(grpc_rb_mConnectivityStates, "READY",
833                   LONG2NUM(GRPC_CHANNEL_READY));
834   rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
835                   LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
836   rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
837                   LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
838 }
839 
Init_grpc_channel()840 void Init_grpc_channel() {
841   rb_global_variable(&g_channel_polling_thread);
842   grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
843   rb_undef_alloc_func(grpc_rb_cChannelArgs);
844   grpc_rb_cChannel =
845       rb_define_class_under(grpc_rb_mGrpcCore, "Channel", rb_cObject);
846 
847   /* Allocates an object managed by the ruby runtime */
848   rb_define_alloc_func(grpc_rb_cChannel, grpc_rb_channel_alloc);
849 
850   /* Provides a ruby constructor and support for dup/clone. */
851   rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
852   rb_define_method(grpc_rb_cChannel, "initialize_copy",
853                    grpc_rb_cannot_init_copy, 1);
854 
855   /* Add ruby analogues of the Channel methods. */
856   rb_define_method(grpc_rb_cChannel, "connectivity_state",
857                    grpc_rb_channel_get_connectivity_state, -1);
858   rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
859                    grpc_rb_channel_watch_connectivity_state, 2);
860   rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
861                    5);
862   rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
863   rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
864   rb_define_alias(grpc_rb_cChannel, "close", "destroy");
865 
866   id_channel = rb_intern("__channel");
867   id_target = rb_intern("__target");
868   id_channel_recreation_mu = rb_intern("__channel_recreation_mu");
869   rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
870                   ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
871   rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
872                   ID2SYM(rb_intern(GRPC_ARG_ENABLE_CENSUS)));
873   rb_define_const(grpc_rb_cChannel, "MAX_CONCURRENT_STREAMS",
874                   ID2SYM(rb_intern(GRPC_ARG_MAX_CONCURRENT_STREAMS)));
875   rb_define_const(grpc_rb_cChannel, "MAX_MESSAGE_LENGTH",
876                   ID2SYM(rb_intern(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)));
877   id_insecure_channel = rb_intern("this_channel_is_insecure");
878   Init_grpc_propagate_masks();
879   Init_grpc_connectivity_states();
880 }
881 
882 /* Gets the wrapped channel from the ruby wrapper */
grpc_rb_get_wrapped_channel(VALUE v)883 grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
884   grpc_rb_channel* wrapper = NULL;
885   TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
886   return wrapper->bg_wrapped->channel;
887 }
888