/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/surface/channel.h"

#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/static_metadata.h"

/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS.
 *  Avoids needing to take a metadata context lock for sending status
 *  if the status code is <= NUM_CACHED_STATUS_ELEMS.
 *  Sized to allow the most commonly used codes to fit in
 *  (OK, Cancelled, Unknown). */
#define NUM_CACHED_STATUS_ELEMS 3

static void destroy_channel(void* arg, grpc_error_handle error);

grpc_channel* grpc_channel_create_with_builder(
    grpc_channel_stack_builder* builder,
    grpc_channel_stack_type channel_stack_type, grpc_error_handle* error) {
  char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
  grpc_channel_args* args = grpc_channel_args_copy(
      grpc_channel_stack_builder_get_channel_arguments(builder));
  grpc_resource_user* resource_user =
      grpc_channel_stack_builder_get_resource_user(builder);
  grpc_channel* channel;
  if (channel_stack_type == GRPC_SERVER_CHANNEL) {
    GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
  } else {
    GRPC_STATS_INC_CLIENT_CHANNELS_CREATED();
  }
  grpc_error_handle builder_error = grpc_channel_stack_builder_finish(
      builder, sizeof(grpc_channel), 1, destroy_channel, nullptr,
      reinterpret_cast<void**>(&channel));
  if (builder_error != GRPC_ERROR_NONE) {
    gpr_log(GPR_ERROR, "channel stack builder failed: %s",
            grpc_error_std_string(builder_error).c_str());
    GPR_ASSERT(channel == nullptr);
    if (error != nullptr) {
      *error = builder_error;
    } else {
      GRPC_ERROR_UNREF(builder_error);
    }
    gpr_free(target);
    grpc_channel_args_destroy(args);
    return nullptr;
  }
  channel->target = target;
  channel->resource_user = resource_user;
  channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
  channel->registration_table.Init();

  gpr_atm_no_barrier_store(
      &channel->call_size_estimate,
      (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size +
          grpc_call_get_initial_size_estimate());

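  // Scan the channel args once: pick up the default compression level and
  // algorithm, the enabled-algorithms bitset, and (if present) the channelz
  // node that was stashed in the args.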
  grpc_compression_options_init(&channel->compression_options);
  for (size_t i = 0; i < args->num_args; i++) {
    if (0 ==
        strcmp(args->args[i].key, GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
      channel->compression_options.default_level.is_set = true;
      channel->compression_options.default_level.level =
          static_cast<grpc_compression_level>(grpc_channel_arg_get_integer(
              &args->args[i],
              {GRPC_COMPRESS_LEVEL_NONE, GRPC_COMPRESS_LEVEL_NONE,
               GRPC_COMPRESS_LEVEL_COUNT - 1}));
    } else if (0 == strcmp(args->args[i].key,
                           GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
      channel->compression_options.default_algorithm.is_set = true;
      channel->compression_options.default_algorithm.algorithm =
          static_cast<grpc_compression_algorithm>(grpc_channel_arg_get_integer(
              &args->args[i], {GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
                               GRPC_COMPRESS_ALGORITHMS_COUNT - 1}));
    } else if (0 ==
               strcmp(args->args[i].key,
                      GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
      channel->compression_options.enabled_algorithms_bitset =
          static_cast<uint32_t>(args->args[i].value.integer) |
          0x1; /* always support no compression */
    } else if (0 == strcmp(args->args[i].key, GRPC_ARG_CHANNELZ_CHANNEL_NODE)) {
      if (args->args[i].type == GRPC_ARG_POINTER) {
        GPR_ASSERT(args->args[i].value.pointer.p != nullptr);
        channel->channelz_node = static_cast<grpc_core::channelz::ChannelNode*>(
                                     args->args[i].value.pointer.p)
                                     ->Ref();
      } else {
        gpr_log(GPR_DEBUG,
                GRPC_ARG_CHANNELZ_CHANNEL_NODE " should be a pointer");
      }
    }
  }

  grpc_channel_args_destroy(args);
  return channel;
}

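// If the args do not already specify GRPC_ARG_DEFAULT_AUTHORITY, fall back to
// the SSL target-name override (GRPC_SSL_TARGET_NAME_OVERRIDE_ARG), if any, as
// the channel's default authority.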
static grpc_core::UniquePtr<char> get_default_authority(
    const grpc_channel_args* input_args) {
  bool has_default_authority = false;
  char* ssl_override = nullptr;
  grpc_core::UniquePtr<char> default_authority;
  const size_t num_args = input_args != nullptr ? input_args->num_args : 0;
  for (size_t i = 0; i < num_args; ++i) {
    if (0 == strcmp(input_args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
      has_default_authority = true;
    } else if (0 == strcmp(input_args->args[i].key,
                           GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
      ssl_override = grpc_channel_arg_get_string(&input_args->args[i]);
    }
  }
  if (!has_default_authority && ssl_override != nullptr) {
    default_authority.reset(gpr_strdup(ssl_override));
  }
  return default_authority;
}

static grpc_channel_args* build_channel_args(
    const grpc_channel_args* input_args, char* default_authority) {
  grpc_arg new_args[1];
  size_t num_new_args = 0;
  if (default_authority != nullptr) {
    new_args[num_new_args++] = grpc_channel_arg_string_create(
        const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY), default_authority);
  }
  return grpc_channel_args_copy_and_add(input_args, new_args, num_new_args);
}

namespace {

void* channelz_node_copy(void* p) {
  grpc_core::channelz::ChannelNode* node =
      static_cast<grpc_core::channelz::ChannelNode*>(p);
  node->Ref().release();
  return p;
}
void channelz_node_destroy(void* p) {
  grpc_core::channelz::ChannelNode* node =
      static_cast<grpc_core::channelz::ChannelNode*>(p);
  node->Unref();
}
int channelz_node_cmp(void* p1, void* p2) { return GPR_ICMP(p1, p2); }
const grpc_arg_pointer_vtable channelz_node_arg_vtable = {
    channelz_node_copy, channelz_node_destroy, channelz_node_cmp};

void CreateChannelzNode(grpc_channel_stack_builder* builder) {
  const grpc_channel_args* args =
      grpc_channel_stack_builder_get_channel_arguments(builder);
  // Check whether channelz is enabled.
  const bool channelz_enabled = grpc_channel_args_find_bool(
      args, GRPC_ARG_ENABLE_CHANNELZ, GRPC_ENABLE_CHANNELZ_DEFAULT);
  if (!channelz_enabled) return;
  // Get parameters needed to create the channelz node.
  const size_t channel_tracer_max_memory = grpc_channel_args_find_integer(
      args, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE,
      {GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX});
  const bool is_internal_channel = grpc_channel_args_find_bool(
      args, GRPC_ARG_CHANNELZ_IS_INTERNAL_CHANNEL, false);
  // Create the channelz node.
  const char* target = grpc_channel_stack_builder_get_target(builder);
  grpc_core::RefCountedPtr<grpc_core::channelz::ChannelNode> channelz_node =
      grpc_core::MakeRefCounted<grpc_core::channelz::ChannelNode>(
          target != nullptr ? target : "", channel_tracer_max_memory,
          is_internal_channel);
  channelz_node->AddTraceEvent(
      grpc_core::channelz::ChannelTrace::Severity::Info,
      grpc_slice_from_static_string("Channel created"));
  // Add channelz node to channel args.
  // We remove the is_internal_channel arg, since we no longer need it.
  grpc_arg new_arg = grpc_channel_arg_pointer_create(
      const_cast<char*>(GRPC_ARG_CHANNELZ_CHANNEL_NODE), channelz_node.get(),
      &channelz_node_arg_vtable);
  const char* args_to_remove[] = {GRPC_ARG_CHANNELZ_IS_INTERNAL_CHANNEL};
  grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
      args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
  grpc_channel_stack_builder_set_channel_arguments(builder, new_args);
  grpc_channel_args_destroy(new_args);
}

}  // namespace

grpc_channel* grpc_channel_create(const char* target,
                                  const grpc_channel_args* input_args,
                                  grpc_channel_stack_type channel_stack_type,
                                  grpc_transport* optional_transport,
                                  grpc_resource_user* resource_user,
                                  grpc_error_handle* error) {
  // We need to make sure that grpc_shutdown() does not shut things down
  // until after the channel is destroyed.  However, the channel may not
  // actually be destroyed by the time grpc_channel_destroy() returns,
  // since there may be other existing refs to the channel.  If those
  // refs are held by things that are visible to the wrapped language
  // (such as outstanding calls on the channel), then the wrapped
  // language can be responsible for making sure that grpc_shutdown()
  // does not run until after those refs are released.  However, the
  // channel may also have refs to itself held internally for various
  // things that need to be cleaned up at channel destruction (e.g.,
  // LB policies, subchannels, etc), and because these refs are not
  // visible to the wrapped language, it cannot be responsible for
  // deferring grpc_shutdown() until after they are released.  To
  // accommodate that, we call grpc_init() here and then call
  // grpc_shutdown() when the channel is actually destroyed, thus
  // ensuring that shutdown is deferred until that point.
  grpc_init();
  grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
  const grpc_core::UniquePtr<char> default_authority =
      get_default_authority(input_args);
  grpc_channel_args* args =
      build_channel_args(input_args, default_authority.get());
  if (grpc_channel_stack_type_is_client(channel_stack_type)) {
    auto channel_args_mutator =
        grpc_channel_args_get_client_channel_creation_mutator();
    if (channel_args_mutator != nullptr) {
      args = channel_args_mutator(target, args, channel_stack_type);
    }
  }
  grpc_channel_stack_builder_set_channel_arguments(builder, args);
  grpc_channel_args_destroy(args);
  grpc_channel_stack_builder_set_target(builder, target);
  grpc_channel_stack_builder_set_transport(builder, optional_transport);
  grpc_channel_stack_builder_set_resource_user(builder, resource_user);
  if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
    grpc_channel_stack_builder_destroy(builder);
    if (resource_user != nullptr) {
      grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
    }
    grpc_shutdown();  // Since we won't call destroy_channel().
    return nullptr;
  }
  // We only need to do this for clients here. For servers, this will be
  // done in src/core/lib/surface/server.cc.
  if (grpc_channel_stack_type_is_client(channel_stack_type)) {
    CreateChannelzNode(builder);
  }
  grpc_channel* channel =
      grpc_channel_create_with_builder(builder, channel_stack_type, error);
  if (channel == nullptr) {
    grpc_shutdown();  // Since we won't call destroy_channel().
  }
  return channel;
}

size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
#define ROUND_UP_SIZE 256
  /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE.
     This ensures:
      1. a consistent size allocation when our estimate is drifting slowly
         (which is common) - which tends to help most allocators reuse memory
      2. a small amount of allowed growth over the estimate without hitting
         the arena size doubling case, reducing overall memory usage */
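  /* e.g. (illustrative): with ROUND_UP_SIZE 256, an estimate of 1000 becomes
     (1000 + 512) & ~255 = 1280, i.e. a multiple of 256 with at least one full
     ROUND_UP_SIZE of headroom above the raw estimate. */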
  return (static_cast<size_t>(
              gpr_atm_no_barrier_load(&channel->call_size_estimate)) +
          2 * ROUND_UP_SIZE) &
         ~static_cast<size_t>(ROUND_UP_SIZE - 1);
}

void grpc_channel_update_call_size_estimate(grpc_channel* channel,
                                            size_t size) {
  size_t cur = static_cast<size_t>(
      gpr_atm_no_barrier_load(&channel->call_size_estimate));
  if (cur < size) {
    /* size grew: update estimate */
    gpr_atm_no_barrier_cas(&channel->call_size_estimate,
                           static_cast<gpr_atm>(cur),
                           static_cast<gpr_atm>(size));
    /* if we lose: never mind, something else will likely update soon enough */
  } else if (cur == size) {
    /* no change: holding pattern */
  } else if (cur > 0) {
    /* size shrank: decrease estimate */
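    /* Exponential decay toward the observed size (weight 1/256), but always
       shrink by at least 1. e.g. (illustrative): cur = 10000, size = 5000
       gives (255 * 10000 + 5000) / 256 = 9980, so the estimate drops slowly. */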
    gpr_atm_no_barrier_cas(
        &channel->call_size_estimate, static_cast<gpr_atm>(cur),
        static_cast<gpr_atm>(GPR_MIN(cur - 1, (255 * cur + size) / 256)));
    /* if we lose: never mind, something else will likely update soon enough */
  }
}

char* grpc_channel_get_target(grpc_channel* channel) {
  GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
  return gpr_strdup(channel->target);
}

void grpc_channel_get_info(grpc_channel* channel,
                           const grpc_channel_info* channel_info) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  grpc_channel_element* elem =
      grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->get_channel_info(elem, channel_info);
}

void grpc_channel_reset_connect_backoff(grpc_channel* channel) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_channel_reset_connect_backoff(channel=%p)", 1,
                 (channel));
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  op->reset_connect_backoff = true;
  grpc_channel_element* elem =
      grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->start_transport_op(elem, op);
}

static grpc_call* grpc_channel_create_call_internal(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_completion_queue* cq, grpc_pollset_set* pollset_set_alternative,
    grpc_mdelem path_mdelem, grpc_mdelem authority_mdelem,
    grpc_millis deadline) {
  grpc_mdelem send_metadata[2];
  size_t num_metadata = 0;

  GPR_ASSERT(channel->is_client);
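  // A call gets its polling context from either a completion queue or a
  // pollset_set, never both.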
  GPR_ASSERT(!(cq != nullptr && pollset_set_alternative != nullptr));

  send_metadata[num_metadata++] = path_mdelem;
  if (!GRPC_MDISNULL(authority_mdelem)) {
    send_metadata[num_metadata++] = authority_mdelem;
  }

  grpc_call_create_args args;
  args.channel = channel;
  args.server = nullptr;
  args.parent = parent_call;
  args.propagation_mask = propagation_mask;
  args.cq = cq;
  args.pollset_set_alternative = pollset_set_alternative;
  args.server_transport_data = nullptr;
  args.add_initial_metadata = send_metadata;
  args.add_initial_metadata_count = num_metadata;
  args.send_deadline = deadline;

  grpc_call* call;
  GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
  return call;
}

grpc_call* grpc_channel_create_call(grpc_channel* channel,
                                    grpc_call* parent_call,
                                    uint32_t propagation_mask,
                                    grpc_completion_queue* completion_queue,
                                    grpc_slice method, const grpc_slice* host,
                                    gpr_timespec deadline, void* reserved) {
  GPR_ASSERT(!reserved);
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  grpc_call* call = grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, completion_queue, nullptr,
      grpc_mdelem_create(GRPC_MDSTR_PATH, method, nullptr),
      host != nullptr ? grpc_mdelem_create(GRPC_MDSTR_AUTHORITY, *host, nullptr)
                      : GRPC_MDNULL,
      grpc_timespec_to_millis_round_up(deadline));

  return call;
}

grpc_call* grpc_channel_create_pollset_set_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_pollset_set* pollset_set, const grpc_slice& method,
    const grpc_slice* host, grpc_millis deadline, void* reserved) {
  GPR_ASSERT(!reserved);
  return grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, nullptr, pollset_set,
      grpc_mdelem_create(GRPC_MDSTR_PATH, method, nullptr),
      host != nullptr ? grpc_mdelem_create(GRPC_MDSTR_AUTHORITY, *host, nullptr)
                      : GRPC_MDNULL,
      deadline);
}

namespace grpc_core {

RegisteredCall::RegisteredCall(const char* method_arg, const char* host_arg)
    : method(method_arg != nullptr ? method_arg : ""),
      host(host_arg != nullptr ? host_arg : ""),
      path(grpc_mdelem_from_slices(
          GRPC_MDSTR_PATH, grpc_core::ExternallyManagedSlice(method.c_str()))),
      authority(!host.empty()
                    ? grpc_mdelem_from_slices(
                          GRPC_MDSTR_AUTHORITY,
                          grpc_core::ExternallyManagedSlice(host.c_str()))
                    : GRPC_MDNULL) {}

// TODO(vjpai): Delete copy-constructor when allowed by all supported compilers.
RegisteredCall::RegisteredCall(const RegisteredCall& other)
    : RegisteredCall(other.method.c_str(), other.host.c_str()) {}

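// Move constructor: rebuild path/authority from the moved-in strings and
// release the source's mdelems, leaving `other` safely destructible.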
RegisteredCall::RegisteredCall(RegisteredCall&& other) noexcept
    : method(std::move(other.method)),
      host(std::move(other.host)),
      path(grpc_mdelem_from_slices(
          GRPC_MDSTR_PATH, grpc_core::ExternallyManagedSlice(method.c_str()))),
      authority(!host.empty()
                    ? grpc_mdelem_from_slices(
                          GRPC_MDSTR_AUTHORITY,
                          grpc_core::ExternallyManagedSlice(host.c_str()))
                    : GRPC_MDNULL) {
  GRPC_MDELEM_UNREF(other.path);
  GRPC_MDELEM_UNREF(other.authority);
  other.path = GRPC_MDNULL;
  other.authority = GRPC_MDNULL;
}

RegisteredCall::~RegisteredCall() {
  GRPC_MDELEM_UNREF(path);
  GRPC_MDELEM_UNREF(authority);
}

}  // namespace grpc_core

void* grpc_channel_register_call(grpc_channel* channel, const char* method,
                                 const char* host, void* reserved) {
  GRPC_API_TRACE(
      "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
      4, (channel, method, host, reserved));
  GPR_ASSERT(!reserved);
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;

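  // Look up (host, method) in the per-channel registration table under its
  // lock; re-registering the same pair returns the existing entry rather than
  // creating a duplicate.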
  grpc_core::MutexLock lock(&channel->registration_table->mu);
  channel->registration_table->method_registration_attempts++;
  auto key = std::make_pair(std::string(host != nullptr ? host : ""),
                            std::string(method != nullptr ? method : ""));
  auto rc_posn = channel->registration_table->map.find(key);
  if (rc_posn != channel->registration_table->map.end()) {
    return &rc_posn->second;
  }
  auto insertion_result = channel->registration_table->map.insert(
      {std::move(key), grpc_core::RegisteredCall(method, host)});
  return &insertion_result.first->second;
}

grpc_call* grpc_channel_create_registered_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_completion_queue* completion_queue, void* registered_call_handle,
    gpr_timespec deadline, void* reserved) {
  grpc_core::RegisteredCall* rc =
      static_cast<grpc_core::RegisteredCall*>(registered_call_handle);
  GRPC_API_TRACE(
      "grpc_channel_create_registered_call("
      "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
      "registered_call_handle=%p, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "reserved=%p)",
      9,
      (channel, parent_call, (unsigned)propagation_mask, completion_queue,
       registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
       (int)deadline.clock_type, reserved));
  GPR_ASSERT(!reserved);
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  grpc_call* call = grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, completion_queue, nullptr,
      GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
      grpc_timespec_to_millis_round_up(deadline));

  return call;
}

static void destroy_channel(void* arg, grpc_error_handle /*error*/) {
  grpc_channel* channel = static_cast<grpc_channel*>(arg);
  if (channel->channelz_node != nullptr) {
    channel->channelz_node->AddTraceEvent(
        grpc_core::channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Channel destroyed"));
    channel->channelz_node.reset();
  }
  grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
  channel->registration_table.Destroy();
  if (channel->resource_user != nullptr) {
    grpc_resource_user_free(channel->resource_user,
                            GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
  }
  gpr_free(channel->target);
  gpr_free(channel);
  // See comment in grpc_channel_create() for why we do this.
  grpc_shutdown();
}

void grpc_channel_destroy_internal(grpc_channel* channel) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  grpc_channel_element* elem;
  GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
  op->disconnect_with_error =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed");
  elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->start_transport_op(elem, op);
  GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
}

void grpc_channel_destroy(grpc_channel* channel) {
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  grpc_channel_destroy_internal(channel);
}