/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/surface/channel.h"

#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <grpc/compression.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/static_metadata.h"

/** Cache grpc-status: X mdelems for X = 0..NUM_CACHED_STATUS_ELEMS-1.
 *  Avoids needing to take a metadata context lock for sending status
 *  if the status code is < NUM_CACHED_STATUS_ELEMS.
 *  Sized to allow the most commonly used codes to fit in
 *  (OK, Cancelled, Unknown). */
#define NUM_CACHED_STATUS_ELEMS 3
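
// The cached elements are handed out by grpc_channel_get_reffed_status_elem()
// below: status codes 0..2 return static mdelems, anything larger falls back
// to building an mdelem from a freshly formatted slice.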

typedef struct registered_call {
  grpc_mdelem path;
  grpc_mdelem authority;
  struct registered_call* next;
} registered_call;
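
// Registered calls form a singly linked list hanging off the channel,
// protected by registered_call_mu and torn down in destroy_channel().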

struct grpc_channel {
  int is_client;
  grpc_compression_options compression_options;

  gpr_atm call_size_estimate;

  gpr_mu registered_call_mu;
  registered_call* registered_calls;

  grpc_core::RefCountedPtr<grpc_core::channelz::ChannelNode> channelz_channel;

  char* target;
};

#define CHANNEL_STACK_FROM_CHANNEL(c) ((grpc_channel_stack*)((c) + 1))
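
// The channel stack lives in the same allocation as the grpc_channel struct,
// immediately after it (see the sizeof(grpc_channel) prefix passed to
// grpc_channel_stack_builder_finish below), so (c) + 1 points at it.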

static void destroy_channel(void* arg, grpc_error* error);

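// Turns a fully configured channel stack builder into a grpc_channel. The
// channel header and its channel stack share one allocation (see
// CHANNEL_STACK_FROM_CHANNEL); compression defaults and channelz settings are
// read from the builder's channel args.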
grpc_channel* grpc_channel_create_with_builder(
    grpc_channel_stack_builder* builder,
    grpc_channel_stack_type channel_stack_type) {
  char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
  grpc_channel_args* args = grpc_channel_args_copy(
      grpc_channel_stack_builder_get_channel_arguments(builder));
  grpc_channel* channel;
  if (channel_stack_type == GRPC_SERVER_CHANNEL) {
    GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
  } else {
    GRPC_STATS_INC_CLIENT_CHANNELS_CREATED();
  }
  grpc_error* error = grpc_channel_stack_builder_finish(
      builder, sizeof(grpc_channel), 1, destroy_channel, nullptr,
      reinterpret_cast<void**>(&channel));
  if (error != GRPC_ERROR_NONE) {
    gpr_log(GPR_ERROR, "channel stack builder failed: %s",
            grpc_error_string(error));
    GRPC_ERROR_UNREF(error);
    gpr_free(target);
    grpc_channel_args_destroy(args);
    return channel;
  }

  channel->target = target;
  channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
  size_t channel_tracer_max_nodes = 0;  // default to off
  bool channelz_enabled = false;
  bool internal_channel = false;
  // this creates the default ChannelNode. Different types of channels may
  // override this to ensure a correct ChannelNode is created.
  grpc_core::channelz::ChannelNodeCreationFunc channel_node_create_func =
      grpc_core::channelz::ChannelNode::MakeChannelNode;
  gpr_mu_init(&channel->registered_call_mu);
  channel->registered_calls = nullptr;

  gpr_atm_no_barrier_store(
      &channel->call_size_estimate,
      (gpr_atm)CHANNEL_STACK_FROM_CHANNEL(channel)->call_stack_size +
          grpc_call_get_initial_size_estimate());

  grpc_compression_options_init(&channel->compression_options);
  for (size_t i = 0; i < args->num_args; i++) {
    if (0 ==
        strcmp(args->args[i].key, GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL)) {
      channel->compression_options.default_level.is_set = true;
      channel->compression_options.default_level.level =
          static_cast<grpc_compression_level>(grpc_channel_arg_get_integer(
              &args->args[i],
              {GRPC_COMPRESS_LEVEL_NONE, GRPC_COMPRESS_LEVEL_NONE,
               GRPC_COMPRESS_LEVEL_COUNT - 1}));
    } else if (0 == strcmp(args->args[i].key,
                           GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM)) {
      channel->compression_options.default_algorithm.is_set = true;
      channel->compression_options.default_algorithm.algorithm =
          static_cast<grpc_compression_algorithm>(grpc_channel_arg_get_integer(
              &args->args[i], {GRPC_COMPRESS_NONE, GRPC_COMPRESS_NONE,
                               GRPC_COMPRESS_ALGORITHMS_COUNT - 1}));
    } else if (0 ==
               strcmp(args->args[i].key,
                      GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET)) {
      channel->compression_options.enabled_algorithms_bitset =
          static_cast<uint32_t>(args->args[i].value.integer) |
          0x1; /* always support no compression */
    } else if (0 == strcmp(args->args[i].key,
                           GRPC_ARG_MAX_CHANNEL_TRACE_EVENTS_PER_NODE)) {
      GPR_ASSERT(channel_tracer_max_nodes == 0);
      // max_nodes defaults to 0 (which is off), clamped between 0 and INT_MAX
      const grpc_integer_options options = {0, 0, INT_MAX};
      channel_tracer_max_nodes =
          (size_t)grpc_channel_arg_get_integer(&args->args[i], options);
    } else if (0 == strcmp(args->args[i].key, GRPC_ARG_ENABLE_CHANNELZ)) {
      // channelz will not be enabled by default until all concerns in
      // https://github.com/grpc/grpc/issues/15986 are addressed.
      channelz_enabled = grpc_channel_arg_get_bool(&args->args[i], false);
    } else if (0 == strcmp(args->args[i].key,
                           GRPC_ARG_CHANNELZ_CHANNEL_NODE_CREATION_FUNC)) {
      GPR_ASSERT(args->args[i].type == GRPC_ARG_POINTER);
      GPR_ASSERT(args->args[i].value.pointer.p != nullptr);
      channel_node_create_func =
          reinterpret_cast<grpc_core::channelz::ChannelNodeCreationFunc>(
              args->args[i].value.pointer.p);
    } else if (0 == strcmp(args->args[i].key,
                           GRPC_ARG_CHANNELZ_CHANNEL_IS_INTERNAL_CHANNEL)) {
      internal_channel = grpc_channel_arg_get_bool(&args->args[i], false);
    }
  }

  grpc_channel_args_destroy(args);
  // we only need to do the channelz bookkeeping for clients here. The channelz
  // bookkeeping for server channels occurs in src/core/lib/surface/server.cc
  if (channelz_enabled && channel->is_client) {
    channel->channelz_channel = channel_node_create_func(
        channel, channel_tracer_max_nodes, !internal_channel);
    channel->channelz_channel->AddTraceEvent(
        grpc_core::channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Channel created"));
  }
  return channel;
}

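// If the user did not supply GRPC_ARG_DEFAULT_AUTHORITY but did supply an SSL
// target name override, use the override as the default authority for calls
// on this channel.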
static grpc_core::UniquePtr<char> get_default_authority(
    const grpc_channel_args* input_args) {
  bool has_default_authority = false;
  char* ssl_override = nullptr;
  grpc_core::UniquePtr<char> default_authority;
  const size_t num_args = input_args != nullptr ? input_args->num_args : 0;
  for (size_t i = 0; i < num_args; ++i) {
    if (0 == strcmp(input_args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
      has_default_authority = true;
    } else if (0 == strcmp(input_args->args[i].key,
                           GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
      ssl_override = grpc_channel_arg_get_string(&input_args->args[i]);
    }
  }
  if (!has_default_authority && ssl_override != nullptr) {
    default_authority.reset(gpr_strdup(ssl_override));
  }
  return default_authority;
}

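// Copies the input args, appending GRPC_ARG_DEFAULT_AUTHORITY when a default
// authority was derived above.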
static grpc_channel_args* build_channel_args(
    const grpc_channel_args* input_args, char* default_authority) {
  grpc_arg new_args[1];
  size_t num_new_args = 0;
  if (default_authority != nullptr) {
    new_args[num_new_args++] = grpc_channel_arg_string_create(
        const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY), default_authority);
  }
  return grpc_channel_args_copy_and_add(input_args, new_args, num_new_args);
}

grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
    grpc_channel* channel) {
  return channel->channelz_channel.get();
}

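// Public channel-creation entry point: derives a default authority, builds the
// channel args, configures the builder with target and optional transport, and
// hands the builder to grpc_channel_create_with_builder. Returns nullptr if
// stack creation fails.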
grpc_channel* grpc_channel_create(const char* target,
                                  const grpc_channel_args* input_args,
                                  grpc_channel_stack_type channel_stack_type,
                                  grpc_transport* optional_transport) {
  grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
  const grpc_core::UniquePtr<char> default_authority =
      get_default_authority(input_args);
  grpc_channel_args* args =
      build_channel_args(input_args, default_authority.get());
  grpc_channel_stack_builder_set_channel_arguments(builder, args);
  grpc_channel_args_destroy(args);
  grpc_channel_stack_builder_set_target(builder, target);
  grpc_channel_stack_builder_set_transport(builder, optional_transport);
  if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
    grpc_channel_stack_builder_destroy(builder);
    return nullptr;
  }
  return grpc_channel_create_with_builder(builder, channel_stack_type);
}

size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
#define ROUND_UP_SIZE 256
  /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE.
     This ensures:
      1. a consistent size allocation when our estimate is drifting slowly
         (which is common) - which tends to help most allocators reuse memory
      2. a small amount of allowed growth over the estimate without hitting
         the arena size doubling case, reducing overall memory usage */
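  /* Worked example: with ROUND_UP_SIZE == 256, an estimate of 1000 bytes
     returns (1000 + 2 * 256) & ~255 == 1280. */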
  return (static_cast<size_t>(
              gpr_atm_no_barrier_load(&channel->call_size_estimate)) +
          2 * ROUND_UP_SIZE) &
         ~static_cast<size_t>(ROUND_UP_SIZE - 1);
}

void grpc_channel_update_call_size_estimate(grpc_channel* channel,
                                            size_t size) {
  size_t cur = static_cast<size_t>(
      gpr_atm_no_barrier_load(&channel->call_size_estimate));
  if (cur < size) {
    /* size grew: update estimate */
    gpr_atm_no_barrier_cas(&channel->call_size_estimate,
                           static_cast<gpr_atm>(cur),
                           static_cast<gpr_atm>(size));
    /* if we lose: never mind, something else will likely update soon enough */
  } else if (cur == size) {
    /* no change: holding pattern */
  } else if (cur > 0) {
    /* size shrank: decrease estimate */
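    /* The new estimate is a 1/256 exponentially weighted step toward the
       observed size, capped at cur - 1 so the estimate always shrinks by at
       least one byte. */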
    gpr_atm_no_barrier_cas(
        &channel->call_size_estimate, static_cast<gpr_atm>(cur),
        static_cast<gpr_atm>(GPR_MIN(cur - 1, (255 * cur + size) / 256)));
    /* if we lose: never mind, something else will likely update soon enough */
  }
}

char* grpc_channel_get_target(grpc_channel* channel) {
  GRPC_API_TRACE("grpc_channel_get_target(channel=%p)", 1, (channel));
  return gpr_strdup(channel->target);
}

void grpc_channel_get_info(grpc_channel* channel,
                           const grpc_channel_info* channel_info) {
  grpc_core::ExecCtx exec_ctx;
  grpc_channel_element* elem =
      grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->get_channel_info(elem, channel_info);
}

void grpc_channel_reset_connect_backoff(grpc_channel* channel) {
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_channel_reset_connect_backoff(channel=%p)", 1,
                 (channel));
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  op->reset_connect_backoff = true;
  grpc_channel_element* elem =
      grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->start_transport_op(elem, op);
}

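// Shared helper for the call-creation entry points: assembles the initial
// metadata (:path, plus :authority when present) and fills in
// grpc_call_create_args. At most one of cq and pollset_set_alternative may be
// non-null.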
static grpc_call* grpc_channel_create_call_internal(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_completion_queue* cq, grpc_pollset_set* pollset_set_alternative,
    grpc_mdelem path_mdelem, grpc_mdelem authority_mdelem,
    grpc_millis deadline) {
  grpc_mdelem send_metadata[2];
  size_t num_metadata = 0;

  GPR_ASSERT(channel->is_client);
  GPR_ASSERT(!(cq != nullptr && pollset_set_alternative != nullptr));

  send_metadata[num_metadata++] = path_mdelem;
  if (!GRPC_MDISNULL(authority_mdelem)) {
    send_metadata[num_metadata++] = authority_mdelem;
  }

  grpc_call_create_args args;
  memset(&args, 0, sizeof(args));
  args.channel = channel;
  args.parent = parent_call;
  args.propagation_mask = propagation_mask;
  args.cq = cq;
  args.pollset_set_alternative = pollset_set_alternative;
  args.server_transport_data = nullptr;
  args.add_initial_metadata = send_metadata;
  args.add_initial_metadata_count = num_metadata;
  args.send_deadline = deadline;

  grpc_call* call;
  GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
  return call;
}

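// Public call creation: refs the method (and optional host) slices and wraps
// them in :path / :authority mdelems; the deadline is converted from
// gpr_timespec to grpc_millis, rounding up.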
grpc_call* grpc_channel_create_call(grpc_channel* channel,
                                    grpc_call* parent_call,
                                    uint32_t propagation_mask,
                                    grpc_completion_queue* cq,
                                    grpc_slice method, const grpc_slice* host,
                                    gpr_timespec deadline, void* reserved) {
  GPR_ASSERT(!reserved);
  grpc_core::ExecCtx exec_ctx;
  grpc_call* call = grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, cq, nullptr,
      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)),
      host != nullptr ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                                grpc_slice_ref_internal(*host))
                      : GRPC_MDNULL,
      grpc_timespec_to_millis_round_up(deadline));

  return call;
}

grpc_call* grpc_channel_create_pollset_set_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_pollset_set* pollset_set, grpc_slice method, const grpc_slice* host,
    grpc_millis deadline, void* reserved) {
  GPR_ASSERT(!reserved);
  return grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, nullptr, pollset_set,
      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)),
      host != nullptr ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                                grpc_slice_ref_internal(*host))
                      : GRPC_MDNULL,
      deadline);
}

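// Registers a method/host pair up front: the :path and :authority mdelems are
// built from interned slices once, so calls created later via
// grpc_channel_create_registered_call only need to take refs on them.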
void* grpc_channel_register_call(grpc_channel* channel, const char* method,
                                 const char* host, void* reserved) {
  registered_call* rc =
      static_cast<registered_call*>(gpr_malloc(sizeof(registered_call)));
  GRPC_API_TRACE(
      "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
      4, (channel, method, host, reserved));
  GPR_ASSERT(!reserved);
  grpc_core::ExecCtx exec_ctx;

  rc->path = grpc_mdelem_from_slices(
      GRPC_MDSTR_PATH,
      grpc_slice_intern(grpc_slice_from_static_string(method)));
  rc->authority =
      host ? grpc_mdelem_from_slices(
                 GRPC_MDSTR_AUTHORITY,
                 grpc_slice_intern(grpc_slice_from_static_string(host)))
           : GRPC_MDNULL;
  gpr_mu_lock(&channel->registered_call_mu);
  rc->next = channel->registered_calls;
  channel->registered_calls = rc;
  gpr_mu_unlock(&channel->registered_call_mu);

  return rc;
}

grpc_call* grpc_channel_create_registered_call(
    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
    grpc_completion_queue* completion_queue, void* registered_call_handle,
    gpr_timespec deadline, void* reserved) {
  registered_call* rc = static_cast<registered_call*>(registered_call_handle);
  GRPC_API_TRACE(
      "grpc_channel_create_registered_call("
      "channel=%p, parent_call=%p, propagation_mask=%x, completion_queue=%p, "
      "registered_call_handle=%p, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "reserved=%p)",
      9,
      (channel, parent_call, (unsigned)propagation_mask, completion_queue,
       registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
       (int)deadline.clock_type, reserved));
  GPR_ASSERT(!reserved);
  grpc_core::ExecCtx exec_ctx;
  grpc_call* call = grpc_channel_create_call_internal(
      channel, parent_call, propagation_mask, completion_queue, nullptr,
      GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
      grpc_timespec_to_millis_round_up(deadline));

  return call;
}

#ifndef NDEBUG
#define REF_REASON reason
#define REF_ARG , const char* reason
#else
#define REF_REASON ""
#define REF_ARG
#endif
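// In debug builds the ref/unref helpers carry a reason string through to the
// channel stack refcount for tracing; in release builds the extra argument
// compiles away.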
void grpc_channel_internal_ref(grpc_channel* c REF_ARG) {
  GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}

void grpc_channel_internal_unref(grpc_channel* c REF_ARG) {
  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}

static void destroy_channel(void* arg, grpc_error* error) {
  grpc_channel* channel = static_cast<grpc_channel*>(arg);
  if (channel->channelz_channel != nullptr) {
    channel->channelz_channel->AddTraceEvent(
        grpc_core::channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string("Channel destroyed"));
    channel->channelz_channel->MarkChannelDestroyed();
    channel->channelz_channel.reset();
  }
  grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
  while (channel->registered_calls) {
    registered_call* rc = channel->registered_calls;
    channel->registered_calls = rc->next;
    GRPC_MDELEM_UNREF(rc->path);
    GRPC_MDELEM_UNREF(rc->authority);
    gpr_free(rc);
  }
  gpr_mu_destroy(&channel->registered_call_mu);
  gpr_free(channel->target);
  gpr_free(channel);
}

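// Public destruction entry point: sends a disconnect transport op down the
// stack and drops the ref taken at creation. The channel memory is actually
// freed in destroy_channel() once the channel stack refcount reaches zero.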
void grpc_channel_destroy(grpc_channel* channel) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  grpc_channel_element* elem;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
  op->disconnect_with_error =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed");
  elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
  elem->filter->start_transport_op(elem, op);

  GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
}

grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel) {
  return CHANNEL_STACK_FROM_CHANNEL(channel);
}

grpc_compression_options grpc_channel_compression_options(
    const grpc_channel* channel) {
  return channel->compression_options;
}

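// Returns a grpc-status mdelem for status code i. Codes 0..2 (see
// NUM_CACHED_STATUS_ELEMS) map to statically allocated mdelems; other codes
// allocate a new mdelem from the formatted status string.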
grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel, int i) {
  char tmp[GPR_LTOA_MIN_BUFSIZE];
  switch (i) {
    case 0:
      return GRPC_MDELEM_GRPC_STATUS_0;
    case 1:
      return GRPC_MDELEM_GRPC_STATUS_1;
    case 2:
      return GRPC_MDELEM_GRPC_STATUS_2;
  }
  gpr_ltoa(i, tmp);
  return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
                                 grpc_slice_from_copied_string(tmp));
}
