1 //
2 // Copyright 2015-2016 gRPC authors.
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //     http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16 
17 #include <grpc/support/port_platform.h>
18 
19 #include "src/core/lib/surface/server.h"
20 
21 #include <limits.h>
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #include <algorithm>
26 #include <atomic>
27 #include <iterator>
28 #include <list>
29 #include <queue>
30 #include <utility>
31 #include <vector>
32 
33 #include "absl/memory/memory.h"
34 #include "absl/types/optional.h"
35 
36 #include <grpc/support/alloc.h>
37 #include <grpc/support/log.h>
38 #include <grpc/support/string_util.h>
39 
40 #include "src/core/lib/channel/channel_args.h"
41 #include "src/core/lib/channel/channelz.h"
42 #include "src/core/lib/channel/connected_channel.h"
43 #include "src/core/lib/debug/stats.h"
44 #include "src/core/lib/gpr/spinlock.h"
45 #include "src/core/lib/gpr/string.h"
46 #include "src/core/lib/gprpp/mpscq.h"
47 #include "src/core/lib/iomgr/executor.h"
48 #include "src/core/lib/iomgr/iomgr.h"
49 #include "src/core/lib/slice/slice_internal.h"
50 #include "src/core/lib/surface/api_trace.h"
51 #include "src/core/lib/surface/call.h"
52 #include "src/core/lib/surface/channel.h"
53 #include "src/core/lib/surface/completion_queue.h"
54 #include "src/core/lib/surface/init.h"
55 #include "src/core/lib/transport/metadata.h"
56 #include "src/core/lib/transport/static_metadata.h"
57 
58 namespace grpc_core {
59 
60 TraceFlag grpc_server_channel_trace(false, "server_channel");
61 
62 //
63 // Server::RequestedCall
64 //
65 
66 struct Server::RequestedCall {
67   enum class Type { BATCH_CALL, REGISTERED_CALL };
68 
69   RequestedCall(void* tag_arg, grpc_completion_queue* call_cq,
70                 grpc_call** call_arg, grpc_metadata_array* initial_md,
71                 grpc_call_details* details)
72       : type(Type::BATCH_CALL),
73         tag(tag_arg),
74         cq_bound_to_call(call_cq),
75         call(call_arg),
76         initial_metadata(initial_md) {
77     details->reserved = nullptr;
78     data.batch.details = details;
79   }
80 
81   RequestedCall(void* tag_arg, grpc_completion_queue* call_cq,
82                 grpc_call** call_arg, grpc_metadata_array* initial_md,
83                 RegisteredMethod* rm, gpr_timespec* deadline,
84                 grpc_byte_buffer** optional_payload)
85       : type(Type::REGISTERED_CALL),
86         tag(tag_arg),
87         cq_bound_to_call(call_cq),
88         call(call_arg),
89         initial_metadata(initial_md) {
90     data.registered.method = rm;
91     data.registered.deadline = deadline;
92     data.registered.optional_payload = optional_payload;
93   }
94 
95   MultiProducerSingleConsumerQueue::Node mpscq_node;
96   const Type type;
97   void* const tag;
98   grpc_completion_queue* const cq_bound_to_call;
99   grpc_call** const call;
100   grpc_cq_completion completion;
101   grpc_metadata_array* const initial_metadata;
102   union {
103     struct {
104       grpc_call_details* details;
105     } batch;
106     struct {
107       RegisteredMethod* method;
108       gpr_timespec* deadline;
109       grpc_byte_buffer** optional_payload;
110     } registered;
111   } data;
112 };
113 
114 //
115 // Server::RegisteredMethod
116 //
117 
118 struct Server::RegisteredMethod {
119   RegisteredMethod(
120       const char* method_arg, const char* host_arg,
121       grpc_server_register_method_payload_handling payload_handling_arg,
122       uint32_t flags_arg)
123       : method(method_arg == nullptr ? "" : method_arg),
124         host(host_arg == nullptr ? "" : host_arg),
125         payload_handling(payload_handling_arg),
126         flags(flags_arg) {}
127 
128   ~RegisteredMethod() = default;
129 
130   const std::string method;
131   const std::string host;
132   const grpc_server_register_method_payload_handling payload_handling;
133   const uint32_t flags;
134   // One request matcher per method.
135   std::unique_ptr<RequestMatcherInterface> matcher;
136 };
137 
138 //
139 // Server::RequestMatcherInterface
140 //
141 
142 // RPCs that come in from the transport must be matched against RPC requests
143 // from the application. An incoming request from the application can be matched
144 // to an RPC that has already arrived or can be queued up for later use.
145 // Likewise, an RPC coming in from the transport can either be matched to a
146 // request that already arrived from the application or can be queued up for
147 // later use (marked pending). If there is a match, the request's tag is posted
148 // on the request's notification CQ.
149 //
150 // RequestMatcherInterface is the base class to provide this functionality.
151 class Server::RequestMatcherInterface {
152  public:
153   virtual ~RequestMatcherInterface() {}
154 
155   // Unref the calls associated with any incoming RPCs in the pending queue (not
156   // yet matched to an application-requested RPC).
157   virtual void ZombifyPending() = 0;
158 
159   // Mark all application-requested RPCs failed if they have not been matched to
160   // an incoming RPC. The error parameter indicates why the RPCs are being
161   // failed (always server shutdown in all current implementations).
162   virtual void KillRequests(grpc_error_handle error) = 0;
163 
164   // How many request queues are supported by this matcher. This is an abstract
165   // concept that essentially maps to gRPC completion queues.
166   virtual size_t request_queue_count() const = 0;
167 
168   // This function is invoked when the application requests a new RPC whose
169   // information is in the call parameter. The request_queue_index marks the
170   // queue onto which to place this RPC, and is typically associated with a gRPC
171   // CQ. If there are pending RPCs waiting to be matched, publish one (match it
172   // and notify the CQ).
173   virtual void RequestCallWithPossiblePublish(size_t request_queue_index,
174                                               RequestedCall* call) = 0;
175 
176   // This function is invoked on an incoming RPC, represented by the calld
177   // object. The RequestMatcher will try to match it against an
178   // application-requested RPC if possible or will place it in the pending queue
179   // otherwise. To enable some measure of fairness between server CQs, the match
180   // is done starting at the start_request_queue_index parameter in a cyclic
181   // order rather than always starting at 0.
182   virtual void MatchOrQueue(size_t start_request_queue_index,
183                             CallData* calld) = 0;
184 
185   // Returns the server associated with this request matcher
186   virtual Server* server() const = 0;
187 };
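
// A minimal sketch of the application-facing half of the matching described
// above, using only the public C API. It assumes a created-and-started
// grpc_server* with `cq` registered via grpc_server_register_completion_queue();
// the function name and local variables below are illustrative, not part of
// this file.
static void ExampleRequestOneCall(grpc_server* server,
                                  grpc_completion_queue* cq) {
  grpc_call* call = nullptr;
  grpc_call_details details;
  grpc_metadata_array request_metadata;
  grpc_call_details_init(&details);
  grpc_metadata_array_init(&request_metadata);
  void* tag = &details;  // any unique pointer works as a tag
  // Lands in Server::RequestCall() below, which wraps the arguments in a
  // RequestedCall and hands it to the unregistered-method request matcher.
  GPR_ASSERT(grpc_server_request_call(server, &call, &details,
                                      &request_metadata, cq, cq,
                                      tag) == GRPC_CALL_OK);
  // The tag comes back on `cq` once an incoming RPC is matched to this
  // request (CallData::Publish() -> grpc_cq_end_op()).
  grpc_event ev = grpc_completion_queue_next(
      cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  if (ev.type == GRPC_OP_COMPLETE && ev.success != 0 && call != nullptr) {
    // `call`, `details`, and `request_metadata` are now populated; a real
    // server would run the RPC here before releasing the call.
    grpc_call_unref(call);
  }
  grpc_call_details_destroy(&details);
  grpc_metadata_array_destroy(&request_metadata);
}
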
188 
189 // The RealRequestMatcher is an implementation of RequestMatcherInterface that
190 // actually uses all the features of RequestMatcherInterface: expecting the
191 // application to explicitly request RPCs and then matching those to incoming
192 // RPCs, along with a slow path by which incoming RPCs are put on a locked
193 // pending list if they aren't able to be matched to an application request.
194 class Server::RealRequestMatcher : public RequestMatcherInterface {
195  public:
196   explicit RealRequestMatcher(Server* server)
197       : server_(server), requests_per_cq_(server->cqs_.size()) {}
198 
199   ~RealRequestMatcher() override {
200     for (LockedMultiProducerSingleConsumerQueue& queue : requests_per_cq_) {
201       GPR_ASSERT(queue.Pop() == nullptr);
202     }
203   }
204 
205   void ZombifyPending() override {
206     while (!pending_.empty()) {
207       CallData* calld = pending_.front();
208       calld->SetState(CallData::CallState::ZOMBIED);
209       calld->KillZombie();
210       pending_.pop();
211     }
212   }
213 
214   void KillRequests(grpc_error_handle error) override {
215     for (size_t i = 0; i < requests_per_cq_.size(); i++) {
216       RequestedCall* rc;
217       while ((rc = reinterpret_cast<RequestedCall*>(
218                   requests_per_cq_[i].Pop())) != nullptr) {
219         server_->FailCall(i, rc, GRPC_ERROR_REF(error));
220       }
221     }
222     GRPC_ERROR_UNREF(error);
223   }
224 
225   size_t request_queue_count() const override {
226     return requests_per_cq_.size();
227   }
228 
229   void RequestCallWithPossiblePublish(size_t request_queue_index,
230                                       RequestedCall* call) override {
231     if (requests_per_cq_[request_queue_index].Push(&call->mpscq_node)) {
232       /* this was the first queued request: we need to lock and start
233          matching calls */
234       struct PendingCall {
235         RequestedCall* rc = nullptr;
236         CallData* calld;
237       };
238       auto pop_next_pending = [this, request_queue_index] {
239         PendingCall pending_call;
240         {
241           MutexLock lock(&server_->mu_call_);
242           if (!pending_.empty()) {
243             pending_call.rc = reinterpret_cast<RequestedCall*>(
244                 requests_per_cq_[request_queue_index].Pop());
245             if (pending_call.rc != nullptr) {
246               pending_call.calld = pending_.front();
247               pending_.pop();
248             }
249           }
250         }
251         return pending_call;
252       };
253       while (true) {
254         PendingCall next_pending = pop_next_pending();
255         if (next_pending.rc == nullptr) break;
256         if (!next_pending.calld->MaybeActivate()) {
257           // Zombied Call
258           next_pending.calld->KillZombie();
259         } else {
260           next_pending.calld->Publish(request_queue_index, next_pending.rc);
261         }
262       }
263     }
264   }
265 
266   void MatchOrQueue(size_t start_request_queue_index,
267                     CallData* calld) override {
268     for (size_t i = 0; i < requests_per_cq_.size(); i++) {
269       size_t cq_idx = (start_request_queue_index + i) % requests_per_cq_.size();
270       RequestedCall* rc =
271           reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].TryPop());
272       if (rc != nullptr) {
273         GRPC_STATS_INC_SERVER_CQS_CHECKED(i);
274         calld->SetState(CallData::CallState::ACTIVATED);
275         calld->Publish(cq_idx, rc);
276         return;
277       }
278     }
279     // No CQ had an application request available to take this call;
280     // queue it on the slow (pending) list.
280     GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED();
281     // We need to ensure that all the queues are empty.  We do this under
282     // the server mu_call_ lock to ensure that if something is added to
283     // an empty request queue, it will block until the call is actually
284     // added to the pending list.
285     RequestedCall* rc = nullptr;
286     size_t cq_idx = 0;
287     size_t loop_count;
288     {
289       MutexLock lock(&server_->mu_call_);
290       for (loop_count = 0; loop_count < requests_per_cq_.size(); loop_count++) {
291         cq_idx =
292             (start_request_queue_index + loop_count) % requests_per_cq_.size();
293         rc = reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].Pop());
294         if (rc != nullptr) {
295           break;
296         }
297       }
298       if (rc == nullptr) {
299         calld->SetState(CallData::CallState::PENDING);
300         pending_.push(calld);
301         return;
302       }
303     }
304     GRPC_STATS_INC_SERVER_CQS_CHECKED(loop_count + requests_per_cq_.size());
305     calld->SetState(CallData::CallState::ACTIVATED);
306     calld->Publish(cq_idx, rc);
307   }
308 
309   Server* server() const override { return server_; }
310 
311  private:
312   Server* const server_;
313   std::queue<CallData*> pending_;
314   std::vector<LockedMultiProducerSingleConsumerQueue> requests_per_cq_;
315 };
316 
317 // AllocatingRequestMatchers don't allow the application to request an RPC in
318 // advance or queue up any incoming RPC for later match. Instead, MatchOrQueue
319 // will call out to an allocation function passed in at the construction of the
320 // object. These request matchers are designed for the C++ callback API, so they
321 // only support 1 completion queue (passed in at the constructor). They are also
322 // used for the sync API.
323 class Server::AllocatingRequestMatcherBase : public RequestMatcherInterface {
324  public:
325   AllocatingRequestMatcherBase(Server* server, grpc_completion_queue* cq)
326       : server_(server), cq_(cq) {
327     size_t idx;
328     for (idx = 0; idx < server->cqs_.size(); idx++) {
329       if (server->cqs_[idx] == cq) {
330         break;
331       }
332     }
333     GPR_ASSERT(idx < server->cqs_.size());
334     cq_idx_ = idx;
335   }
336 
337   void ZombifyPending() override {}
338 
339   void KillRequests(grpc_error_handle error) override {
340     GRPC_ERROR_UNREF(error);
341   }
342 
343   size_t request_queue_count() const override { return 0; }
344 
345   void RequestCallWithPossiblePublish(size_t /*request_queue_index*/,
346                                       RequestedCall* /*call*/) final {
347     GPR_ASSERT(false);
348   }
349 
350   Server* server() const override { return server_; }
351 
352   // Supply the completion queue related to this request matcher
353   grpc_completion_queue* cq() const { return cq_; }
354 
355   // Supply the completion queue's index relative to the server.
356   size_t cq_idx() const { return cq_idx_; }
357 
358  private:
359   Server* const server_;
360   grpc_completion_queue* const cq_;
361   size_t cq_idx_;
362 };
363 
364 // An allocating request matcher for non-registered methods (used for generic
365 // API and unimplemented RPCs).
366 class Server::AllocatingRequestMatcherBatch
367     : public AllocatingRequestMatcherBase {
368  public:
369   AllocatingRequestMatcherBatch(Server* server, grpc_completion_queue* cq,
370                                 std::function<BatchCallAllocation()> allocator)
371       : AllocatingRequestMatcherBase(server, cq),
372         allocator_(std::move(allocator)) {}
373 
374   void MatchOrQueue(size_t /*start_request_queue_index*/,
375                     CallData* calld) override {
376     if (server()->ShutdownRefOnRequest()) {
377       BatchCallAllocation call_info = allocator_();
378       GPR_ASSERT(server()->ValidateServerRequest(
379                      cq(), static_cast<void*>(call_info.tag), nullptr,
380                      nullptr) == GRPC_CALL_OK);
381       RequestedCall* rc = new RequestedCall(
382           static_cast<void*>(call_info.tag), call_info.cq, call_info.call,
383           call_info.initial_metadata, call_info.details);
384       calld->SetState(CallData::CallState::ACTIVATED);
385       calld->Publish(cq_idx(), rc);
386     } else {
387       calld->FailCallCreation();
388     }
389     server()->ShutdownUnrefOnRequest();
390   }
391 
392  private:
393   std::function<BatchCallAllocation()> allocator_;
394 };
395 
396 // An allocating request matcher for registered methods.
397 class Server::AllocatingRequestMatcherRegistered
398     : public AllocatingRequestMatcherBase {
399  public:
400   AllocatingRequestMatcherRegistered(
401       Server* server, grpc_completion_queue* cq, RegisteredMethod* rm,
402       std::function<RegisteredCallAllocation()> allocator)
403       : AllocatingRequestMatcherBase(server, cq),
404         registered_method_(rm),
405         allocator_(std::move(allocator)) {}
406 
407   void MatchOrQueue(size_t /*start_request_queue_index*/,
408                     CallData* calld) override {
409     if (server()->ShutdownRefOnRequest()) {
410       RegisteredCallAllocation call_info = allocator_();
411       GPR_ASSERT(server()->ValidateServerRequest(
412                      cq(), call_info.tag, call_info.optional_payload,
413                      registered_method_) == GRPC_CALL_OK);
414       RequestedCall* rc =
415           new RequestedCall(call_info.tag, call_info.cq, call_info.call,
416                             call_info.initial_metadata, registered_method_,
417                             call_info.deadline, call_info.optional_payload);
418       calld->SetState(CallData::CallState::ACTIVATED);
419       calld->Publish(cq_idx(), rc);
420     } else {
421       calld->FailCallCreation();
422     }
423     server()->ShutdownUnrefOnRequest();
424   }
425 
426  private:
427   RegisteredMethod* const registered_method_;
428   std::function<RegisteredCallAllocation()> allocator_;
429 };
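
// A sketch of how an allocating matcher gets installed, based on
// Server::SetRegisteredMethodAllocator() defined later in this file (the
// RegisteredCallAllocation fields are the ones consumed above; its exact
// declaration lives in server.h). The storage type ExamplePendingRpc and the
// wrapper function are hypothetical; in the library this state is owned by
// the C++ callback/sync server plumbing rather than a struct like this.
namespace {

struct ExamplePendingRpc {  // hypothetical per-RPC storage filled by the matcher
  grpc_call* call = nullptr;
  grpc_metadata_array initial_metadata;
  gpr_timespec deadline;
  grpc_byte_buffer* payload = nullptr;
  grpc_completion_queue* cq_bound_to_call = nullptr;
};

void ExampleInstallRegisteredAllocator(Server* server, grpc_completion_queue* cq,
                                       void* method_tag /* from RegisterMethod */) {
  server->SetRegisteredMethodAllocator(cq, method_tag, [cq] {
    // Called once per matched RPC: hand back fresh storage for the matcher
    // to fill in; `tag` is completed on `cq` when the call is published.
    auto* rpc = new ExamplePendingRpc();
    grpc_metadata_array_init(&rpc->initial_metadata);
    rpc->cq_bound_to_call = cq;
    Server::RegisteredCallAllocation alloc;
    alloc.tag = rpc;
    alloc.cq = rpc->cq_bound_to_call;
    alloc.call = &rpc->call;
    alloc.initial_metadata = &rpc->initial_metadata;
    alloc.deadline = &rpc->deadline;
    alloc.optional_payload = &rpc->payload;
    return alloc;
  });
}

}  // namespace
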
430 
431 //
432 // ChannelBroadcaster
433 //
434 
435 namespace {
436 
437 class ChannelBroadcaster {
438  public:
439   // This can have an empty constructor and destructor since we want to control
440   // when the actual setup and shutdown broadcast take place.
441 
442   // Copies over the channels from the locked server.
443   void FillChannelsLocked(std::vector<grpc_channel*> channels) {
444     GPR_DEBUG_ASSERT(channels_.empty());
445     channels_ = std::move(channels);
446   }
447 
448   // Broadcasts a shutdown on each channel.
449   void BroadcastShutdown(bool send_goaway, grpc_error_handle force_disconnect) {
450     for (grpc_channel* channel : channels_) {
451       SendShutdown(channel, send_goaway, GRPC_ERROR_REF(force_disconnect));
452       GRPC_CHANNEL_INTERNAL_UNREF(channel, "broadcast");
453     }
454     channels_.clear();  // just for safety against double broadcast
455     GRPC_ERROR_UNREF(force_disconnect);
456   }
457 
458  private:
459   struct ShutdownCleanupArgs {
460     grpc_closure closure;
461     grpc_slice slice;
462   };
463 
464   static void ShutdownCleanup(void* arg, grpc_error_handle /*error*/) {
465     ShutdownCleanupArgs* a = static_cast<ShutdownCleanupArgs*>(arg);
466     grpc_slice_unref_internal(a->slice);
467     delete a;
468   }
469 
470   static void SendShutdown(grpc_channel* channel, bool send_goaway,
471                            grpc_error_handle send_disconnect) {
472     ShutdownCleanupArgs* sc = new ShutdownCleanupArgs;
473     GRPC_CLOSURE_INIT(&sc->closure, ShutdownCleanup, sc,
474                       grpc_schedule_on_exec_ctx);
475     grpc_transport_op* op = grpc_make_transport_op(&sc->closure);
476     grpc_channel_element* elem;
477     op->goaway_error =
478         send_goaway
479             ? grpc_error_set_int(
480                   GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"),
481                   GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_OK)
482             : GRPC_ERROR_NONE;
483     op->set_accept_stream = true;
484     sc->slice = grpc_slice_from_copied_string("Server shutdown");
485     op->disconnect_with_error = send_disconnect;
486     elem =
487         grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
488     elem->filter->start_transport_op(elem, op);
489   }
490 
491   std::vector<grpc_channel*> channels_;
492 };
493 
494 }  // namespace
495 
496 //
497 // Server
498 //
499 
500 const grpc_channel_filter Server::kServerTopFilter = {
501     Server::CallData::StartTransportStreamOpBatch,
502     grpc_channel_next_op,
503     sizeof(Server::CallData),
504     Server::CallData::InitCallElement,
505     grpc_call_stack_ignore_set_pollset_or_pollset_set,
506     Server::CallData::DestroyCallElement,
507     sizeof(Server::ChannelData),
508     Server::ChannelData::InitChannelElement,
509     Server::ChannelData::DestroyChannelElement,
510     grpc_channel_next_get_info,
511     "server",
512 };
513 
514 namespace {
515 
516 grpc_resource_user* CreateDefaultResourceUser(const grpc_channel_args* args) {
517   if (args != nullptr) {
518     grpc_resource_quota* resource_quota =
519         grpc_resource_quota_from_channel_args(args, false /* create */);
520     if (resource_quota != nullptr) {
521       return grpc_resource_user_create(resource_quota, "default");
522     }
523   }
524   return nullptr;
525 }
526 
527 RefCountedPtr<channelz::ServerNode> CreateChannelzNode(
528     const grpc_channel_args* args) {
529   RefCountedPtr<channelz::ServerNode> channelz_node;
530   if (grpc_channel_args_find_bool(args, GRPC_ARG_ENABLE_CHANNELZ,
531                                   GRPC_ENABLE_CHANNELZ_DEFAULT)) {
532     size_t channel_tracer_max_memory = grpc_channel_args_find_integer(
533         args, GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE,
534         {GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX});
535     channelz_node =
536         MakeRefCounted<channelz::ServerNode>(channel_tracer_max_memory);
537     channelz_node->AddTraceEvent(
538         channelz::ChannelTrace::Severity::Info,
539         grpc_slice_from_static_string("Server created"));
540   }
541   return channelz_node;
542 }
543 
544 }  // namespace
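
// CreateChannelzNode() above is driven purely by the channel args passed at
// server creation. A minimal sketch of enabling channelz from the public C
// API (the function name and the 1 MB trace limit are illustrative):
grpc_server* ExampleCreateServerWithChannelz() {
  grpc_arg args_array[2];
  args_array[0].type = GRPC_ARG_INTEGER;
  args_array[0].key = const_cast<char*>(GRPC_ARG_ENABLE_CHANNELZ);
  args_array[0].value.integer = 1;
  args_array[1].type = GRPC_ARG_INTEGER;
  args_array[1].key =
      const_cast<char*>(GRPC_ARG_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE);
  args_array[1].value.integer = 1024 * 1024;
  grpc_channel_args channel_args = {2, args_array};
  return grpc_server_create(&channel_args, /*reserved=*/nullptr);
}
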
545 
546 Server::Server(const grpc_channel_args* args)
547     : channel_args_(grpc_channel_args_copy(args)),
548       default_resource_user_(CreateDefaultResourceUser(args)),
549       channelz_node_(CreateChannelzNode(args)) {}
550 
551 Server::~Server() {
552   grpc_channel_args_destroy(channel_args_);
553   // Remove the cq pollsets from the config_fetcher.
554   if (started_ && config_fetcher_ != nullptr &&
555       config_fetcher_->interested_parties() != nullptr) {
556     for (grpc_pollset* pollset : pollsets_) {
557       grpc_pollset_set_del_pollset(config_fetcher_->interested_parties(),
558                                    pollset);
559     }
560   }
561   for (size_t i = 0; i < cqs_.size(); i++) {
562     GRPC_CQ_INTERNAL_UNREF(cqs_[i], "server");
563   }
564 }
565 
566 void Server::AddListener(OrphanablePtr<ListenerInterface> listener) {
567   channelz::ListenSocketNode* listen_socket_node =
568       listener->channelz_listen_socket_node();
569   if (listen_socket_node != nullptr && channelz_node_ != nullptr) {
570     channelz_node_->AddChildListenSocket(listen_socket_node->Ref());
571   }
572   listeners_.emplace_back(std::move(listener));
573 }
574 
575 void Server::Start() {
576   started_ = true;
577   for (grpc_completion_queue* cq : cqs_) {
578     if (grpc_cq_can_listen(cq)) {
579       pollsets_.push_back(grpc_cq_pollset(cq));
580     }
581   }
582   if (unregistered_request_matcher_ == nullptr) {
583     unregistered_request_matcher_ = absl::make_unique<RealRequestMatcher>(this);
584   }
585   for (std::unique_ptr<RegisteredMethod>& rm : registered_methods_) {
586     if (rm->matcher == nullptr) {
587       rm->matcher = absl::make_unique<RealRequestMatcher>(this);
588     }
589   }
590   {
591     MutexLock lock(&mu_global_);
592     starting_ = true;
593   }
594   // Register the interested parties from the config fetcher to the cq pollsets
595 // before starting the listeners, so that the config fetcher is already being
596 // polled when the listeners start watching it.
597   if (config_fetcher_ != nullptr &&
598       config_fetcher_->interested_parties() != nullptr) {
599     for (grpc_pollset* pollset : pollsets_) {
600       grpc_pollset_set_add_pollset(config_fetcher_->interested_parties(),
601                                    pollset);
602     }
603   }
604   for (auto& listener : listeners_) {
605     listener.listener->Start(this, &pollsets_);
606   }
607   MutexLock lock(&mu_global_);
608   starting_ = false;
609   starting_cv_.Signal();
610 }
611 
612 grpc_error_handle Server::SetupTransport(
613     grpc_transport* transport, grpc_pollset* accepting_pollset,
614     const grpc_channel_args* args,
615     const RefCountedPtr<grpc_core::channelz::SocketNode>& socket_node,
616     grpc_resource_user* resource_user) {
617   // Create channel.
618   grpc_error_handle error = GRPC_ERROR_NONE;
619   grpc_channel* channel = grpc_channel_create(
620       nullptr, args, GRPC_SERVER_CHANNEL, transport, resource_user, &error);
621   if (channel == nullptr) {
622     return error;
623   }
624   ChannelData* chand = static_cast<ChannelData*>(
625       grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0)
626           ->channel_data);
627   // Set up CQs.
628   size_t cq_idx;
629   for (cq_idx = 0; cq_idx < cqs_.size(); cq_idx++) {
630     if (grpc_cq_pollset(cqs_[cq_idx]) == accepting_pollset) break;
631   }
632   if (cq_idx == cqs_.size()) {
633     // Completion queue not found.  Pick a random one to publish new calls to.
634     cq_idx = static_cast<size_t>(rand()) % cqs_.size();
635   }
636   // Set up channelz node.
637   intptr_t channelz_socket_uuid = 0;
638   if (socket_node != nullptr) {
639     channelz_socket_uuid = socket_node->uuid();
640     channelz_node_->AddChildSocket(socket_node);
641   }
642   // Initialize chand.
643   chand->InitTransport(Ref(), channel, cq_idx, transport, channelz_socket_uuid);
644   return GRPC_ERROR_NONE;
645 }
646 
647 bool Server::HasOpenConnections() {
648   MutexLock lock(&mu_global_);
649   return !channels_.empty();
650 }
651 
652 void Server::SetRegisteredMethodAllocator(
653     grpc_completion_queue* cq, void* method_tag,
654     std::function<RegisteredCallAllocation()> allocator) {
655   RegisteredMethod* rm = static_cast<RegisteredMethod*>(method_tag);
656   rm->matcher = absl::make_unique<AllocatingRequestMatcherRegistered>(
657       this, cq, rm, std::move(allocator));
658 }
659 
660 void Server::SetBatchMethodAllocator(
661     grpc_completion_queue* cq, std::function<BatchCallAllocation()> allocator) {
662   GPR_DEBUG_ASSERT(unregistered_request_matcher_ == nullptr);
663   unregistered_request_matcher_ =
664       absl::make_unique<AllocatingRequestMatcherBatch>(this, cq,
665                                                        std::move(allocator));
666 }
667 
668 void Server::RegisterCompletionQueue(grpc_completion_queue* cq) {
669   for (grpc_completion_queue* queue : cqs_) {
670     if (queue == cq) return;
671   }
672   GRPC_CQ_INTERNAL_REF(cq, "server");
673   cqs_.push_back(cq);
674 }
675 
676 namespace {
677 
678 bool streq(const std::string& a, const char* b) {
679   return (a.empty() && b == nullptr) ||
680          ((b != nullptr) && !strcmp(a.c_str(), b));
681 }
682 
683 }  // namespace
684 
685 Server::RegisteredMethod* Server::RegisterMethod(
686     const char* method, const char* host,
687     grpc_server_register_method_payload_handling payload_handling,
688     uint32_t flags) {
689   if (!method) {
690     gpr_log(GPR_ERROR,
691             "grpc_server_register_method method string cannot be NULL");
692     return nullptr;
693   }
694   for (std::unique_ptr<RegisteredMethod>& m : registered_methods_) {
695     if (streq(m->method, method) && streq(m->host, host)) {
696       gpr_log(GPR_ERROR, "duplicate registration for %s@%s", method,
697               host ? host : "*");
698       return nullptr;
699     }
700   }
701   if ((flags & ~GRPC_INITIAL_METADATA_USED_MASK) != 0) {
702     gpr_log(GPR_ERROR, "grpc_server_register_method invalid flags 0x%08x",
703             flags);
704     return nullptr;
705   }
706   registered_methods_.emplace_back(absl::make_unique<RegisteredMethod>(
707       method, host, payload_handling, flags));
708   return registered_methods_.back().get();
709 }
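
// The public entry point that funnels into RegisterMethod() above is
// grpc_server_register_method(). A minimal sketch (the method name and the
// choice to read the payload eagerly are illustrative); registration must
// happen before grpc_server_start():
void* ExampleRegisterEchoMethod(grpc_server* server) {
  // Returns an opaque handle (in practice the RegisteredMethod*) to pass to
  // grpc_server_request_registered_call(); returns nullptr on a duplicate
  // registration, a null method name, or invalid flags.
  return grpc_server_register_method(
      server, "/example.Echo/UnaryEcho", /*host=*/nullptr,
      GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER, /*flags=*/0);
}
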
710 
711 void Server::DoneRequestEvent(void* req, grpc_cq_completion* /*c*/) {
712   delete static_cast<RequestedCall*>(req);
713 }
714 
715 void Server::FailCall(size_t cq_idx, RequestedCall* rc,
716                       grpc_error_handle error) {
717   *rc->call = nullptr;
718   rc->initial_metadata->count = 0;
719   GPR_ASSERT(error != GRPC_ERROR_NONE);
720   grpc_cq_end_op(cqs_[cq_idx], rc->tag, error, DoneRequestEvent, rc,
721                  &rc->completion);
722 }
723 
724 // Before calling MaybeFinishShutdown(), we must hold mu_global_ and not
725 // hold mu_call_.
726 void Server::MaybeFinishShutdown() {
727   if (!ShutdownReady() || shutdown_published_) {
728     return;
729   }
730   {
731     MutexLock lock(&mu_call_);
732     KillPendingWorkLocked(
733         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
734   }
735   if (!channels_.empty() || listeners_destroyed_ < listeners_.size()) {
736     if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
737                                   last_shutdown_message_time_),
738                      gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
739       last_shutdown_message_time_ = gpr_now(GPR_CLOCK_REALTIME);
740       gpr_log(GPR_DEBUG,
741               "Waiting for %" PRIuPTR " channels and %" PRIuPTR "/%" PRIuPTR
742               " listeners to be destroyed before shutting down server",
743               channels_.size(), listeners_.size() - listeners_destroyed_,
744               listeners_.size());
745     }
746     return;
747   }
748   shutdown_published_ = true;
749   for (auto& shutdown_tag : shutdown_tags_) {
750     Ref().release();
751     grpc_cq_end_op(shutdown_tag.cq, shutdown_tag.tag, GRPC_ERROR_NONE,
752                    DoneShutdownEvent, this, &shutdown_tag.completion);
753   }
754 }
755 
756 void Server::KillPendingWorkLocked(grpc_error_handle error) {
757   if (started_) {
758     unregistered_request_matcher_->KillRequests(GRPC_ERROR_REF(error));
759     unregistered_request_matcher_->ZombifyPending();
760     for (std::unique_ptr<RegisteredMethod>& rm : registered_methods_) {
761       rm->matcher->KillRequests(GRPC_ERROR_REF(error));
762       rm->matcher->ZombifyPending();
763     }
764   }
765   GRPC_ERROR_UNREF(error);
766 }
767 
768 std::vector<grpc_channel*> Server::GetChannelsLocked() const {
769   std::vector<grpc_channel*> channels;
770   channels.reserve(channels_.size());
771   for (const ChannelData* chand : channels_) {
772     channels.push_back(chand->channel());
773     GRPC_CHANNEL_INTERNAL_REF(chand->channel(), "broadcast");
774   }
775   return channels;
776 }
777 
778 void Server::ListenerDestroyDone(void* arg, grpc_error_handle /*error*/) {
779   Server* server = static_cast<Server*>(arg);
780   MutexLock lock(&server->mu_global_);
781   server->listeners_destroyed_++;
782   server->MaybeFinishShutdown();
783 }
784 
785 namespace {
786 
787 void DonePublishedShutdown(void* /*done_arg*/, grpc_cq_completion* storage) {
788   delete storage;
789 }
790 
791 }  // namespace
792 
793 // - Kills all pending requests-for-incoming-RPC-calls (i.e., the requests made
794 //   via grpc_server_request_call() and grpc_server_request_registered_call()
795 //   will now be cancelled). See KillPendingWorkLocked().
796 //
797 // - Shuts down the listeners (i.e., the server will no longer listen on the
798 //   port for new incoming channels).
799 //
800 // - Iterates through all channels on the server and sends shutdown msg (see
801 //   ChannelBroadcaster::BroadcastShutdown() for details) to the clients via
802 //   the transport layer. The transport layer then guarantees the following:
803 //    -- Sends shutdown to the client (e.g., HTTP2 transport sends GOAWAY).
804 //    -- If the server has outstanding calls still in progress, the
805 //       connection is NOT closed until the server is done with all those calls.
806 //    -- Once there are no more calls in progress, the channel is closed.
807 void Server::ShutdownAndNotify(grpc_completion_queue* cq, void* tag) {
808   ChannelBroadcaster broadcaster;
809   {
810     // Wait for startup to be finished.  Locks mu_global.
811     MutexLock lock(&mu_global_);
812     WaitUntil(&starting_cv_, &mu_global_, [this] { return !starting_; });
813     // Stay locked, and gather up some stuff to do.
814     GPR_ASSERT(grpc_cq_begin_op(cq, tag));
815     if (shutdown_published_) {
816       grpc_cq_end_op(cq, tag, GRPC_ERROR_NONE, DonePublishedShutdown, nullptr,
817                      new grpc_cq_completion);
818       return;
819     }
820     shutdown_tags_.emplace_back(tag, cq);
821     if (ShutdownCalled()) {
822       return;
823     }
824     last_shutdown_message_time_ = gpr_now(GPR_CLOCK_REALTIME);
825     broadcaster.FillChannelsLocked(GetChannelsLocked());
826     // Collect all unregistered then registered calls.
827     {
828       MutexLock lock(&mu_call_);
829       KillPendingWorkLocked(
830           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
831     }
832     ShutdownUnrefOnShutdownCall();
833   }
834   // Shutdown listeners.
835   for (auto& listener : listeners_) {
836     channelz::ListenSocketNode* channelz_listen_socket_node =
837         listener.listener->channelz_listen_socket_node();
838     if (channelz_node_ != nullptr && channelz_listen_socket_node != nullptr) {
839       channelz_node_->RemoveChildListenSocket(
840           channelz_listen_socket_node->uuid());
841     }
842     GRPC_CLOSURE_INIT(&listener.destroy_done, ListenerDestroyDone, this,
843                       grpc_schedule_on_exec_ctx);
844     listener.listener->SetOnDestroyDone(&listener.destroy_done);
845     listener.listener.reset();
846   }
847   broadcaster.BroadcastShutdown(/*send_goaway=*/true, GRPC_ERROR_NONE);
848 }
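
// A minimal sketch of driving the shutdown sequence above from the public C
// API. It assumes `server` was started with `cq` registered and that no other
// tags are outstanding on `cq`; the 5-second grace period is illustrative.
void ExampleShutdownServer(grpc_server* server, grpc_completion_queue* cq) {
  void* shutdown_tag = server;  // any unique pointer works as a tag
  grpc_server_shutdown_and_notify(server, cq, shutdown_tag);
  // Give in-flight calls a grace period, then force-cancel whatever remains.
  grpc_event ev = grpc_completion_queue_next(
      cq,
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                   gpr_time_from_seconds(5, GPR_TIMESPAN)),
      nullptr);
  if (ev.type == GRPC_QUEUE_TIMEOUT) {
    grpc_server_cancel_all_calls(server);
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr);
  }
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE && ev.tag == shutdown_tag);
  grpc_server_destroy(server);
  // Drain the completion queue before destroying it.
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
}
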
849 
850 void Server::CancelAllCalls() {
851   ChannelBroadcaster broadcaster;
852   {
853     MutexLock lock(&mu_global_);
854     broadcaster.FillChannelsLocked(GetChannelsLocked());
855   }
856   broadcaster.BroadcastShutdown(
857       /*send_goaway=*/false,
858       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls"));
859 }
860 
861 void Server::Orphan() {
862   {
863     MutexLock lock(&mu_global_);
864     GPR_ASSERT(ShutdownCalled() || listeners_.empty());
865     GPR_ASSERT(listeners_destroyed_ == listeners_.size());
866   }
867   if (default_resource_user_ != nullptr) {
868     grpc_resource_quota_unref(grpc_resource_user_quota(default_resource_user_));
869     grpc_resource_user_shutdown(default_resource_user_);
870     grpc_resource_user_unref(default_resource_user_);
871   }
872   Unref();
873 }
874 
875 grpc_call_error Server::ValidateServerRequest(
876     grpc_completion_queue* cq_for_notification, void* tag,
877     grpc_byte_buffer** optional_payload, RegisteredMethod* rm) {
878   if ((rm == nullptr && optional_payload != nullptr) ||
879       ((rm != nullptr) && ((optional_payload == nullptr) !=
880                            (rm->payload_handling == GRPC_SRM_PAYLOAD_NONE)))) {
881     return GRPC_CALL_ERROR_PAYLOAD_TYPE_MISMATCH;
882   }
883   if (!grpc_cq_begin_op(cq_for_notification, tag)) {
884     return GRPC_CALL_ERROR_COMPLETION_QUEUE_SHUTDOWN;
885   }
886   return GRPC_CALL_OK;
887 }
888 
889 grpc_call_error Server::ValidateServerRequestAndCq(
890     size_t* cq_idx, grpc_completion_queue* cq_for_notification, void* tag,
891     grpc_byte_buffer** optional_payload, RegisteredMethod* rm) {
892   size_t idx;
893   for (idx = 0; idx < cqs_.size(); idx++) {
894     if (cqs_[idx] == cq_for_notification) {
895       break;
896     }
897   }
898   if (idx == cqs_.size()) {
899     return GRPC_CALL_ERROR_NOT_SERVER_COMPLETION_QUEUE;
900   }
901   grpc_call_error error =
902       ValidateServerRequest(cq_for_notification, tag, optional_payload, rm);
903   if (error != GRPC_CALL_OK) {
904     return error;
905   }
906   *cq_idx = idx;
907   return GRPC_CALL_OK;
908 }
909 
910 grpc_call_error Server::QueueRequestedCall(size_t cq_idx, RequestedCall* rc) {
911   if (ShutdownCalled()) {
912     FailCall(cq_idx, rc,
913              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
914     return GRPC_CALL_OK;
915   }
916   RequestMatcherInterface* rm;
917   switch (rc->type) {
918     case RequestedCall::Type::BATCH_CALL:
919       rm = unregistered_request_matcher_.get();
920       break;
921     case RequestedCall::Type::REGISTERED_CALL:
922       rm = rc->data.registered.method->matcher.get();
923       break;
924   }
925   rm->RequestCallWithPossiblePublish(cq_idx, rc);
926   return GRPC_CALL_OK;
927 }
928 
929 grpc_call_error Server::RequestCall(grpc_call** call,
930                                     grpc_call_details* details,
931                                     grpc_metadata_array* request_metadata,
932                                     grpc_completion_queue* cq_bound_to_call,
933                                     grpc_completion_queue* cq_for_notification,
934                                     void* tag) {
935   size_t cq_idx;
936   grpc_call_error error = ValidateServerRequestAndCq(
937       &cq_idx, cq_for_notification, tag, nullptr, nullptr);
938   if (error != GRPC_CALL_OK) {
939     return error;
940   }
941   RequestedCall* rc =
942       new RequestedCall(tag, cq_bound_to_call, call, request_metadata, details);
943   return QueueRequestedCall(cq_idx, rc);
944 }
945 
946 grpc_call_error Server::RequestRegisteredCall(
947     RegisteredMethod* rm, grpc_call** call, gpr_timespec* deadline,
948     grpc_metadata_array* request_metadata, grpc_byte_buffer** optional_payload,
949     grpc_completion_queue* cq_bound_to_call,
950     grpc_completion_queue* cq_for_notification, void* tag_new) {
951   size_t cq_idx;
952   grpc_call_error error = ValidateServerRequestAndCq(
953       &cq_idx, cq_for_notification, tag_new, optional_payload, rm);
954   if (error != GRPC_CALL_OK) {
955     return error;
956   }
957   RequestedCall* rc =
958       new RequestedCall(tag_new, cq_bound_to_call, call, request_metadata, rm,
959                         deadline, optional_payload);
960   return QueueRequestedCall(cq_idx, rc);
961 }
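
// Public-API counterpart of RequestRegisteredCall() above: a minimal sketch
// in which `method_handle` is the pointer returned by
// grpc_server_register_method() and ExampleRegisteredCallState is a
// hypothetical per-request storage struct. It assumes the method was
// registered with GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER (pass nullptr for
// the payload otherwise).
struct ExampleRegisteredCallState {
  grpc_call* call = nullptr;
  gpr_timespec deadline;
  grpc_metadata_array request_metadata;
  grpc_byte_buffer* payload = nullptr;
};

grpc_call_error ExampleRequestRegisteredCall(grpc_server* server,
                                             void* method_handle,
                                             grpc_completion_queue* cq,
                                             ExampleRegisteredCallState* state) {
  grpc_metadata_array_init(&state->request_metadata);
  // `state` must outlive the request: on a match the server fills in the
  // call, deadline, metadata, and payload, then posts `state` as the tag on
  // `cq`.
  return grpc_server_request_registered_call(
      server, method_handle, &state->call, &state->deadline,
      &state->request_metadata, &state->payload, cq, cq, /*tag_new=*/state);
}
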
962 
963 //
964 // Server::ChannelData::ConnectivityWatcher
965 //
966 
967 class Server::ChannelData::ConnectivityWatcher
968     : public AsyncConnectivityStateWatcherInterface {
969  public:
970   explicit ConnectivityWatcher(ChannelData* chand) : chand_(chand) {
971     GRPC_CHANNEL_INTERNAL_REF(chand_->channel_, "connectivity");
972   }
973 
974   ~ConnectivityWatcher() override {
975     GRPC_CHANNEL_INTERNAL_UNREF(chand_->channel_, "connectivity");
976   }
977 
978  private:
979   void OnConnectivityStateChange(grpc_connectivity_state new_state,
980                                  const absl::Status& /*status*/) override {
981     // Don't do anything until we are being shut down.
982     if (new_state != GRPC_CHANNEL_SHUTDOWN) return;
983     // Shut down channel.
984     MutexLock lock(&chand_->server_->mu_global_);
985     chand_->Destroy();
986   }
987 
988   ChannelData* chand_;
989 };
990 
991 //
992 // Server::ChannelData
993 //
994 
995 Server::ChannelData::~ChannelData() {
996   if (registered_methods_ != nullptr) {
997     for (const ChannelRegisteredMethod& crm : *registered_methods_) {
998       grpc_slice_unref_internal(crm.method);
999       GPR_DEBUG_ASSERT(crm.method.refcount == &kNoopRefcount ||
1000                        crm.method.refcount == nullptr);
1001       if (crm.has_host) {
1002         grpc_slice_unref_internal(crm.host);
1003         GPR_DEBUG_ASSERT(crm.host.refcount == &kNoopRefcount ||
1004                          crm.host.refcount == nullptr);
1005       }
1006     }
1007     registered_methods_.reset();
1008   }
1009   if (server_ != nullptr) {
1010     if (server_->channelz_node_ != nullptr && channelz_socket_uuid_ != 0) {
1011       server_->channelz_node_->RemoveChildSocket(channelz_socket_uuid_);
1012     }
1013     {
1014       MutexLock lock(&server_->mu_global_);
1015       if (list_position_.has_value()) {
1016         server_->channels_.erase(*list_position_);
1017         list_position_.reset();
1018       }
1019       server_->MaybeFinishShutdown();
1020     }
1021   }
1022 }
1023 
1024 void Server::ChannelData::InitTransport(RefCountedPtr<Server> server,
1025                                         grpc_channel* channel, size_t cq_idx,
1026                                         grpc_transport* transport,
1027                                         intptr_t channelz_socket_uuid) {
1028   server_ = std::move(server);
1029   channel_ = channel;
1030   cq_idx_ = cq_idx;
1031   channelz_socket_uuid_ = channelz_socket_uuid;
1032   // Build a lookup table phrased in terms of mdstrs in this channel's context
1033   // to quickly find registered methods.
1034   size_t num_registered_methods = server_->registered_methods_.size();
1035   if (num_registered_methods > 0) {
1036     uint32_t max_probes = 0;
1037     size_t slots = 2 * num_registered_methods;
1038     registered_methods_ =
1039         absl::make_unique<std::vector<ChannelRegisteredMethod>>(slots);
1040     for (std::unique_ptr<RegisteredMethod>& rm : server_->registered_methods_) {
1041       ExternallyManagedSlice host;
1042       ExternallyManagedSlice method(rm->method.c_str());
1043       const bool has_host = !rm->host.empty();
1044       if (has_host) {
1045         host = ExternallyManagedSlice(rm->host.c_str());
1046       }
1047       uint32_t hash =
1048           GRPC_MDSTR_KV_HASH(has_host ? host.Hash() : 0, method.Hash());
1049       uint32_t probes = 0;
1050       for (probes = 0; (*registered_methods_)[(hash + probes) % slots]
1051                            .server_registered_method != nullptr;
1052            probes++) {
1053       }
1054       if (probes > max_probes) max_probes = probes;
1055       ChannelRegisteredMethod* crm =
1056           &(*registered_methods_)[(hash + probes) % slots];
1057       crm->server_registered_method = rm.get();
1058       crm->flags = rm->flags;
1059       crm->has_host = has_host;
1060       if (has_host) {
1061         crm->host = host;
1062       }
1063       crm->method = method;
1064     }
1065     GPR_ASSERT(slots <= UINT32_MAX);
1066     registered_method_max_probes_ = max_probes;
1067   }
1068   // Publish channel.
1069   {
1070     MutexLock lock(&server_->mu_global_);
1071     server_->channels_.push_front(this);
1072     list_position_ = server_->channels_.begin();
1073   }
1074   // Start accept_stream transport op.
1075   grpc_transport_op* op = grpc_make_transport_op(nullptr);
1076   op->set_accept_stream = true;
1077   op->set_accept_stream_fn = AcceptStream;
1078   op->set_accept_stream_user_data = this;
1079   op->start_connectivity_watch = MakeOrphanable<ConnectivityWatcher>(this);
1080   if (server_->ShutdownCalled()) {
1081     op->disconnect_with_error =
1082         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown");
1083   }
1084   grpc_transport_perform_op(transport, op);
1085 }
1086 
1087 Server::ChannelRegisteredMethod* Server::ChannelData::GetRegisteredMethod(
1088     const grpc_slice& host, const grpc_slice& path, bool is_idempotent) {
1089   if (registered_methods_ == nullptr) return nullptr;
1090   /* TODO(ctiller): unify these two searches */
1091   /* check for an exact match with host */
1092   uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash_internal(host),
1093                                      grpc_slice_hash_internal(path));
1094   for (size_t i = 0; i <= registered_method_max_probes_; i++) {
1095     ChannelRegisteredMethod* rm =
1096         &(*registered_methods_)[(hash + i) % registered_methods_->size()];
1097     if (rm->server_registered_method == nullptr) break;
1098     if (!rm->has_host) continue;
1099     if (rm->host != host) continue;
1100     if (rm->method != path) continue;
1101     if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
1102         !is_idempotent) {
1103       continue;
1104     }
1105     return rm;
1106   }
1107   /* check for a wildcard method definition (no host set) */
1108   hash = GRPC_MDSTR_KV_HASH(0, grpc_slice_hash_internal(path));
1109   for (size_t i = 0; i <= registered_method_max_probes_; i++) {
1110     ChannelRegisteredMethod* rm =
1111         &(*registered_methods_)[(hash + i) % registered_methods_->size()];
1112     if (rm->server_registered_method == nullptr) break;
1113     if (rm->has_host) continue;
1114     if (rm->method != path) continue;
1115     if ((rm->flags & GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST) &&
1116         !is_idempotent) {
1117       continue;
1118     }
1119     return rm;
1120   }
1121   return nullptr;
1122 }
1123 
1124 void Server::ChannelData::AcceptStream(void* arg, grpc_transport* /*transport*/,
1125                                        const void* transport_server_data) {
1126   auto* chand = static_cast<Server::ChannelData*>(arg);
1127   /* create a call */
1128   grpc_call_create_args args;
1129   args.channel = chand->channel_;
1130   args.server = chand->server_.get();
1131   args.parent = nullptr;
1132   args.propagation_mask = 0;
1133   args.cq = nullptr;
1134   args.pollset_set_alternative = nullptr;
1135   args.server_transport_data = transport_server_data;
1136   args.add_initial_metadata = nullptr;
1137   args.add_initial_metadata_count = 0;
1138   args.send_deadline = GRPC_MILLIS_INF_FUTURE;
1139   grpc_call* call;
1140   grpc_error_handle error = grpc_call_create(&args, &call);
1141   grpc_call_element* elem =
1142       grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
1143   auto* calld = static_cast<Server::CallData*>(elem->call_data);
1144   if (error != GRPC_ERROR_NONE) {
1145     GRPC_ERROR_UNREF(error);
1146     calld->FailCallCreation();
1147     return;
1148   }
1149   calld->Start(elem);
1150 }
1151 
1152 void Server::ChannelData::FinishDestroy(void* arg,
1153                                         grpc_error_handle /*error*/) {
1154   auto* chand = static_cast<Server::ChannelData*>(arg);
1155   Server* server = chand->server_.get();
1156   GRPC_CHANNEL_INTERNAL_UNREF(chand->channel_, "server");
1157   server->Unref();
1158 }
1159 
1160 void Server::ChannelData::Destroy() {
1161   if (!list_position_.has_value()) return;
1162   GPR_ASSERT(server_ != nullptr);
1163   server_->channels_.erase(*list_position_);
1164   list_position_.reset();
1165   server_->Ref().release();
1166   server_->MaybeFinishShutdown();
1167   GRPC_CLOSURE_INIT(&finish_destroy_channel_closure_, FinishDestroy, this,
1168                     grpc_schedule_on_exec_ctx);
1169   if (GRPC_TRACE_FLAG_ENABLED(grpc_server_channel_trace)) {
1170     gpr_log(GPR_INFO, "Disconnected client");
1171   }
1172   grpc_transport_op* op =
1173       grpc_make_transport_op(&finish_destroy_channel_closure_);
1174   op->set_accept_stream = true;
1175   grpc_channel_next_op(
1176       grpc_channel_stack_element(grpc_channel_get_channel_stack(channel_), 0),
1177       op);
1178 }
1179 
1180 grpc_error_handle Server::ChannelData::InitChannelElement(
1181     grpc_channel_element* elem, grpc_channel_element_args* args) {
1182   GPR_ASSERT(args->is_first);
1183   GPR_ASSERT(!args->is_last);
1184   new (elem->channel_data) ChannelData();
1185   return GRPC_ERROR_NONE;
1186 }
1187 
1188 void Server::ChannelData::DestroyChannelElement(grpc_channel_element* elem) {
1189   auto* chand = static_cast<ChannelData*>(elem->channel_data);
1190   chand->~ChannelData();
1191 }
1192 
1193 //
1194 // Server::CallData
1195 //
1196 
1197 Server::CallData::CallData(grpc_call_element* elem,
1198                            const grpc_call_element_args& args,
1199                            RefCountedPtr<Server> server)
1200     : server_(std::move(server)),
1201       call_(grpc_call_from_top_element(elem)),
1202       call_combiner_(args.call_combiner) {
1203   GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
1204                     elem, grpc_schedule_on_exec_ctx);
1205   GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
1206                     elem, grpc_schedule_on_exec_ctx);
1207 }
1208 
1209 Server::CallData::~CallData() {
1210   GPR_ASSERT(state_.Load(MemoryOrder::RELAXED) != CallState::PENDING);
1211   GRPC_ERROR_UNREF(recv_initial_metadata_error_);
1212   if (host_.has_value()) {
1213     grpc_slice_unref_internal(*host_);
1214   }
1215   if (path_.has_value()) {
1216     grpc_slice_unref_internal(*path_);
1217   }
1218   grpc_metadata_array_destroy(&initial_metadata_);
1219   grpc_byte_buffer_destroy(payload_);
1220 }
1221 
1222 void Server::CallData::SetState(CallState state) {
1223   state_.Store(state, MemoryOrder::RELAXED);
1224 }
1225 
1226 bool Server::CallData::MaybeActivate() {
1227   CallState expected = CallState::PENDING;
1228   return state_.CompareExchangeStrong(&expected, CallState::ACTIVATED,
1229                                       MemoryOrder::ACQ_REL,
1230                                       MemoryOrder::RELAXED);
1231 }
1232 
1233 void Server::CallData::FailCallCreation() {
1234   CallState expected_not_started = CallState::NOT_STARTED;
1235   CallState expected_pending = CallState::PENDING;
1236   if (state_.CompareExchangeStrong(&expected_not_started, CallState::ZOMBIED,
1237                                    MemoryOrder::ACQ_REL,
1238                                    MemoryOrder::ACQUIRE)) {
1239     KillZombie();
1240   } else if (state_.CompareExchangeStrong(&expected_pending, CallState::ZOMBIED,
1241                                           MemoryOrder::ACQ_REL,
1242                                           MemoryOrder::RELAXED)) {
1243     // Zombied call will be destroyed when it's removed from the pending
1244     // queue... later.
1245   }
1246 }
1247 
1248 void Server::CallData::Start(grpc_call_element* elem) {
1249   grpc_op op;
1250   op.op = GRPC_OP_RECV_INITIAL_METADATA;
1251   op.flags = 0;
1252   op.reserved = nullptr;
1253   op.data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_;
1254   GRPC_CLOSURE_INIT(&recv_initial_metadata_batch_complete_,
1255                     RecvInitialMetadataBatchComplete, elem,
1256                     grpc_schedule_on_exec_ctx);
1257   grpc_call_start_batch_and_execute(call_, &op, 1,
1258                                     &recv_initial_metadata_batch_complete_);
1259 }
1260 
1261 void Server::CallData::Publish(size_t cq_idx, RequestedCall* rc) {
1262   grpc_call_set_completion_queue(call_, rc->cq_bound_to_call);
1263   *rc->call = call_;
1264   cq_new_ = server_->cqs_[cq_idx];
1265   GPR_SWAP(grpc_metadata_array, *rc->initial_metadata, initial_metadata_);
1266   switch (rc->type) {
1267     case RequestedCall::Type::BATCH_CALL:
1268       GPR_ASSERT(host_.has_value());
1269       GPR_ASSERT(path_.has_value());
1270       rc->data.batch.details->host = grpc_slice_ref_internal(*host_);
1271       rc->data.batch.details->method = grpc_slice_ref_internal(*path_);
1272       rc->data.batch.details->deadline =
1273           grpc_millis_to_timespec(deadline_, GPR_CLOCK_MONOTONIC);
1274       rc->data.batch.details->flags = recv_initial_metadata_flags_;
1275       break;
1276     case RequestedCall::Type::REGISTERED_CALL:
1277       *rc->data.registered.deadline =
1278           grpc_millis_to_timespec(deadline_, GPR_CLOCK_MONOTONIC);
1279       if (rc->data.registered.optional_payload != nullptr) {
1280         *rc->data.registered.optional_payload = payload_;
1281         payload_ = nullptr;
1282       }
1283       break;
1284     default:
1285       GPR_UNREACHABLE_CODE(return );
1286   }
1287   grpc_cq_end_op(cq_new_, rc->tag, GRPC_ERROR_NONE, Server::DoneRequestEvent,
1288                  rc, &rc->completion, true);
1289 }
1290 
1291 void Server::CallData::PublishNewRpc(void* arg, grpc_error_handle error) {
1292   grpc_call_element* call_elem = static_cast<grpc_call_element*>(arg);
1293   auto* calld = static_cast<Server::CallData*>(call_elem->call_data);
1294   auto* chand = static_cast<Server::ChannelData*>(call_elem->channel_data);
1295   RequestMatcherInterface* rm = calld->matcher_;
1296   Server* server = rm->server();
1297   if (error != GRPC_ERROR_NONE || server->ShutdownCalled()) {
1298     calld->state_.Store(CallState::ZOMBIED, MemoryOrder::RELAXED);
1299     calld->KillZombie();
1300     return;
1301   }
1302   rm->MatchOrQueue(chand->cq_idx(), calld);
1303 }
1304 
1305 namespace {
1306 
1307 void KillZombieClosure(void* call, grpc_error_handle /*error*/) {
1308   grpc_call_unref(static_cast<grpc_call*>(call));
1309 }
1310 
1311 }  // namespace
1312 
1313 void Server::CallData::KillZombie() {
1314   GRPC_CLOSURE_INIT(&kill_zombie_closure_, KillZombieClosure, call_,
1315                     grpc_schedule_on_exec_ctx);
1316   ExecCtx::Run(DEBUG_LOCATION, &kill_zombie_closure_, GRPC_ERROR_NONE);
1317 }
1318 
1319 void Server::CallData::StartNewRpc(grpc_call_element* elem) {
1320   auto* chand = static_cast<ChannelData*>(elem->channel_data);
1321   if (server_->ShutdownCalled()) {
1322     state_.Store(CallState::ZOMBIED, MemoryOrder::RELAXED);
1323     KillZombie();
1324     return;
1325   }
1326   // Find request matcher.
1327   matcher_ = server_->unregistered_request_matcher_.get();
1328   grpc_server_register_method_payload_handling payload_handling =
1329       GRPC_SRM_PAYLOAD_NONE;
1330   if (path_.has_value() && host_.has_value()) {
1331     ChannelRegisteredMethod* rm =
1332         chand->GetRegisteredMethod(*host_, *path_,
1333                                    (recv_initial_metadata_flags_ &
1334                                     GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST));
1335     if (rm != nullptr) {
1336       matcher_ = rm->server_registered_method->matcher.get();
1337       payload_handling = rm->server_registered_method->payload_handling;
1338     }
1339   }
1340   // Start recv_message op if needed.
1341   switch (payload_handling) {
1342     case GRPC_SRM_PAYLOAD_NONE:
1343       PublishNewRpc(elem, GRPC_ERROR_NONE);
1344       break;
1345     case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
1346       grpc_op op;
1347       op.op = GRPC_OP_RECV_MESSAGE;
1348       op.flags = 0;
1349       op.reserved = nullptr;
1350       op.data.recv_message.recv_message = &payload_;
1351       GRPC_CLOSURE_INIT(&publish_, PublishNewRpc, elem,
1352                         grpc_schedule_on_exec_ctx);
1353       grpc_call_start_batch_and_execute(call_, &op, 1, &publish_);
1354       break;
1355     }
1356   }
1357 }
1358 
1359 void Server::CallData::RecvInitialMetadataBatchComplete(
1360     void* arg, grpc_error_handle error) {
1361   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1362   auto* calld = static_cast<Server::CallData*>(elem->call_data);
1363   if (error != GRPC_ERROR_NONE) {
1364     calld->FailCallCreation();
1365     return;
1366   }
1367   calld->StartNewRpc(elem);
1368 }
1369 
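// Intercepts the recv_initial_metadata and recv_trailing_metadata callbacks
// so this filter can observe incoming metadata and flags before the original
// callbacks run, then forwards the batch down the call stack.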
1370 void Server::CallData::StartTransportStreamOpBatchImpl(
1371     grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
1372   if (batch->recv_initial_metadata) {
1373     GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
1374     recv_initial_metadata_ =
1375         batch->payload->recv_initial_metadata.recv_initial_metadata;
1376     original_recv_initial_metadata_ready_ =
1377         batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
1378     batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
1379         &recv_initial_metadata_ready_;
1380     batch->payload->recv_initial_metadata.recv_flags =
1381         &recv_initial_metadata_flags_;
1382   }
1383   if (batch->recv_trailing_metadata) {
1384     original_recv_trailing_metadata_ready_ =
1385         batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
1386     batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
1387         &recv_trailing_metadata_ready_;
1388   }
1389   grpc_call_next_op(elem, batch);
1390 }
1391 
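// Runs when initial metadata arrives: extracts and removes :path and
// :authority, records any deadline carried by the batch, and replaces the
// error with "Missing :authority or :path" if either header is absent,
// before chaining to the original recv_initial_metadata_ready callback.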
1392 void Server::CallData::RecvInitialMetadataReady(void* arg,
1393                                                 grpc_error_handle error) {
1394   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1395   CallData* calld = static_cast<CallData*>(elem->call_data);
1396   grpc_millis op_deadline;
1397   if (error == GRPC_ERROR_NONE) {
1398     GPR_DEBUG_ASSERT(calld->recv_initial_metadata_->idx.named.path != nullptr);
1399     GPR_DEBUG_ASSERT(calld->recv_initial_metadata_->idx.named.authority !=
1400                      nullptr);
1401     calld->path_.emplace(grpc_slice_ref_internal(
1402         GRPC_MDVALUE(calld->recv_initial_metadata_->idx.named.path->md)));
1403     calld->host_.emplace(grpc_slice_ref_internal(
1404         GRPC_MDVALUE(calld->recv_initial_metadata_->idx.named.authority->md)));
1405     grpc_metadata_batch_remove(calld->recv_initial_metadata_, GRPC_BATCH_PATH);
1406     grpc_metadata_batch_remove(calld->recv_initial_metadata_,
1407                                GRPC_BATCH_AUTHORITY);
1408   } else {
1409     GRPC_ERROR_REF(error);
1410   }
1411   op_deadline = calld->recv_initial_metadata_->deadline;
1412   if (op_deadline != GRPC_MILLIS_INF_FUTURE) {
1413     calld->deadline_ = op_deadline;
1414   }
1415   if (calld->host_.has_value() && calld->path_.has_value()) {
1416     /* do nothing */
1417   } else {
1418     /* Pass the error reference to calld->recv_initial_metadata_error_ */
1419     grpc_error_handle src_error = error;
1420     error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1421         "Missing :authority or :path", &src_error, 1);
1422     GRPC_ERROR_UNREF(src_error);
1423     calld->recv_initial_metadata_error_ = GRPC_ERROR_REF(error);
1424   }
1425   grpc_closure* closure = calld->original_recv_initial_metadata_ready_;
1426   calld->original_recv_initial_metadata_ready_ = nullptr;
1427   if (calld->seen_recv_trailing_metadata_ready_) {
1428     GRPC_CALL_COMBINER_START(calld->call_combiner_,
1429                              &calld->recv_trailing_metadata_ready_,
1430                              calld->recv_trailing_metadata_error_,
1431                              "continue server recv_trailing_metadata_ready");
1432   }
1433   Closure::Run(DEBUG_LOCATION, closure, error);
1434 }
1435 
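// Ensures ordering between the two metadata callbacks: if trailing metadata
// arrives before recv_initial_metadata_ready has run, its delivery is
// deferred via the call combiner; otherwise any recorded initial-metadata
// error is attached as a child before the original callback runs.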
1436 void Server::CallData::RecvTrailingMetadataReady(void* arg,
1437                                                  grpc_error_handle error) {
1438   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1439   CallData* calld = static_cast<CallData*>(elem->call_data);
1440   if (calld->original_recv_initial_metadata_ready_ != nullptr) {
1441     calld->recv_trailing_metadata_error_ = GRPC_ERROR_REF(error);
1442     calld->seen_recv_trailing_metadata_ready_ = true;
1443     GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready_,
1444                       RecvTrailingMetadataReady, elem,
1445                       grpc_schedule_on_exec_ctx);
1446     GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
1447                             "deferring server recv_trailing_metadata_ready "
1448                             "until after recv_initial_metadata_ready");
1449     return;
1450   }
1451   error =
1452       grpc_error_add_child(GRPC_ERROR_REF(error),
1453                            GRPC_ERROR_REF(calld->recv_initial_metadata_error_));
1454   Closure::Run(DEBUG_LOCATION, calld->original_recv_trailing_metadata_ready_,
1455                error);
1456 }
1457 
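// Entry points used by the server filter vtable: placement-construct and
// destroy the per-call CallData, and forward transport stream op batches to
// StartTransportStreamOpBatchImpl() above.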
1458 grpc_error_handle Server::CallData::InitCallElement(
1459     grpc_call_element* elem, const grpc_call_element_args* args) {
1460   auto* chand = static_cast<ChannelData*>(elem->channel_data);
1461   new (elem->call_data) Server::CallData(elem, *args, chand->server());
1462   return GRPC_ERROR_NONE;
1463 }
1464 
1465 void Server::CallData::DestroyCallElement(
1466     grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
1467     grpc_closure* /*ignored*/) {
1468   auto* calld = static_cast<CallData*>(elem->call_data);
1469   calld->~CallData();
1470 }
1471 
1472 void Server::CallData::StartTransportStreamOpBatch(
1473     grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
1474   auto* calld = static_cast<CallData*>(elem->call_data);
1475   calld->StartTransportStreamOpBatchImpl(elem, batch);
1476 }
1477 
1478 }  // namespace grpc_core
1479 
1480 //
1481 // C-core API
1482 //
1483 
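// The C-core entry points below are thin wrappers: each one sets up an
// ExecCtx and forwards to the grpc_core::Server owned by the grpc_server
// handle. For orientation, a minimal sketch of the lifecycle they implement
// follows; it assumes a listener added with
// grpc_server_add_insecure_http2_port(), a public-API helper declared
// elsewhere (availability depends on the gRPC version):
//
//   grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
//   grpc_server* server = grpc_server_create(nullptr, nullptr);
//   grpc_server_register_completion_queue(server, cq, nullptr);
//   grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
//   grpc_server_start(server);
//
//   // Ask for an incoming call; its arrival is signalled on `cq` via `tag`.
//   grpc_call* call = nullptr;
//   grpc_call_details details;
//   grpc_metadata_array request_metadata;
//   grpc_call_details_init(&details);
//   grpc_metadata_array_init(&request_metadata);
//   grpc_server_request_call(server, &call, &details, &request_metadata, cq,
//                            cq, /*tag=*/(void*)1);
//   // ... drive grpc_completion_queue_next() in a loop, handling events ...
//
//   // Shutdown: request a notification tag, drain the queue until that tag
//   // (and all other pending tags) have been delivered, then tear down.
//   grpc_server_shutdown_and_notify(server, cq, /*tag=*/(void*)2);
//   grpc_server_cancel_all_calls(server);
//   // ... drain cq until tag 2 is returned ...
//   grpc_server_destroy(server);
//   grpc_completion_queue_shutdown(cq);
//   grpc_completion_queue_destroy(cq);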
1484 grpc_server* grpc_server_create(const grpc_channel_args* args, void* reserved) {
1485   grpc_core::ExecCtx exec_ctx;
1486   GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
1487   grpc_server* c_server = new grpc_server;
1488   c_server->core_server = grpc_core::MakeOrphanable<grpc_core::Server>(args);
1489   return c_server;
1490 }
1491 
1492 void grpc_server_register_completion_queue(grpc_server* server,
1493                                            grpc_completion_queue* cq,
1494                                            void* reserved) {
1495   GRPC_API_TRACE(
1496       "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)", 3,
1497       (server, cq, reserved));
1498   GPR_ASSERT(!reserved);
1499   auto cq_type = grpc_get_cq_completion_type(cq);
1500   if (cq_type != GRPC_CQ_NEXT && cq_type != GRPC_CQ_CALLBACK) {
1501     gpr_log(GPR_INFO,
1502             "Completion queue of type %d is being registered as a "
1503             "server-completion-queue",
1504             static_cast<int>(cq_type));
1505     /* Ideally we would log an error and abort, but the Ruby wrapped-language
1506        API calls grpc_completion_queue_pluck() on server completion queues. */
1507   }
1508   server->core_server->RegisterCompletionQueue(cq);
1509 }
1510 
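// Registers a (method, host) pair ahead of grpc_server_start(); the opaque
// handle returned here is what callers later pass to
// grpc_server_request_registered_call().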
1511 void* grpc_server_register_method(
1512     grpc_server* server, const char* method, const char* host,
1513     grpc_server_register_method_payload_handling payload_handling,
1514     uint32_t flags) {
1515   GRPC_API_TRACE(
1516       "grpc_server_register_method(server=%p, method=%s, host=%s, "
1517       "flags=0x%08x)",
1518       4, (server, method, host, flags));
1519   return server->core_server->RegisterMethod(method, host, payload_handling,
1520                                              flags);
1521 }
1522 
1523 void grpc_server_start(grpc_server* server) {
1524   grpc_core::ExecCtx exec_ctx;
1525   GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
1526   server->core_server->Start();
1527 }
1528 
1529 void grpc_server_shutdown_and_notify(grpc_server* server,
1530                                      grpc_completion_queue* cq, void* tag) {
1531   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1532   grpc_core::ExecCtx exec_ctx;
1533   GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
1534                  (server, cq, tag));
1535   server->core_server->ShutdownAndNotify(cq, tag);
1536 }
1537 
1538 void grpc_server_cancel_all_calls(grpc_server* server) {
1539   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1540   grpc_core::ExecCtx exec_ctx;
1541   GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
1542   server->core_server->CancelAllCalls();
1543 }
1544 
1545 void grpc_server_destroy(grpc_server* server) {
1546   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1547   grpc_core::ExecCtx exec_ctx;
1548   GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
1549   delete server;
1550 }
1551 
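// Queues a request for an incoming call on an unregistered method; when a
// call is matched, Server::CallData::Publish() above fills in `call`,
// `details`, and `request_metadata` and signals `tag` on
// `cq_for_notification`.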
1552 grpc_call_error grpc_server_request_call(
1553     grpc_server* server, grpc_call** call, grpc_call_details* details,
1554     grpc_metadata_array* request_metadata,
1555     grpc_completion_queue* cq_bound_to_call,
1556     grpc_completion_queue* cq_for_notification, void* tag) {
1557   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1558   grpc_core::ExecCtx exec_ctx;
1559   GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
1560   GRPC_API_TRACE(
1561       "grpc_server_request_call("
1562       "server=%p, call=%p, details=%p, initial_metadata=%p, "
1563       "cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
1564       7,
1565       (server, call, details, request_metadata, cq_bound_to_call,
1566        cq_for_notification, tag));
1567   return server->core_server->RequestCall(call, details, request_metadata,
1568                                           cq_bound_to_call, cq_for_notification,
1569                                           tag);
1570 }
1571 
1572 grpc_call_error grpc_server_request_registered_call(
1573     grpc_server* server, void* registered_method, grpc_call** call,
1574     gpr_timespec* deadline, grpc_metadata_array* request_metadata,
1575     grpc_byte_buffer** optional_payload,
1576     grpc_completion_queue* cq_bound_to_call,
1577     grpc_completion_queue* cq_for_notification, void* tag_new) {
1578   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1579   grpc_core::ExecCtx exec_ctx;
1580   GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
1581   auto* rm =
1582       static_cast<grpc_core::Server::RegisteredMethod*>(registered_method);
1583   GRPC_API_TRACE(
1584       "grpc_server_request_registered_call("
1585       "server=%p, registered_method=%p, call=%p, deadline=%p, "
1586       "request_metadata=%p, "
1587       "optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
1588       "tag=%p)",
1589       9,
1590       (server, registered_method, call, deadline, request_metadata,
1591        optional_payload, cq_bound_to_call, cq_for_notification, tag_new));
1592   return server->core_server->RequestRegisteredCall(
1593       rm, call, deadline, request_metadata, optional_payload, cq_bound_to_call,
1594       cq_for_notification, tag_new);
1595 }
1596 
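// The server takes ownership of `server_config_fetcher`, wrapping it in a
// std::unique_ptr that lives for the lifetime of the core server (see the
// call below).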
1597 void grpc_server_set_config_fetcher(
1598     grpc_server* server, grpc_server_config_fetcher* server_config_fetcher) {
1599   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1600   grpc_core::ExecCtx exec_ctx;
1601   GRPC_API_TRACE("grpc_server_set_config_fetcher(server=%p, config_fetcher=%p)",
1602                  2, (server, server_config_fetcher));
1603   server->core_server->set_config_fetcher(
1604       std::unique_ptr<grpc_server_config_fetcher>(server_config_fetcher));
1605 }
1606 
1607 void grpc_server_config_fetcher_destroy(
1608     grpc_server_config_fetcher* server_config_fetcher) {
1609   grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
1610   grpc_core::ExecCtx exec_ctx;
1611   GRPC_API_TRACE("grpc_server_config_fetcher_destroy(config_fetcher=%p)", 1,
1612                  (server_config_fetcher));
1613   delete server_config_fetcher;
1614 }
1615