• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2015 gRPC authors.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *
16  */
17 
18 #include <grpcpp/server.h>
19 
#include <cstdlib>
#include <memory>
#include <sstream>
#include <type_traits>
#include <utility>
24 
25 #include <grpc/grpc.h>
26 #include <grpc/impl/codegen/grpc_types.h>
27 #include <grpc/support/alloc.h>
28 #include <grpc/support/log.h>
29 #include <grpcpp/completion_queue.h>
30 #include <grpcpp/generic/async_generic_service.h>
31 #include <grpcpp/impl/codegen/async_unary_call.h>
32 #include <grpcpp/impl/codegen/byte_buffer.h>
33 #include <grpcpp/impl/codegen/call.h>
34 #include <grpcpp/impl/codegen/completion_queue_tag.h>
35 #include <grpcpp/impl/codegen/method_handler.h>
36 #include <grpcpp/impl/codegen/server_interceptor.h>
37 #include <grpcpp/impl/grpc_library.h>
38 #include <grpcpp/impl/rpc_service_method.h>
39 #include <grpcpp/impl/server_initializer.h>
40 #include <grpcpp/impl/service_type.h>
41 #include <grpcpp/security/server_credentials.h>
42 #include <grpcpp/server_context.h>
43 #include <grpcpp/support/time.h>
44 
45 #include "absl/memory/memory.h"
46 
47 #include "src/core/ext/transport/inproc/inproc_transport.h"
48 #include "src/core/lib/gprpp/manual_constructor.h"
49 #include "src/core/lib/iomgr/exec_ctx.h"
50 #include "src/core/lib/iomgr/iomgr.h"
51 #include "src/core/lib/profiling/timers.h"
52 #include "src/core/lib/surface/call.h"
53 #include "src/core/lib/surface/completion_queue.h"
54 #include "src/core/lib/surface/server.h"
55 #include "src/cpp/client/create_channel_internal.h"
56 #include "src/cpp/server/external_connection_acceptor_impl.h"
57 #include "src/cpp/server/health/default_health_check_service.h"
58 #include "src/cpp/thread_manager/thread_manager.h"
59 
60 namespace grpc {
61 namespace {
62 
63 // The default value for maximum number of threads that can be created in the
64 // sync server. This value of INT_MAX is chosen to match the default behavior if
65 // no ResourceQuota is set. To modify the max number of threads in a sync
66 // server, pass a custom ResourceQuota object  (with the desired number of
67 // max-threads set) to the server builder.
68 #define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX
69 
70 // Give a useful status error message if the resource is exhausted specifically
71 // because the server threadpool is full.
72 const char* kServerThreadpoolExhausted = "Server Threadpool Exhausted";
73 
74 // Although we might like to give a useful status error message on unimplemented
75 // RPCs, it's not always possible since that also would need to be added across
76 // languages and isn't actually required by the spec.
77 const char* kUnknownRpcMethod = "";
78 
// No-op implementation of Server::GlobalCallbacks, installed when the
// application has not registered its own callbacks. Both hooks do nothing.
class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
 public:
  ~DefaultGlobalCallbacks() override {}
  void PreSynchronousRequest(ServerContext* /*context*/) override {}
  void PostSynchronousRequest(ServerContext* /*context*/) override {}
};
85 
86 std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
87 gpr_once g_once_init_callbacks = GPR_ONCE_INIT;
88 
InitGlobalCallbacks()89 void InitGlobalCallbacks() {
90   if (!g_callbacks) {
91     g_callbacks.reset(new DefaultGlobalCallbacks());
92   }
93 }
94 
// Completion-queue tag used during server shutdown. FinalizeResult returns
// false so the event is consumed internally and never surfaced to the user's
// completion queue Next()/Pluck() loop.
class ShutdownTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return false;
  }
};
101 
// Placeholder tag whose FinalizeResult returns true, so a Pluck on it yields
// a visible result; used to verify that a completion queue is fully shut down
// (see SyncRequest::ContinueRunAfterInterception).
class PhonyTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return true;
  }
};
108 
// Bundles the generic server context and its reader/writer stream so they are
// constructed before the GenericAsyncRequest base of
// Server::UnimplementedAsyncRequest (which is why that class inherits this
// privately; see the comment at its definition).
class UnimplementedAsyncRequestContext {
 protected:
  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}

  GenericServerContext server_context_;
  GenericServerAsyncReaderWriter generic_stream_;
};
116 
117 // TODO(vjpai): Just for this file, use some contents of the experimental
118 // namespace here to make the code easier to read below. Remove this when
119 // de-experimentalized fully.
120 #ifndef GRPC_CALLBACK_API_NONEXPERIMENTAL
121 using ::grpc::experimental::CallbackGenericService;
122 using ::grpc::experimental::CallbackServerContext;
123 using ::grpc::experimental::GenericCallbackServerContext;
124 #endif
125 
126 }  // namespace
127 
// Common state for one pending async call request: where to deliver the call
// (stream/call_cq), where to report its arrival (notification_cq/tag), and
// whether this object deletes itself once the result is finalized.
ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : server_(server),
      context_(context),
      stream_(stream),
      call_cq_(call_cq),
      notification_cq_(notification_cq),
      tag_(tag),
      delete_on_finalize_(delete_on_finalize),
      call_(nullptr),
      done_intercepting_(false) {
  /* Set up interception state partially for the receive ops. call_wrapper_ is
   * not filled at this point, but it will be filled before the interceptors are
   * run. */
  interceptor_methods_.SetCall(&call_wrapper_);
  interceptor_methods_.SetReverse();
  // Balanced by CompleteAvalanching() in the destructor.
  call_cq_->RegisterAvalanching();  // This op will trigger more ops
}
148 
// Releases the avalanching hold taken on call_cq_ in the constructor.
ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
  call_cq_->CompleteAvalanching();
}
152 
// Called when the core reports the outcome of the requested call. Binds the
// new call into the C++ context/stream, then runs POST_RECV_INITIAL_METADATA
// interceptors. May be invoked twice: once for the core event and (if
// interceptors deferred completion) once more via
// ContinueFinalizeResultAfterInterception, with done_intercepting_ set.
// Returns true when *tag is ready to hand to the user; false when the result
// is being withheld until interceptors finish.
bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (done_intercepting_) {
    // Second pass, queued after interceptors ran: just surface the user tag.
    *tag = tag_;
    if (delete_on_finalize_) {
      delete this;
    }
    return true;
  }
  context_->set_call(call_);
  context_->cq_ = call_cq_;
  if (call_wrapper_.call() == nullptr) {
    // Fill it since it is empty.
    call_wrapper_ = internal::Call(
        call_, server_, call_cq_, server_->max_receive_message_size(), nullptr);
  }

  // just the pointers inside call are copied here
  stream_->BindCall(&call_wrapper_);

  if (*status && call_ && call_wrapper_.server_rpc_info()) {
    done_intercepting_ = true;
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&context_->client_metadata_);
    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueFinalizeResultAfterInterception(); })) {
      // There are no interceptors to run. Continue
    } else {
      // There were interceptors to be run, so
      // ContinueFinalizeResultAfterInterception will be run when interceptors
      // are done.
      return false;
    }
  }
  if (*status && call_) {
    context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  }
  *tag = tag_;
  if (delete_on_finalize_) {
    delete this;
  }
  return true;
}
198 
// Resumes FinalizeResult after deferred interceptors complete: starts the
// completion op, then re-queues this object on the notification queue so the
// second (done_intercepting_) pass of FinalizeResult delivers the user tag.
void ServerInterface::BaseAsyncRequest::
    ContinueFinalizeResultAfterInterception() {
  context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  // Queue a tag which will be returned immediately
  grpc_core::ExecCtx exec_ctx;
  grpc_cq_begin_op(notification_cq_->cq(), this);
  grpc_cq_end_op(
      notification_cq_->cq(), this, GRPC_ERROR_NONE,
      // Completion storage is heap-allocated below; the done callback frees it.
      [](void* /*arg*/, grpc_cq_completion* completion) { delete completion; },
      nullptr, new grpc_cq_completion());
}
210 
// Request for a pre-registered (codegen) method; always self-deleting on
// finalize. Records the method name and RPC type for server_rpc_info setup.
ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, const char* name,
    internal::RpcMethod::RpcType type)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       true),
      name_(name),
      type_(type) {}
220 
// Asks the core server for the next call to the registered method; this
// object itself is the completion tag. `payload` is non-null only for methods
// whose request message is delivered with the call.
void ServerInterface::RegisteredAsyncRequest::IssueRequest(
    void* registered_method, grpc_byte_buffer** payload,
    ServerCompletionQueue* notification_cq) {
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  GPR_ASSERT(grpc_server_request_registered_call(
                 server_->server(), registered_method, &call_,
                 &context_->deadline_, context_->client_metadata_.arr(),
                 payload, call_cq_->cq(), notification_cq->cq(),
                 this) == GRPC_CALL_OK);
}
232 
// Request for any not-pre-registered method. Unlike RegisteredAsyncRequest,
// issues the core request immediately in the constructor; method/host arrive
// later in call_details_ (consumed in FinalizeResult).
ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
    ServerInterface* server, GenericServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       delete_on_finalize) {
  grpc_call_details_init(&call_details_);
  GPR_ASSERT(notification_cq);
  GPR_ASSERT(call_cq);
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  GPR_ASSERT(grpc_server_request_call(server->server(), &call_, &call_details_,
                                      context->client_metadata_.arr(),
                                      call_cq->cq(), notification_cq->cq(),
                                      this) == GRPC_CALL_OK);
}
249 
// Copies method/host/deadline out of call_details_ into the generic context,
// builds the call wrapper with full server_rpc_info (enabling interceptors),
// then delegates to the base implementation. The method/host slices are
// unreffed on both success and failure paths since the core handed us a ref.
bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
                                                          bool* status) {
  // If we are done intercepting, there is nothing more for us to do
  if (done_intercepting_) {
    return BaseAsyncRequest::FinalizeResult(tag, status);
  }
  // TODO(yangg) remove the copy here.
  if (*status) {
    static_cast<GenericServerContext*>(context_)->method_ =
        StringFromCopiedSlice(call_details_.method);
    static_cast<GenericServerContext*>(context_)->host_ =
        StringFromCopiedSlice(call_details_.host);
    context_->deadline_ = call_details_.deadline;
  }
  grpc_slice_unref(call_details_.method);
  grpc_slice_unref(call_details_.host);
  call_wrapper_ = internal::Call(
      call_, server_, call_cq_, server_->max_receive_message_size(),
      context_->set_server_rpc_info(
          static_cast<GenericServerContext*>(context_)->method_.c_str(),
          internal::RpcMethod::BIDI_STREAMING,
          *server_->interceptor_creators()));
  return BaseAsyncRequest::FinalizeResult(tag, status);
}
274 
namespace {
// Functor queued on the callback-API completion queue at server shutdown.
// Run() fires once shutdown is complete and destroys both the owned queue and
// this callback (both heap-allocated by the caller).
class ShutdownCallback : public grpc_experimental_completion_queue_functor {
 public:
  ShutdownCallback() {
    functor_run = &ShutdownCallback::Run;
    // Set inlineable to true since this callback is trivial and thus does not
    // need to be run from the executor (triggering a thread hop). This should
    // only be used by internal callbacks like this and not by user application
    // code.
    inlineable = true;
  }
  // TakeCQ takes ownership of the cq into the shutdown callback
  // so that the shutdown callback will be responsible for destroying it
  void TakeCQ(CompletionQueue* cq) { cq_ = cq; }

  // The Run function will get invoked by the completion queue library
  // when the shutdown is actually complete
  static void Run(grpc_experimental_completion_queue_functor* cb, int) {
    auto* callback = static_cast<ShutdownCallback*>(cb);
    delete callback->cq_;
    delete callback;
  }

 private:
  CompletionQueue* cq_ = nullptr;  // owned after TakeCQ; freed in Run
};
}  // namespace
302 
/// Use private inheritance rather than composition only to establish order
/// of construction, since the public base class should be constructed after the
/// elements belonging to the private base class are constructed. This is not
/// possible using true composition.
/// Standing generic request used to catch calls to methods no service has
/// registered; never surfaces a user tag (tag is nullptr, non-self-deleting).
class Server::UnimplementedAsyncRequest final
    : private grpc::UnimplementedAsyncRequestContext,
      public GenericAsyncRequest {
 public:
  UnimplementedAsyncRequest(ServerInterface* server,
                            grpc::ServerCompletionQueue* cq)
      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
                            nullptr, false) {}

  bool FinalizeResult(void** tag, bool* status) override;

  // Accessors for the privately-inherited context/stream members.
  grpc::ServerContext* context() { return &server_context_; }
  grpc::GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
};
321 
/// UnimplementedAsyncResponse should not post user-visible completions to the
/// C++ completion queue, but is generated as a CQ event by the core
/// (hence FinalizeResult always returns false and the object self-deletes
/// once the underlying op set truly completes).
class Server::UnimplementedAsyncResponse final
    : public grpc::internal::CallOpSet<
          grpc::internal::CallOpSendInitialMetadata,
          grpc::internal::CallOpServerSendStatus> {
 public:
  explicit UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
  // Owns the originating request; destroying the response frees it too.
  ~UnimplementedAsyncResponse() override { delete request_; }

  bool FinalizeResult(void** tag, bool* status) override {
    if (grpc::internal::CallOpSet<
            grpc::internal::CallOpSendInitialMetadata,
            grpc::internal::CallOpServerSendStatus>::FinalizeResult(tag,
                                                                    status)) {
      delete this;
    } else {
      // The tag was swallowed due to interception. We will see it again.
    }
    return false;
  }

 private:
  UnimplementedAsyncRequest* const request_;
};
347 
// One pending call slot for the synchronous API. Each instance registers
// itself with the core allocator (RegisteredCallAllocation for codegen
// methods, BatchCallAllocation for unknown methods), then — once a call
// arrives — Run()s the handler inline on a sync-server thread and deletes
// itself. Lifetime: created per expected call, freed in FinalizeResult (on
// failure), ContinueRunAfterInterception (normal path), or Cleanup().
class Server::SyncRequest final : public grpc::internal::CompletionQueueTag {
 public:
  // Slot for a registered (codegen) method: the core also fills in the
  // deadline and, for methods with a request message, the payload.
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::RegisteredCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    data->deadline = &deadline_;
    data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
  }

  // Slot for an unknown/batch method: method/host/deadline arrive via
  // heap-allocated call details instead.
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::BatchCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    call_details_ = new grpc_call_details;
    grpc_call_details_init(call_details_);
    data->details = call_details_;
  }

  ~SyncRequest() override {
    // The destructor should only cleanup those objects created in the
    // constructor, since some paths may or may not actually go through the
    // Run stage where other objects are allocated.
    if (has_request_payload_ && request_payload_) {
      grpc_byte_buffer_destroy(request_payload_);
    }
    if (call_details_ != nullptr) {
      grpc_call_details_destroy(call_details_);
      delete call_details_;
    }
    grpc_metadata_array_destroy(&request_metadata_);
    // Drops the Ref taken in CommonSetup; may notify a waiting shutdown.
    server_->UnrefWithPossibleNotify();
  }

  // CQ tag protocol: a failed event (*status false) means no call arrived —
  // free the slot. Otherwise pick up the deadline for batch calls.
  bool FinalizeResult(void** /*tag*/, bool* status) override {
    if (!*status) {
      delete this;
      return false;
    }
    if (call_details_) {
      deadline_ = call_details_->deadline;
    }
    return true;
  }

  // Runs the matched call on the current sync-server thread. `resources`
  // is false when the thread pool is exhausted, in which case the
  // resource-exhausted handler is used instead of the real one.
  void Run(const std::shared_ptr<GlobalCallbacks>& global_callbacks,
           bool resources) {
    ctx_.Init(deadline_, &request_metadata_);
    wrapped_call_.Init(
        call_, server_, &cq_, server_->max_receive_message_size(),
        ctx_->ctx.set_server_rpc_info(method_->name(), method_->method_type(),
                                      server_->interceptor_creators_));
    ctx_->ctx.set_call(call_);
    ctx_->ctx.cq_ = &cq_;
    // Ownership of the metadata entries moved into the context above.
    request_metadata_.count = 0;

    global_callbacks_ = global_callbacks;
    resources_ = resources;

    interceptor_methods_.SetCall(&*wrapped_call_);
    interceptor_methods_.SetReverse();
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        grpc::experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&ctx_->ctx.client_metadata_);

    if (has_request_payload_) {
      // Set interception point for RECV MESSAGE
      auto* handler = resources_ ? method_->handler()
                                 : server_->resource_exhausted_handler_.get();
      deserialized_request_ = handler->Deserialize(call_, request_payload_,
                                                   &request_status_, nullptr);
      // Deserialize consumed the payload; clear so the dtor doesn't double-free.
      request_payload_ = nullptr;
      interceptor_methods_.AddInterceptionHookPoint(
          grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
      interceptor_methods_.SetRecvMessage(deserialized_request_, nullptr);
    }

    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueRunAfterInterception(); })) {
      ContinueRunAfterInterception();
    } else {
      // There were interceptors to be run, so ContinueRunAfterInterception
      // will be run when interceptors are done.
    }
  }

  // Invokes the user handler, runs global pre/post hooks, drains the private
  // pluck CQ, tears down Run-stage state, then frees this slot.
  void ContinueRunAfterInterception() {
    ctx_->ctx.BeginCompletionOp(&*wrapped_call_, nullptr, nullptr);
    global_callbacks_->PreSynchronousRequest(&ctx_->ctx);
    auto* handler = resources_ ? method_->handler()
                               : server_->resource_exhausted_handler_.get();
    handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
        &*wrapped_call_, &ctx_->ctx, deserialized_request_, request_status_,
        nullptr, nullptr));
    global_callbacks_->PostSynchronousRequest(&ctx_->ctx);

    cq_.Shutdown();

    grpc::internal::CompletionQueueTag* op_tag = ctx_->ctx.GetCompletionOpTag();
    cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));

    // Ensure the cq_ is shutdown
    grpc::PhonyTag ignored_tag;
    GPR_ASSERT(cq_.Pluck(&ignored_tag) == false);

    // Cleanup structures allocated during Run/ContinueRunAfterInterception
    wrapped_call_.Destroy();
    ctx_.Destroy();

    delete this;
  }

  // For requests that must be only cleaned up but not actually Run
  void Cleanup() {
    cq_.Shutdown();
    grpc_call_unref(call_);
    delete this;
  }

 private:
  // Delegated-to base constructor: records the server/method and whether this
  // method type carries a request payload with the call.
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method)
      : server_(server),
        method_(method),
        has_request_payload_(method->method_type() ==
                                 grpc::internal::RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 grpc::internal::RpcMethod::SERVER_STREAMING),
        cq_(grpc_completion_queue_create_for_pluck(nullptr)) {}

  // Wires this slot into the core's call allocation struct (shared fields of
  // RegisteredCallAllocation and BatchCallAllocation).
  template <class CallAllocation>
  void CommonSetup(CallAllocation* data) {
    server_->Ref();  // balanced by UnrefWithPossibleNotify in the dtor
    grpc_metadata_array_init(&request_metadata_);
    data->tag = static_cast<void*>(this);
    data->call = &call_;
    data->initial_metadata = &request_metadata_;
    data->cq = cq_.cq();
  }

  Server* const server_;
  grpc::internal::RpcServiceMethod* const method_;
  const bool has_request_payload_;
  grpc_call* call_;
  grpc_call_details* call_details_ = nullptr;  // batch (unknown-method) only
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc_byte_buffer* request_payload_ = nullptr;
  grpc::CompletionQueue cq_;  // private pluck CQ for this call's ops
  grpc::Status request_status_;
  std::shared_ptr<GlobalCallbacks> global_callbacks_;
  bool resources_;
  void* deserialized_request_ = nullptr;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;

  // ServerContextWrapper allows ManualConstructor while using a private
  // constructor of ServerContext via this friend class.
  struct ServerContextWrapper {
    ServerContext ctx;

    ServerContextWrapper(gpr_timespec deadline, grpc_metadata_array* arr)
        : ctx(deadline, arr) {}
  };

  // Constructed only on the Run path; destroyed in
  // ContinueRunAfterInterception (hence ManualConstructor, not plain members).
  grpc_core::ManualConstructor<ServerContextWrapper> ctx_;
  grpc_core::ManualConstructor<internal::Call> wrapped_call_;
};
516 
517 template <class ServerContextType>
518 class Server::CallbackRequest final
519     : public grpc::internal::CompletionQueueTag {
520  public:
521   static_assert(
522       std::is_base_of<grpc::CallbackServerContext, ServerContextType>::value,
523       "ServerContextType must be derived from CallbackServerContext");
524 
525   // For codegen services, the value of method represents the defined
526   // characteristics of the method being requested. For generic services, method
527   // is nullptr since these services don't have pre-defined methods.
CallbackRequest(Server * server,grpc::internal::RpcServiceMethod * method,grpc::CompletionQueue * cq,grpc_core::Server::RegisteredCallAllocation * data)528   CallbackRequest(Server* server, grpc::internal::RpcServiceMethod* method,
529                   grpc::CompletionQueue* cq,
530                   grpc_core::Server::RegisteredCallAllocation* data)
531       : server_(server),
532         method_(method),
533         has_request_payload_(method->method_type() ==
534                                  grpc::internal::RpcMethod::NORMAL_RPC ||
535                              method->method_type() ==
536                                  grpc::internal::RpcMethod::SERVER_STREAMING),
537         cq_(cq),
538         tag_(this),
539         ctx_(server_->context_allocator() != nullptr
540                  ? server_->context_allocator()->NewCallbackServerContext()
541                  : nullptr) {
542     CommonSetup(server, data);
543     data->deadline = &deadline_;
544     data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
545   }
546 
547   // For generic services, method is nullptr since these services don't have
548   // pre-defined methods.
CallbackRequest(Server * server,grpc::CompletionQueue * cq,grpc_core::Server::BatchCallAllocation * data)549   CallbackRequest(Server* server, grpc::CompletionQueue* cq,
550                   grpc_core::Server::BatchCallAllocation* data)
551       : server_(server),
552         method_(nullptr),
553         has_request_payload_(false),
554         call_details_(new grpc_call_details),
555         cq_(cq),
556         tag_(this),
557         ctx_(server_->context_allocator() != nullptr
558                  ? server_->context_allocator()
559                        ->NewGenericCallbackServerContext()
560                  : nullptr) {
561     CommonSetup(server, data);
562     grpc_call_details_init(call_details_);
563     data->details = call_details_;
564   }
565 
~CallbackRequest()566   ~CallbackRequest() override {
567     delete call_details_;
568     grpc_metadata_array_destroy(&request_metadata_);
569     if (has_request_payload_ && request_payload_) {
570       grpc_byte_buffer_destroy(request_payload_);
571     }
572     if (ctx_alloc_by_default_ || server_->context_allocator() == nullptr) {
573       default_ctx_.Destroy();
574     }
575     server_->UnrefWithPossibleNotify();
576   }
577 
578   // Needs specialization to account for different processing of metadata
579   // in generic API
580   bool FinalizeResult(void** tag, bool* status) override;
581 
582  private:
583   // method_name needs to be specialized between named method and generic
584   const char* method_name() const;
585 
586   class CallbackCallTag : public grpc_experimental_completion_queue_functor {
587    public:
CallbackCallTag(Server::CallbackRequest<ServerContextType> * req)588     explicit CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
589         : req_(req) {
590       functor_run = &CallbackCallTag::StaticRun;
591       // Set inlineable to true since this callback is internally-controlled
592       // without taking any locks, and thus does not need to be run from the
593       // executor (which triggers a thread hop). This should only be used by
594       // internal callbacks like this and not by user application code. The work
595       // here is actually non-trivial, but there is no chance of having user
596       // locks conflict with each other so it's ok to run inlined.
597       inlineable = true;
598     }
599 
600     // force_run can not be performed on a tag if operations using this tag
601     // have been sent to PerformOpsOnCall. It is intended for error conditions
602     // that are detected before the operations are internally processed.
force_run(bool ok)603     void force_run(bool ok) { Run(ok); }
604 
605    private:
606     Server::CallbackRequest<ServerContextType>* req_;
607     grpc::internal::Call* call_;
608 
StaticRun(grpc_experimental_completion_queue_functor * cb,int ok)609     static void StaticRun(grpc_experimental_completion_queue_functor* cb,
610                           int ok) {
611       static_cast<CallbackCallTag*>(cb)->Run(static_cast<bool>(ok));
612     }
Run(bool ok)613     void Run(bool ok) {
614       void* ignored = req_;
615       bool new_ok = ok;
616       GPR_ASSERT(!req_->FinalizeResult(&ignored, &new_ok));
617       GPR_ASSERT(ignored == req_);
618 
619       if (!ok) {
620         // The call has been shutdown.
621         // Delete its contents to free up the request.
622         delete req_;
623         return;
624       }
625 
626       // Bind the call, deadline, and metadata from what we got
627       req_->ctx_->set_call(req_->call_);
628       req_->ctx_->cq_ = req_->cq_;
629       req_->ctx_->BindDeadlineAndMetadata(req_->deadline_,
630                                           &req_->request_metadata_);
631       req_->request_metadata_.count = 0;
632 
633       // Create a C++ Call to control the underlying core call
634       call_ =
635           new (grpc_call_arena_alloc(req_->call_, sizeof(grpc::internal::Call)))
636               grpc::internal::Call(
637                   req_->call_, req_->server_, req_->cq_,
638                   req_->server_->max_receive_message_size(),
639                   req_->ctx_->set_server_rpc_info(
640                       req_->method_name(),
641                       (req_->method_ != nullptr)
642                           ? req_->method_->method_type()
643                           : grpc::internal::RpcMethod::BIDI_STREAMING,
644                       req_->server_->interceptor_creators_));
645 
646       req_->interceptor_methods_.SetCall(call_);
647       req_->interceptor_methods_.SetReverse();
648       // Set interception point for RECV INITIAL METADATA
649       req_->interceptor_methods_.AddInterceptionHookPoint(
650           grpc::experimental::InterceptionHookPoints::
651               POST_RECV_INITIAL_METADATA);
652       req_->interceptor_methods_.SetRecvInitialMetadata(
653           &req_->ctx_->client_metadata_);
654 
655       if (req_->has_request_payload_) {
656         // Set interception point for RECV MESSAGE
657         req_->request_ = req_->method_->handler()->Deserialize(
658             req_->call_, req_->request_payload_, &req_->request_status_,
659             &req_->handler_data_);
660         req_->request_payload_ = nullptr;
661         req_->interceptor_methods_.AddInterceptionHookPoint(
662             grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
663         req_->interceptor_methods_.SetRecvMessage(req_->request_, nullptr);
664       }
665 
666       if (req_->interceptor_methods_.RunInterceptors(
667               [this] { ContinueRunAfterInterception(); })) {
668         ContinueRunAfterInterception();
669       } else {
670         // There were interceptors to be run, so ContinueRunAfterInterception
671         // will be run when interceptors are done.
672       }
673     }
ContinueRunAfterInterception()674     void ContinueRunAfterInterception() {
675       auto* handler = (req_->method_ != nullptr)
676                           ? req_->method_->handler()
677                           : req_->server_->generic_handler_.get();
678       handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
679           call_, req_->ctx_, req_->request_, req_->request_status_,
680           req_->handler_data_, [this] { delete req_; }));
681     }
682   };
683 
684   template <class CallAllocation>
CommonSetup(Server * server,CallAllocation * data)685   void CommonSetup(Server* server, CallAllocation* data) {
686     server->Ref();
687     grpc_metadata_array_init(&request_metadata_);
688     data->tag = static_cast<void*>(&tag_);
689     data->call = &call_;
690     data->initial_metadata = &request_metadata_;
691     if (ctx_ == nullptr) {
692       default_ctx_.Init();
693       ctx_ = &*default_ctx_;
694       ctx_alloc_by_default_ = true;
695     }
696     ctx_->set_context_allocator(server->context_allocator());
697     data->cq = cq_->cq();
698   }
699 
  // Owning server; a shutdown ref is taken in CommonSetup (see Server::Ref).
  Server* const server_;
  // Matched registered method; nullptr for generic (unmatched) calls.
  grpc::internal::RpcServiceMethod* const method_;
  // Whether the core was asked to read the initial message eagerly.
  const bool has_request_payload_;
  // Raw payload delivered by the core; consumed by the handler's Deserialize.
  grpc_byte_buffer* request_payload_ = nullptr;
  // Deserialized request object (concrete type known only to the handler).
  void* request_ = nullptr;
  // Opaque per-call data produced by Deserialize for the handler's use.
  void* handler_data_ = nullptr;
  // Outcome of deserializing the request payload.
  grpc::Status request_status_;
  // Call-details storage for generic calls; stays nullptr for registered
  // methods (presumably set by the generic-call constructor — not visible
  // in this chunk).
  grpc_call_details* const call_details_ = nullptr;
  // Core call object, populated by the core when the call is allocated.
  grpc_call* call_;
  gpr_timespec deadline_;
  // Client's initial metadata, populated by the core.
  grpc_metadata_array request_metadata_;
  // Completion queue this request was allocated against.
  grpc::CompletionQueue* const cq_;
  // True when ctx_ points at default_ctx_ rather than an allocator-provided
  // context; used to decide how the context is destroyed.
  bool ctx_alloc_by_default_ = false;
  CallbackCallTag tag_;
  ServerContextType* ctx_ = nullptr;
  // Backing storage for the lazily-constructed default server context.
  grpc_core::ManualConstructor<ServerContextType> default_ctx_;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
};
718 
// Registered (non-generic) callback requests have no call-details to copy
// out when the allocation tag completes, so finalization is a no-op.
// Returns false: the tag is consumed internally rather than surfaced to a
// user-visible completion queue.
template <>
bool Server::CallbackRequest<grpc::CallbackServerContext>::FinalizeResult(
    void** /*tag*/, bool* /*status*/) {
  return false;
}
724 
// Generic callback requests receive grpc_call_details from the core. On a
// successful event, copy the deadline and the method/host strings into the
// generic server context. The method/host slices are unreffed on both paths,
// which implies this code owns them after the event — confirm against the
// core's call-details contract. Returns false: the tag is consumed
// internally, not surfaced to a user-visible completion queue.
template <>
bool Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::FinalizeResult(void** /*tag*/,
                                                        bool* status) {
  if (*status) {
    deadline_ = call_details_->deadline;
    // TODO(yangg) remove the copy here
    ctx_->method_ = grpc::StringFromCopiedSlice(call_details_->method);
    ctx_->host_ = grpc::StringFromCopiedSlice(call_details_->host);
  }
  grpc_slice_unref(call_details_->method);
  grpc_slice_unref(call_details_->host);
  return false;
}
739 
// For registered methods the name comes from the registered method object.
template <>
const char* Server::CallbackRequest<grpc::CallbackServerContext>::method_name()
    const {
  return method_->name();
}
745 
// For generic calls the name was copied into the context from the core's
// call details (see the generic FinalizeResult above). The returned pointer
// is only valid while the context's method string is alive.
template <>
const char* Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::method_name() const {
  return ctx_->method().c_str();
}
751 
752 // Implementation of ThreadManager. Each instance of SyncRequestThreadManager
753 // manages a pool of threads that poll for incoming Sync RPCs and call the
754 // appropriate RPC handlers
class Server::SyncRequestThreadManager : public grpc::ThreadManager {
 public:
  // One manager per server CQ: owns a pool of poller threads (bounds set by
  // min_pollers/max_pollers) that pull SyncRequest tags off server_cq_ and
  // run their handlers.
  SyncRequestThreadManager(Server* server, grpc::CompletionQueue* server_cq,
                           std::shared_ptr<GlobalCallbacks> global_callbacks,
                           grpc_resource_quota* rq, int min_pollers,
                           int max_pollers, int cq_timeout_msec)
      : ThreadManager("SyncServer", rq, min_pollers, max_pollers),
        server_(server),
        server_cq_(server_cq),
        cq_timeout_msec_(cq_timeout_msec),
        global_callbacks_(std::move(global_callbacks)) {}

  // Polls the server CQ for up to cq_timeout_msec_ and maps the CQ outcome
  // onto the ThreadManager work-status enum.
  WorkStatus PollForWork(void** tag, bool* ok) override {
    *tag = nullptr;
    // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
    // right now
    gpr_timespec deadline =
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));

    switch (server_cq_->AsyncNext(tag, ok, deadline)) {
      case grpc::CompletionQueue::TIMEOUT:
        return TIMEOUT;
      case grpc::CompletionQueue::SHUTDOWN:
        return SHUTDOWN;
      case grpc::CompletionQueue::GOT_EVENT:
        return WORK_FOUND;
    }

    GPR_UNREACHABLE_CODE(return TIMEOUT);
  }

  // Runs the handler for one dequeued SyncRequest. `resources` indicates
  // whether a thread was available (false triggers the resource-exhausted
  // path inside Run).
  void DoWork(void* tag, bool ok, bool resources) override {
    (void)ok;
    SyncRequest* sync_req = static_cast<SyncRequest*>(tag);

    // Under the AllocatingRequestMatcher model we will never see an invalid tag
    // here.
    GPR_DEBUG_ASSERT(sync_req != nullptr);
    GPR_DEBUG_ASSERT(ok);

    GPR_TIMER_SCOPE("sync_req->Run()", 0);
    sync_req->Run(global_callbacks_, resources);
  }

  // Registers an allocator with the core so that each incoming call for
  // `method` gets a freshly constructed SyncRequest (the SyncRequest
  // constructor fills in `result`).
  void AddSyncMethod(grpc::internal::RpcServiceMethod* method, void* tag) {
    server_->server()->core_server->SetRegisteredMethodAllocator(
        server_cq_->cq(), tag, [this, method] {
          grpc_core::Server::RegisteredCallAllocation result;
          new SyncRequest(server_, method, &result);
          return result;
        });
    has_sync_method_ = true;
  }

  // Installs a catch-all "unknown" bidi-streaming method that returns
  // UNIMPLEMENTED for unmatched RPCs. Only done when at least one sync
  // method exists, since otherwise this manager has no pollers.
  void AddUnknownSyncMethod() {
    if (has_sync_method_) {
      unknown_method_ = absl::make_unique<grpc::internal::RpcServiceMethod>(
          "unknown", grpc::internal::RpcMethod::BIDI_STREAMING,
          new grpc::internal::UnknownMethodHandler(kUnknownRpcMethod));
      server_->server()->core_server->SetBatchMethodAllocator(
          server_cq_->cq(), [this] {
            grpc_core::Server::BatchCallAllocation result;
            new SyncRequest(server_, unknown_method_.get(), &result);
            return result;
          });
    }
  }

  // Stops the poller threads, then shuts down the CQ they were polling.
  void Shutdown() override {
    ThreadManager::Shutdown();
    server_cq_->Shutdown();
  }

  // Waits for poller threads to exit, then drains and cleans up any
  // SyncRequests still queued on the CQ.
  void Wait() override {
    ThreadManager::Wait();
    // Drain any pending items from the queue
    void* tag;
    bool ok;
    while (server_cq_->Next(&tag, &ok)) {
      // This problem can arise if the server CQ gets a request queued to it
      // before it gets shutdown but then pulls it after shutdown.
      static_cast<SyncRequest*>(tag)->Cleanup();
    }
  }

  // Spins up the poller threads; a no-op if no sync method was registered.
  void Start() {
    if (has_sync_method_) {
      Initialize();  // ThreadManager's Initialize()
    }
  }

 private:
  Server* server_;
  grpc::CompletionQueue* server_cq_;
  int cq_timeout_msec_;
  bool has_sync_method_ = false;
  std::unique_ptr<grpc::internal::RpcServiceMethod> unknown_method_;
  std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};
855 
856 static grpc::internal::GrpcLibraryInitializer g_gli_initializer;
// Constructs the C++ server wrapper: sets up sync-thread managers (one per
// sync server CQ), lets acceptors and global callbacks adjust the channel
// args, scans the args for health-check and message-size settings, and
// finally creates the core grpc_server.
Server::Server(
    grpc::ChannelArguments* args,
    std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
        sync_server_cqs,
    int min_pollers, int max_pollers, int sync_cq_timeout_msec,
    std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
        acceptors,
    grpc_server_config_fetcher* server_config_fetcher,
    grpc_resource_quota* server_rq,
    std::vector<
        std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
        interceptor_creators)
    : acceptors_(std::move(acceptors)),
      interceptor_creators_(std::move(interceptor_creators)),
      // INT_MIN appears to act as an "unset" sentinel, overwritten below when
      // the channel arg is present — confirm against downstream consumers.
      max_receive_message_size_(INT_MIN),
      sync_server_cqs_(std::move(sync_server_cqs)),
      started_(false),
      shutdown_(false),
      shutdown_notified_(false),
      server_(nullptr),
      server_initializer_(new ServerInitializer(this)),
      health_check_service_disabled_(false) {
  g_gli_initializer.summon();
  gpr_once_init(&grpc::g_once_init_callbacks, grpc::InitGlobalCallbacks);
  global_callbacks_ = grpc::g_callbacks;
  global_callbacks_->UpdateArguments(args);

  if (sync_server_cqs_ != nullptr) {
    bool default_rq_created = false;
    if (server_rq == nullptr) {
      // No quota supplied: create a default one capped at the default sync
      // thread count; unref it below once the thread managers hold refs.
      server_rq = grpc_resource_quota_create("SyncServer-default-rq");
      grpc_resource_quota_set_max_threads(server_rq,
                                          DEFAULT_MAX_SYNC_SERVER_THREADS);
      default_rq_created = true;
    }

    // One thread manager per sync server completion queue.
    for (const auto& it : *sync_server_cqs_) {
      sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
          this, it.get(), global_callbacks_, server_rq, min_pollers,
          max_pollers, sync_cq_timeout_msec));
    }

    if (default_rq_created) {
      grpc_resource_quota_unref(server_rq);
    }
  }

  for (auto& acceptor : acceptors_) {
    acceptor->SetToChannelArgs(args);
  }

  grpc_channel_args channel_args;
  args->SetChannelArgs(&channel_args);

  // Extract the settings this wrapper cares about from the final arg list.
  for (size_t i = 0; i < channel_args.num_args; i++) {
    if (0 == strcmp(channel_args.args[i].key,
                    grpc::kHealthCheckServiceInterfaceArg)) {
      if (channel_args.args[i].value.pointer.p == nullptr) {
        // Explicit nullptr means the user disabled the health check service.
        health_check_service_disabled_ = true;
      } else {
        // Take ownership of the user-provided health check service.
        health_check_service_.reset(
            static_cast<grpc::HealthCheckServiceInterface*>(
                channel_args.args[i].value.pointer.p));
      }
    }
    if (0 ==
        strcmp(channel_args.args[i].key, GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)) {
      max_receive_message_size_ = channel_args.args[i].value.integer;
    }
  }
  server_ = grpc_server_create(&channel_args, nullptr);
  grpc_server_set_config_fetcher(server_, server_config_fetcher);
}
930 
// Destructor: if the server was started but never shut down, run the full
// Shutdown path (outside the lock); if it was never started, shut down the
// sync CQs and the callback CQ directly. Finally destroy the health check
// service before the core server, in that order, to avoid use-after-free.
Server::~Server() {
  {
    grpc::internal::ReleasableMutexLock lock(&mu_);
    if (started_ && !shutdown_) {
      // Shutdown() takes mu_ itself, so release before calling it.
      lock.Release();
      Shutdown();
    } else if (!started_) {
      // Shutdown the completion queues
      for (const auto& value : sync_req_mgrs_) {
        value->Shutdown();
      }
      CompletionQueue* callback_cq =
          callback_cq_.load(std::memory_order_relaxed);
      if (callback_cq != nullptr) {
        if (grpc_iomgr_run_in_background()) {
          // gRPC-core provides the backing needed for the preferred CQ type
          callback_cq->Shutdown();
        } else {
          CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
        }
        callback_cq_.store(nullptr, std::memory_order_release);
      }
    }
  }
  // Destroy health check service before we destroy the C server so that
  // it does not call grpc_server_request_registered_call() after the C
  // server has been destroyed.
  health_check_service_.reset();
  grpc_server_destroy(server_);
}
961 
// Installs the process-wide GlobalCallbacks. May be called at most once
// (asserts that none are already set) and takes ownership of `callbacks`.
void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
  GPR_ASSERT(!grpc::g_callbacks);
  GPR_ASSERT(callbacks);
  grpc::g_callbacks.reset(callbacks);
}
967 
c_server()968 grpc_server* Server::c_server() { return server_; }
969 
InProcessChannel(const grpc::ChannelArguments & args)970 std::shared_ptr<grpc::Channel> Server::InProcessChannel(
971     const grpc::ChannelArguments& args) {
972   grpc_channel_args channel_args = args.c_channel_args();
973   return grpc::CreateChannelInternal(
974       "inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr),
975       std::vector<std::unique_ptr<
976           grpc::experimental::ClientInterceptorFactoryInterface>>());
977 }
978 
// Same as InProcessChannel but attaches the supplied client interceptor
// factories to the new channel. Note: server_ here is the experimental_type's
// back-pointer to the Server, so server_->server_ is the core grpc_server.
std::shared_ptr<grpc::Channel>
Server::experimental_type::InProcessChannelWithInterceptors(
    const grpc::ChannelArguments& args,
    std::vector<
        std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
        interceptor_creators) {
  grpc_channel_args channel_args = args.c_channel_args();
  return grpc::CreateChannelInternal(
      "inproc",
      grpc_inproc_channel_create(server_->server_, &channel_args, nullptr),
      std::move(interceptor_creators));
}
991 
PayloadHandlingForMethod(grpc::internal::RpcServiceMethod * method)992 static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
993     grpc::internal::RpcServiceMethod* method) {
994   switch (method->method_type()) {
995     case grpc::internal::RpcMethod::NORMAL_RPC:
996     case grpc::internal::RpcMethod::SERVER_STREAMING:
997       return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
998     case grpc::internal::RpcMethod::CLIENT_STREAMING:
999     case grpc::internal::RpcMethod::BIDI_STREAMING:
1000       return GRPC_SRM_PAYLOAD_NONE;
1001   }
1002   GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
1003 }
1004 
// Registers every method of `service` with the core server (optionally bound
// to a specific host via `addr`), wiring each method to the sync thread
// managers, to async tags, or to a callback allocator depending on its API
// type. Returns false if any method name was already registered.
bool Server::RegisterService(const std::string* addr, grpc::Service* service) {
  bool has_async_methods = service->has_async_methods();
  if (has_async_methods) {
    GPR_ASSERT(service->server_ == nullptr &&
               "Can only register an asynchronous service against one server.");
    service->server_ = this;
  }

  const char* method_name = nullptr;

  for (const auto& method : service->methods_) {
    if (method == nullptr) {  // Handled by generic service if any.
      continue;
    }

    void* method_registration_tag = grpc_server_register_method(
        server_, method->name(), addr ? addr->c_str() : nullptr,
        PayloadHandlingForMethod(method.get()), 0);
    if (method_registration_tag == nullptr) {
      gpr_log(GPR_DEBUG, "Attempt to register %s multiple times",
              method->name());
      return false;
    }

    if (method->handler() == nullptr) {  // Async method without handler
      // Async API: stash the core tag; requests are driven by user CQ calls.
      method->set_server_tag(method_registration_tag);
    } else if (method->api_type() ==
               grpc::internal::RpcServiceMethod::ApiType::SYNC) {
      // Sync API: every sync thread manager gets an allocator for the method.
      for (const auto& value : sync_req_mgrs_) {
        value->AddSyncMethod(method.get(), method_registration_tag);
      }
    } else {
      // Callback API: allocate CallbackRequests on the callback CQ.
      has_callback_methods_ = true;
      grpc::internal::RpcServiceMethod* method_value = method.get();
      grpc::CompletionQueue* cq = CallbackCQ();
      server_->core_server->SetRegisteredMethodAllocator(
          cq->cq(), method_registration_tag, [this, cq, method_value] {
            grpc_core::Server::RegisteredCallAllocation result;
            new CallbackRequest<grpc::CallbackServerContext>(this, method_value,
                                                             cq, &result);
            return result;
          });
    }

    method_name = method->name();
  }

  // Parse service name.
  // Method names look like "/package.Service/Method": the first getline
  // consumes the empty token before the leading '/', the second yields the
  // service name.
  if (method_name != nullptr) {
    std::stringstream ss(method_name);
    std::string service_name;
    if (std::getline(ss, service_name, '/') &&
        std::getline(ss, service_name, '/')) {
      services_.push_back(service_name);
    }
  }
  return true;
}
1063 
// Attaches an async generic service; a generic service may belong to only
// one server, and its presence suppresses the "unknown RPC" fallback (see
// Start()).
void Server::RegisterAsyncGenericService(grpc::AsyncGenericService* service) {
  GPR_ASSERT(service->server_ == nullptr &&
             "Can only register an async generic service against one server.");
  service->server_ = this;
  has_async_generic_service_ = true;
}
1070 
// Attaches a callback generic service: takes ownership of its handler and
// registers a batch-call allocator on the callback CQ so every unmatched
// incoming call is materialized as a generic CallbackRequest.
void Server::RegisterCallbackGenericService(
    grpc::CallbackGenericService* service) {
  GPR_ASSERT(
      service->server_ == nullptr &&
      "Can only register a callback generic service against one server.");
  service->server_ = this;
  has_callback_generic_service_ = true;
  generic_handler_.reset(service->Handler());

  grpc::CompletionQueue* cq = CallbackCQ();
  server_->core_server->SetBatchMethodAllocator(cq->cq(), [this, cq] {
    grpc_core::Server::BatchCallAllocation result;
    new CallbackRequest<grpc::GenericCallbackServerContext>(this, cq, &result);
    return result;
  });
}
1087 
AddListeningPort(const std::string & addr,grpc::ServerCredentials * creds)1088 int Server::AddListeningPort(const std::string& addr,
1089                              grpc::ServerCredentials* creds) {
1090   GPR_ASSERT(!started_);
1091   int port = creds->AddPortToServer(addr, server_);
1092   global_callbacks_->AddPort(this, addr, creds, port);
1093   return port;
1094 }
1095 
// Takes a shutdown ref: relaxed ordering suffices for a pure increment, as
// the matching release/acquire is done on the decrement side.
void Server::Ref() {
  shutdown_refs_outstanding_.fetch_add(1, std::memory_order_relaxed);
}
1099 
// Drops a shutdown ref; the thread that drops the last one signals the
// shutdown-done condition so ShutdownInternal can finish waiting.
void Server::UnrefWithPossibleNotify() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    // No refs outstanding means that shutdown has been initiated and no more
    // callback requests are outstanding.
    grpc::internal::MutexLock lock(&mu_);
    GPR_ASSERT(shutdown_);
    shutdown_done_ = true;
    shutdown_done_cv_.Signal();
  }
}
1111 
// Drops this thread's shutdown ref and, if other refs remain, blocks (with
// mu_ held by the caller) until the last ref-holder sets shutdown_done_.
void Server::UnrefAndWaitLocked() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    shutdown_done_ = true;
    return;  // no need to wait on CV since done condition already set
  }
  grpc::internal::WaitUntil(
      &shutdown_done_cv_, &mu_,
      [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return shutdown_done_; });
}
1122 
// Starts the server: installs the default health check service (unless the
// user supplied or disabled one), registers unimplemented-method fallbacks
// appropriate to the API mix in use, starts the core server, and finally
// spins up sync thread managers, the health service thread, and acceptors.
// `cqs` are the user's async CQs (used for unimplemented-RPC requests and,
// in debug builds, shutdown-order checking).
void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
  GPR_ASSERT(!started_);
  global_callbacks_->PreServerStart(this);
  started_ = true;

  // Only create default health check service when user did not provide an
  // explicit one.
  grpc::ServerCompletionQueue* health_check_cq = nullptr;
  grpc::DefaultHealthCheckService::HealthCheckServiceImpl*
      default_health_check_service_impl = nullptr;
  if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
      grpc::DefaultHealthCheckServiceEnabled()) {
    auto* default_hc_service = new grpc::DefaultHealthCheckService;
    health_check_service_.reset(default_hc_service);
    // We create a non-polling CQ to avoid impacting application
    // performance.  This ensures that we don't introduce thread hops
    // for application requests that wind up on this CQ, which is polled
    // in its own thread.
    health_check_cq = new grpc::ServerCompletionQueue(
        GRPC_CQ_NEXT, GRPC_CQ_NON_POLLING, nullptr);
    grpc_server_register_completion_queue(server_, health_check_cq->cq(),
                                          nullptr);
    default_health_check_service_impl =
        default_hc_service->GetHealthCheckService(
            std::unique_ptr<grpc::ServerCompletionQueue>(health_check_cq));
    RegisterService(nullptr, default_health_check_service_impl);
  }

  for (auto& acceptor : acceptors_) {
    acceptor->GetCredentials()->AddPortToServer(acceptor->name(), server_);
  }

  // If this server uses callback methods, then create a callback generic
  // service to handle any unimplemented methods using the default reactor
  // creator
  if (has_callback_methods_ && !has_callback_generic_service_) {
    unimplemented_service_ = absl::make_unique<grpc::CallbackGenericService>();
    RegisterCallbackGenericService(unimplemented_service_.get());
  }

#ifndef NDEBUG
  for (size_t i = 0; i < num_cqs; i++) {
    cq_list_.push_back(cqs[i]);
  }
#endif

  // If we have a generic service, all unmatched method names go there.
  // Otherwise, we must provide at least one RPC request for an "unimplemented"
  // RPC, which covers any RPC for a method name that isn't matched. If we
  // have a sync service, let it be a sync unimplemented RPC, which must be
  // registered before server start (to initialize an AllocatingRequestMatcher).
  // If we have an AllocatingRequestMatcher, we can't also specify other
  // unimplemented RPCs via explicit async requests, so we won't do so. If we
  // only have async services, we can specify unimplemented RPCs on each async
  // CQ so that some user polling thread will move them along as long as some
  // progress is being made on any RPCs in the system.
  bool unknown_rpc_needed =
      !has_async_generic_service_ && !has_callback_generic_service_;

  if (unknown_rpc_needed && !sync_req_mgrs_.empty()) {
    sync_req_mgrs_[0]->AddUnknownSyncMethod();
    unknown_rpc_needed = false;
  }

  grpc_server_start(server_);

  if (unknown_rpc_needed) {
    // Async-only fallback: one unimplemented request per frequently-polled CQ.
    for (size_t i = 0; i < num_cqs; i++) {
      if (cqs[i]->IsFrequentlyPolled()) {
        new UnimplementedAsyncRequest(this, cqs[i]);
      }
    }
    if (health_check_cq != nullptr) {
      new UnimplementedAsyncRequest(this, health_check_cq);
    }
    unknown_rpc_needed = false;
  }

  // If this server has any support for synchronous methods (has any sync
  // server CQs), make sure that we have a ResourceExhausted handler
  // to deal with the case of thread exhaustion
  if (sync_server_cqs_ != nullptr && !sync_server_cqs_->empty()) {
    resource_exhausted_handler_ =
        absl::make_unique<grpc::internal::ResourceExhaustedHandler>(
            kServerThreadpoolExhausted);
  }

  for (const auto& value : sync_req_mgrs_) {
    value->Start();
  }

  if (default_health_check_service_impl != nullptr) {
    default_health_check_service_impl->StartServingThread();
  }

  for (auto& acceptor : acceptors_) {
    acceptor->Start();
  }
}
1222 
// Shuts the server down with a grace period ending at `deadline`: asks the
// core to shut down, force-cancels in-flight calls if the deadline passes,
// waits for all outstanding shutdown refs, stops the sync thread managers,
// tears down the callback CQ, and finally notifies Wait()ers. Idempotent:
// returns immediately if shutdown already ran.
void Server::ShutdownInternal(gpr_timespec deadline) {
  grpc::internal::MutexLock lock(&mu_);
  if (shutdown_) {
    return;
  }

  shutdown_ = true;

  for (auto& acceptor : acceptors_) {
    acceptor->Shutdown();
  }

  // The completion queue to use for server shutdown completion notification
  grpc::CompletionQueue shutdown_cq;
  grpc::ShutdownTag shutdown_tag;  // Phony shutdown tag
  grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);

  shutdown_cq.Shutdown();

  void* tag;
  bool ok;
  grpc::CompletionQueue::NextStatus status =
      shutdown_cq.AsyncNext(&tag, &ok, deadline);

  // If this timed out, it means we are done with the grace period for a clean
  // shutdown. We should force a shutdown now by cancelling all inflight calls
  if (status == grpc::CompletionQueue::NextStatus::TIMEOUT) {
    grpc_server_cancel_all_calls(server_);
  }
  // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
  // successfully shutdown

  // Drop the shutdown ref and wait for all other refs to drop as well.
  UnrefAndWaitLocked();

  // Shutdown all ThreadManagers. This will try to gracefully stop all the
  // threads in the ThreadManagers (once they process any inflight requests)
  for (const auto& value : sync_req_mgrs_) {
    value->Shutdown();  // ThreadManager's Shutdown()
  }

  // Wait for threads in all ThreadManagers to terminate
  for (const auto& value : sync_req_mgrs_) {
    value->Wait();
  }

  // Shutdown the callback CQ. The CQ is owned by its own shutdown tag, so it
  // will delete itself at true shutdown.
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    if (grpc_iomgr_run_in_background()) {
      // gRPC-core provides the backing needed for the preferred CQ type
      callback_cq->Shutdown();
    } else {
      CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
    }
    callback_cq_.store(nullptr, std::memory_order_release);
  }

  // Drain the shutdown queue (if the previous call to AsyncNext() timed out
  // and we didn't remove the tag from the queue yet)
  while (shutdown_cq.Next(&tag, &ok)) {
    // Nothing to be done here. Just ignore ok and tag values
  }

  shutdown_notified_ = true;
  shutdown_cv_.SignalAll();

#ifndef NDEBUG
  // Unregister this server with the CQs passed into it by the user so that
  // those can be checked for properly-ordered shutdown.
  for (auto* cq : cq_list_) {
    cq->UnregisterServer(this);
  }
  cq_list_.clear();
#endif
}
1300 
// Blocks the caller until shutdown completes (or returns immediately if the
// server was never started).
void Server::Wait() {
  grpc::internal::MutexLock lock(&mu_);
  while (started_ && !shutdown_notified_) {
    shutdown_cv_.Wait(&mu_);
  }
}
1307 
// CallHook implementation: hands the op set to the call for execution.
void Server::PerformOpsOnCall(grpc::internal::CallOpSetInterface* ops,
                              grpc::internal::Call* call) {
  ops->FillOps(call);
}
1312 
// Completion of the standing "catch unmatched RPC" request. On success,
// re-arm by posting a fresh request and respond UNIMPLEMENTED to the call we
// caught; on failure (e.g. server shutdown), self-destruct. Always returns
// false so the tag is never surfaced to the user's CQ.
bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (GenericAsyncRequest::FinalizeResult(tag, status)) {
    // We either had no interceptors run or we are done intercepting
    if (*status) {
      // Create a new request/response pair using the server and CQ values
      // stored in this object's base class.
      new UnimplementedAsyncRequest(server_, notification_cq_);
      // The response object manages its own lifetime via its op-set tag.
      new UnimplementedAsyncResponse(this);
    } else {
      delete this;
    }
  } else {
    // The tag was swallowed due to interception. We will see it again.
  }
  return false;
}
1330 
UnimplementedAsyncResponse(UnimplementedAsyncRequest * request)1331 Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
1332     UnimplementedAsyncRequest* request)
1333     : request_(request) {
1334   grpc::Status status(grpc::StatusCode::UNIMPLEMENTED, kUnknownRpcMethod);
1335   grpc::internal::UnknownMethodHandler::FillOps(request_->context(),
1336                                                 kUnknownRpcMethod, this);
1337   request_->stream()->call_.PerformOps(this);
1338 }
1339 
// Accessor for the per-server initializer; ownership stays with the server.
grpc::ServerInitializer* Server::initializer() {
  return server_initializer_.get();
}
1343 
// Lazily creates (exactly once, via double-checked locking on callback_cq_)
// the completion queue used for callback-API methods: a core-backed
// callback CQ when the iomgr can run it in the background, otherwise the
// polling-thread alternative CQ.
grpc::CompletionQueue* Server::CallbackCQ() {
  // TODO(vjpai): Consider using a single global CQ for the default CQ
  // if there is no explicit per-server CQ registered
  // Fast path: acquire-load pairs with the release-store below.
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  // The callback_cq_ wasn't already set, so grab a lock and set it up exactly
  // once for this server.
  grpc::internal::MutexLock l(&mu_);
  // Relaxed reload is safe: the mutex orders this against other creators.
  callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  if (grpc_iomgr_run_in_background()) {
    // gRPC-core provides the backing needed for the preferred CQ type
    auto* shutdown_callback = new grpc::ShutdownCallback;
    callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
        GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
        shutdown_callback});

    // Transfer ownership of the new cq to its own shutdown callback
    shutdown_callback->TakeCQ(callback_cq);
  } else {
    // Otherwise we need to use the alternative CQ variant
    callback_cq = CompletionQueue::CallbackAlternativeCQ();
  }

  callback_cq_.store(callback_cq, std::memory_order_release);
  return callback_cq;
}
1375 
1376 }  // namespace grpc
1377