//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/impl/channel_arg_names.h>
#include <grpc/slice.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/completion_queue.h>
#include <grpcpp/generic/async_generic_service.h>
#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/call_op_set.h>
#include <grpcpp/impl/call_op_set_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/impl/interceptor_common.h>
#include <grpcpp/impl/metadata_map.h>
#include <grpcpp/impl/rpc_method.h>
#include <grpcpp/impl/rpc_service_method.h>
#include <grpcpp/impl/server_callback_handlers.h>
#include <grpcpp/impl/server_initializer.h>
#include <grpcpp/impl/service_type.h>
#include <grpcpp/impl/sync.h>
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server.h>
#include <grpcpp/server_context.h>
#include <grpcpp/server_interface.h>
#include <grpcpp/support/byte_buffer.h>
#include <grpcpp/support/channel_arguments.h>
#include <grpcpp/support/client_interceptor.h>
#include <grpcpp/support/interceptor.h>
#include <grpcpp/support/method_handler.h>
#include <grpcpp/support/server_interceptor.h>
#include <grpcpp/support/slice.h>
#include <grpcpp/support/status.h>
#include <limits.h>
#include <string.h>

#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <memory>
#include <new>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/server/server.h"
#include "src/core/util/manual_constructor.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/cpp/server/external_connection_acceptor_impl.h"
#include "src/cpp/server/health/default_health_check_service.h"
#include "src/cpp/thread_manager/thread_manager.h"

namespace grpc {
namespace {

// The default value for the maximum number of threads that can be created in
// the sync server. This value of INT_MAX is chosen to match the default
// behavior if no ResourceQuota is set. To modify the max number of threads in
// a sync server, pass a custom ResourceQuota object (with the desired number
// of max-threads set) to the server builder.
#define DEFAULT_MAX_SYNC_SERVER_THREADS INT_MAX
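
// A sketch of how such a cap would be applied in application code (quota and
// builder names are illustrative, not part of this file):
//
//   grpc::ResourceQuota quota("sync-server-quota");
//   quota.SetMaxThreads(64);
//   grpc::ServerBuilder builder;
//   builder.SetResourceQuota(quota);
//   std::unique_ptr<grpc::Server> server = builder.BuildAndStart();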

// Give a useful status error message if the resource is exhausted specifically
// because the server threadpool is full.
const char* kServerThreadpoolExhausted = "Server Threadpool Exhausted";

// Although we might like to give a useful status error message on unimplemented
// RPCs, it's not always possible since that also would need to be added across
// languages and isn't actually required by the spec.
const char* kUnknownRpcMethod = "";

class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
 public:
  ~DefaultGlobalCallbacks() override {}
  void PreSynchronousRequest(ServerContext* /*context*/) override {}
  void PostSynchronousRequest(ServerContext* /*context*/) override {}
};

std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
gpr_once g_once_init_callbacks = GPR_ONCE_INIT;

void InitGlobalCallbacks() {
  if (!g_callbacks) {
    g_callbacks = std::make_shared<DefaultGlobalCallbacks>();
  }
}

class ShutdownTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return false;
  }
};

class PhonyTag : public internal::CompletionQueueTag {
 public:
  bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
    return true;
  }
};

class UnimplementedAsyncRequestContext {
 protected:
  UnimplementedAsyncRequestContext() : generic_stream_(&server_context_) {}

  GenericServerContext server_context_;
  GenericServerAsyncReaderWriter generic_stream_;
};

}  // namespace

ServerInterface::BaseAsyncRequest::BaseAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize)
    : server_(server),
      context_(context),
      stream_(stream),
      call_cq_(call_cq),
      notification_cq_(notification_cq),
      tag_(tag),
      delete_on_finalize_(delete_on_finalize),
      call_(nullptr),
      done_intercepting_(false) {
  // Set up interception state partially for the receive ops. call_wrapper_ is
  // not filled at this point, but it will be filled before the interceptors are
  // run.
  interceptor_methods_.SetCall(&call_wrapper_);
  interceptor_methods_.SetReverse();
  call_cq_->RegisterAvalanching();  // This op will trigger more ops
  call_metric_recording_enabled_ = server_->call_metric_recording_enabled();
  server_metric_recorder_ = server_->server_metric_recorder();
}

ServerInterface::BaseAsyncRequest::~BaseAsyncRequest() {
  call_cq_->CompleteAvalanching();
}

bool ServerInterface::BaseAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (done_intercepting_) {
    *tag = tag_;
    if (delete_on_finalize_) {
      delete this;
    }
    return true;
  }
  context_->set_call(call_, call_metric_recording_enabled_,
                     server_metric_recorder_);
  context_->cq_ = call_cq_;
  if (call_wrapper_.call() == nullptr) {
    // Fill it since it is empty.
    call_wrapper_ = internal::Call(
        call_, server_, call_cq_, server_->max_receive_message_size(), nullptr);
  }

  // just the pointers inside call are copied here
  stream_->BindCall(&call_wrapper_);

  if (*status && call_ && call_wrapper_.server_rpc_info()) {
    done_intercepting_ = true;
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&context_->client_metadata_);
    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueFinalizeResultAfterInterception(); })) {
      // There are no interceptors to run. Continue
    } else {
      // There were interceptors to be run, so
      // ContinueFinalizeResultAfterInterception will be run when interceptors
      // are done.
      return false;
    }
  }
  if (*status && call_) {
    context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  }
  *tag = tag_;
  if (delete_on_finalize_) {
    delete this;
  }
  return true;
}

void ServerInterface::BaseAsyncRequest::
    ContinueFinalizeResultAfterInterception() {
  context_->BeginCompletionOp(&call_wrapper_, nullptr, nullptr);
  // Queue a tag which will be returned immediately
  grpc_core::ExecCtx exec_ctx;
  grpc_cq_begin_op(notification_cq_->cq(), this);
  grpc_cq_end_op(
      notification_cq_->cq(), this, absl::OkStatus(),
      [](void* /*arg*/, grpc_cq_completion* completion) { delete completion; },
      nullptr, new grpc_cq_completion());
}

ServerInterface::RegisteredAsyncRequest::RegisteredAsyncRequest(
    ServerInterface* server, ServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, const char* name,
    internal::RpcMethod::RpcType type)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       true),
      name_(name),
      type_(type) {}

void ServerInterface::RegisteredAsyncRequest::IssueRequest(
    void* registered_method, grpc_byte_buffer** payload,
    ServerCompletionQueue* notification_cq) {
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  CHECK(grpc_server_request_registered_call(
            server_->server(), registered_method, &call_, &context_->deadline_,
            context_->client_metadata_.arr(), payload, call_cq_->cq(),
            notification_cq->cq(), this) == GRPC_CALL_OK);
}

ServerInterface::GenericAsyncRequest::GenericAsyncRequest(
    ServerInterface* server, GenericServerContext* context,
    internal::ServerAsyncStreamingInterface* stream, CompletionQueue* call_cq,
    ServerCompletionQueue* notification_cq, void* tag, bool delete_on_finalize,
    bool issue_request)
    : BaseAsyncRequest(server, context, stream, call_cq, notification_cq, tag,
                       delete_on_finalize) {
  grpc_call_details_init(&call_details_);
  CHECK(notification_cq);
  CHECK(call_cq);
  if (issue_request) {
    IssueRequest();
  }
}

bool ServerInterface::GenericAsyncRequest::FinalizeResult(void** tag,
                                                          bool* status) {
  // If we are done intercepting, there is nothing more for us to do
  if (done_intercepting_) {
    return BaseAsyncRequest::FinalizeResult(tag, status);
  }
  // TODO(yangg) remove the copy here.
  if (*status) {
    static_cast<GenericServerContext*>(context_)->method_ =
        StringFromCopiedSlice(call_details_.method);
    static_cast<GenericServerContext*>(context_)->host_ =
        StringFromCopiedSlice(call_details_.host);
    context_->deadline_ = call_details_.deadline;
  }
  grpc_slice_unref(call_details_.method);
  grpc_slice_unref(call_details_.host);
  call_wrapper_ = internal::Call(
      call_, server_, call_cq_, server_->max_receive_message_size(),
      context_->set_server_rpc_info(
          static_cast<GenericServerContext*>(context_)->method_.c_str(),
          internal::RpcMethod::BIDI_STREAMING,
          *server_->interceptor_creators()));
  return BaseAsyncRequest::FinalizeResult(tag, status);
}

void ServerInterface::GenericAsyncRequest::IssueRequest() {
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  CHECK(grpc_server_request_call(server_->server(), &call_, &call_details_,
                                 context_->client_metadata_.arr(),
                                 call_cq_->cq(), notification_cq_->cq(),
                                 this) == GRPC_CALL_OK);
}

namespace {
class ShutdownCallback : public grpc_completion_queue_functor {
 public:
  ShutdownCallback() {
    functor_run = &ShutdownCallback::Run;
    // Set inlineable to true since this callback is trivial and thus does not
    // need to be run from the executor (triggering a thread hop). This should
    // only be used by internal callbacks like this and not by user application
    // code.
    inlineable = true;
  }
  // TakeCQ takes ownership of the cq into the shutdown callback
  // so that the shutdown callback will be responsible for destroying it
  void TakeCQ(CompletionQueue* cq) { cq_ = cq; }

  // The Run function will get invoked by the completion queue library
  // when the shutdown is actually complete
  static void Run(grpc_completion_queue_functor* cb, int) {
    auto* callback = static_cast<ShutdownCallback*>(cb);
    delete callback->cq_;
    delete callback;
  }

 private:
  CompletionQueue* cq_ = nullptr;
};
}  // namespace

/// Use private inheritance rather than composition only to establish order
/// of construction, since the public base class should be constructed after the
/// elements belonging to the private base class are constructed. This is not
/// possible using true composition.
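///
/// A minimal sketch of the idiom with illustrative types (not from this
/// file): the private base is constructed first, so the public base may
/// safely receive pointers to the private base's members:
///
///   struct Holder { Context ctx; Stream stream{&ctx}; };
///   class Request : private Holder, public Base {
///    public:
///     Request() : Base(&ctx, &stream) {}
///   };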
class Server::UnimplementedAsyncRequest final
    : private grpc::UnimplementedAsyncRequestContext,
      public GenericAsyncRequest {
 public:
  UnimplementedAsyncRequest(ServerInterface* server,
                            grpc::ServerCompletionQueue* cq)
      : GenericAsyncRequest(server, &server_context_, &generic_stream_, cq, cq,
                            /*tag=*/nullptr, /*delete_on_finalize=*/false,
                            /*issue_request=*/false) {
    // Issue request here instead of the base class to prevent race on vptr.
    IssueRequest();
  }

  bool FinalizeResult(void** tag, bool* status) override;

  grpc::ServerContext* context() { return &server_context_; }
  grpc::GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
};

/// UnimplementedAsyncResponse should not post user-visible completions to the
/// C++ completion queue, but is generated as a CQ event by the core
class Server::UnimplementedAsyncResponse final
    : public grpc::internal::CallOpSet<
          grpc::internal::CallOpSendInitialMetadata,
          grpc::internal::CallOpServerSendStatus> {
 public:
  explicit UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
  ~UnimplementedAsyncResponse() override { delete request_; }

  bool FinalizeResult(void** tag, bool* status) override {
    if (grpc::internal::CallOpSet<
            grpc::internal::CallOpSendInitialMetadata,
            grpc::internal::CallOpServerSendStatus>::FinalizeResult(tag,
                                                                    status)) {
      delete this;
    } else {
      // The tag was swallowed due to interception. We will see it again.
    }
    return false;
  }

 private:
  UnimplementedAsyncRequest* const request_;
};

class Server::SyncRequest final : public grpc::internal::CompletionQueueTag {
 public:
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::RegisteredCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    data->deadline = &deadline_;
    data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
  }

  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method,
              grpc_core::Server::BatchCallAllocation* data)
      : SyncRequest(server, method) {
    CommonSetup(data);
    call_details_ = new grpc_call_details;
    grpc_call_details_init(call_details_);
    data->details = call_details_;
  }

  ~SyncRequest() override {
    // The destructor should only cleanup those objects created in the
    // constructor, since some paths may or may not actually go through the
    // Run stage where other objects are allocated.
    if (has_request_payload_ && request_payload_) {
      grpc_byte_buffer_destroy(request_payload_);
    }
    if (call_details_ != nullptr) {
      grpc_call_details_destroy(call_details_);
      delete call_details_;
    }
    grpc_metadata_array_destroy(&request_metadata_);
    server_->UnrefWithPossibleNotify();
  }

  bool FinalizeResult(void** /*tag*/, bool* status) override {
    if (!*status) {
      delete this;
      return false;
    }
    if (call_details_) {
      deadline_ = call_details_->deadline;
    }
    return true;
  }

  void Run(const std::shared_ptr<GlobalCallbacks>& global_callbacks,
           bool resources) {
    ctx_.Init(deadline_, &request_metadata_);
    wrapped_call_.Init(
        call_, server_, &cq_, server_->max_receive_message_size(),
        ctx_->ctx.set_server_rpc_info(method_->name(), method_->method_type(),
                                      server_->interceptor_creators_));
    ctx_->ctx.set_call(call_, server_->call_metric_recording_enabled(),
                       server_->server_metric_recorder());
    ctx_->ctx.cq_ = &cq_;
    request_metadata_.count = 0;

    global_callbacks_ = global_callbacks;
    resources_ = resources;

    interceptor_methods_.SetCall(&*wrapped_call_);
    interceptor_methods_.SetReverse();
    // Set interception point for RECV INITIAL METADATA
    interceptor_methods_.AddInterceptionHookPoint(
        grpc::experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    interceptor_methods_.SetRecvInitialMetadata(&ctx_->ctx.client_metadata_);

    if (has_request_payload_) {
      // Set interception point for RECV MESSAGE
      auto* handler = resources_ ? method_->handler()
                                 : server_->resource_exhausted_handler_.get();
      deserialized_request_ = handler->Deserialize(call_, request_payload_,
                                                   &request_status_, nullptr);
      if (!request_status_.ok()) {
        VLOG(2) << "Failed to deserialize message.";
      }
      request_payload_ = nullptr;
      interceptor_methods_.AddInterceptionHookPoint(
          grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
      interceptor_methods_.SetRecvMessage(deserialized_request_, nullptr);
    }

    if (interceptor_methods_.RunInterceptors(
            [this]() { ContinueRunAfterInterception(); })) {
      ContinueRunAfterInterception();
    } else {
      // There were interceptors to be run, so ContinueRunAfterInterception
      // will be run when interceptors are done.
    }
  }

  void ContinueRunAfterInterception() {
    ctx_->ctx.BeginCompletionOp(&*wrapped_call_, nullptr, nullptr);
    global_callbacks_->PreSynchronousRequest(&ctx_->ctx);
    auto* handler = resources_ ? method_->handler()
                               : server_->resource_exhausted_handler_.get();
    handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
        &*wrapped_call_, &ctx_->ctx, deserialized_request_, request_status_,
        nullptr, nullptr));
    global_callbacks_->PostSynchronousRequest(&ctx_->ctx);

    cq_.Shutdown();

    grpc::internal::CompletionQueueTag* op_tag = ctx_->ctx.GetCompletionOpTag();
    cq_.TryPluck(op_tag, gpr_inf_future(GPR_CLOCK_REALTIME));

    // Ensure the cq_ is shutdown
    grpc::PhonyTag ignored_tag;
    CHECK(cq_.Pluck(&ignored_tag) == false);

    // Cleanup structures allocated during Run/ContinueRunAfterInterception
    wrapped_call_.Destroy();
    ctx_.Destroy();

    delete this;
  }

  // For requests that must only be cleaned up but not actually Run
  void Cleanup() {
    cq_.Shutdown();
    grpc_call_unref(call_);
    delete this;
  }

 private:
  SyncRequest(Server* server, grpc::internal::RpcServiceMethod* method)
      : server_(server),
        method_(method),
        has_request_payload_(method->method_type() ==
                                 grpc::internal::RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 grpc::internal::RpcMethod::SERVER_STREAMING),
        cq_(grpc_completion_queue_create_for_pluck(nullptr)) {}

  template <class CallAllocation>
  void CommonSetup(CallAllocation* data) {
    server_->Ref();
    grpc_metadata_array_init(&request_metadata_);
    data->tag = static_cast<void*>(this);
    data->call = &call_;
    data->initial_metadata = &request_metadata_;
    data->cq = cq_.cq();
  }

  Server* const server_;
  grpc::internal::RpcServiceMethod* const method_;
  const bool has_request_payload_;
  grpc_call* call_;
  grpc_call_details* call_details_ = nullptr;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc_byte_buffer* request_payload_ = nullptr;
  grpc::CompletionQueue cq_;
  grpc::Status request_status_;
  std::shared_ptr<GlobalCallbacks> global_callbacks_;
  bool resources_;
  void* deserialized_request_ = nullptr;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;

  // ServerContextWrapper allows ManualConstructor while using a private
  // constructor of ServerContext via this friend class.
  struct ServerContextWrapper {
    ServerContext ctx;

    ServerContextWrapper(gpr_timespec deadline, grpc_metadata_array* arr)
        : ctx(deadline, arr) {}
  };

  grpc_core::ManualConstructor<ServerContextWrapper> ctx_;
  grpc_core::ManualConstructor<internal::Call> wrapped_call_;
};

template <class ServerContextType>
class Server::CallbackRequest final
    : public grpc::internal::CompletionQueueTag {
 public:
  static_assert(
      std::is_base_of<grpc::CallbackServerContext, ServerContextType>::value,
      "ServerContextType must be derived from CallbackServerContext");

  // For codegen services, the value of method represents the defined
  // characteristics of the method being requested. For generic services, method
  // is nullptr since these services don't have pre-defined methods.
  CallbackRequest(Server* server, grpc::internal::RpcServiceMethod* method,
                  grpc::CompletionQueue* cq,
                  grpc_core::Server::RegisteredCallAllocation* data)
      : server_(server),
        method_(method),
        has_request_payload_(method->method_type() ==
                                 grpc::internal::RpcMethod::NORMAL_RPC ||
                             method->method_type() ==
                                 grpc::internal::RpcMethod::SERVER_STREAMING),
        cq_(cq),
        tag_(this),
        ctx_(server_->context_allocator() != nullptr
                 ? server_->context_allocator()->NewCallbackServerContext()
                 : nullptr) {
    CommonSetup(server, data);
    data->deadline = &deadline_;
    data->optional_payload = has_request_payload_ ? &request_payload_ : nullptr;
  }

  // For generic services, method is nullptr since these services don't have
  // pre-defined methods.
  CallbackRequest(Server* server, grpc::CompletionQueue* cq,
                  grpc_core::Server::BatchCallAllocation* data)
      : server_(server),
        method_(nullptr),
        has_request_payload_(false),
        call_details_(new grpc_call_details),
        cq_(cq),
        tag_(this),
        ctx_(server_->context_allocator() != nullptr
                 ? server_->context_allocator()
                       ->NewGenericCallbackServerContext()
                 : nullptr) {
    CommonSetup(server, data);
    grpc_call_details_init(call_details_);
    data->details = call_details_;
  }

  ~CallbackRequest() override {
    delete call_details_;
    grpc_metadata_array_destroy(&request_metadata_);
    if (has_request_payload_ && request_payload_) {
      grpc_byte_buffer_destroy(request_payload_);
    }
    if (ctx_alloc_by_default_ || server_->context_allocator() == nullptr) {
      default_ctx_.Destroy();
    }
    server_->UnrefWithPossibleNotify();
  }

  // Needs specialization to account for different processing of metadata
  // in generic API
  bool FinalizeResult(void** tag, bool* status) override;

 private:
  // method_name needs to be specialized between named method and generic
  const char* method_name() const;

  class CallbackCallTag : public grpc_completion_queue_functor {
   public:
    explicit CallbackCallTag(Server::CallbackRequest<ServerContextType>* req)
        : req_(req) {
      functor_run = &CallbackCallTag::StaticRun;
      // Set inlineable to true since this callback is internally-controlled
      // without taking any locks, and thus does not need to be run from the
      // executor (which triggers a thread hop). This should only be used by
      // internal callbacks like this and not by user application code. The work
      // here is actually non-trivial, but there is no chance of having user
      // locks conflict with each other so it's ok to run inlined.
      inlineable = true;
    }

    // force_run can not be performed on a tag if operations using this tag
    // have been sent to PerformOpsOnCall. It is intended for error conditions
    // that are detected before the operations are internally processed.
    void force_run(bool ok) { Run(ok); }

   private:
    Server::CallbackRequest<ServerContextType>* req_;
    grpc::internal::Call* call_;

    static void StaticRun(grpc_completion_queue_functor* cb, int ok) {
      static_cast<CallbackCallTag*>(cb)->Run(static_cast<bool>(ok));
    }
    void Run(bool ok) {
      void* ignored = req_;
      bool new_ok = ok;
      CHECK(!req_->FinalizeResult(&ignored, &new_ok));
      CHECK(ignored == req_);

      if (!ok) {
        // The call has been shutdown.
        // Delete its contents to free up the request.
        delete req_;
        return;
      }

      // Bind the call, deadline, and metadata from what we got
      req_->ctx_->set_call(req_->call_,
                           req_->server_->call_metric_recording_enabled(),
                           req_->server_->server_metric_recorder());
      req_->ctx_->cq_ = req_->cq_;
      req_->ctx_->BindDeadlineAndMetadata(req_->deadline_,
                                          &req_->request_metadata_);
      req_->request_metadata_.count = 0;

      // Create a C++ Call to control the underlying core call
      call_ =
          new (grpc_call_arena_alloc(req_->call_, sizeof(grpc::internal::Call)))
              grpc::internal::Call(
                  req_->call_, req_->server_, req_->cq_,
                  req_->server_->max_receive_message_size(),
                  req_->ctx_->set_server_rpc_info(
                      req_->method_name(),
                      (req_->method_ != nullptr)
                          ? req_->method_->method_type()
                          : grpc::internal::RpcMethod::BIDI_STREAMING,
                      req_->server_->interceptor_creators_));

      req_->interceptor_methods_.SetCall(call_);
      req_->interceptor_methods_.SetReverse();
      // Set interception point for RECV INITIAL METADATA
      req_->interceptor_methods_.AddInterceptionHookPoint(
          grpc::experimental::InterceptionHookPoints::
              POST_RECV_INITIAL_METADATA);
      req_->interceptor_methods_.SetRecvInitialMetadata(
          &req_->ctx_->client_metadata_);

      if (req_->has_request_payload_) {
        // Set interception point for RECV MESSAGE
        req_->request_ = req_->method_->handler()->Deserialize(
            req_->call_, req_->request_payload_, &req_->request_status_,
            &req_->handler_data_);
        if (!(req_->request_status_.ok())) {
          VLOG(2) << "Failed to deserialize message.";
        }
        req_->request_payload_ = nullptr;
        req_->interceptor_methods_.AddInterceptionHookPoint(
            grpc::experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
        req_->interceptor_methods_.SetRecvMessage(req_->request_, nullptr);
      }

      if (req_->interceptor_methods_.RunInterceptors(
              [this] { ContinueRunAfterInterception(); })) {
        ContinueRunAfterInterception();
      } else {
        // There were interceptors to be run, so ContinueRunAfterInterception
        // will be run when interceptors are done.
      }
    }
    void ContinueRunAfterInterception() {
      auto* handler = (req_->method_ != nullptr)
                          ? req_->method_->handler()
                          : req_->server_->generic_handler_.get();
      handler->RunHandler(grpc::internal::MethodHandler::HandlerParameter(
          call_, req_->ctx_, req_->request_, req_->request_status_,
          req_->handler_data_, [this] { delete req_; }));
    }
  };

  template <class CallAllocation>
  void CommonSetup(Server* server, CallAllocation* data) {
    server->Ref();
    grpc_metadata_array_init(&request_metadata_);
    data->tag = static_cast<void*>(&tag_);
    data->call = &call_;
    data->initial_metadata = &request_metadata_;
    if (ctx_ == nullptr) {
      default_ctx_.Init();
      ctx_ = &*default_ctx_;
      ctx_alloc_by_default_ = true;
    }
    ctx_->set_context_allocator(server->context_allocator());
    data->cq = cq_->cq();
  }

  Server* const server_;
  grpc::internal::RpcServiceMethod* const method_;
  const bool has_request_payload_;
  grpc_byte_buffer* request_payload_ = nullptr;
  void* request_ = nullptr;
  void* handler_data_ = nullptr;
  grpc::Status request_status_;
  grpc_call_details* const call_details_ = nullptr;
  grpc_call* call_;
  gpr_timespec deadline_;
  grpc_metadata_array request_metadata_;
  grpc::CompletionQueue* const cq_;
  bool ctx_alloc_by_default_ = false;
  CallbackCallTag tag_;
  ServerContextType* ctx_ = nullptr;
  grpc_core::ManualConstructor<ServerContextType> default_ctx_;
  grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
};

template <>
bool Server::CallbackRequest<grpc::CallbackServerContext>::FinalizeResult(
    void** /*tag*/, bool* /*status*/) {
  return false;
}

template <>
bool Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::FinalizeResult(void** /*tag*/,
                                                        bool* status) {
  if (*status) {
    deadline_ = call_details_->deadline;
    // TODO(yangg) remove the copy here
    ctx_->method_ = grpc::StringFromCopiedSlice(call_details_->method);
    ctx_->host_ = grpc::StringFromCopiedSlice(call_details_->host);
  }
  grpc_slice_unref(call_details_->method);
  grpc_slice_unref(call_details_->host);
  return false;
}

template <>
const char* Server::CallbackRequest<grpc::CallbackServerContext>::method_name()
    const {
  return method_->name();
}

template <>
const char* Server::CallbackRequest<
    grpc::GenericCallbackServerContext>::method_name() const {
  return ctx_->method().c_str();
}

// Implementation of ThreadManager. Each instance of SyncRequestThreadManager
// manages a pool of threads that poll for incoming Sync RPCs and call the
// appropriate RPC handlers
class Server::SyncRequestThreadManager : public grpc::ThreadManager {
 public:
  SyncRequestThreadManager(Server* server, grpc::CompletionQueue* server_cq,
                           std::shared_ptr<GlobalCallbacks> global_callbacks,
                           grpc_resource_quota* rq, int min_pollers,
                           int max_pollers, int cq_timeout_msec)
      : ThreadManager("SyncServer", rq, min_pollers, max_pollers),
        server_(server),
        server_cq_(server_cq),
        cq_timeout_msec_(cq_timeout_msec),
        global_callbacks_(std::move(global_callbacks)) {}

  WorkStatus PollForWork(void** tag, bool* ok) override {
    *tag = nullptr;
    // TODO(ctiller): workaround for GPR_TIMESPAN based deadlines not working
    // right now
    gpr_timespec deadline =
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN));

    switch (server_cq_->AsyncNext(tag, ok, deadline)) {
      case grpc::CompletionQueue::TIMEOUT:
        return TIMEOUT;
      case grpc::CompletionQueue::SHUTDOWN:
        return SHUTDOWN;
      case grpc::CompletionQueue::GOT_EVENT:
        return WORK_FOUND;
    }

    GPR_UNREACHABLE_CODE(return TIMEOUT);
  }

  void DoWork(void* tag, bool ok, bool resources) override {
    (void)ok;
    SyncRequest* sync_req = static_cast<SyncRequest*>(tag);

    // Under the AllocatingRequestMatcher model we will never see an invalid tag
    // here.
    DCHECK_NE(sync_req, nullptr);
    DCHECK(ok);

    sync_req->Run(global_callbacks_, resources);
  }

  void AddSyncMethod(grpc::internal::RpcServiceMethod* method, void* tag) {
    grpc_core::Server::FromC(server_->server())
        ->SetRegisteredMethodAllocator(server_cq_->cq(), tag, [this, method] {
          grpc_core::Server::RegisteredCallAllocation result;
          new SyncRequest(server_, method, &result);
          return result;
        });
    has_sync_method_ = true;
  }

  void AddUnknownSyncMethod() {
    if (has_sync_method_) {
      unknown_method_ = std::make_unique<grpc::internal::RpcServiceMethod>(
          "unknown", grpc::internal::RpcMethod::BIDI_STREAMING,
          new grpc::internal::UnknownMethodHandler(kUnknownRpcMethod));
      grpc_core::Server::FromC(server_->server())
          ->SetBatchMethodAllocator(server_cq_->cq(), [this] {
            grpc_core::Server::BatchCallAllocation result;
            new SyncRequest(server_, unknown_method_.get(), &result);
            return result;
          });
    }
  }

  void Shutdown() override {
    ThreadManager::Shutdown();
    server_cq_->Shutdown();
  }

  void Wait() override {
    ThreadManager::Wait();
    // Drain any pending items from the queue
    void* tag;
    bool ok;
    while (server_cq_->Next(&tag, &ok)) {
      // This problem can arise if the server CQ gets a request queued to it
      // before it gets shut down but then pulls it after shutdown.
      static_cast<SyncRequest*>(tag)->Cleanup();
    }
  }

  void Start() {
    if (has_sync_method_) {
      Initialize();  // ThreadManager's Initialize()
    }
  }

 private:
  Server* server_;
  grpc::CompletionQueue* server_cq_;
  int cq_timeout_msec_;
  bool has_sync_method_ = false;
  std::unique_ptr<grpc::internal::RpcServiceMethod> unknown_method_;
  std::shared_ptr<Server::GlobalCallbacks> global_callbacks_;
};

Server::Server(
    grpc::ChannelArguments* args,
    std::shared_ptr<std::vector<std::unique_ptr<grpc::ServerCompletionQueue>>>
        sync_server_cqs,
    int min_pollers, int max_pollers, int sync_cq_timeout_msec,
    std::vector<std::shared_ptr<grpc::internal::ExternalConnectionAcceptorImpl>>
        acceptors,
    grpc_server_config_fetcher* server_config_fetcher,
    grpc_resource_quota* server_rq,
    std::vector<
        std::unique_ptr<grpc::experimental::ServerInterceptorFactoryInterface>>
        interceptor_creators,
    experimental::ServerMetricRecorder* server_metric_recorder)
    : acceptors_(std::move(acceptors)),
      interceptor_creators_(std::move(interceptor_creators)),
      max_receive_message_size_(INT_MIN),
      sync_server_cqs_(std::move(sync_server_cqs)),
      started_(false),
      shutdown_(false),
      shutdown_notified_(false),
      server_(nullptr),
      server_initializer_(new ServerInitializer(this)),
      health_check_service_disabled_(false),
      server_metric_recorder_(server_metric_recorder) {
  gpr_once_init(&grpc::g_once_init_callbacks, grpc::InitGlobalCallbacks);
  global_callbacks_ = grpc::g_callbacks;
  global_callbacks_->UpdateArguments(args);

  if (sync_server_cqs_ != nullptr) {
    bool default_rq_created = false;
    if (server_rq == nullptr) {
      server_rq = grpc_resource_quota_create("SyncServer-default-rq");
      grpc_resource_quota_set_max_threads(server_rq,
                                          DEFAULT_MAX_SYNC_SERVER_THREADS);
      default_rq_created = true;
    }

    for (const auto& it : *sync_server_cqs_) {
      sync_req_mgrs_.emplace_back(new SyncRequestThreadManager(
          this, it.get(), global_callbacks_, server_rq, min_pollers,
          max_pollers, sync_cq_timeout_msec));
    }

    if (default_rq_created) {
      grpc_resource_quota_unref(server_rq);
    }
  }

  for (auto& acceptor : acceptors_) {
    acceptor->SetToChannelArgs(args);
  }

  grpc_channel_args channel_args;
  args->SetChannelArgs(&channel_args);

  for (size_t i = 0; i < channel_args.num_args; i++) {
    if (0 == strcmp(channel_args.args[i].key,
                    grpc::kHealthCheckServiceInterfaceArg)) {
      if (channel_args.args[i].value.pointer.p == nullptr) {
        health_check_service_disabled_ = true;
      } else {
        health_check_service_.reset(
            static_cast<grpc::HealthCheckServiceInterface*>(
                channel_args.args[i].value.pointer.p));
      }
    }
    if (0 ==
        strcmp(channel_args.args[i].key, GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)) {
      max_receive_message_size_ = channel_args.args[i].value.integer;
    }
    if (0 == strcmp(channel_args.args[i].key,
                    GRPC_ARG_SERVER_CALL_METRIC_RECORDING)) {
      call_metric_recording_enabled_ = channel_args.args[i].value.integer;
    }
  }
  server_ = grpc_server_create(&channel_args, nullptr);
  grpc_server_set_config_fetcher(server_, server_config_fetcher);
}

Server::~Server() {
  {
    grpc::internal::ReleasableMutexLock lock(&mu_);
    if (started_ && !shutdown_) {
      lock.Release();
      Shutdown();
    } else if (!started_) {
      // Shutdown the completion queues
      for (const auto& value : sync_req_mgrs_) {
        value->Shutdown();
      }
      CompletionQueue* callback_cq =
          callback_cq_.load(std::memory_order_relaxed);
      if (callback_cq != nullptr) {
        if (grpc_iomgr_run_in_background()) {
          // gRPC-core provides the backing needed for the preferred CQ type
          callback_cq->Shutdown();
        } else {
          CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
        }
        callback_cq_.store(nullptr, std::memory_order_release);
      }
    }
  }
  // Destroy health check service before we destroy the C server so that
  // it does not call grpc_server_request_registered_call() after the C
  // server has been destroyed.
  health_check_service_.reset();
  grpc_server_destroy(server_);
}

void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) {
  CHECK(!grpc::g_callbacks);
  CHECK(callbacks);
  grpc::g_callbacks.reset(callbacks);
}

grpc_server* Server::c_server() { return server_; }

std::shared_ptr<grpc::Channel> Server::InProcessChannel(
    const grpc::ChannelArguments& args) {
  grpc_channel_args channel_args = args.c_channel_args();
  return grpc::CreateChannelInternal(
      "inproc", grpc_inproc_channel_create(server_, &channel_args, nullptr),
      std::vector<std::unique_ptr<
          grpc::experimental::ClientInterceptorFactoryInterface>>());
}

std::shared_ptr<grpc::Channel>
Server::experimental_type::InProcessChannelWithInterceptors(
    const grpc::ChannelArguments& args,
    std::vector<
        std::unique_ptr<grpc::experimental::ClientInterceptorFactoryInterface>>
        interceptor_creators) {
  grpc_channel_args channel_args = args.c_channel_args();
  return grpc::CreateChannelInternal(
      "inproc",
      grpc_inproc_channel_create(server_->server_, &channel_args, nullptr),
      std::move(interceptor_creators));
}

static grpc_server_register_method_payload_handling PayloadHandlingForMethod(
    grpc::internal::RpcServiceMethod* method) {
  switch (method->method_type()) {
    case grpc::internal::RpcMethod::NORMAL_RPC:
    case grpc::internal::RpcMethod::SERVER_STREAMING:
      return GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER;
    case grpc::internal::RpcMethod::CLIENT_STREAMING:
    case grpc::internal::RpcMethod::BIDI_STREAMING:
      return GRPC_SRM_PAYLOAD_NONE;
  }
  GPR_UNREACHABLE_CODE(return GRPC_SRM_PAYLOAD_NONE;);
}

bool Server::RegisterService(const std::string* addr, grpc::Service* service) {
  bool has_async_methods = service->has_async_methods();
  if (has_async_methods) {
    CHECK_EQ(service->server_, nullptr)
        << "Can only register an asynchronous service against one server.";
    service->server_ = this;
  }

  const char* method_name = nullptr;

  for (const auto& method : service->methods_) {
    if (method == nullptr) {  // Handled by generic service if any.
      continue;
    }

    void* method_registration_tag = grpc_server_register_method(
        server_, method->name(), addr ? addr->c_str() : nullptr,
        PayloadHandlingForMethod(method.get()), 0);
    if (method_registration_tag == nullptr) {
      VLOG(2) << "Attempt to register " << method->name() << " multiple times";
      return false;
    }

    if (method->handler() == nullptr) {  // Async method without handler
      method->set_server_tag(method_registration_tag);
    } else if (method->api_type() ==
               grpc::internal::RpcServiceMethod::ApiType::SYNC) {
      for (const auto& value : sync_req_mgrs_) {
        value->AddSyncMethod(method.get(), method_registration_tag);
      }
    } else {
      has_callback_methods_ = true;
      grpc::internal::RpcServiceMethod* method_value = method.get();
      grpc::CompletionQueue* cq = CallbackCQ();
      grpc_server_register_completion_queue(server_, cq->cq(), nullptr);
      grpc_core::Server::FromC(server_)->SetRegisteredMethodAllocator(
          cq->cq(), method_registration_tag, [this, cq, method_value] {
            grpc_core::Server::RegisteredCallAllocation result;
            new CallbackRequest<grpc::CallbackServerContext>(this, method_value,
                                                             cq, &result);
            return result;
          });
    }

    method_name = method->name();
  }

  // Parse service name.
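  // A registered method name has the form "/package.Service/Method" (e.g.
  // "/grpc.health.v1.Health/Check"), so the second getline below yields the
  // fully-qualified service name ("grpc.health.v1.Health").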
  if (method_name != nullptr) {
    std::stringstream ss(method_name);
    std::string service_name;
    if (std::getline(ss, service_name, '/') &&
        std::getline(ss, service_name, '/')) {
      services_.push_back(service_name);
    }
  }
  return true;
}

void Server::RegisterAsyncGenericService(grpc::AsyncGenericService* service) {
  CHECK_EQ(service->server_, nullptr)
      << "Can only register an async generic service against one server.";
  service->server_ = this;
  has_async_generic_service_ = true;
}

void Server::RegisterCallbackGenericService(
    grpc::CallbackGenericService* service) {
  CHECK_EQ(service->server_, nullptr)
      << "Can only register a callback generic service against one server.";
  service->server_ = this;
  has_callback_generic_service_ = true;
  generic_handler_.reset(service->Handler());

  grpc::CompletionQueue* cq = CallbackCQ();
  grpc_core::Server::FromC(server_)->SetBatchMethodAllocator(cq->cq(), [this,
                                                                        cq] {
    grpc_core::Server::BatchCallAllocation result;
    new CallbackRequest<grpc::GenericCallbackServerContext>(this, cq, &result);
    return result;
  });
}

int Server::AddListeningPort(const std::string& addr,
                             grpc::ServerCredentials* creds) {
  CHECK(!started_);
  int port = creds->AddPortToServer(addr, server_);
  global_callbacks_->AddPort(this, addr, creds, port);
  return port;
}

void Server::Ref() {
  shutdown_refs_outstanding_.fetch_add(1, std::memory_order_relaxed);
}

void Server::UnrefWithPossibleNotify() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    // No refs outstanding means that shutdown has been initiated and no more
    // callback requests are outstanding.
    grpc::internal::MutexLock lock(&mu_);
    CHECK(shutdown_);
    shutdown_done_ = true;
    shutdown_done_cv_.Signal();
  }
}

void Server::UnrefAndWaitLocked() {
  if (GPR_UNLIKELY(shutdown_refs_outstanding_.fetch_sub(
                       1, std::memory_order_acq_rel) == 1)) {
    shutdown_done_ = true;
    return;  // no need to wait on CV since done condition already set
  }
  while (!shutdown_done_) {
    shutdown_done_cv_.Wait(&mu_);
  }
}

void Server::Start(grpc::ServerCompletionQueue** cqs, size_t num_cqs) {
  CHECK(!started_);
  global_callbacks_->PreServerStart(this);
  started_ = true;

  // Only create the default health check service when the user did not
  // provide an explicit one.
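  //
  // For reference, applications toggle the default service before building
  // the server; a sketch (call site illustrative, not from this file):
  //
  //   grpc::EnableDefaultHealthCheckService(true);
  //   // ... clients may then call /grpc.health.v1.Health/Check.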
  if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
      grpc::DefaultHealthCheckServiceEnabled()) {
    auto default_hc_service = std::make_unique<DefaultHealthCheckService>();
    auto* hc_service_impl = default_hc_service->GetHealthCheckService();
    health_check_service_ = std::move(default_hc_service);
    RegisterService(nullptr, hc_service_impl);
  }

  for (auto& acceptor : acceptors_) {
    acceptor->GetCredentials()->AddPortToServer(acceptor->name(), server_);
  }

#ifndef NDEBUG
  for (size_t i = 0; i < num_cqs; i++) {
    cq_list_.push_back(cqs[i]);
  }
#endif

  // We must have exactly one generic service to handle requests for
  // unmatched method names (i.e., to return UNIMPLEMENTED for any RPC
  // method for which we don't have a registered implementation).  This
  // service comes from one of the following places (first match wins):
  // - If the application supplied a generic service via either the async
  //   or callback APIs, we use that.
  // - If there are callback methods, register a callback generic service.
  // - If there are sync methods, register a sync generic service.
  //   (This must be done before server start to initialize an
  //   AllocatingRequestMatcher.)
  // - Otherwise (we have only async methods), we wait until the server
  //   is started and then start an UnimplementedAsyncRequest on each
  //   async CQ, so that the requests will be moved along by polling
  //   done in application threads.
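  //
  // For reference, the first case corresponds to an application registration
  // such as the following sketch (names illustrative, not from this file):
  //
  //   grpc::AsyncGenericService generic_service;
  //   builder.RegisterAsyncGenericService(&generic_service);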
  bool unknown_rpc_needed =
      !has_async_generic_service_ && !has_callback_generic_service_;
  if (unknown_rpc_needed && has_callback_methods_) {
    unimplemented_service_ = std::make_unique<grpc::CallbackGenericService>();
    RegisterCallbackGenericService(unimplemented_service_.get());
    unknown_rpc_needed = false;
  }
  if (unknown_rpc_needed && !sync_req_mgrs_.empty()) {
    sync_req_mgrs_[0]->AddUnknownSyncMethod();
    unknown_rpc_needed = false;
  }

  grpc_server_start(server_);

  if (unknown_rpc_needed) {
    for (size_t i = 0; i < num_cqs; i++) {
      if (cqs[i]->IsFrequentlyPolled()) {
        new UnimplementedAsyncRequest(this, cqs[i]);
      }
    }
    unknown_rpc_needed = false;
  }

  // If this server has any support for synchronous methods (has any sync
  // server CQs), make sure that we have a ResourceExhausted handler
  // to deal with the case of thread exhaustion
  if (sync_server_cqs_ != nullptr && !sync_server_cqs_->empty()) {
    resource_exhausted_handler_ =
        std::make_unique<grpc::internal::ResourceExhaustedHandler>(
            kServerThreadpoolExhausted);
  }

  for (const auto& value : sync_req_mgrs_) {
    value->Start();
  }

  for (auto& acceptor : acceptors_) {
    acceptor->Start();
  }
}

void Server::ShutdownInternal(gpr_timespec deadline) {
  grpc::internal::MutexLock lock(&mu_);
  if (shutdown_) {
    return;
  }

  shutdown_ = true;

  for (auto& acceptor : acceptors_) {
    acceptor->Shutdown();
  }

  /// The completion queue to use for server shutdown completion notification
  grpc::CompletionQueue shutdown_cq;
  grpc::ShutdownTag shutdown_tag;  // Phony shutdown tag
  grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag);

  shutdown_cq.Shutdown();

  void* tag;
  bool ok;
  grpc::CompletionQueue::NextStatus status =
      shutdown_cq.AsyncNext(&tag, &ok, deadline);

  // If this timed out, it means we are done with the grace period for a clean
  // shutdown. We should force a shutdown now by cancelling all inflight calls
  if (status == grpc::CompletionQueue::NextStatus::TIMEOUT) {
    grpc_server_cancel_all_calls(server_);
    status =
        shutdown_cq.AsyncNext(&tag, &ok, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has
  // successfully shutdown

  // Drop the shutdown ref and wait for all other refs to drop as well.
  UnrefAndWaitLocked();

  // Shutdown all ThreadManagers. This will try to gracefully stop all the
  // threads in the ThreadManagers (once they process any inflight requests)
  for (const auto& value : sync_req_mgrs_) {
    value->Shutdown();  // ThreadManager's Shutdown()
  }

  // Wait for threads in all ThreadManagers to terminate
  for (const auto& value : sync_req_mgrs_) {
    value->Wait();
  }

  // Shutdown the callback CQ. The CQ is owned by its own shutdown tag, so it
  // will delete itself at true shutdown.
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    if (grpc_iomgr_run_in_background()) {
      // gRPC-core provides the backing needed for the preferred CQ type
      callback_cq->Shutdown();
    } else {
      CompletionQueue::ReleaseCallbackAlternativeCQ(callback_cq);
    }
    callback_cq_.store(nullptr, std::memory_order_release);
  }

  // Drain the shutdown queue (if the previous call to AsyncNext() timed out
  // and we didn't remove the tag from the queue yet)
  while (shutdown_cq.Next(&tag, &ok)) {
    // Nothing to be done here. Just ignore ok and tag values
  }

  shutdown_notified_ = true;
  shutdown_cv_.SignalAll();

#ifndef NDEBUG
  // Unregister this server with the CQs passed into it by the user so that
  // those can be checked for properly-ordered shutdown.
  for (auto* cq : cq_list_) {
    cq->UnregisterServer(this);
  }
  cq_list_.clear();
#endif
}

void Server::Wait() {
  grpc::internal::MutexLock lock(&mu_);
  while (started_ && !shutdown_notified_) {
    shutdown_cv_.Wait(&mu_);
  }
}

void Server::PerformOpsOnCall(grpc::internal::CallOpSetInterface* ops,
                              grpc::internal::Call* call) {
  ops->FillOps(call);
}

bool Server::UnimplementedAsyncRequest::FinalizeResult(void** tag,
                                                       bool* status) {
  if (GenericAsyncRequest::FinalizeResult(tag, status)) {
    // We either had no interceptors run or we are done intercepting
    if (*status) {
      // Create a new request/response pair using the server and CQ values
      // stored in this object's base class.
      new UnimplementedAsyncRequest(server_, notification_cq_);
      new UnimplementedAsyncResponse(this);
    } else {
      delete this;
    }
  } else {
    // The tag was swallowed due to interception. We will see it again.
  }
  return false;
}

Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse(
    UnimplementedAsyncRequest* request)
    : request_(request) {
  grpc::Status status(grpc::StatusCode::UNIMPLEMENTED, kUnknownRpcMethod);
  grpc::internal::UnknownMethodHandler::FillOps(request_->context(),
                                                kUnknownRpcMethod, this);
  request_->stream()->call_.PerformOps(this);
}

grpc::ServerInitializer* Server::initializer() {
  return server_initializer_.get();
}

grpc::CompletionQueue* Server::CallbackCQ() {
  // TODO(vjpai): Consider using a single global CQ for the default CQ
  // if there is no explicit per-server CQ registered
  CompletionQueue* callback_cq = callback_cq_.load(std::memory_order_acquire);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  // The callback_cq_ wasn't already set, so grab a lock and set it up exactly
  // once for this server.
  grpc::internal::MutexLock l(&mu_);
  callback_cq = callback_cq_.load(std::memory_order_relaxed);
  if (callback_cq != nullptr) {
    return callback_cq;
  }
  if (grpc_iomgr_run_in_background()) {
    // gRPC-core provides the backing needed for the preferred CQ type
    auto* shutdown_callback = new grpc::ShutdownCallback;
    callback_cq = new grpc::CompletionQueue(grpc_completion_queue_attributes{
        GRPC_CQ_CURRENT_VERSION, GRPC_CQ_CALLBACK, GRPC_CQ_DEFAULT_POLLING,
        shutdown_callback});

    // Transfer ownership of the new cq to its own shutdown callback
    shutdown_callback->TakeCQ(callback_cq);
  } else {
    // Otherwise we need to use the alternative CQ variant
    callback_cq = CompletionQueue::CallbackAlternativeCQ();
  }

  callback_cq_.store(callback_cq, std::memory_order_release);
  return callback_cq;
}

}  // namespace grpc