/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpcpp/impl/codegen/server_context_impl.h>

#include <algorithm>
#include <utility>

#include <grpc/compression.h>
#include <grpc/grpc.h>
#include <grpc/load_reporting.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpcpp/impl/call.h>
#include <grpcpp/impl/codegen/completion_queue_impl.h>
#include <grpcpp/support/server_callback.h>
#include <grpcpp/support/time.h>

#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/surface/call.h"

namespace grpc_impl {

// CompletionOp

class ServerContextBase::CompletionOp final
    : public ::grpc::internal::CallOpSetInterface {
 public:
  // initial refs: one in the server context, one in the cq
  // must ref the call before calling constructor and after deleting this
  CompletionOp(::grpc::internal::Call* call,
               ::grpc_impl::internal::ServerCallbackCall* callback_controller)
      : call_(*call),
        callback_controller_(callback_controller),
        has_tag_(false),
        tag_(nullptr),
        core_cq_tag_(this),
        refs_(2),
        finalized_(false),
        cancelled_(0),
        done_intercepting_(false) {}

  // CompletionOp isn't copyable or movable
  CompletionOp(const CompletionOp&) = delete;
  CompletionOp& operator=(const CompletionOp&) = delete;
  CompletionOp(CompletionOp&&) = delete;
  CompletionOp& operator=(CompletionOp&&) = delete;

  ~CompletionOp() {
    if (call_.server_rpc_info()) {
      call_.server_rpc_info()->Unref();
    }
  }

  void FillOps(::grpc::internal::Call* call) override;

  // This should always be arena allocated in the call, so override delete.
  // But this class is not trivially destructible, so must actually call delete
  // before allowing the arena to be freed
  static void operator delete(void* /*ptr*/, std::size_t size) {
    // Use size to avoid unused-parameter warning since assert seems to be
    // compiled out and treated as unused in some gcc optimized versions.
    (void)size;
    assert(size == sizeof(CompletionOp));
  }

  // This operator should never be called as the memory should be freed as part
  // of the arena destruction. It only exists to provide a matching operator
  // delete to the operator new so that some compilers will not complain (see
  // https://github.com/grpc/grpc/issues/11301) Note at the time of adding this
  // there are no tests catching the compiler warning.
  static void operator delete(void*, void*) { assert(0); }

  bool FinalizeResult(void** tag, bool* status) override;

  bool CheckCancelled(CompletionQueue* cq) {
    cq->TryPluck(this);
    return CheckCancelledNoPluck();
  }
  bool CheckCancelledAsync() { return CheckCancelledNoPluck(); }

  void set_tag(void* tag) {
    has_tag_ = true;
    tag_ = tag;
  }

  void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }

  void* core_cq_tag() override { return core_cq_tag_; }

  void Unref();

  // This will be called while interceptors are run if the RPC is a hijacked
  // RPC. This should set hijacking state for each of the ops.
  void SetHijackingState() override {
    /* Servers don't allow hijacking */
    GPR_ASSERT(false);
  }

  /* Should be called after interceptors are done running */
  void ContinueFillOpsAfterInterception() override {}

  /* Should be called after interceptors are done running on the finalize result
   * path */
  void ContinueFinalizeResultAfterInterception() override {
    done_intercepting_ = true;
    if (!has_tag_) {
      /* We don't have a tag to return. */
      Unref();
      return;
    }
    /* Start a dummy op so that we can return the tag */
    GPR_ASSERT(grpc_call_start_batch(call_.call(), nullptr, 0, core_cq_tag_,
                                     nullptr) == GRPC_CALL_OK);
  }

 private:
  bool CheckCancelledNoPluck() {
    grpc_core::MutexLock lock(&mu_);
    return finalized_ ? (cancelled_ != 0) : false;
  }

  ::grpc::internal::Call call_;
  ::grpc_impl::internal::ServerCallbackCall* const callback_controller_;
  bool has_tag_;
  void* tag_;
  void* core_cq_tag_;
  grpc_core::RefCount refs_;
  grpc_core::Mutex mu_;
  bool finalized_;
  int cancelled_;  // This is an int (not bool) because it is passed to core
  bool done_intercepting_;
  ::grpc::internal::InterceptorBatchMethodsImpl interceptor_methods_;
};

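// Release one reference; the final Unref deletes this arena-allocated op and
// then drops the call ref taken in ServerContextBase::BeginCompletionOp.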
void ServerContextBase::CompletionOp::Unref() {
  if (refs_.Unref()) {
    grpc_call* call = call_.call();
    delete this;
    grpc_call_unref(call);
  }
}

void ServerContextBase::CompletionOp::FillOps(::grpc::internal::Call* call) {
  grpc_op ops;
  ops.op = GRPC_OP_RECV_CLOSE_ON_SERVER;
  ops.data.recv_close_on_server.cancelled = &cancelled_;
  ops.flags = 0;
  ops.reserved = nullptr;
  interceptor_methods_.SetCall(&call_);
  interceptor_methods_.SetReverse();
  interceptor_methods_.SetCallOpSetInterface(this);
  // The following call_start_batch is internally-generated so no need for an
  // explanatory log on failure.
  GPR_ASSERT(grpc_call_start_batch(call->call(), &ops, 1, core_cq_tag_,
                                   nullptr) == GRPC_CALL_OK);
  /* No interceptors to run here */
}

bool ServerContextBase::CompletionOp::FinalizeResult(void** tag, bool* status) {
  // Decide whether to call the cancel callback within the lock
  bool call_cancel;

  {
    grpc_core::MutexLock lock(&mu_);
    if (done_intercepting_) {
      // We are done intercepting.
      bool has_tag = has_tag_;
      if (has_tag) {
        *tag = tag_;
      }
      Unref();
      return has_tag;
    }
    finalized_ = true;

    // If for some reason the incoming status is false, mark that as a
    // cancellation.
    // TODO(vjpai): does this ever happen?
    if (!*status) {
      cancelled_ = 1;
    }

    call_cancel = (cancelled_ != 0);
    // Release the lock since we may call a callback and interceptors.
  }

  if (call_cancel && callback_controller_ != nullptr) {
    callback_controller_->MaybeCallOnCancel();
  }
  /* Add interception point and run through interceptors */
  interceptor_methods_.AddInterceptionHookPoint(
      ::grpc::experimental::InterceptionHookPoints::POST_RECV_CLOSE);
  if (interceptor_methods_.RunInterceptors()) {
    // No interceptors were run
    bool has_tag = has_tag_;
    if (has_tag) {
      *tag = tag_;
    }
    Unref();
    return has_tag;
  }
  // There are interceptors to be run. Return false for now.
  return false;
}

// ServerContextBase body

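// A default-constructed context has no call bound yet and an infinite deadline.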
ServerContextBase::ServerContextBase() {
  Setup(gpr_inf_future(GPR_CLOCK_REALTIME));
}

ServerContextBase::ServerContextBase(gpr_timespec deadline,
                                     grpc_metadata_array* arr) {
  Setup(deadline);
  std::swap(*client_metadata_.arr(), *arr);
}

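// Shared initialization for both constructors: reset all per-RPC state and
// record the deadline.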
void ServerContextBase::Setup(gpr_timespec deadline) {
  completion_op_ = nullptr;
  has_notify_when_done_tag_ = false;
  async_notify_when_done_tag_ = nullptr;
  deadline_ = deadline;
  call_ = nullptr;
  cq_ = nullptr;
  sent_initial_metadata_ = false;
  compression_level_set_ = false;
  has_pending_ops_ = false;
  rpc_info_ = nullptr;
}

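// Record the call deadline and swap in the received client metadata.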
void ServerContextBase::BindDeadlineAndMetadata(gpr_timespec deadline,
                                                grpc_metadata_array* arr) {
  deadline_ = deadline;
  std::swap(*client_metadata_.arr(), *arr);
}

ServerContextBase::~ServerContextBase() { Clear(); }

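// Release everything held for the current RPC: metadata, the completion op,
// interception info, the core call, and the default reactor if it was used.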
void ServerContextBase::Clear() {
  auth_context_.reset();
  initial_metadata_.clear();
  trailing_metadata_.clear();
  client_metadata_.Reset();
  if (completion_op_) {
    completion_op_->Unref();
    completion_op_ = nullptr;
    completion_tag_.Clear();
  }
  if (rpc_info_) {
    rpc_info_->Unref();
    rpc_info_ = nullptr;
  }
  if (call_) {
    auto* call = call_;
    call_ = nullptr;
    grpc_call_unref(call);
  }
  if (default_reactor_used_.load(std::memory_order_relaxed)) {
    reinterpret_cast<Reactor*>(&default_reactor_)->~Reactor();
    default_reactor_used_.store(false, std::memory_order_relaxed);
  }
  test_unary_.reset();
}

void ServerContextBase::BeginCompletionOp(
    ::grpc::internal::Call* call, std::function<void(bool)> callback,
    ::grpc_impl::internal::ServerCallbackCall* callback_controller) {
  GPR_ASSERT(!completion_op_);
  if (rpc_info_) {
    rpc_info_->Ref();
  }
  grpc_call_ref(call->call());
  completion_op_ =
      new (grpc_call_arena_alloc(call->call(), sizeof(CompletionOp)))
          CompletionOp(call, callback_controller);
  if (callback_controller != nullptr) {
    completion_tag_.Set(call->call(), std::move(callback), completion_op_,
                        true);
    completion_op_->set_core_cq_tag(&completion_tag_);
    completion_op_->set_tag(completion_op_);
  } else if (has_notify_when_done_tag_) {
    completion_op_->set_tag(async_notify_when_done_tag_);
  }
  call->PerformOps(completion_op_);
}

::grpc::internal::CompletionQueueTag* ServerContextBase::GetCompletionOpTag() {
  return static_cast<::grpc::internal::CompletionQueueTag*>(completion_op_);
}

void ServerContextBase::AddInitialMetadata(const std::string& key,
                                           const std::string& value) {
  initial_metadata_.insert(std::make_pair(key, value));
}

void ServerContextBase::AddTrailingMetadata(const std::string& key,
                                            const std::string& value) {
  trailing_metadata_.insert(std::make_pair(key, value));
}

void ServerContextBase::TryCancel() const {
  ::grpc::internal::CancelInterceptorBatchMethods cancel_methods;
  if (rpc_info_) {
    for (size_t i = 0; i < rpc_info_->interceptors_.size(); i++) {
      rpc_info_->RunInterceptor(&cancel_methods, i);
    }
  }
  grpc_call_error err = grpc_call_cancel_with_status(
      call_, GRPC_STATUS_CANCELLED, "Cancelled on the server side", nullptr);
  if (err != GRPC_CALL_OK) {
    gpr_log(GPR_ERROR, "TryCancel failed with: %d", err);
  }
}

bool ServerContextBase::IsCancelled() const {
  if (completion_tag_) {
    // When using callback API, this result is always valid.
    return completion_op_->CheckCancelledAsync();
  } else if (has_notify_when_done_tag_) {
    // When using async API, the result is only valid
    // if the tag has already been delivered at the completion queue
    return completion_op_ && completion_op_->CheckCancelledAsync();
  } else {
    // when using sync API, the result is always valid
    return completion_op_ && completion_op_->CheckCancelled(cq_);
  }
}

void ServerContextBase::set_compression_algorithm(
    grpc_compression_algorithm algorithm) {
  compression_algorithm_ = algorithm;
  const char* algorithm_name = nullptr;
  if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) {
    gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.",
            algorithm);
    abort();
  }
  GPR_ASSERT(algorithm_name != nullptr);
  AddInitialMetadata(GRPC_COMPRESSION_REQUEST_ALGORITHM_MD_KEY, algorithm_name);
}

std::string ServerContextBase::peer() const {
  std::string peer;
  if (call_) {
    char* c_peer = grpc_call_get_peer(call_);
    peer = c_peer;
    gpr_free(c_peer);
  }
  return peer;
}

const struct census_context* ServerContextBase::census_context() const {
  return grpc_census_call_get_context(call_);
}

void ServerContextBase::SetLoadReportingCosts(
    const std::vector<std::string>& cost_data) {
  if (call_ == nullptr) return;
  for (const auto& cost_datum : cost_data) {
    AddTrailingMetadata(GRPC_LB_COST_MD_KEY, cost_datum);
  }
}

}  // namespace grpc_impl