1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <grpc/impl/grpc_types.h>
20 #include <grpc/support/port_platform.h>
21
22 #include "absl/base/thread_annotations.h"
23 #include "absl/status/status.h"
24 #include "absl/strings/str_cat.h"
25 #include "absl/strings/string_view.h"
26 #include "absl/types/optional.h"
27 #include "src/core/lib/iomgr/exec_ctx.h"
28 #include "src/core/lib/iomgr/port.h"
29
30 #ifdef GRPC_POSIX_SOCKET_TCP
31
32 #include <errno.h>
33 #include <grpc/slice.h>
34 #include <grpc/support/alloc.h>
35 #include <grpc/support/string_util.h>
36 #include <grpc/support/sync.h>
37 #include <grpc/support/time.h>
38 #include <limits.h>
39 #include <netinet/in.h>
40 #include <netinet/tcp.h>
41 #include <stdbool.h>
42 #include <stdio.h>
43 #include <stdlib.h>
44 #include <string.h>
45 #include <sys/socket.h>
46 #include <sys/types.h>
47 #include <unistd.h>
48
49 #include <algorithm>
50 #include <unordered_map>
51
52 #include "absl/log/check.h"
53 #include "absl/log/log.h"
54 #include "src/core/lib/address_utils/sockaddr_utils.h"
55 #include "src/core/lib/debug/trace.h"
56 #include "src/core/lib/experiments/experiments.h"
57 #include "src/core/lib/iomgr/buffer_list.h"
58 #include "src/core/lib/iomgr/ev_posix.h"
59 #include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
60 #include "src/core/lib/iomgr/executor.h"
61 #include "src/core/lib/iomgr/socket_utils_posix.h"
62 #include "src/core/lib/iomgr/tcp_posix.h"
63 #include "src/core/lib/resource_quota/api.h"
64 #include "src/core/lib/resource_quota/memory_quota.h"
65 #include "src/core/lib/slice/slice_internal.h"
66 #include "src/core/lib/slice/slice_string_helpers.h"
67 #include "src/core/telemetry/stats.h"
68 #include "src/core/telemetry/stats_data.h"
69 #include "src/core/util/crash.h"
70 #include "src/core/util/event_log.h"
71 #include "src/core/util/strerror.h"
72 #include "src/core/util/string.h"
73 #include "src/core/util/sync.h"
74 #include "src/core/util/time.h"
75 #include "src/core/util/useful.h"
76
77 #ifndef SOL_TCP
78 #define SOL_TCP IPPROTO_TCP
79 #endif
80
81 #ifndef TCP_INQ
82 #define TCP_INQ 36
83 #define TCP_CM_INQ TCP_INQ
84 #endif
85
86 #ifdef GRPC_HAVE_MSG_NOSIGNAL
87 #define SENDMSG_FLAGS MSG_NOSIGNAL
88 #else
89 #define SENDMSG_FLAGS 0
90 #endif
91
92 // TCP zero copy sendmsg flag.
93 // NB: We define this here as a fallback in case we're using an older set of
94 // library headers that has not defined MSG_ZEROCOPY. Since this constant is
95 // part of the kernel, we are guaranteed it will never change/disagree so
96 // defining it here is safe.
97 #ifndef MSG_ZEROCOPY
98 #define MSG_ZEROCOPY 0x4000000
99 #endif
100
101 #ifdef GRPC_MSG_IOVLEN_TYPE
102 typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
103 #else
104 typedef size_t msg_iovlen_type;
105 #endif
106
107 namespace grpc_core {
108
109 class TcpZerocopySendRecord {
110 public:
111 TcpZerocopySendRecord() { grpc_slice_buffer_init(&buf_); }
112
113 ~TcpZerocopySendRecord() {
114 AssertEmpty();
115 grpc_slice_buffer_destroy(&buf_);
116 }
117
118 // Given the slices that we wish to send, and the current offset into the
119 // slice buffer (indicating which have already been sent), populate an iovec
120 // array that will be used for a zerocopy enabled sendmsg().
121 msg_iovlen_type PopulateIovs(size_t* unwind_slice_idx,
122 size_t* unwind_byte_idx, size_t* sending_length,
123 iovec* iov);
124
125 // A sendmsg() may not be able to send the bytes that we requested at this
126 // time, returning EAGAIN (possibly due to backpressure). In this case,
127 // unwind the offset into the slice buffer so we retry sending these bytes.
128 void UnwindIfThrottled(size_t unwind_slice_idx, size_t unwind_byte_idx) {
129 out_offset_.byte_idx = unwind_byte_idx;
130 out_offset_.slice_idx = unwind_slice_idx;
131 }
132
133 // Update the offset into the slice buffer based on how much we wanted to send
134 // vs. what sendmsg() actually sent (which may be lower, possibly due to
135 // backpressure).
136 void UpdateOffsetForBytesSent(size_t sending_length, size_t actually_sent);
137
138 // Indicates whether all underlying data has been sent or not.
139 bool AllSlicesSent() { return out_offset_.slice_idx == buf_.count; }
140
141 // Reset this structure for a new tcp_write() with zerocopy.
142 void PrepareForSends(grpc_slice_buffer* slices_to_send) {
143 AssertEmpty();
144 out_offset_.slice_idx = 0;
145 out_offset_.byte_idx = 0;
146 grpc_slice_buffer_swap(slices_to_send, &buf_);
147 Ref();
148 }
149
150 // References: 1 reference per sendmsg(), and 1 for the tcp_write().
151 void Ref() { ref_.fetch_add(1, std::memory_order_relaxed); }
152
153 // Unref: called when we get an error queue notification for a sendmsg(), when
154 // a sendmsg() failed, or when tcp_write() is done.
155 bool Unref() {
156 const intptr_t prior = ref_.fetch_sub(1, std::memory_order_acq_rel);
157 DCHECK_GT(prior, 0);
158 if (prior == 1) {
159 AllSendsComplete();
160 return true;
161 }
162 return false;
163 }
164
165 private:
166 struct OutgoingOffset {
167 size_t slice_idx = 0;
168 size_t byte_idx = 0;
169 };
170
171 void AssertEmpty() {
172 DCHECK_EQ(buf_.count, 0u);
173 DCHECK_EQ(buf_.length, 0u);
174 DCHECK_EQ(ref_.load(std::memory_order_relaxed), 0);
175 }
176
177 // When all sendmsg() calls associated with this tcp_write() have been
178 // completed (ie. we have received the notifications for each sequence number
179 // for each sendmsg()) and all reference counts have been dropped, drop our
180 // reference to the underlying data since we no longer need it.
181 void AllSendsComplete() {
182 DCHECK_EQ(ref_.load(std::memory_order_relaxed), 0);
183 grpc_slice_buffer_reset_and_unref(&buf_);
184 }
185
186 grpc_slice_buffer buf_;
187 std::atomic<intptr_t> ref_{0};
188 OutgoingOffset out_offset_;
189 };
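// Reference-count lifecycle of a TcpZerocopySendRecord, summarized from the comments
// above (a descriptive sketch, not additional API):
//
//   PrepareForSends()              -> ref = 1  (held by the tcp_write())
//   each zerocopy sendmsg()        -> ref + 1  (held until its completion notification)
//   completion notification, or a
//   failed sendmsg()               -> Unref()
//   tcp_write() finishes           -> Unref()
//
// The final Unref() calls AllSendsComplete(), which resets buf_ so the record can be
// returned to the TcpZerocopySendCtx pool and reused.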
190
191 class TcpZerocopySendCtx {
192 public:
193 static constexpr int kDefaultMaxSends = 4;
194 static constexpr size_t kDefaultSendBytesThreshold = 16 * 1024; // 16KB
195
196 explicit TcpZerocopySendCtx(
197 int max_sends = kDefaultMaxSends,
198 size_t send_bytes_threshold = kDefaultSendBytesThreshold)
199 : max_sends_(max_sends),
200 free_send_records_size_(max_sends),
201 threshold_bytes_(send_bytes_threshold) {
202 send_records_ = static_cast<TcpZerocopySendRecord*>(
203 gpr_malloc(max_sends * sizeof(*send_records_)));
204 free_send_records_ = static_cast<TcpZerocopySendRecord**>(
205 gpr_malloc(max_sends * sizeof(*free_send_records_)));
206 if (send_records_ == nullptr || free_send_records_ == nullptr) {
207 gpr_free(send_records_);
208 gpr_free(free_send_records_);
209 GRPC_TRACE_LOG(tcp, INFO)
210 << "Disabling TCP TX zerocopy due to memory pressure.\n";
211 memory_limited_ = true;
212 } else {
213 for (int idx = 0; idx < max_sends_; ++idx) {
214 new (send_records_ + idx) TcpZerocopySendRecord();
215 free_send_records_[idx] = send_records_ + idx;
216 }
217 }
218 }
219
220 ~TcpZerocopySendCtx() {
221 if (send_records_ != nullptr) {
222 for (int idx = 0; idx < max_sends_; ++idx) {
223 send_records_[idx].~TcpZerocopySendRecord();
224 }
225 }
226 gpr_free(send_records_);
227 gpr_free(free_send_records_);
228 }
229
230 // True if we were unable to allocate the various bookkeeping structures at
231 // transport initialization time. If memory limited, we do not zerocopy.
232 bool memory_limited() const { return memory_limited_; }
233
234 // TCP send zerocopy maintains an implicit sequence number for every
235 // successful sendmsg() with zerocopy enabled; the kernel later gives us an
236 // error queue notification with this sequence number indicating that the
237 // underlying data buffers that we sent can now be released. Once that
238 // notification is received, we can release the buffers associated with this
239 // zerocopy send record. Here, we associate the sequence number with the data
240 // buffers that were sent with the corresponding call to sendmsg().
241 void NoteSend(TcpZerocopySendRecord* record) {
242 record->Ref();
243 {
244 MutexLock guard(&lock_);
245 is_in_write_ = true;
246 AssociateSeqWithSendRecordLocked(last_send_, record);
247 }
248 ++last_send_;
249 }
250
251 // If sendmsg() actually failed, though, we need to revert the sequence number
252 // that we speculatively bumped before calling sendmsg(). Note that we bump
253 // this sequence number and perform relevant bookkeeping (see: NoteSend())
254 // *before* calling sendmsg() since, if we called it *after* sendmsg(), then
255 // there is a possible race with the release notification which could occur on
256 // another thread before we do the necessary bookkeeping. Hence, calling
257 // NoteSend() *before* sendmsg() and implementing an undo function is needed.
258 void UndoSend() {
259 --last_send_;
260 if (ReleaseSendRecord(last_send_)->Unref()) {
261 // We should still be holding the ref taken by tcp_write().
262 DCHECK(0);
263 }
264 }
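// Illustrative ordering implied by NoteSend()/UndoSend() (a sketch of the expected
// calling pattern; `fd` and `msg` are placeholders, and the real write path appears
// later in this file):
//
//   ctx->NoteSend(record);                          // ref + seq bookkeeping *before* sendmsg()
//   ssize_t sent = sendmsg(fd, &msg, MSG_ZEROCOPY);
//   if (sent < 0) ctx->UndoSend();                  // nothing was queued; revert the bookkeeping
//
// Doing the bookkeeping first closes the race in which the kernel's completion
// notification for this sequence number could arrive on another thread before the
// record has been associated with it.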
265
266 // Simply associate this send record (and the underlying sent data buffers)
267 // with the implicit sequence number for this zerocopy sendmsg().
268 void AssociateSeqWithSendRecordLocked(uint32_t seq,
269 TcpZerocopySendRecord* record) {
270 ctx_lookup_.emplace(seq, record);
271 }
272
273 // Get a send record for a send that we wish to do with zerocopy.
274 TcpZerocopySendRecord* GetSendRecord() {
275 MutexLock guard(&lock_);
276 return TryGetSendRecordLocked();
277 }
278
279 // A given send record corresponds to a single tcp_write() with zerocopy
280 // enabled. This can result in several sendmsg() calls to flush all of the
281 // data to wire. Each sendmsg() takes a reference on the
282 // TcpZerocopySendRecord, and corresponds to a single sequence number.
283 // ReleaseSendRecord releases a reference on TcpZerocopySendRecord for a
284 // single sequence number. This is called either when the relevant error queue
285 // notification (saying that we can discard the underlying buffers for this
286 // sendmsg()) is received from the kernel, or in case sendmsg() was
287 // unsuccessful to begin with.
288 TcpZerocopySendRecord* ReleaseSendRecord(uint32_t seq) {
289 MutexLock guard(&lock_);
290 return ReleaseSendRecordLocked(seq);
291 }
292
293 // After all the references to a TcpZerocopySendRecord are released, we can
294 // add it back to the pool (of size max_sends_). Note that we can only have
295 // max_sends_ tcp_write() instances with zerocopy enabled in flight at the
296 // same time.
297 void PutSendRecord(TcpZerocopySendRecord* record) {
298 DCHECK(record >= send_records_);
299 DCHECK(record < send_records_ + max_sends_);
300 MutexLock guard(&lock_);
301 PutSendRecordLocked(record);
302 }
303
304 // Indicate that we are disposing of this zerocopy context. This indicator
305 // will prevent new zerocopy writes from being issued.
306 void Shutdown() { shutdown_.store(true, std::memory_order_release); }
307
308 // Indicates that there are no inflight tcp_write() instances with zerocopy
309 // enabled.
310 bool AllSendRecordsEmpty() {
311 MutexLock guard(&lock_);
312 return free_send_records_size_ == max_sends_;
313 }
314
315 bool enabled() const { return enabled_; }
316
317 void set_enabled(bool enabled) {
318 DCHECK(!enabled || !memory_limited());
319 enabled_ = enabled;
320 }
321
322 // Only use zerocopy if we are sending at least this many bytes. The
323 // additional overhead of reading the error queue for notifications means that
324 // zerocopy is not useful for small transfers.
325 size_t threshold_bytes() const { return threshold_bytes_; }
326
327 // Expected to be called by handler reading messages from the err queue.
328 // It is used to indicate that some OMem memory is now available. It returns
329 // true to tell the caller to mark the file descriptor as immediately
330 // writable.
331 //
332 // If a write is currently in progress on the socket (ie. we have issued a
333 // sendmsg() and are about to check its return value) then we set omem state
334 // to CHECK to let the sending thread know that some tcp_omem was
335 // concurrently freed even if sendmsg() returns ENOBUFS. In this case, since
336 // there is already an active send thread, we do not need to mark the
337 // socket writeable, so we return false.
338 //
339 // If there was no write in progress on the socket, and the socket was not
340 // marked as FULL, then we need not mark the socket writeable now that some
341 // tcp_omem memory is freed since it was not considered as blocked on
342 // tcp_omem to begin with. So in this case, return false.
343 //
344 // But, if a write was not in progress and the omem state was FULL, then we
345 // need to mark the socket writeable since it is no longer blocked by
346 // tcp_omem. In this case, return true.
347 //
348 // Please refer to the STATE TRANSITION DIAGRAM below for more details.
349 //
350 bool UpdateZeroCopyOMemStateAfterFree() {
351 MutexLock guard(&lock_);
352 if (is_in_write_) {
353 zcopy_enobuf_state_ = OMemState::CHECK;
354 return false;
355 }
356 DCHECK(zcopy_enobuf_state_ != OMemState::CHECK);
357 if (zcopy_enobuf_state_ == OMemState::FULL) {
358 // A previous sendmsg attempt was blocked by ENOBUFS. Return true to
359 // mark the fd as writable so the next write attempt could be made.
360 zcopy_enobuf_state_ = OMemState::OPEN;
361 return true;
362 } else if (zcopy_enobuf_state_ == OMemState::OPEN) {
363 // No need to mark the fd as writable because the previous write
364 // attempt did not encounter ENOBUFS.
365 return false;
366 } else {
367 // This state should never be reached because it implies that the previous
368 // state was CHECK and is_in_write is false. This means that after the
369 // previous sendmsg returned and set is_in_write to false, it did
370 // not update the zerocopy state from CHECK to OPEN.
371 Crash("OMem state error!");
372 }
373 }
374
375 // Expected to be called by the thread calling sendmsg() after the syscall
376 // invocation is complete. If ENOBUFS was seen, it checks whether the error
377 // handler (Tx zero-copy completions) has already run and freed up some OMem.
378 // If so, it returns true, indicating that the write can be attempted again
379 // immediately. If ENOBUFS was seen but no Tx zero-copy completions have been
380 // received between the sendmsg() and us taking this lock, then tcp_omem is
381 // still full from our point of view, so we do not signal that the socket is
382 // writable with respect to the availability of tcp_omem, and the function
383 // returns false. This indicates that another write should not be attempted
384 // immediately and the calling thread should wait until the socket is writable
385 // again. If ENOBUFS was not seen, we again return false because the next
386 // write should be attempted only when the socket is writable again.
387 //
388 // Please refer to the STATE TRANSITION DIAGRAM below for more details.
389 //
390 bool UpdateZeroCopyOMemStateAfterSend(bool seen_enobuf) {
391 MutexLock guard(&lock_);
392 is_in_write_ = false;
393 if (seen_enobuf) {
394 if (zcopy_enobuf_state_ == OMemState::CHECK) {
395 zcopy_enobuf_state_ = OMemState::OPEN;
396 return true;
397 } else {
398 zcopy_enobuf_state_ = OMemState::FULL;
399 }
400 } else if (zcopy_enobuf_state_ != OMemState::OPEN) {
401 zcopy_enobuf_state_ = OMemState::OPEN;
402 }
403 return false;
404 }
405
406 private:
407 // STATE TRANSITION DIAGRAM
408 //
409 //  sendmsg succeeds       Tx-zero copy succeeds and there is no active sendmsg
410 //      ----<<--+  +------<<-------------------------------------+
411 //      |       |  |                                             |
412 //      |       v  v       sendmsg returns ENOBUFS               |
413 //      +-----> OPEN  ------------->>-------------------------> FULL
414 //                ^                                              |
415 //                |                                              |
416 //                | sendmsg completes                            |
417 //                +----<<---------- CHECK <-------<<-------------+
418 //                                       Tx-zero copy succeeds and there is
419 //                                       an active sendmsg
420 //
421 enum class OMemState : int8_t {
422 OPEN, // Everything is clear and omem is not full.
423 FULL, // The last sendmsg() has returned with an errno of ENOBUFS.
424 CHECK, // Error queue is read while is_in_write_ was true, so we should
425 // check this state after the sendmsg.
426 };
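// Example traces of the ENOBUFS bookkeeping above (derived from the two Update*
// methods; a descriptive note, not additional API):
//
//   OPEN  --(sendmsg returns ENOBUFS, no completion raced in)-->   FULL
//   FULL  --(error-queue handler frees omem, no write in flight)-> OPEN, fd marked writable
//
//   OPEN  --(completion arrives while a sendmsg is in flight)-->   CHECK
//   CHECK --(that sendmsg then returns ENOBUFS)-->                 OPEN, retry immediately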
427
428 TcpZerocopySendRecord* ReleaseSendRecordLocked(uint32_t seq) {
429 auto iter = ctx_lookup_.find(seq);
430 DCHECK(iter != ctx_lookup_.end());
431 TcpZerocopySendRecord* record = iter->second;
432 ctx_lookup_.erase(iter);
433 return record;
434 }
435
436 TcpZerocopySendRecord* TryGetSendRecordLocked() {
437 if (shutdown_.load(std::memory_order_acquire)) {
438 return nullptr;
439 }
440 if (free_send_records_size_ == 0) {
441 return nullptr;
442 }
443 free_send_records_size_--;
444 return free_send_records_[free_send_records_size_];
445 }
446
447 void PutSendRecordLocked(TcpZerocopySendRecord* record) {
448 DCHECK(free_send_records_size_ < max_sends_);
449 free_send_records_[free_send_records_size_] = record;
450 free_send_records_size_++;
451 }
452
453 TcpZerocopySendRecord* send_records_;
454 TcpZerocopySendRecord** free_send_records_;
455 int max_sends_;
456 int free_send_records_size_;
457 Mutex lock_;
458 uint32_t last_send_ = 0;
459 std::atomic<bool> shutdown_{false};
460 bool enabled_ = false;
461 size_t threshold_bytes_ = kDefaultSendBytesThreshold;
462 std::unordered_map<uint32_t, TcpZerocopySendRecord*> ctx_lookup_;
463 bool memory_limited_ = false;
464 bool is_in_write_ = false;
465 OMemState zcopy_enobuf_state_ = OMemState::OPEN;
466 };
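// A minimal sketch of how a zerocopy write is expected to drive the two classes above
// (illustrative only; `fd`, `msg`, `iov` and `slices` are placeholders, and the real
// write path appears later in this file):
//
//   TcpZerocopySendRecord* rec = ctx.GetSendRecord();   // nullptr => fall back to copying
//   rec->PrepareForSends(&slices);
//   while (!rec->AllSlicesSent()) {
//     size_t unwind_slice, unwind_byte, len;
//     msg.msg_iovlen = rec->PopulateIovs(&unwind_slice, &unwind_byte, &len, iov);
//     ctx.NoteSend(rec);
//     ssize_t sent = sendmsg(fd, &msg, MSG_ZEROCOPY);
//     if (sent < 0) {                                    // EAGAIN/ENOBUFS or a real error
//       ctx.UndoSend();
//       rec->UnwindIfThrottled(unwind_slice, unwind_byte);
//       break;
//     }
//     rec->UpdateOffsetForBytesSent(len, sent);
//   }
//   // Later, the error-queue handler calls ReleaseSendRecord(seq)->Unref() once per
//   // completed sequence number, and PutSendRecord() returns the record to the pool.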
467
468 } // namespace grpc_core
469
470 using grpc_core::TcpZerocopySendCtx;
471 using grpc_core::TcpZerocopySendRecord;
472
473 namespace {
474
475 struct grpc_tcp {
476 explicit grpc_tcp(const grpc_core::PosixTcpOptions& tcp_options)
477 : min_read_chunk_size(tcp_options.tcp_min_read_chunk_size),
478 max_read_chunk_size(tcp_options.tcp_max_read_chunk_size),
479 tcp_zerocopy_send_ctx(
480 tcp_options.tcp_tx_zerocopy_max_simultaneous_sends,
481 tcp_options.tcp_tx_zerocopy_send_bytes_threshold) {}
482 grpc_endpoint base;
483 grpc_fd* em_fd;
484 int fd;
485 int inq; // bytes pending on the socket from the last read.
486 double target_length;
487 double bytes_read_this_round;
488 grpc_core::RefCount refcount;
489 gpr_atm shutdown_count;
490
491 int min_read_chunk_size;
492 int max_read_chunk_size;
493
494 // garbage after the last read
495 grpc_slice_buffer last_read_buffer;
496
497 grpc_core::Mutex read_mu;
498 grpc_slice_buffer* incoming_buffer ABSL_GUARDED_BY(read_mu) = nullptr;
499
500 grpc_slice_buffer* outgoing_buffer;
501 // byte within outgoing_buffer->slices[0] to write next
502 size_t outgoing_byte_idx;
503
504 grpc_closure* read_cb;
505 grpc_closure* write_cb;
506 grpc_closure* release_fd_cb;
507 int* release_fd;
508
509 grpc_closure read_done_closure;
510 grpc_closure write_done_closure;
511 grpc_closure error_closure;
512
513 std::string peer_string;
514 std::string local_address;
515
516 grpc_core::MemoryOwner memory_owner;
517 grpc_core::MemoryAllocator::Reservation self_reservation;
518
519 grpc_core::TracedBufferList tb_list; // List of traced buffers
520
521 // grpc_endpoint_write takes an argument which if non-null means that the
522 // transport layer wants the TCP layer to collect timestamps for this write.
523 // This arg is forwarded to the timestamps callback function when the ACK
524 // timestamp is received from the kernel. This arg is a (void *) which allows
525 // users of this API to pass in a pointer to any kind of structure. This
526 // structure could actually be a tag or any book-keeping object that the user
527 // can use to distinguish between different traced writes. The only
528 // requirement from the TCP endpoint layer is that this arg should be non-null
529 // if the user wants timestamps for the write.
530 void* outgoing_buffer_arg;
531 // A counter which starts at 0. It is initialized the first time the socket
532 // options for collecting timestamps are set, and is incremented with each
533 // byte sent.
534 int bytes_counter;
535
536 int min_progress_size; // A hint from upper layers specifying the minimum
537 // number of bytes that need to be read to make
538 // meaningful progress
539
540 gpr_atm stop_error_notification; // Set to 1 if we do not want to be notified
541 // on errors anymore
542 TcpZerocopySendCtx tcp_zerocopy_send_ctx;
543 TcpZerocopySendRecord* current_zerocopy_send = nullptr;
544
545 int set_rcvlowat = 0;
546
547 // Used by the endpoint read function to distinguish the very first read call
548 // from the rest
549 bool is_first_read;
550 bool has_posted_reclaimer ABSL_GUARDED_BY(read_mu) = false;
551 bool inq_capable; // cache whether kernel supports inq
552 bool socket_ts_enabled; // True if timestamping options are set on the socket
553 //
554 bool ts_capable; // Cache whether we can set timestamping options
555 };
556
557 struct backup_poller {
558 gpr_mu* pollset_mu;
559 grpc_closure run_poller;
560 };
561
562 void LogCommonIOErrors(absl::string_view prefix, int error_no) {
563 switch (error_no) {
564 case ECONNABORTED:
565 grpc_core::global_stats().IncrementEconnabortedCount();
566 return;
567 case ECONNRESET:
568 grpc_core::global_stats().IncrementEconnresetCount();
569 return;
570 case EPIPE:
571 grpc_core::global_stats().IncrementEpipeCount();
572 return;
573 case ETIMEDOUT:
574 grpc_core::global_stats().IncrementEtimedoutCount();
575 return;
576 case ECONNREFUSED:
577 grpc_core::global_stats().IncrementEconnrefusedCount();
578 return;
579 case ENETUNREACH:
580 grpc_core::global_stats().IncrementEnetunreachCount();
581 return;
582 case ENOMSG:
583 grpc_core::global_stats().IncrementEnomsgCount();
584 return;
585 case ENOTCONN:
586 grpc_core::global_stats().IncrementEnotconnCount();
587 return;
588 case ENOBUFS:
589 grpc_core::global_stats().IncrementEnobufsCount();
590 return;
591 default:
592 grpc_core::global_stats().IncrementUncommonIoErrorCount();
593 LOG_EVERY_N_SEC(ERROR, 1)
594 << prefix.data()
595 << " encountered uncommon error: " << grpc_core::StrError(error_no);
596 return;
597 }
598 }
599
600 } // namespace
601
602 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp);
603
604 #define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
605
606 static grpc_core::Mutex* g_backup_poller_mu = nullptr;
607 static int g_uncovered_notifications_pending
608 ABSL_GUARDED_BY(g_backup_poller_mu);
609 static backup_poller* g_backup_poller ABSL_GUARDED_BY(g_backup_poller_mu);
610
611 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
612 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
613 static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
614 grpc_error_handle error);
615
616 static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
617 backup_poller* p = static_cast<backup_poller*>(bp);
618 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " destroy";
619 grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
620 gpr_free(p);
621 }
622
623 static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
624 backup_poller* p = static_cast<backup_poller*>(bp);
625 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " run";
626 gpr_mu_lock(p->pollset_mu);
627 grpc_core::Timestamp deadline =
628 grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(10);
629 GRPC_LOG_IF_ERROR(
630 "backup_poller:pollset_work",
631 grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
632 gpr_mu_unlock(p->pollset_mu);
633 g_backup_poller_mu->Lock();
634 // last "uncovered" notification is the ref that keeps us polling
635 if (g_uncovered_notifications_pending == 1) {
636 CHECK(g_backup_poller == p);
637 g_backup_poller = nullptr;
638 g_uncovered_notifications_pending = 0;
639 g_backup_poller_mu->Unlock();
640 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " shutdown";
641 grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
642 GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
643 grpc_schedule_on_exec_ctx));
644 } else {
645 g_backup_poller_mu->Unlock();
646 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " reschedule";
647 grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
648 grpc_core::ExecutorType::DEFAULT,
649 grpc_core::ExecutorJobType::LONG);
650 }
651 }
652
653 static void drop_uncovered(grpc_tcp* /*tcp*/) {
654 int old_count;
655 backup_poller* p;
656 g_backup_poller_mu->Lock();
657 p = g_backup_poller;
658 old_count = g_uncovered_notifications_pending--;
659 g_backup_poller_mu->Unlock();
660 CHECK_GT(old_count, 1);
661 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " uncover cnt "
662 << old_count << "->" << old_count - 1;
663 }
664
665 // gRPC API considers a Write operation to be done the moment it clears ‘flow
666 // control’ i.e., not necessarily sent on the wire. This means that the
667 // application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
668 // manner when its `Write()` API is acked.
669 //
670 // We need to ensure that the fd is 'covered' (i.e being monitored by some
671 // polling thread and progress is made) and hence add it to a backup poller here
672 static void cover_self(grpc_tcp* tcp) {
673 backup_poller* p;
674 g_backup_poller_mu->Lock();
675 int old_count = 0;
676 if (g_uncovered_notifications_pending == 0) {
677 g_uncovered_notifications_pending = 2;
678 p = static_cast<backup_poller*>(
679 gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
680 g_backup_poller = p;
681 grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
682 g_backup_poller_mu->Unlock();
683 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " create";
684 grpc_core::Executor::Run(
685 GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
686 absl::OkStatus(), grpc_core::ExecutorType::DEFAULT,
687 grpc_core::ExecutorJobType::LONG);
688 } else {
689 old_count = g_uncovered_notifications_pending++;
690 p = g_backup_poller;
691 g_backup_poller_mu->Unlock();
692 }
693 GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " add " << tcp
694 << " cnt " << old_count - 1 << "->" << old_count;
695 grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
696 }
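// Note on the counting scheme above: g_uncovered_notifications_pending holds one count
// that keeps the backup poller itself alive plus one count per fd currently being
// "covered". The first uncovered write therefore sets it to 2; drop_uncovered()
// removes an fd's count, and run_poller() shuts the poller down once only the
// keep-alive count (== 1) remains.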
697
698 static void notify_on_read(grpc_tcp* tcp) {
699 GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_read";
700 grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
701 }
702
703 static void notify_on_write(grpc_tcp* tcp) {
704 GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_write";
705 if (!grpc_event_engine_run_in_background()) {
706 cover_self(tcp);
707 }
708 grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
709 }
710
711 static void tcp_drop_uncovered_then_handle_write(void* arg,
712 grpc_error_handle error) {
713 GRPC_TRACE_LOG(tcp, INFO)
714 << "TCP:" << arg << " got_write: " << grpc_core::StatusToString(error);
715 drop_uncovered(static_cast<grpc_tcp*>(arg));
716 tcp_handle_write(arg, error);
717 }
718
719 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
720 tcp->bytes_read_this_round += static_cast<double>(bytes);
721 }
722
723 static void finish_estimate(grpc_tcp* tcp) {
724 // If we read >80% of the target buffer in one read loop, increase the size
725 // of the target buffer to either the amount read, or twice its previous
726 // value
727 if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
728 tcp->target_length =
729 std::max(2 * tcp->target_length, tcp->bytes_read_this_round);
730 } else {
731 tcp->target_length =
732 0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
733 }
734 tcp->bytes_read_this_round = 0;
735 }
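// Worked example of the adaptation above (illustrative numbers): with target_length =
// 8192, a round that reads 7000 bytes exceeds the 80% mark (6553.6), so the target
// jumps to max(2 * 8192, 7000) = 16384; a round that reads only 1000 bytes instead
// decays the target slowly to 0.99 * 8192 + 0.01 * 1000 ~= 8120.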
736
737 static grpc_error_handle tcp_annotate_error(grpc_error_handle src_error,
738 grpc_tcp* tcp) {
739 return grpc_error_set_int(
740 grpc_error_set_int(src_error, grpc_core::StatusIntProperty::kFd, tcp->fd),
741 // All tcp errors are marked with UNAVAILABLE so that application may
742 // choose to retry.
743 grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE);
744 }
745
746 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
747 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
748
749 static void tcp_free(grpc_tcp* tcp) {
750 grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
751 "tcp_unref_orphan");
752 grpc_slice_buffer_destroy(&tcp->last_read_buffer);
753 tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
754 GRPC_ERROR_CREATE("endpoint destroyed"));
755 tcp->outgoing_buffer_arg = nullptr;
756 delete tcp;
757 }
758
759 #ifndef NDEBUG
760 #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
761 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
762 static void tcp_unref(grpc_tcp* tcp, const char* reason,
763 const grpc_core::DebugLocation& debug_location) {
764 if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
765 tcp_free(tcp);
766 }
767 }
768
769 static void tcp_ref(grpc_tcp* tcp, const char* reason,
770 const grpc_core::DebugLocation& debug_location) {
771 tcp->refcount.Ref(debug_location, reason);
772 }
773 #else
774 #define TCP_UNREF(tcp, reason) tcp_unref((tcp))
775 #define TCP_REF(tcp, reason) tcp_ref((tcp))
776 static void tcp_unref(grpc_tcp* tcp) {
777 if (GPR_UNLIKELY(tcp->refcount.Unref())) {
778 tcp_free(tcp);
779 }
780 }
781
782 static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
783 #endif
784
785 static void tcp_destroy(grpc_endpoint* ep) {
786 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
787 ZerocopyDisableAndWaitForRemaining(tcp);
788 grpc_fd_shutdown(tcp->em_fd, absl::UnavailableError("endpoint shutdown"));
789 if (grpc_event_engine_can_track_errors()) {
790 gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
791 grpc_fd_set_error(tcp->em_fd);
792 }
793 tcp->read_mu.Lock();
794 tcp->memory_owner.Reset();
795 tcp->read_mu.Unlock();
796 TCP_UNREF(tcp, "destroy");
797 }
798
799 static void perform_reclamation(grpc_tcp* tcp)
800 ABSL_LOCKS_EXCLUDED(tcp->read_mu) {
801 GRPC_TRACE_LOG(resource_quota, INFO)
802 << "TCP: benign reclamation to free memory";
803 tcp->read_mu.Lock();
804 if (tcp->incoming_buffer != nullptr) {
805 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
806 }
807 tcp->has_posted_reclaimer = false;
808 tcp->read_mu.Unlock();
809 }
810
811 static void maybe_post_reclaimer(grpc_tcp* tcp)
812 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
813 if (!tcp->has_posted_reclaimer) {
814 tcp->has_posted_reclaimer = true;
815 TCP_REF(tcp, "posted_reclaimer");
816 tcp->memory_owner.PostReclaimer(
817 grpc_core::ReclamationPass::kBenign,
818 [tcp](absl::optional<grpc_core::ReclamationSweep> sweep) {
819 if (sweep.has_value()) {
820 perform_reclamation(tcp);
821 }
822 TCP_UNREF(tcp, "posted_reclaimer");
823 });
824 }
825 }
826
827 static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
828 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
829 grpc_closure* cb = tcp->read_cb;
830 if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
831 LOG(INFO) << "TCP:" << tcp << " call_cb " << cb << " " << cb->cb << ":"
832 << cb->cb_arg;
833 size_t i;
834 LOG(INFO) << "READ " << tcp << " (peer=" << tcp->peer_string
835 << ") error=" << grpc_core::StatusToString(error);
836 if (ABSL_VLOG_IS_ON(2)) {
837 for (i = 0; i < tcp->incoming_buffer->count; i++) {
838 char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
839 GPR_DUMP_HEX | GPR_DUMP_ASCII);
840 VLOG(2) << "READ DATA: " << dump;
841 gpr_free(dump);
842 }
843 }
844 }
845 }
846
847 static void update_rcvlowat(grpc_tcp* tcp)
848 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
849 if (!grpc_core::IsTcpRcvLowatEnabled()) return;
850
851 // TODO(ctiller): Check if supported by OS.
852 // TODO(ctiller): Allow some adjustments instead of hardcoding things.
853
854 static constexpr int kRcvLowatMax = 16 * 1024 * 1024;
855 static constexpr int kRcvLowatThreshold = 16 * 1024;
856
857 int remaining = std::min(static_cast<int>(tcp->incoming_buffer->length),
858 tcp->min_progress_size);
859
860 remaining = std::min(remaining, kRcvLowatMax);
861
862 // Setting SO_RCVLOWAT for small quantities does not save on CPU.
863 if (remaining < 2 * kRcvLowatThreshold) {
864 remaining = 0;
865 }
866
867 // Decrement remaining by kRcvLowatThreshold. This would have the effect of
868 // waking up a little early. It would help with latency because some bytes
869 // may arrive while we execute the recvmsg syscall after waking up.
870 if (remaining > 0) {
871 remaining -= kRcvLowatThreshold;
872 }
873
874 // We still do not know the RPC size. Do not set SO_RCVLOWAT.
875 if (tcp->set_rcvlowat <= 1 && remaining <= 1) return;
876
877 // Previous value is still valid. No change needed in SO_RCVLOWAT.
878 if (tcp->set_rcvlowat == remaining) {
879 return;
880 }
881 if (setsockopt(tcp->fd, SOL_SOCKET, SO_RCVLOWAT, &remaining,
882 sizeof(remaining)) != 0) {
883 LOG(ERROR) << "Cannot set SO_RCVLOWAT on fd=" << tcp->fd
884 << " err=" << grpc_core::StrError(errno);
885 return;
886 }
887 tcp->set_rcvlowat = remaining;
888 }
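// Worked example (illustrative numbers): with min_progress_size = 65536 and a 128 KiB
// incoming buffer, remaining starts at 65536, stays under kRcvLowatMax, is not zeroed
// (>= 2 * 16384), and is reduced by kRcvLowatThreshold to 49152, which becomes the new
// SO_RCVLOWAT. With min_progress_size = 1 the value collapses to 0 and, if no lowat was
// previously set, the setsockopt() call is skipped entirely.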
889
890 // Returns true if data available to read or error other than EAGAIN.
891 #define MAX_READ_IOVEC 64
892 static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
893 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
894 GRPC_LATENT_SEE_INNER_SCOPE("tcp_do_read");
895 GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " do_read";
896 struct msghdr msg;
897 struct iovec iov[MAX_READ_IOVEC];
898 ssize_t read_bytes;
899 size_t total_read_bytes = 0;
900 size_t iov_len =
901 std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
902 #ifdef GRPC_LINUX_ERRQUEUE
903 constexpr size_t cmsg_alloc_space =
904 CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
905 #else
906 constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
907 #endif // GRPC_LINUX_ERRQUEUE
908 char cmsgbuf[cmsg_alloc_space];
909 for (size_t i = 0; i < iov_len; i++) {
910 iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
911 iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
912 }
913
914 CHECK_NE(tcp->incoming_buffer->length, 0u);
915 DCHECK_GT(tcp->min_progress_size, 0);
916
917 do {
918 // Assume there is something on the queue. If we receive TCP_INQ from
919 // kernel, we will update this value, otherwise, we have to assume there is
920 // always something to read until we get EAGAIN.
921 tcp->inq = 1;
922
923 msg.msg_name = nullptr;
924 msg.msg_namelen = 0;
925 msg.msg_iov = iov;
926 msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
927 if (tcp->inq_capable) {
928 msg.msg_control = cmsgbuf;
929 msg.msg_controllen = sizeof(cmsgbuf);
930 } else {
931 msg.msg_control = nullptr;
932 msg.msg_controllen = 0;
933 }
934 msg.msg_flags = 0;
935
936 grpc_core::global_stats().IncrementTcpReadOffer(
937 tcp->incoming_buffer->length);
938 grpc_core::global_stats().IncrementTcpReadOfferIovSize(
939 tcp->incoming_buffer->count);
940
941 do {
942 grpc_core::global_stats().IncrementSyscallRead();
943 read_bytes = recvmsg(tcp->fd, &msg, 0);
944 } while (read_bytes < 0 && errno == EINTR);
945
946 if (read_bytes < 0 && errno == EAGAIN) {
947 // NB: After calling call_read_cb a parallel call of the read handler may
948 // be running.
949 if (total_read_bytes > 0) {
950 break;
951 }
952 finish_estimate(tcp);
953 tcp->inq = 0;
954 return false;
955 }
956
957 // We have read something in previous reads. We need to deliver those
958 // bytes to the upper layer.
959 if (read_bytes <= 0 && total_read_bytes >= 1) {
960 if (read_bytes < 0) {
961 LogCommonIOErrors("recvmsg", errno);
962 }
963 tcp->inq = 1;
964 break;
965 }
966
967 if (read_bytes <= 0) {
968 // 0 read size ==> end of stream
969 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
970 if (read_bytes == 0) {
971 *error = tcp_annotate_error(absl::InternalError("Socket closed"), tcp);
972 } else {
973 *error =
974 tcp_annotate_error(absl::InternalError(absl::StrCat(
975 "recvmsg:", grpc_core::StrError(errno))),
976 tcp);
977 }
978 return true;
979 }
980
981 grpc_core::global_stats().IncrementTcpReadSize(read_bytes);
982 add_to_estimate(tcp, static_cast<size_t>(read_bytes));
983 DCHECK((size_t)read_bytes <=
984 tcp->incoming_buffer->length - total_read_bytes);
985
986 #ifdef GRPC_HAVE_TCP_INQ
987 if (tcp->inq_capable) {
988 DCHECK(!(msg.msg_flags & MSG_CTRUNC));
989 struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
990 for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
991 if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
992 cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
993 tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
994 break;
995 }
996 }
997 }
998 #endif // GRPC_HAVE_TCP_INQ
999
1000 total_read_bytes += read_bytes;
1001 if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
1002 break;
1003 }
1004
1005 // We had a partial read, and still have space to read more data.
1006 // So, adjust IOVs and try to read more.
1007 size_t remaining = read_bytes;
1008 size_t j = 0;
1009 for (size_t i = 0; i < iov_len; i++) {
1010 if (remaining >= iov[i].iov_len) {
1011 remaining -= iov[i].iov_len;
1012 continue;
1013 }
1014 if (remaining > 0) {
1015 iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
1016 iov[j].iov_len = iov[i].iov_len - remaining;
1017 remaining = 0;
1018 } else {
1019 iov[j].iov_base = iov[i].iov_base;
1020 iov[j].iov_len = iov[i].iov_len;
1021 }
1022 ++j;
1023 }
1024 iov_len = j;
1025 } while (true);
1026
1027 if (tcp->inq == 0) {
1028 finish_estimate(tcp);
1029 }
1030
1031 DCHECK_GT(total_read_bytes, 0u);
1032 *error = absl::OkStatus();
1033 if (grpc_core::IsTcpFrameSizeTuningEnabled()) {
1034 // Update min progress size based on the total number of bytes read in
1035 // this round.
1036 tcp->min_progress_size -= total_read_bytes;
1037 if (tcp->min_progress_size > 0) {
1038 // There are still some bytes left to be read before we can signal
1039 // the read as complete. Append the bytes read so far into
1040 // last_read_buffer which serves as a staging buffer. Return false
1041 // to indicate tcp_handle_read needs to be scheduled again.
1042 grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1043 &tcp->last_read_buffer);
1044 return false;
1045 } else {
1046 // The required number of bytes have been read. Append the bytes
1047 // read in this round into last_read_buffer. Then swap last_read_buffer
1048 // and incoming_buffer. Now incoming buffer contains all the bytes
1049 // read since the start of the last tcp_read operation. last_read_buffer
1050 // would contain any spare space left in the incoming buffer. This
1051 // space will be used in the next tcp_read operation.
1052 tcp->min_progress_size = 1;
1053 grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1054 &tcp->last_read_buffer);
1055 grpc_slice_buffer_swap(&tcp->last_read_buffer, tcp->incoming_buffer);
1056 return true;
1057 }
1058 }
1059 if (total_read_bytes < tcp->incoming_buffer->length) {
1060 grpc_slice_buffer_trim_end(tcp->incoming_buffer,
1061 tcp->incoming_buffer->length - total_read_bytes,
1062 &tcp->last_read_buffer);
1063 }
1064 return true;
1065 }
1066
1067 static void maybe_make_read_slices(grpc_tcp* tcp)
1068 ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
1069 static const int kBigAlloc = 64 * 1024;
1070 static const int kSmallAlloc = 8 * 1024;
1071 if (tcp->incoming_buffer->length <
1072 std::max<size_t>(tcp->min_progress_size, 1)) {
1073 size_t allocate_length = tcp->min_progress_size;
1074 const size_t target_length = static_cast<size_t>(tcp->target_length);
1075 // If memory pressure is low and we think there will be more than
1076 // min_progress_size bytes to read, allocate a bit more.
1077 const bool low_memory_pressure =
1078 tcp->memory_owner.GetPressureInfo().pressure_control_value < 0.8;
1079 if (low_memory_pressure && target_length > allocate_length) {
1080 allocate_length = target_length;
1081 }
1082 int extra_wanted = std::max<int>(
1083 1, allocate_length - static_cast<int>(tcp->incoming_buffer->length));
1084 if (extra_wanted >=
1085 (low_memory_pressure ? kSmallAlloc * 3 / 2 : kBigAlloc)) {
1086 while (extra_wanted > 0) {
1087 extra_wanted -= kBigAlloc;
1088 grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1089 tcp->memory_owner.MakeSlice(kBigAlloc));
1090 grpc_core::global_stats().IncrementTcpReadAlloc64k();
1091 }
1092 } else {
1093 while (extra_wanted > 0) {
1094 extra_wanted -= kSmallAlloc;
1095 grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1096 tcp->memory_owner.MakeSlice(kSmallAlloc));
1097 grpc_core::global_stats().IncrementTcpReadAlloc8k();
1098 }
1099 }
1100 maybe_post_reclaimer(tcp);
1101 }
1102 }
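// Worked example of the allocation policy above (illustrative numbers): with an empty
// incoming buffer, min_progress_size = 1, target_length = 20000 and low memory
// pressure, allocate_length becomes 20000 and extra_wanted = 20000 >= kSmallAlloc * 3 /
// 2, so a single 64 KiB slice is appended. Under high memory pressure target_length is
// ignored, extra_wanted drops to 1, and a single 8 KiB slice is appended instead.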
1103
1104 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
1105 grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1106 GRPC_TRACE_LOG(tcp, INFO)
1107 << "TCP:" << tcp << " got_read: " << grpc_core::StatusToString(error);
1108 tcp->read_mu.Lock();
1109 grpc_error_handle tcp_read_error;
1110 if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) {
1111 maybe_make_read_slices(tcp);
1112 if (!tcp_do_read(tcp, &tcp_read_error)) {
1113 // Maybe update rcv lowat value based on the number of bytes read in this
1114 // round.
1115 update_rcvlowat(tcp);
1116 tcp->read_mu.Unlock();
1117 // We've consumed the edge, request a new one
1118 notify_on_read(tcp);
1119 return;
1120 }
1121 tcp_trace_read(tcp, tcp_read_error);
1122 } else {
1123 if (!tcp->memory_owner.is_valid() && error.ok()) {
1124 tcp_read_error =
1125 tcp_annotate_error(absl::InternalError("Socket closed"), tcp);
1126 } else {
1127 tcp_read_error = error;
1128 }
1129 grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
1130 grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
1131 }
1132 // update_rcvlowat() needs to be called at the end of the current read
1133 // operation to ensure the right SO_RCVLOWAT value is set for the next read.
1134 // Otherwise the next endpoint read operation may get stuck indefinitely
1135 // because the previously set rcv lowat value will persist and the socket may
1136 // erroneously be considered not ready for reading.
1137 update_rcvlowat(tcp);
1138 grpc_closure* cb = tcp->read_cb;
1139 tcp->read_cb = nullptr;
1140 tcp->incoming_buffer = nullptr;
1141 tcp->read_mu.Unlock();
1142 grpc_core::Closure::Run(DEBUG_LOCATION, cb, tcp_read_error);
1143 TCP_UNREF(tcp, "read");
1144 }
1145
1146 static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
1147 grpc_closure* cb, bool urgent, int min_progress_size) {
1148 grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1149 CHECK_EQ(tcp->read_cb, nullptr);
1150 tcp->read_cb = cb;
1151 tcp->read_mu.Lock();
1152 tcp->incoming_buffer = incoming_buffer;
1153 tcp->min_progress_size = grpc_core::IsTcpFrameSizeTuningEnabled()
1154 ? std::max(min_progress_size, 1)
1155 : 1;
1156 grpc_slice_buffer_reset_and_unref(incoming_buffer);
1157 grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
1158 TCP_REF(tcp, "read");
1159 if (tcp->is_first_read) {
1160 tcp->read_mu.Unlock();
1161 // Endpoint read called for the very first time. Register read callback with
1162 // the polling engine
1163 tcp->is_first_read = false;
1164 notify_on_read(tcp);
1165 } else if (!urgent && tcp->inq == 0) {
1166 tcp->read_mu.Unlock();
1167 // Upper layer asked to read more but we know there is no pending data
1168 // to read from previous reads. So, wait for POLLIN.
1169 //
1170 notify_on_read(tcp);
1171 } else {
1172 tcp->read_mu.Unlock();
1173 // Not the first time. We may or may not have more bytes available. In any
1174 // case call tcp->read_done_closure (i.e. tcp_handle_read()) which does the
1175 // right thing (i.e. calls tcp_do_read(), which either reads the available
1176 // bytes or calls notify_on_read() to be notified when new bytes become
1177 // available).
1178 grpc_core::Closure::Run(DEBUG_LOCATION, &tcp->read_done_closure,
1179 absl::OkStatus());
1180 }
1181 }
1182
1183 // A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
1184 // of bytes sent.
1185 ssize_t tcp_send(int fd, const struct msghdr* msg, int* saved_errno,
1186 int additional_flags = 0) {
1187 GRPC_LATENT_SEE_INNER_SCOPE("tcp_send");
1188 ssize_t sent_length;
1189 do {
1190 // TODO(klempner): Cork if this is a partial write
1191 grpc_core::global_stats().IncrementSyscallWrite();
1192 sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
1193 } while (sent_length < 0 && (*saved_errno = errno) == EINTR);
1194 return sent_length;
1195 }
1196
1197 /// This is to be called if outgoing_buffer_arg is not null. On linux platforms,
1198 /// this will call sendmsg with socket options set to collect timestamps inside
1199 /// the kernel. On return, sent_length is set to the return value of the sendmsg
1200 /// call. Returns false if setting the socket options failed. This is not
1201 /// implemented for non-linux platforms currently, and crashes out.
1202 ///
1203 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1204 size_t sending_length,
1205 ssize_t* sent_length, int* saved_errno,
1206 int additional_flags = 0);
1207
1208 /// The callback function to be invoked when we get an error on the socket.
1209 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error_handle error);
1210
1211 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1212 grpc_tcp* tcp, grpc_slice_buffer* buf);
1213
1214 #ifdef GRPC_LINUX_ERRQUEUE
1215 static bool process_errors(grpc_tcp* tcp);
1216
1217 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1218 grpc_tcp* tcp, grpc_slice_buffer* buf) {
1219 TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1220 const bool use_zerocopy =
1221 tcp->tcp_zerocopy_send_ctx.enabled() &&
1222 tcp->tcp_zerocopy_send_ctx.threshold_bytes() < buf->length;
1223 if (use_zerocopy) {
1224 zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1225 if (zerocopy_send_record == nullptr) {
1226 process_errors(tcp);
1227 zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1228 }
1229 if (zerocopy_send_record != nullptr) {
1230 zerocopy_send_record->PrepareForSends(buf);
1231 DCHECK_EQ(buf->count, 0u);
1232 DCHECK_EQ(buf->length, 0u);
1233 tcp->outgoing_byte_idx = 0;
1234 tcp->outgoing_buffer = nullptr;
1235 }
1236 }
1237 return zerocopy_send_record;
1238 }
1239
1240 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {
1241 tcp->tcp_zerocopy_send_ctx.Shutdown();
1242 while (!tcp->tcp_zerocopy_send_ctx.AllSendRecordsEmpty()) {
1243 process_errors(tcp);
1244 }
1245 }
1246
1247 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1248 size_t sending_length,
1249 ssize_t* sent_length, int* saved_errno,
1250 int additional_flags) {
1251 if (!tcp->socket_ts_enabled) {
1252 uint32_t opt = grpc_core::kTimestampingSocketOptions;
1253 if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
1254 static_cast<void*>(&opt), sizeof(opt)) != 0) {
1255 GRPC_TRACE_LOG(tcp, ERROR)
1256 << "Failed to set timestamping options on the socket.";
1257 return false;
1258 }
1259 tcp->bytes_counter = -1;
1260 tcp->socket_ts_enabled = true;
1261 }
1262 // Set control message to indicate that you want timestamps.
1263 union {
1264 char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
1265 struct cmsghdr align;
1266 } u;
1267 cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
1268 cmsg->cmsg_level = SOL_SOCKET;
1269 cmsg->cmsg_type = SO_TIMESTAMPING;
1270 cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
1271 *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
1272 grpc_core::kTimestampingRecordingOptions;
1273 msg->msg_control = u.cmsg_buf;
1274 msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));
1275
1276 // If there was an error on sendmsg the logic in tcp_flush will handle it.
1277 ssize_t length = tcp_send(tcp->fd, msg, saved_errno, additional_flags);
1278 *sent_length = length;
1279 // Only save timestamps if all the bytes were taken by sendmsg.
1280 if (sending_length == static_cast<size_t>(length)) {
1281 tcp->tb_list.AddNewEntry(static_cast<uint32_t>(tcp->bytes_counter + length),
1282 tcp->fd, tcp->outgoing_buffer_arg);
1283 tcp->outgoing_buffer_arg = nullptr;
1284 }
1285 return true;
1286 }
1287
1288 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1289 TcpZerocopySendRecord* record,
1290 uint32_t seq, const char* tag);
1291 // Reads \a cmsg to process zerocopy control messages.
1292 static void process_zerocopy(grpc_tcp* tcp, struct cmsghdr* cmsg) {
1293 DCHECK(cmsg);
1294 auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(cmsg));
1295 DCHECK_EQ(serr->ee_errno, 0u);
1296 DCHECK(serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY);
1297 const uint32_t lo = serr->ee_info;
1298 const uint32_t hi = serr->ee_data;
1299 for (uint32_t seq = lo; seq <= hi; ++seq) {
1300 // TODO(arjunroy): It's likely that lo and hi refer to zerocopy sequence
1301 // numbers that are generated by a single call to grpc_endpoint_write; ie.
1302 // we can batch the unref operation. So, check if record is the same for
1303 // both; if so, batch the unref/put.
1304 TcpZerocopySendRecord* record =
1305 tcp->tcp_zerocopy_send_ctx.ReleaseSendRecord(seq);
1306 DCHECK(record);
1307 UnrefMaybePutZerocopySendRecord(tcp, record, seq, "CALLBACK RCVD");
1308 }
1309 if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterFree()) {
1310 grpc_fd_set_writable(tcp->em_fd);
1311 }
1312 }
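// Note: a single MSG_ZEROCOPY completion can cover a range of sequence numbers; e.g.
// serr->ee_info = 3 and serr->ee_data = 5 releases the records registered for sends
// 3, 4 and 5 in the loop above.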
1313
1314 // Whether the cmsg received from error queue is of the IPv4 or IPv6 levels.
1315 static bool CmsgIsIpLevel(const cmsghdr& cmsg) {
1316 return (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR) ||
1317 (cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR);
1318 }
1319
1320 static bool CmsgIsZeroCopy(const cmsghdr& cmsg) {
1321 if (!CmsgIsIpLevel(cmsg)) {
1322 return false;
1323 }
1324 auto serr = reinterpret_cast<const sock_extended_err*> CMSG_DATA(&cmsg);
1325 return serr->ee_errno == 0 && serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY;
1326 }
1327
1328 /// Reads \a cmsg to derive timestamps from the control messages. If a valid
1329 /// timestamp is found, the traced buffer list is updated with this timestamp.
1330 /// The caller of this function should be looping on the control messages found
1331 /// in \a msg. \a cmsg should point to the control message that the caller wants
1332 /// processed.
1333 /// On return, a pointer to a control message is returned. On the next
1334 /// iteration, CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg.
1335 struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
1336 struct cmsghdr* cmsg) {
1337 auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
1338 cmsghdr* opt_stats = nullptr;
1339 if (next_cmsg == nullptr) {
1340 GRPC_TRACE_LOG(tcp, ERROR) << "Received timestamp without extended error";
1341 return cmsg;
1342 }
1343
1344 // Check if next_cmsg is an OPT_STATS msg
1345 if (next_cmsg->cmsg_level == SOL_SOCKET &&
1346 next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
1347 opt_stats = next_cmsg;
1348 next_cmsg = CMSG_NXTHDR(msg, opt_stats);
1349 if (next_cmsg == nullptr) {
1350 GRPC_TRACE_LOG(tcp, ERROR) << "Received timestamp without extended error";
1351 return opt_stats;
1352 }
1353 }
1354
1355 if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
1356 !(next_cmsg->cmsg_type == IP_RECVERR ||
1357 next_cmsg->cmsg_type == IPV6_RECVERR)) {
1358 GRPC_TRACE_LOG(tcp, ERROR) << "Unexpected control message";
1359 return cmsg;
1360 }
1361
1362 auto tss =
1363 reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
1364 auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
1365 if (serr->ee_errno != ENOMSG ||
1366 serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
1367 LOG(ERROR) << "Unexpected control message";
1368 return cmsg;
1369 }
1370 tcp->tb_list.ProcessTimestamp(serr, opt_stats, tss);
1371 return next_cmsg;
1372 }
1373
1374 /// For linux platforms, reads the socket's error queue and processes error
1375 /// messages from the queue.
1376 ///
1377 static bool process_errors(grpc_tcp* tcp) {
1378 GRPC_LATENT_SEE_INNER_SCOPE("process_errors");
1379 bool processed_err = false;
1380 struct iovec iov;
1381 iov.iov_base = nullptr;
1382 iov.iov_len = 0;
1383 struct msghdr msg;
1384 msg.msg_name = nullptr;
1385 msg.msg_namelen = 0;
1386 msg.msg_iov = &iov;
1387 msg.msg_iovlen = 0;
1388 msg.msg_flags = 0;
1389 // Allocate enough space so we don't need to keep increasing this as the
1390 // size of OPT_STATS increases.
1391 constexpr size_t cmsg_alloc_space =
1392 CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
1393 CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
1394 CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
1395 // Allocate aligned space for cmsgs received along with timestamps
1396 union {
1397 char rbuf[cmsg_alloc_space];
1398 struct cmsghdr align;
1399 } aligned_buf;
1400 msg.msg_control = aligned_buf.rbuf;
1401 int r, saved_errno;
  while (true) {
    msg.msg_controllen = sizeof(aligned_buf.rbuf);
    do {
      r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
      saved_errno = errno;
    } while (r < 0 && saved_errno == EINTR);

    if (r == -1 && saved_errno == EAGAIN) {
      return processed_err;  // No more errors to process
    }
    if (r == -1) {
      LogCommonIOErrors("recvmsg(MSG_ERRQUEUE)", saved_errno);
      grpc_core::global_stats().IncrementMsgErrqueueErrorCount();
      return processed_err;
    }
    if (GPR_UNLIKELY((msg.msg_flags & MSG_CTRUNC) != 0)) {
      LOG(ERROR) << "Error message was truncated.";
    }

    if (msg.msg_controllen == 0) {
      // There was no control message found. It was probably spurious.
      return processed_err;
    }
    bool seen = false;
    for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
         cmsg = CMSG_NXTHDR(&msg, cmsg)) {
      if (CmsgIsZeroCopy(*cmsg)) {
        process_zerocopy(tcp, cmsg);
        seen = true;
        processed_err = true;
      } else if (cmsg->cmsg_level == SOL_SOCKET &&
                 cmsg->cmsg_type == SCM_TIMESTAMPING) {
        cmsg = process_timestamp(tcp, &msg, cmsg);
        seen = true;
        processed_err = true;
      } else {
        // Got a control message that is not a timestamp or zerocopy. Don't
        // know how to handle this.
        GRPC_TRACE_LOG(tcp, INFO)
            << "unknown control message cmsg_level:" << cmsg->cmsg_level
            << " cmsg_type:" << cmsg->cmsg_type;
        return processed_err;
      }
    }
    if (!seen) {
      return processed_err;
    }
  }
}

static void tcp_handle_error(void* arg /* grpc_tcp */,
                             grpc_error_handle error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " got_error: " << error;

  if (!error.ok() ||
      static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
    // We aren't going to register to hear on error anymore, so it is safe to
    // unref.
    TCP_UNREF(tcp, "error-tracking");
    return;
  }

  // We are still interested in collecting timestamps, so let's try reading
  // them.
  bool processed = process_errors(tcp);
  // This might not be a timestamps error. Set the read and write closures to
  // be ready.
  if (!processed) {
    grpc_fd_set_readable(tcp->em_fd);
    grpc_fd_set_writable(tcp->em_fd);
  }
  grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
}

#else  // GRPC_LINUX_ERRQUEUE
static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
    grpc_tcp* /*tcp*/, grpc_slice_buffer* /*buf*/) {
  return nullptr;
}

static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* /*tcp*/) {}

static bool tcp_write_with_timestamps(grpc_tcp* /*tcp*/, struct msghdr* /*msg*/,
                                      size_t /*sending_length*/,
                                      ssize_t* /*sent_length*/,
                                      int* /*saved_errno*/,
                                      int /*additional_flags*/) {
  LOG(ERROR) << "Write with timestamps not supported for this platform";
  CHECK(0);
  return false;
}

static void tcp_handle_error(void* /*arg*/ /* grpc_tcp */,
                             grpc_error_handle /*error*/) {
  LOG(ERROR) << "Error handling is not supported for this platform";
  CHECK(0);
}
#endif  // GRPC_LINUX_ERRQUEUE

// If outgoing_buffer_arg is filled, shuts down the list early, so that any
// release operations needed can be performed on the arg.
void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
  if (tcp->outgoing_buffer_arg) {
    tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
                          GRPC_ERROR_CREATE("TracedBuffer list shutdown"));
    tcp->outgoing_buffer_arg = nullptr;
  }
}

#if defined(IOV_MAX) && IOV_MAX < 260
#define MAX_WRITE_IOVEC IOV_MAX
#else
#define MAX_WRITE_IOVEC 260
#endif
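// Cap on the number of iovec entries passed to a single sendmsg() call: at
// most 260, and never more than the platform's IOV_MAX when that is smaller.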
msg_iovlen_type TcpZerocopySendRecord::PopulateIovs(size_t* unwind_slice_idx,
                                                    size_t* unwind_byte_idx,
                                                    size_t* sending_length,
                                                    iovec* iov) {
  msg_iovlen_type iov_size;
  *unwind_slice_idx = out_offset_.slice_idx;
  *unwind_byte_idx = out_offset_.byte_idx;
  for (iov_size = 0;
       out_offset_.slice_idx != buf_.count && iov_size != MAX_WRITE_IOVEC;
       iov_size++) {
    iov[iov_size].iov_base =
        GRPC_SLICE_START_PTR(buf_.slices[out_offset_.slice_idx]) +
        out_offset_.byte_idx;
    iov[iov_size].iov_len =
        GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]) -
        out_offset_.byte_idx;
    *sending_length += iov[iov_size].iov_len;
    ++(out_offset_.slice_idx);
    out_offset_.byte_idx = 0;
  }
  DCHECK_GT(iov_size, 0u);
  return iov_size;
}

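// After a partial send, walk the offsets backwards over the unsent trailing
// bytes so that the next flush resumes from the first byte the kernel did not
// actually accept.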
void TcpZerocopySendRecord::UpdateOffsetForBytesSent(size_t sending_length,
                                                     size_t actually_sent) {
  size_t trailing = sending_length - actually_sent;
  while (trailing > 0) {
    size_t slice_length;
    out_offset_.slice_idx--;
    slice_length = GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]);
    if (slice_length > trailing) {
      out_offset_.byte_idx = slice_length - trailing;
      break;
    } else {
      trailing -= slice_length;
    }
  }
}

// returns true if done, false if pending; if returning true, *error is set
static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
                                  grpc_error_handle* error) {
  msg_iovlen_type iov_size;
  ssize_t sent_length = 0;
  size_t sending_length;
  size_t unwind_slice_idx;
  size_t unwind_byte_idx;
  bool tried_sending_message;
  int saved_errno;
  msghdr msg;
  // iov consumes a large amount of space. Keep it as the last item on the
  // stack to improve locality; we expect only its first elements to be
  // populated in most cases.
  iovec iov[MAX_WRITE_IOVEC];
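  // Keep populating iovecs and calling sendmsg(MSG_ZEROCOPY) until either all
  // slices have been handed to the kernel, the socket would block (EAGAIN or
  // ENOBUFS: return false and retry when writable), or a hard error occurs.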
  while (true) {
    sending_length = 0;
    iov_size = record->PopulateIovs(&unwind_slice_idx, &unwind_byte_idx,
                                    &sending_length, iov);
    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_flags = 0;
    tried_sending_message = false;
    // Before calling sendmsg (with or without timestamps): we take a single
    // ref on the zerocopy send record.
    tcp->tcp_zerocopy_send_ctx.NoteSend(record);
    saved_errno = 0;
    if (tcp->outgoing_buffer_arg != nullptr) {
      if (!tcp->ts_capable ||
          !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
                                     &saved_errno, MSG_ZEROCOPY)) {
        // We could not set socket options to collect Fathom timestamps.
        // Fall back to writing without timestamps.
        tcp->ts_capable = false;
        tcp_shutdown_buffer_list(tcp);
      } else {
        tried_sending_message = true;
      }
    }
    if (!tried_sending_message) {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;
      grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
      grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
      sent_length = tcp_send(tcp->fd, &msg, &saved_errno, MSG_ZEROCOPY);
    }
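    // Let the zerocopy context update its out-of-memory bookkeeping based on
    // whether this send returned ENOBUFS; if it asks for it, mark the fd
    // writable so the pending write gets retried.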
    if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterSend(
            saved_errno == ENOBUFS)) {
      grpc_fd_set_writable(tcp->em_fd);
    }
    if (sent_length < 0) {
      if (saved_errno != EAGAIN) {
        LogCommonIOErrors("sendmsg", saved_errno);
      }
      // If this particular send failed, drop ref taken earlier in this method.
      tcp->tcp_zerocopy_send_ctx.UndoSend();
      if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
        record->UnwindIfThrottled(unwind_slice_idx, unwind_byte_idx);
        return false;
      } else {
        *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
        tcp_shutdown_buffer_list(tcp);
        return true;
      }
    }
    grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
    tcp->bytes_counter += sent_length;
    record->UpdateOffsetForBytesSent(sending_length,
                                     static_cast<size_t>(sent_length));
    if (record->AllSlicesSent()) {
      *error = absl::OkStatus();
      return true;
    }
  }
}

static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
                                            TcpZerocopySendRecord* record,
                                            uint32_t /*seq*/,
                                            const char* /*tag*/) {
  if (record->Unref()) {
    tcp->tcp_zerocopy_send_ctx.PutSendRecord(record);
  }
}

static bool tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
                               grpc_error_handle* error) {
  bool done = do_tcp_flush_zerocopy(tcp, record, error);
  if (done) {
    // Either we encountered an error, or we successfully sent all the bytes.
    // In either case, we're done with this record.
    UnrefMaybePutZerocopySendRecord(tcp, record, 0, "flush_done");
  }
  return done;
}

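// Non-zerocopy write path: builds iovecs from tcp->outgoing_buffer and calls
// sendmsg() in a loop. Returns true when everything is sent or a hard error
// occurs (*error is set), and false if the socket would block; in that case,
// slices that were already fully written are dropped from the buffer so the
// write can resume where it left off.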
static bool tcp_flush(grpc_tcp* tcp, grpc_error_handle* error) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  msg_iovlen_type iov_size;
  ssize_t sent_length = 0;
  size_t sending_length;
  size_t trailing;
  size_t unwind_slice_idx;
  size_t unwind_byte_idx;
  int saved_errno;

  // We always start at zero, because we eagerly unref and trim the slice
  // buffer as we write.
  size_t outgoing_slice_idx = 0;

  while (true) {
    sending_length = 0;
    unwind_slice_idx = outgoing_slice_idx;
    unwind_byte_idx = tcp->outgoing_byte_idx;
    for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
                       iov_size != MAX_WRITE_IOVEC;
         iov_size++) {
      iov[iov_size].iov_base =
          GRPC_SLICE_START_PTR(
              tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
          tcp->outgoing_byte_idx;
      iov[iov_size].iov_len =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
          tcp->outgoing_byte_idx;
      sending_length += iov[iov_size].iov_len;
      outgoing_slice_idx++;
      tcp->outgoing_byte_idx = 0;
    }
    CHECK_GT(iov_size, 0u);

    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_flags = 0;
    bool tried_sending_message = false;
    saved_errno = 0;
    if (tcp->outgoing_buffer_arg != nullptr) {
      if (!tcp->ts_capable ||
          !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
                                     &saved_errno)) {
        // We could not set socket options to collect Fathom timestamps.
        // Fall back to writing without timestamps.
        tcp->ts_capable = false;
        tcp_shutdown_buffer_list(tcp);
      } else {
        tried_sending_message = true;
      }
    }
    if (!tried_sending_message) {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;

      grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
      grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);

      sent_length = tcp_send(tcp->fd, &msg, &saved_errno);
    }

    if (sent_length < 0) {
      if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
        tcp->outgoing_byte_idx = unwind_byte_idx;
        // Unref and forget about all slices that have been fully written up
        // to this point.
        for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
          grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
        }
        return false;
      } else {
        *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      }
    }

    CHECK_EQ(tcp->outgoing_byte_idx, 0u);
    grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
    tcp->bytes_counter += sent_length;
    trailing = sending_length - static_cast<size_t>(sent_length);
    while (trailing > 0) {
      size_t slice_length;

      outgoing_slice_idx--;
      slice_length =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
      if (slice_length > trailing) {
        tcp->outgoing_byte_idx = slice_length - trailing;
        break;
      } else {
        trailing -= slice_length;
      }
    }
    if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
      *error = absl::OkStatus();
      grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
      return true;
    }
  }
}

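// Called when the fd becomes writable (or with an error). Resumes the pending
// flush, zerocopy or regular, and either re-arms write notification if the
// socket would still block or runs the user's write callback with the result.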
static void tcp_handle_write(void* arg /* grpc_tcp */,
                             grpc_error_handle error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  grpc_closure* cb;

  if (!error.ok()) {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    if (tcp->current_zerocopy_send != nullptr) {
      UnrefMaybePutZerocopySendRecord(tcp, tcp->current_zerocopy_send, 0,
                                      "handle_write_err");
      tcp->current_zerocopy_send = nullptr;
    }
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
    TCP_UNREF(tcp, "write");
    return;
  }
  bool flush_result =
      tcp->current_zerocopy_send != nullptr
          ? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
          : tcp_flush(tcp, &error);
  if (!flush_result) {
    GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
    notify_on_write(tcp);
    // tcp_flush does not populate error if it has returned false.
    DCHECK(error.ok());
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    tcp->current_zerocopy_send = nullptr;
    GRPC_TRACE_LOG(tcp, INFO) << "write: " << grpc_core::StatusToString(error);
    // No need to take a ref on error since tcp_flush provides a ref.
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
    TCP_UNREF(tcp, "write");
  }
}

static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
                      grpc_closure* cb, void* arg, int /*max_frame_size*/) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_error_handle error;
  TcpZerocopySendRecord* zerocopy_send_record = nullptr;

  grpc_core::EventLog::Append("tcp-write-outstanding", buf->length);

  if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
    size_t i;

    for (i = 0; i < buf->count; i++) {
      LOG(INFO) << "WRITE " << tcp << " (peer=" << tcp->peer_string << ")";
      if (ABSL_VLOG_IS_ON(2)) {
        char* data =
            grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
        VLOG(2) << "WRITE DATA: " << data;
        gpr_free(data);
      }
    }
  }

  CHECK_EQ(tcp->write_cb, nullptr);
  DCHECK_EQ(tcp->current_zerocopy_send, nullptr);

  if (buf->length == 0) {
    grpc_core::Closure::Run(
        DEBUG_LOCATION, cb,
        grpc_fd_is_shutdown(tcp->em_fd)
            ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
            : absl::OkStatus());
    tcp_shutdown_buffer_list(tcp);
    return;
  }

  zerocopy_send_record = tcp_get_send_zerocopy_record(tcp, buf);
  if (zerocopy_send_record == nullptr) {
    // Either not enough bytes, or couldn't allocate a zerocopy context.
    tcp->outgoing_buffer = buf;
    tcp->outgoing_byte_idx = 0;
  }
  tcp->outgoing_buffer_arg = arg;
  if (arg) {
    CHECK(grpc_event_engine_can_track_errors());
  }
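  // Attempt the write inline. Only if the flush cannot complete immediately
  // do we take a ref, stash the callback, and register for write-ready
  // notification.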
  bool flush_result =
      zerocopy_send_record != nullptr
          ? tcp_flush_zerocopy(tcp, zerocopy_send_record, &error)
          : tcp_flush(tcp, &error);
  if (!flush_result) {
    TCP_REF(tcp, "write");
    tcp->write_cb = cb;
    tcp->current_zerocopy_send = zerocopy_send_record;
    GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
    notify_on_write(tcp);
  } else {
    GRPC_TRACE_LOG(tcp, INFO) << "write: " << grpc_core::StatusToString(error);
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
  }
}

static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static void tcp_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}

static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
}

static absl::string_view tcp_get_peer(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->peer_string;
}

static absl::string_view tcp_get_local_address(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->local_address;
}

static int tcp_get_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->fd;
}

static bool tcp_can_track_err(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  if (!grpc_event_engine_can_track_errors()) {
    return false;
  }
  struct sockaddr addr;
  socklen_t len = sizeof(addr);
  if (getsockname(tcp->fd, &addr, &len) < 0) {
    return false;
  }
  return addr.sa_family == AF_INET || addr.sa_family == AF_INET6;
}

static const grpc_endpoint_vtable vtable = {tcp_read,
                                            tcp_write,
                                            tcp_add_to_pollset,
                                            tcp_add_to_pollset_set,
                                            tcp_delete_from_pollset_set,
                                            tcp_destroy,
                                            tcp_get_peer,
                                            tcp_get_local_address,
                                            tcp_get_fd,
                                            tcp_can_track_err};

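// Wraps an already-connected fd (owned by the iomgr grpc_fd wrapper) in a
// grpc_endpoint: sets up memory accounting, resolves the local address,
// optionally enables SO_ZEROCOPY and TCP_INQ, and registers for error-queue
// notifications when the polling engine can track errors.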
grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
                               const grpc_core::PosixTcpOptions& options,
                               absl::string_view peer_string) {
  grpc_tcp* tcp = new grpc_tcp(options);
  tcp->base.vtable = &vtable;
  tcp->peer_string = std::string(peer_string);
  tcp->fd = grpc_fd_wrapped_fd(em_fd);
  CHECK(options.resource_quota != nullptr);
  tcp->memory_owner =
      options.resource_quota->memory_quota()->CreateMemoryOwner();
  tcp->self_reservation = tcp->memory_owner.MakeReservation(sizeof(grpc_tcp));
  grpc_resolved_address resolved_local_addr;
  memset(&resolved_local_addr, 0, sizeof(resolved_local_addr));
  resolved_local_addr.len = sizeof(resolved_local_addr.addr);
  absl::StatusOr<std::string> addr_uri;
  if (getsockname(tcp->fd,
                  reinterpret_cast<sockaddr*>(resolved_local_addr.addr),
                  &resolved_local_addr.len) < 0 ||
      !(addr_uri = grpc_sockaddr_to_uri(&resolved_local_addr)).ok()) {
    tcp->local_address = "";
  } else {
    tcp->local_address = addr_uri.value();
  }
  tcp->read_cb = nullptr;
  tcp->write_cb = nullptr;
  tcp->current_zerocopy_send = nullptr;
  tcp->release_fd_cb = nullptr;
  tcp->release_fd = nullptr;
  tcp->target_length = static_cast<double>(options.tcp_read_chunk_size);
  tcp->bytes_read_this_round = 0;
  // Will be set to false by the very first endpoint read function.
  tcp->is_first_read = true;
  tcp->bytes_counter = -1;
  tcp->socket_ts_enabled = false;
  tcp->ts_capable = true;
  tcp->outgoing_buffer_arg = nullptr;
  tcp->min_progress_size = 1;
  if (options.tcp_tx_zero_copy_enabled &&
      !tcp->tcp_zerocopy_send_ctx.memory_limited()) {
#ifdef GRPC_LINUX_ERRQUEUE
    const int enable = 1;
    auto err =
        setsockopt(tcp->fd, SOL_SOCKET, SO_ZEROCOPY, &enable, sizeof(enable));
    if (err == 0) {
      tcp->tcp_zerocopy_send_ctx.set_enabled(true);
    } else {
      LOG(ERROR) << "Failed to set zerocopy options on the socket.";
    }
#endif
  }
  // paired with unref in grpc_tcp_destroy
  new (&tcp->refcount)
      grpc_core::RefCount(1, GRPC_TRACE_FLAG_ENABLED(tcp) ? "tcp" : nullptr);
  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
  tcp->em_fd = em_fd;
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);
  if (grpc_event_engine_run_in_background()) {
    // If there is a polling engine always running in the background, there is
    // no need to run the backup poller.
    GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  } else {
    GRPC_CLOSURE_INIT(&tcp->write_done_closure,
                      tcp_drop_uncovered_then_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  }
  // Always assume there is something on the queue to read.
  tcp->inq = 1;
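  // TCP_INQ asks the kernel to report, via a control message on each
  // recvmsg(), how many bytes remain queued on the socket; the read path uses
  // this to decide whether more data is immediately available.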
#ifdef GRPC_HAVE_TCP_INQ
  int one = 1;
  if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
    tcp->inq_capable = true;
  } else {
    VLOG(2) << "cannot set inq fd=" << tcp->fd << " errno=" << errno;
    tcp->inq_capable = false;
  }
#else
  tcp->inq_capable = false;
#endif  // GRPC_HAVE_TCP_INQ
  // Start being notified on errors if event engine can track errors.
  if (grpc_event_engine_can_track_errors()) {
    // Grab a ref to tcp so that we can safely access the tcp struct when
    // processing errors. We unref when we no longer want to track errors
    // separately.
    TCP_REF(tcp, "error-tracking");
    gpr_atm_rel_store(&tcp->stop_error_notification, 0);
    GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
  }

  return &tcp->base;
}

int grpc_tcp_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  CHECK(ep->vtable == &vtable);
  return grpc_fd_wrapped_fd(tcp->em_fd);
}

void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
                                     grpc_closure* done) {
  if (grpc_event_engine::experimental::grpc_is_event_engine_endpoint(ep)) {
    return grpc_event_engine::experimental::
        grpc_event_engine_endpoint_destroy_and_release_fd(ep, fd, done);
  }
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  CHECK(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    // Stop error notifications.
    ZerocopyDisableAndWaitForRemaining(tcp);
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  tcp->read_mu.Lock();
  tcp->memory_owner.Reset();
  tcp->read_mu.Unlock();
  TCP_UNREF(tcp, "destroy");
}

void grpc_tcp_posix_init() { g_backup_poller_mu = new grpc_core::Mutex; }

void grpc_tcp_posix_shutdown() {
  delete g_backup_poller_mu;
  g_backup_poller_mu = nullptr;
}

#endif  // GRPC_POSIX_SOCKET_TCP