1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 //     http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18 
19 #include <grpc/support/port_platform.h>
20 
21 #include "absl/base/thread_annotations.h"
22 #include "absl/status/status.h"
23 #include "absl/strings/str_cat.h"
24 #include "absl/strings/string_view.h"
25 #include "absl/types/optional.h"
26 
27 #include <grpc/impl/grpc_types.h>
28 
29 #include "src/core/lib/iomgr/exec_ctx.h"
30 #include "src/core/lib/iomgr/port.h"
31 
32 #ifdef GRPC_POSIX_SOCKET_TCP
33 
34 #include <errno.h>
35 #include <limits.h>
36 #include <netinet/in.h>
37 #include <netinet/tcp.h>
38 #include <stdbool.h>
39 #include <stdio.h>
40 #include <stdlib.h>
41 #include <string.h>
42 #include <sys/socket.h>
43 #include <sys/types.h>
44 #include <unistd.h>
45 
46 #include <algorithm>
47 #include <unordered_map>
48 
49 #include <grpc/slice.h>
50 #include <grpc/support/alloc.h>
51 #include <grpc/support/log.h>
52 #include <grpc/support/string_util.h>
53 #include <grpc/support/sync.h>
54 #include <grpc/support/time.h>
55 
56 #include "src/core/lib/address_utils/sockaddr_utils.h"
57 #include "src/core/lib/debug/event_log.h"
58 #include "src/core/lib/debug/stats.h"
59 #include "src/core/lib/debug/stats_data.h"
60 #include "src/core/lib/debug/trace.h"
61 #include "src/core/lib/experiments/experiments.h"
62 #include "src/core/lib/gpr/string.h"
63 #include "src/core/lib/gpr/useful.h"
64 #include "src/core/lib/gprpp/crash.h"
65 #include "src/core/lib/gprpp/strerror.h"
66 #include "src/core/lib/gprpp/sync.h"
67 #include "src/core/lib/gprpp/time.h"
68 #include "src/core/lib/iomgr/buffer_list.h"
69 #include "src/core/lib/iomgr/ev_posix.h"
70 #include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
71 #include "src/core/lib/iomgr/executor.h"
72 #include "src/core/lib/iomgr/socket_utils_posix.h"
73 #include "src/core/lib/iomgr/tcp_posix.h"
74 #include "src/core/lib/resource_quota/api.h"
75 #include "src/core/lib/resource_quota/memory_quota.h"
76 #include "src/core/lib/resource_quota/trace.h"
77 #include "src/core/lib/slice/slice_internal.h"
78 #include "src/core/lib/slice/slice_string_helpers.h"
79 
80 #ifndef SOL_TCP
81 #define SOL_TCP IPPROTO_TCP
82 #endif
83 
84 #ifndef TCP_INQ
85 #define TCP_INQ 36
86 #define TCP_CM_INQ TCP_INQ
87 #endif
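// Illustrative note: TCP_INQ is the Linux socket option that, once enabled
// with something like
//   int one = 1;
//   setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one));
// makes recvmsg() attach a control message (cmsg_type == TCP_CM_INQ) carrying
// the number of bytes still queued in the kernel after the read. tcp_do_read()
// below consumes that cmsg to decide whether another recvmsg() is worth
// attempting before waiting for POLLIN again.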
88 
89 #ifdef GRPC_HAVE_MSG_NOSIGNAL
90 #define SENDMSG_FLAGS MSG_NOSIGNAL
91 #else
92 #define SENDMSG_FLAGS 0
93 #endif
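// MSG_NOSIGNAL (where available) suppresses SIGPIPE when writing to a socket
// whose peer has already closed the connection, so a failed sendmsg() surfaces
// as an EPIPE errno instead of terminating the process with a signal.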
94 
95 // TCP zero copy sendmsg flag.
96 // NB: We define this here as a fallback in case we're using an older set of
97 // library headers that has not defined MSG_ZEROCOPY. Since this constant is
98 // part of the kernel, we are guaranteed it will never change/disagree so
99 // defining it here is safe.
100 #ifndef MSG_ZEROCOPY
101 #define MSG_ZEROCOPY 0x4000000
102 #endif
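// Illustrative sketch of the kernel API this flag belongs to (assumed Linux
// MSG_ZEROCOPY semantics): zerocopy must first be enabled on the socket, each
// zerocopy write passes MSG_ZEROCOPY to sendmsg(), and completions are later
// drained from the error queue:
//   int one = 1;
//   setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
//   sendmsg(fd, &msg, MSG_ZEROCOPY);
//   recvmsg(fd, &err_msg, MSG_ERRQUEUE);  // sock_extended_err carries the
//                                         // range of completed sequence numbers
// The TcpZerocopySend* classes below wrap this bookkeeping for tcp_write().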
103 
104 #ifdef GRPC_MSG_IOVLEN_TYPE
105 typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
106 #else
107 typedef size_t msg_iovlen_type;
108 #endif
109 
110 extern grpc_core::TraceFlag grpc_tcp_trace;
111 
112 namespace grpc_core {
113 
114 class TcpZerocopySendRecord {
115  public:
116   TcpZerocopySendRecord() { grpc_slice_buffer_init(&buf_); }
117 
118   ~TcpZerocopySendRecord() {
119     AssertEmpty();
120     grpc_slice_buffer_destroy(&buf_);
121   }
122 
123   // Given the slices that we wish to send, and the current offset into the
124   //   slice buffer (indicating which have already been sent), populate an iovec
125   //   array that will be used for a zerocopy enabled sendmsg().
126   msg_iovlen_type PopulateIovs(size_t* unwind_slice_idx,
127                                size_t* unwind_byte_idx, size_t* sending_length,
128                                iovec* iov);
129 
130   // A sendmsg() may not be able to send the bytes that we requested at this
131   // time, returning EAGAIN (possibly due to backpressure). In this case,
132   // unwind the offset into the slice buffer so we retry sending these bytes.
133   void UnwindIfThrottled(size_t unwind_slice_idx, size_t unwind_byte_idx) {
134     out_offset_.byte_idx = unwind_byte_idx;
135     out_offset_.slice_idx = unwind_slice_idx;
136   }
137 
138   // Update the offset into the slice buffer based on how much we wanted to send
139   // vs. what sendmsg() actually sent (which may be lower, possibly due to
140   // backpressure).
141   void UpdateOffsetForBytesSent(size_t sending_length, size_t actually_sent);
142 
143   // Indicates whether all underlying data has been sent or not.
144   bool AllSlicesSent() { return out_offset_.slice_idx == buf_.count; }
145 
146   // Reset this structure for a new tcp_write() with zerocopy.
147   void PrepareForSends(grpc_slice_buffer* slices_to_send) {
148     AssertEmpty();
149     out_offset_.slice_idx = 0;
150     out_offset_.byte_idx = 0;
151     grpc_slice_buffer_swap(slices_to_send, &buf_);
152     Ref();
153   }
154 
155   // References: 1 reference per sendmsg(), and 1 for the tcp_write().
156   void Ref() { ref_.fetch_add(1, std::memory_order_relaxed); }
157 
158   // Unref: called when we get an error queue notification for a sendmsg(), when
159   //  a sendmsg() failed, or when tcp_write() is done.
160   bool Unref() {
161     const intptr_t prior = ref_.fetch_sub(1, std::memory_order_acq_rel);
162     GPR_DEBUG_ASSERT(prior > 0);
163     if (prior == 1) {
164       AllSendsComplete();
165       return true;
166     }
167     return false;
168   }
169 
170  private:
171   struct OutgoingOffset {
172     size_t slice_idx = 0;
173     size_t byte_idx = 0;
174   };
175 
176   void AssertEmpty() {
177     GPR_DEBUG_ASSERT(buf_.count == 0);
178     GPR_DEBUG_ASSERT(buf_.length == 0);
179     GPR_DEBUG_ASSERT(ref_.load(std::memory_order_relaxed) == 0);
180   }
181 
182   // When all sendmsg() calls associated with this tcp_write() have been
183   // completed (ie. we have received the notifications for each sequence number
184   // for each sendmsg()) and all reference counts have been dropped, drop our
185   // reference to the underlying data since we no longer need it.
186   void AllSendsComplete() {
187     GPR_DEBUG_ASSERT(ref_.load(std::memory_order_relaxed) == 0);
188     grpc_slice_buffer_reset_and_unref(&buf_);
189   }
190 
191   grpc_slice_buffer buf_;
192   std::atomic<intptr_t> ref_{0};
193   OutgoingOffset out_offset_;
194 };
195 
196 class TcpZerocopySendCtx {
197  public:
198   static constexpr int kDefaultMaxSends = 4;
199   static constexpr size_t kDefaultSendBytesThreshold = 16 * 1024;  // 16KB
200 
201   explicit TcpZerocopySendCtx(
202       int max_sends = kDefaultMaxSends,
203       size_t send_bytes_threshold = kDefaultSendBytesThreshold)
204       : max_sends_(max_sends),
205         free_send_records_size_(max_sends),
206         threshold_bytes_(send_bytes_threshold) {
207     send_records_ = static_cast<TcpZerocopySendRecord*>(
208         gpr_malloc(max_sends * sizeof(*send_records_)));
209     free_send_records_ = static_cast<TcpZerocopySendRecord**>(
210         gpr_malloc(max_sends * sizeof(*free_send_records_)));
211     if (send_records_ == nullptr || free_send_records_ == nullptr) {
212       gpr_free(send_records_);
213       gpr_free(free_send_records_);
214       gpr_log(GPR_INFO, "Disabling TCP TX zerocopy due to memory pressure.\n");
215       memory_limited_ = true;
216     } else {
217       for (int idx = 0; idx < max_sends_; ++idx) {
218         new (send_records_ + idx) TcpZerocopySendRecord();
219         free_send_records_[idx] = send_records_ + idx;
220       }
221     }
222   }
223 
224   ~TcpZerocopySendCtx() {
225     if (send_records_ != nullptr) {
226       for (int idx = 0; idx < max_sends_; ++idx) {
227         send_records_[idx].~TcpZerocopySendRecord();
228       }
229     }
230     gpr_free(send_records_);
231     gpr_free(free_send_records_);
232   }
233 
234   // True if we were unable to allocate the various bookkeeping structures at
235   // transport initialization time. If memory limited, we do not use zerocopy.
236   bool memory_limited() const { return memory_limited_; }
237 
238   // TCP send zerocopy maintains an implicit sequence number for every
239   // successful sendmsg() with zerocopy enabled; the kernel later gives us an
240   // error queue notification with this sequence number indicating that the
241   // underlying data buffers that we sent can now be released. Once that
242   // notification is received, we can release the buffers associated with this
243   // zerocopy send record. Here, we associate the sequence number with the data
244   // buffers that were sent with the corresponding call to sendmsg().
245   void NoteSend(TcpZerocopySendRecord* record) {
246     record->Ref();
247     {
248       MutexLock guard(&lock_);
249       is_in_write_ = true;
250       AssociateSeqWithSendRecordLocked(last_send_, record);
251     }
252     ++last_send_;
253   }
254 
255   // If sendmsg() actually failed, though, we need to revert the sequence number
256   // that we speculatively bumped before calling sendmsg(). Note that we bump
257   // this sequence number and perform relevant bookkeeping (see: NoteSend())
258   // *before* calling sendmsg() since, if we called it *after* sendmsg(), then
259   // there is a possible race with the release notification which could occur on
260   // another thread before we do the necessary bookkeeping. Hence, calling
261   // NoteSend() *before* sendmsg() and implementing an undo function is needed.
262   void UndoSend() {
263     --last_send_;
264     if (ReleaseSendRecord(last_send_)->Unref()) {
265       // We should still be holding the ref taken by tcp_write().
266       GPR_DEBUG_ASSERT(0);
267     }
268   }
269 
270   // Simply associate this send record (and the underlying sent data buffers)
271   // with the implicit sequence number for this zerocopy sendmsg().
272   void AssociateSeqWithSendRecordLocked(uint32_t seq,
273                                         TcpZerocopySendRecord* record) {
274     ctx_lookup_.emplace(seq, record);
275   }
276 
277   // Get a send record for a send that we wish to do with zerocopy.
278   TcpZerocopySendRecord* GetSendRecord() {
279     MutexLock guard(&lock_);
280     return TryGetSendRecordLocked();
281   }
282 
283   // A given send record corresponds to a single tcp_write() with zerocopy
284   // enabled. This can result in several sendmsg() calls to flush all of the
285   // data to wire. Each sendmsg() takes a reference on the
286   // TcpZerocopySendRecord, and corresponds to a single sequence number.
287   // ReleaseSendRecord releases a reference on TcpZerocopySendRecord for a
288   // single sequence number. This is called either when the relevant error
289   // queue notification (saying that we can discard the underlying buffers for
290   // this sendmsg()) is received from the kernel, or when sendmsg() was
291   // unsuccessful to begin with.
292   TcpZerocopySendRecord* ReleaseSendRecord(uint32_t seq) {
293     MutexLock guard(&lock_);
294     return ReleaseSendRecordLocked(seq);
295   }
296 
297   // After all the references to a TcpZerocopySendRecord are released, we can
298   // add it back to the pool (of size max_sends_). Note that we can only have
299   // max_sends_ tcp_write() instances with zerocopy enabled in flight at the
300   // same time.
301   void PutSendRecord(TcpZerocopySendRecord* record) {
302     GPR_DEBUG_ASSERT(record >= send_records_ &&
303                      record < send_records_ + max_sends_);
304     MutexLock guard(&lock_);
305     PutSendRecordLocked(record);
306   }
307 
308   // Indicate that we are disposing of this zerocopy context. This indicator
309   // will prevent new zerocopy writes from being issued.
310   void Shutdown() { shutdown_.store(true, std::memory_order_release); }
311 
312   // Indicates that there are no inflight tcp_write() instances with zerocopy
313   // enabled.
314   bool AllSendRecordsEmpty() {
315     MutexLock guard(&lock_);
316     return free_send_records_size_ == max_sends_;
317   }
318 
319   bool enabled() const { return enabled_; }
320 
321   void set_enabled(bool enabled) {
322     GPR_DEBUG_ASSERT(!enabled || !memory_limited());
323     enabled_ = enabled;
324   }
325 
326   // Only use zerocopy if we are sending at least this many bytes. The
327   // additional overhead of reading the error queue for notifications means that
328   // zerocopy is not useful for small transfers.
329   size_t threshold_bytes() const { return threshold_bytes_; }
330 
331   // Expected to be called by the handler reading messages from the error queue.
332   // It is used to indicate that some OMem memory is now available. It returns
333   // true to tell the caller to mark the file descriptor as immediately
334   // writable.
335   //
336   // If a write is currently in progress on the socket (ie. we have issued a
337   // sendmsg() and are about to check its return value) then we set omem state
338   // to CHECK to let the sending thread know that some tcp_omem was
339   // concurrently freed even if sendmsg() returns ENOBUFS. In this case, since
340   // there is already an active send thread, we do not need to mark the
341   // socket writeable, so we return false.
342   //
343   // If there was no write in progress on the socket, and the socket was not
344   // marked as FULL, then we need not mark the socket writeable now that some
345   // tcp_omem memory is freed since it was not considered as blocked on
346   // tcp_omem to begin with. So in this case, return false.
347   //
348   // But, if a write was not in progress and the omem state was FULL, then we
349   // need to mark the socket writeable since it is no longer blocked by
350   // tcp_omem. In this case, return true.
351   //
352   // Please refer to the STATE TRANSITION DIAGRAM below for more details.
353   //
354   bool UpdateZeroCopyOMemStateAfterFree() {
355     MutexLock guard(&lock_);
356     if (is_in_write_) {
357       zcopy_enobuf_state_ = OMemState::CHECK;
358       return false;
359     }
360     GPR_DEBUG_ASSERT(zcopy_enobuf_state_ != OMemState::CHECK);
361     if (zcopy_enobuf_state_ == OMemState::FULL) {
362       // A previous sendmsg attempt was blocked by ENOBUFS. Return true to
363       // mark the fd as writable so the next write attempt could be made.
364       zcopy_enobuf_state_ = OMemState::OPEN;
365       return true;
366     } else if (zcopy_enobuf_state_ == OMemState::OPEN) {
367       // No need to mark the fd as writable because the previous write
368       // attempt did not encounter ENOBUFS.
369       return false;
370     } else {
371       // This state should never be reached because it implies that the previous
372       // state was CHECK and is_in_write is false. This means that after the
373       // previous sendmsg returned and set is_in_write to false, it did
374   // not update zcopy_enobuf_state_ from CHECK to OPEN.
375       Crash("OMem state error!");
376     }
377   }
378 
379   // Expected to be called by the thread calling sendmsg after the syscall
380   // invocation is complete. If ENOBUFS is seen, it checks whether the error
381   // handler (Tx0cp completions) has already run and freed up some OMem. It
382   // returns true to indicate that the write can be attempted again immediately.
383   // If ENOBUFS was seen but no Tx0cp completions have been received between the
384   // sendmsg() and us taking this lock, then tcp_omem is still full from our
385   // point of view. In that case, we do not signal that the socket is writable
386   // with respect to the availability of tcp_omem, and the function returns
387   // false. This indicates that another write should not be attempted
388   // immediately and the calling thread should wait until the socket is writable
389   // again. If ENOBUFS was not seen, then again return false because the next
390   // write should be attempted only when the socket is writable again.
391   //
392   // Please refer to the STATE TRANSITION DIAGRAM below for more details.
393   //
394   bool UpdateZeroCopyOMemStateAfterSend(bool seen_enobuf) {
395     MutexLock guard(&lock_);
396     is_in_write_ = false;
397     if (seen_enobuf) {
398       if (zcopy_enobuf_state_ == OMemState::CHECK) {
399         zcopy_enobuf_state_ = OMemState::OPEN;
400         return true;
401       } else {
402         zcopy_enobuf_state_ = OMemState::FULL;
403       }
404     } else if (zcopy_enobuf_state_ != OMemState::OPEN) {
405       zcopy_enobuf_state_ = OMemState::OPEN;
406     }
407     return false;
408   }
409 
410  private:
411   //                      STATE TRANSITION DIAGRAM
412   //
413   // sendmsg succeeds       Tx-zero copy succeeds and there is no active sendmsg
414   //      ----<<--+  +------<<-------------------------------------+
415   //      |       |  |                                             |
416   //      |       |  v       sendmsg returns ENOBUFS               |
417   //      +-----> OPEN  ------------->>-------------------------> FULL
418   //                ^                                              |
419   //                |                                              |
420   //                | sendmsg completes                            |
421   //                +----<<---------- CHECK <-------<<-------------+
422   //                                        Tx-zero copy succeeds and there is
423   //                                        an active sendmsg
424   //
425   enum class OMemState : int8_t {
426     OPEN,   // Everything is clear and omem is not full.
427     FULL,   // The last sendmsg() has returned with an errno of ENOBUFS.
428     CHECK,  // Error queue is read while is_in_write_ was true, so we should
429             // check this state after the sendmsg.
430   };
431 
432   TcpZerocopySendRecord* ReleaseSendRecordLocked(uint32_t seq) {
433     auto iter = ctx_lookup_.find(seq);
434     GPR_DEBUG_ASSERT(iter != ctx_lookup_.end());
435     TcpZerocopySendRecord* record = iter->second;
436     ctx_lookup_.erase(iter);
437     return record;
438   }
439 
440   TcpZerocopySendRecord* TryGetSendRecordLocked() {
441     if (shutdown_.load(std::memory_order_acquire)) {
442       return nullptr;
443     }
444     if (free_send_records_size_ == 0) {
445       return nullptr;
446     }
447     free_send_records_size_--;
448     return free_send_records_[free_send_records_size_];
449   }
450 
451   void PutSendRecordLocked(TcpZerocopySendRecord* record) {
452     GPR_DEBUG_ASSERT(free_send_records_size_ < max_sends_);
453     free_send_records_[free_send_records_size_] = record;
454     free_send_records_size_++;
455   }
456 
457   TcpZerocopySendRecord* send_records_;
458   TcpZerocopySendRecord** free_send_records_;
459   int max_sends_;
460   int free_send_records_size_;
461   Mutex lock_;
462   uint32_t last_send_ = 0;
463   std::atomic<bool> shutdown_{false};
464   bool enabled_ = false;
465   size_t threshold_bytes_ = kDefaultSendBytesThreshold;
466   std::unordered_map<uint32_t, TcpZerocopySendRecord*> ctx_lookup_;
467   bool memory_limited_ = false;
468   bool is_in_write_ = false;
469   OMemState zcopy_enobuf_state_ = OMemState::OPEN;
470 };
471 
472 }  // namespace grpc_core
473 
474 using grpc_core::TcpZerocopySendCtx;
475 using grpc_core::TcpZerocopySendRecord;
476 
477 namespace {
478 
479 struct grpc_tcp {
480   explicit grpc_tcp(const grpc_core::PosixTcpOptions& tcp_options)
481       : min_read_chunk_size(tcp_options.tcp_min_read_chunk_size),
482         max_read_chunk_size(tcp_options.tcp_max_read_chunk_size),
483         tcp_zerocopy_send_ctx(
484             tcp_options.tcp_tx_zerocopy_max_simultaneous_sends,
485             tcp_options.tcp_tx_zerocopy_send_bytes_threshold) {}
486   grpc_endpoint base;
487   grpc_fd* em_fd;
488   int fd;
489   int inq;  // bytes pending on the socket from the last read.
490   double target_length;
491   double bytes_read_this_round;
492   grpc_core::RefCount refcount;
493   gpr_atm shutdown_count;
494 
495   int min_read_chunk_size;
496   int max_read_chunk_size;
497 
498   // garbage after the last read
499   grpc_slice_buffer last_read_buffer;
500 
501   grpc_core::Mutex read_mu;
502   grpc_slice_buffer* incoming_buffer ABSL_GUARDED_BY(read_mu) = nullptr;
503 
504   grpc_slice_buffer* outgoing_buffer;
505   // byte within outgoing_buffer->slices[0] to write next
506   size_t outgoing_byte_idx;
507 
508   grpc_closure* read_cb;
509   grpc_closure* write_cb;
510   grpc_closure* release_fd_cb;
511   int* release_fd;
512 
513   grpc_closure read_done_closure;
514   grpc_closure write_done_closure;
515   grpc_closure error_closure;
516 
517   std::string peer_string;
518   std::string local_address;
519 
520   grpc_core::MemoryOwner memory_owner;
521   grpc_core::MemoryAllocator::Reservation self_reservation;
522 
523   grpc_core::TracedBufferList tb_list;  // List of traced buffers
524 
525   // grpc_endpoint_write takes an argument which if non-null means that the
526   // transport layer wants the TCP layer to collect timestamps for this write.
527   // This arg is forwarded to the timestamps callback function when the ACK
528   // timestamp is received from the kernel. This arg is a (void *) which allows
529   // users of this API to pass in a pointer to any kind of structure. This
530   // structure could actually be a tag or any book-keeping object that the user
531   // can use to distinguish between different traced writes. The only
532   // requirement from the TCP endpoint layer is that this arg should be non-null
533   // if the user wants timestamps for the write.
534   void* outgoing_buffer_arg;
535   // A counter which starts at 0. It is initialized the first time the socket
536   // options for collecting timestamps are set, and is incremented with each
537   // byte sent.
538   int bytes_counter;
539 
540   int min_progress_size;  // A hint from upper layers specifying the minimum
541                           // number of bytes that need to be read to make
542                           // meaningful progress
543 
544   gpr_atm stop_error_notification;  // Set to 1 if we do not want to be notified
545                                     // on errors anymore
546   TcpZerocopySendCtx tcp_zerocopy_send_ctx;
547   TcpZerocopySendRecord* current_zerocopy_send = nullptr;
548 
549   int set_rcvlowat = 0;
550 
551   // Used by the endpoint read function to distinguish the very first read call
552   // from the rest
553   bool is_first_read;
554   bool has_posted_reclaimer ABSL_GUARDED_BY(read_mu) = false;
555   bool inq_capable;        // cache whether kernel supports inq
556   bool socket_ts_enabled;  // True if timestamping options are set on the socket
557                            //
558   bool ts_capable;         // Cache whether we can set timestamping options
559 };
560 
561 struct backup_poller {
562   gpr_mu* pollset_mu;
563   grpc_closure run_poller;
564 };
565 
566 void LogCommonIOErrors(absl::string_view prefix, int error_no) {
567   switch (error_no) {
568     case ECONNABORTED:
569       grpc_core::global_stats().IncrementEconnabortedCount();
570       return;
571     case ECONNRESET:
572       grpc_core::global_stats().IncrementEconnresetCount();
573       return;
574     case EPIPE:
575       grpc_core::global_stats().IncrementEpipeCount();
576       return;
577     case ETIMEDOUT:
578       grpc_core::global_stats().IncrementEtimedoutCount();
579       return;
580     case ECONNREFUSED:
581       grpc_core::global_stats().IncrementEconnrefusedCount();
582       return;
583     case ENETUNREACH:
584       grpc_core::global_stats().IncrementEnetunreachCount();
585       return;
586     case ENOMSG:
587       grpc_core::global_stats().IncrementEnomsgCount();
588       return;
589     case ENOTCONN:
590       grpc_core::global_stats().IncrementEnotconnCount();
591       return;
592     case ENOBUFS:
593       grpc_core::global_stats().IncrementEnobufsCount();
594       return;
595     default:
596       grpc_core::global_stats().IncrementUncommonIoErrorCount();
597       GRPC_LOG_EVERY_N_SEC(1, GPR_ERROR, "%s encountered uncommon error: %s",
598                            prefix.data(),
599                            grpc_core::StrError(error_no).c_str());
600       return;
601   }
602 }
603 
604 }  // namespace
605 
606 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp);
607 
608 #define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
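// The pollset lives in the same allocation as the backup_poller, immediately
// after the struct (see the gpr_zalloc(sizeof(*p) + grpc_pollset_size()) call
// in cover_self() below); this macro's pointer arithmetic relies on that layout.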
609 
610 static grpc_core::Mutex* g_backup_poller_mu = nullptr;
611 static int g_uncovered_notifications_pending
612     ABSL_GUARDED_BY(g_backup_poller_mu);
613 static backup_poller* g_backup_poller ABSL_GUARDED_BY(g_backup_poller_mu);
614 
615 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
616 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
617 static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
618                                                  grpc_error_handle error);
619 
620 static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
621   backup_poller* p = static_cast<backup_poller*>(bp);
622   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
623     gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
624   }
625   grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
626   gpr_free(p);
627 }
628 
629 static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
630   backup_poller* p = static_cast<backup_poller*>(bp);
631   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
632     gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
633   }
634   gpr_mu_lock(p->pollset_mu);
635   grpc_core::Timestamp deadline =
636       grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(10);
637   GRPC_LOG_IF_ERROR(
638       "backup_poller:pollset_work",
639       grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
640   gpr_mu_unlock(p->pollset_mu);
641   g_backup_poller_mu->Lock();
642   // last "uncovered" notification is the ref that keeps us polling
643   if (g_uncovered_notifications_pending == 1) {
644     GPR_ASSERT(g_backup_poller == p);
645     g_backup_poller = nullptr;
646     g_uncovered_notifications_pending = 0;
647     g_backup_poller_mu->Unlock();
648     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
649       gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
650     }
651     grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
652                           GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
653                                             grpc_schedule_on_exec_ctx));
654   } else {
655     g_backup_poller_mu->Unlock();
656     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
657       gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
658     }
659     grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
660                              grpc_core::ExecutorType::DEFAULT,
661                              grpc_core::ExecutorJobType::LONG);
662   }
663 }
664 
665 static void drop_uncovered(grpc_tcp* /*tcp*/) {
666   int old_count;
667   backup_poller* p;
668   g_backup_poller_mu->Lock();
669   p = g_backup_poller;
670   old_count = g_uncovered_notifications_pending--;
671   g_backup_poller_mu->Unlock();
672   GPR_ASSERT(old_count > 1);
673   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
674     gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, old_count,
675             old_count - 1);
676   }
677 }
678 
679 // The gRPC API considers a Write operation to be done the moment it clears 'flow
680 // control', i.e., not necessarily once it has been sent on the wire. This means that the
681 // application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
682 // manner when its `Write()` API is acked.
683 //
684 // We need to ensure that the fd is 'covered' (i.e being monitored by some
685 // polling thread and progress is made) and hence add it to a backup poller here
686 static void cover_self(grpc_tcp* tcp) {
687   backup_poller* p;
688   g_backup_poller_mu->Lock();
689   int old_count = 0;
690   if (g_uncovered_notifications_pending == 0) {
691     g_uncovered_notifications_pending = 2;
692     p = static_cast<backup_poller*>(
693         gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
694     g_backup_poller = p;
695     grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
696     g_backup_poller_mu->Unlock();
697     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
698       gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
699     }
700     grpc_core::Executor::Run(
701         GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
702         absl::OkStatus(), grpc_core::ExecutorType::DEFAULT,
703         grpc_core::ExecutorJobType::LONG);
704   } else {
705     old_count = g_uncovered_notifications_pending++;
706     p = g_backup_poller;
707     g_backup_poller_mu->Unlock();
708   }
709   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
710     gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
711             old_count - 1, old_count);
712   }
713   grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
714 }
715 
716 static void notify_on_read(grpc_tcp* tcp) {
717   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
718     gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
719   }
720   grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
721 }
722 
723 static void notify_on_write(grpc_tcp* tcp) {
724   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
725     gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
726   }
727   if (!grpc_event_engine_run_in_background()) {
728     cover_self(tcp);
729   }
730   grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
731 }
732 
733 static void tcp_drop_uncovered_then_handle_write(void* arg,
734                                                  grpc_error_handle error) {
735   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
736     gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
737             grpc_core::StatusToString(error).c_str());
738   }
739   drop_uncovered(static_cast<grpc_tcp*>(arg));
740   tcp_handle_write(arg, error);
741 }
742 
743 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
744   tcp->bytes_read_this_round += static_cast<double>(bytes);
745 }
746 
747 static void finish_estimate(grpc_tcp* tcp) {
748   // If we read >80% of the target buffer in one read loop, increase the size
749   // of the target buffer to either the amount read, or twice its previous
750   // value
751   if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
752     tcp->target_length =
753         std::max(2 * tcp->target_length, tcp->bytes_read_this_round);
754   } else {
755     tcp->target_length =
756         0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
757   }
758   tcp->bytes_read_this_round = 0;
759 }
760 
761 static grpc_error_handle tcp_annotate_error(grpc_error_handle src_error,
762                                             grpc_tcp* tcp) {
763   return grpc_error_set_str(
764       grpc_error_set_int(
765           grpc_error_set_int(src_error, grpc_core::StatusIntProperty::kFd,
766                              tcp->fd),
767           // All tcp errors are marked with UNAVAILABLE so that application may
768           // choose to retry.
769           grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE),
770       grpc_core::StatusStrProperty::kTargetAddress, tcp->peer_string);
771 }
772 
773 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
774 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
775 
776 static void tcp_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
777   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
778   ZerocopyDisableAndWaitForRemaining(tcp);
779   grpc_fd_shutdown(tcp->em_fd, why);
780   tcp->read_mu.Lock();
781   tcp->memory_owner.Reset();
782   tcp->read_mu.Unlock();
783 }
784 
785 static void tcp_free(grpc_tcp* tcp) {
786   grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
787                  "tcp_unref_orphan");
788   grpc_slice_buffer_destroy(&tcp->last_read_buffer);
789   tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
790                         GRPC_ERROR_CREATE("endpoint destroyed"));
791   tcp->outgoing_buffer_arg = nullptr;
792   delete tcp;
793 }
794 
795 #ifndef NDEBUG
796 #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
797 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
798 static void tcp_unref(grpc_tcp* tcp, const char* reason,
799                       const grpc_core::DebugLocation& debug_location) {
800   if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
801     tcp_free(tcp);
802   }
803 }
804 
805 static void tcp_ref(grpc_tcp* tcp, const char* reason,
806                     const grpc_core::DebugLocation& debug_location) {
807   tcp->refcount.Ref(debug_location, reason);
808 }
809 #else
810 #define TCP_UNREF(tcp, reason) tcp_unref((tcp))
811 #define TCP_REF(tcp, reason) tcp_ref((tcp))
812 static void tcp_unref(grpc_tcp* tcp) {
813   if (GPR_UNLIKELY(tcp->refcount.Unref())) {
814     tcp_free(tcp);
815   }
816 }
817 
818 static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
819 #endif
820 
821 static void tcp_destroy(grpc_endpoint* ep) {
822   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
823   grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
824   if (grpc_event_engine_can_track_errors()) {
825     ZerocopyDisableAndWaitForRemaining(tcp);
826     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
827     grpc_fd_set_error(tcp->em_fd);
828   }
829   tcp->read_mu.Lock();
830   tcp->memory_owner.Reset();
831   tcp->read_mu.Unlock();
832   TCP_UNREF(tcp, "destroy");
833 }
834 
835 static void perform_reclamation(grpc_tcp* tcp)
836     ABSL_LOCKS_EXCLUDED(tcp->read_mu) {
837   if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
838     gpr_log(GPR_INFO, "TCP: benign reclamation to free memory");
839   }
840   tcp->read_mu.Lock();
841   if (tcp->incoming_buffer != nullptr) {
842     grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
843   }
844   tcp->has_posted_reclaimer = false;
845   tcp->read_mu.Unlock();
846 }
847 
848 static void maybe_post_reclaimer(grpc_tcp* tcp)
849     ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
850   if (!tcp->has_posted_reclaimer) {
851     tcp->has_posted_reclaimer = true;
852     TCP_REF(tcp, "posted_reclaimer");
853     tcp->memory_owner.PostReclaimer(
854         grpc_core::ReclamationPass::kBenign,
855         [tcp](absl::optional<grpc_core::ReclamationSweep> sweep) {
856           if (sweep.has_value()) {
857             perform_reclamation(tcp);
858           }
859           TCP_UNREF(tcp, "posted_reclaimer");
860         });
861   }
862 }
863 
864 static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
865     ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
866   grpc_closure* cb = tcp->read_cb;
867   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
868     gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
869     size_t i;
870     gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp,
871             tcp->peer_string.c_str(), grpc_core::StatusToString(error).c_str());
872     if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
873       for (i = 0; i < tcp->incoming_buffer->count; i++) {
874         char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
875                                      GPR_DUMP_HEX | GPR_DUMP_ASCII);
876         gpr_log(GPR_DEBUG, "READ DATA: %s", dump);
877         gpr_free(dump);
878       }
879     }
880   }
881 }
882 
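// SO_RCVLOWAT tells the kernel not to report the socket as readable to
// poll()/epoll() until at least that many bytes are queued, so the function
// below uses it to skip wakeups that could not deliver min_progress_size
// bytes anyway.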
883 static void update_rcvlowat(grpc_tcp* tcp)
884     ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
885   if (!grpc_core::IsTcpRcvLowatEnabled()) return;
886 
887   // TODO(ctiller): Check if supported by OS.
888   // TODO(ctiller): Allow some adjustments instead of hardcoding things.
889 
890   static constexpr int kRcvLowatMax = 16 * 1024 * 1024;
891   static constexpr int kRcvLowatThreshold = 16 * 1024;
892 
893   int remaining = std::min(static_cast<int>(tcp->incoming_buffer->length),
894                            tcp->min_progress_size);
895 
896   remaining = std::min(remaining, kRcvLowatMax);
897 
898   // Setting SO_RCVLOWAT for small quantities does not save on CPU.
899   if (remaining < 2 * kRcvLowatThreshold) {
900     remaining = 0;
901   }
902 
903   // Decrement remaining by kRcvLowatThreshold. This would have the effect of
904   // waking up a little early. It would help with latency because some bytes
905   // may arrive while we execute the recvmsg syscall after waking up.
906   if (remaining > 0) {
907     remaining -= kRcvLowatThreshold;
908   }
909 
910   // We still do not know the RPC size. Do not set SO_RCVLOWAT.
911   if (tcp->set_rcvlowat <= 1 && remaining <= 1) return;
912 
913   // Previous value is still valid. No change needed in SO_RCVLOWAT.
914   if (tcp->set_rcvlowat == remaining) {
915     return;
916   }
917   if (setsockopt(tcp->fd, SOL_SOCKET, SO_RCVLOWAT, &remaining,
918                  sizeof(remaining)) != 0) {
919     gpr_log(GPR_ERROR, "%s",
920             absl::StrCat("Cannot set SO_RCVLOWAT on fd=", tcp->fd,
921                          " err=", grpc_core::StrError(errno).c_str())
922                 .c_str());
923     return;
924   }
925   tcp->set_rcvlowat = remaining;
926 }
927 
928 // Returns true if data is available to read or an error other than EAGAIN occurred.
929 #define MAX_READ_IOVEC 64
930 static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
931     ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
932   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
933     gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
934   }
935   struct msghdr msg;
936   struct iovec iov[MAX_READ_IOVEC];
937   ssize_t read_bytes;
938   size_t total_read_bytes = 0;
939   size_t iov_len =
940       std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
941 #ifdef GRPC_LINUX_ERRQUEUE
942   constexpr size_t cmsg_alloc_space =
943       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
944 #else
945   constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
946 #endif  // GRPC_LINUX_ERRQUEUE
947   char cmsgbuf[cmsg_alloc_space];
948   for (size_t i = 0; i < iov_len; i++) {
949     iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
950     iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
951   }
952 
953   GPR_ASSERT(tcp->incoming_buffer->length != 0);
954   GPR_DEBUG_ASSERT(tcp->min_progress_size > 0);
955 
956   do {
957     // Assume there is something on the queue. If we receive TCP_INQ from the
958     // kernel, we will update this value; otherwise, we have to assume there is
959     // always something to read until we get EAGAIN.
960     tcp->inq = 1;
961 
962     msg.msg_name = nullptr;
963     msg.msg_namelen = 0;
964     msg.msg_iov = iov;
965     msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
966     if (tcp->inq_capable) {
967       msg.msg_control = cmsgbuf;
968       msg.msg_controllen = sizeof(cmsgbuf);
969     } else {
970       msg.msg_control = nullptr;
971       msg.msg_controllen = 0;
972     }
973     msg.msg_flags = 0;
974 
975     grpc_core::global_stats().IncrementTcpReadOffer(
976         tcp->incoming_buffer->length);
977     grpc_core::global_stats().IncrementTcpReadOfferIovSize(
978         tcp->incoming_buffer->count);
979 
980     do {
981       grpc_core::global_stats().IncrementSyscallRead();
982       read_bytes = recvmsg(tcp->fd, &msg, 0);
983     } while (read_bytes < 0 && errno == EINTR);
984 
985     if (read_bytes < 0 && errno == EAGAIN) {
986       // NB: After calling call_read_cb a parallel call of the read handler may
987       // be running.
988       if (total_read_bytes > 0) {
989         break;
990       }
991       finish_estimate(tcp);
992       tcp->inq = 0;
993       return false;
994     }
995 
996     // We have read something in previous reads. We need to deliver those
997     // bytes to the upper layer.
998     if (read_bytes <= 0 && total_read_bytes >= 1) {
999       if (read_bytes < 0) {
1000         LogCommonIOErrors("recvmsg", errno);
1001       }
1002       tcp->inq = 1;
1003       break;
1004     }
1005 
1006     if (read_bytes <= 0) {
1007       // 0 read size ==> end of stream
1008       grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
1009       if (read_bytes == 0) {
1010         *error = tcp_annotate_error(absl::InternalError("Socket closed"), tcp);
1011       } else {
1012         *error =
1013             tcp_annotate_error(absl::InternalError(absl::StrCat(
1014                                    "recvmsg:", grpc_core::StrError(errno))),
1015                                tcp);
1016       }
1017       return true;
1018     }
1019 
1020     grpc_core::global_stats().IncrementTcpReadSize(read_bytes);
1021     add_to_estimate(tcp, static_cast<size_t>(read_bytes));
1022     GPR_DEBUG_ASSERT((size_t)read_bytes <=
1023                      tcp->incoming_buffer->length - total_read_bytes);
1024 
1025 #ifdef GRPC_HAVE_TCP_INQ
1026     if (tcp->inq_capable) {
1027       GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
1028       struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
1029       for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1030         if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
1031             cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
1032           tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
1033           break;
1034         }
1035       }
1036     }
1037 #endif  // GRPC_HAVE_TCP_INQ
1038 
1039     total_read_bytes += read_bytes;
1040     if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
1041       break;
1042     }
1043 
1044     // We had a partial read, and still have space to read more data.
1045     // So, adjust IOVs and try to read more.
1046     size_t remaining = read_bytes;
1047     size_t j = 0;
1048     for (size_t i = 0; i < iov_len; i++) {
1049       if (remaining >= iov[i].iov_len) {
1050         remaining -= iov[i].iov_len;
1051         continue;
1052       }
1053       if (remaining > 0) {
1054         iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
1055         iov[j].iov_len = iov[i].iov_len - remaining;
1056         remaining = 0;
1057       } else {
1058         iov[j].iov_base = iov[i].iov_base;
1059         iov[j].iov_len = iov[i].iov_len;
1060       }
1061       ++j;
1062     }
1063     iov_len = j;
1064   } while (true);
1065 
1066   if (tcp->inq == 0) {
1067     finish_estimate(tcp);
1068   }
1069 
1070   GPR_DEBUG_ASSERT(total_read_bytes > 0);
1071   *error = absl::OkStatus();
1072   if (grpc_core::IsTcpFrameSizeTuningEnabled()) {
1073     // Update min progress size based on the total number of bytes read in
1074     // this round.
1075     tcp->min_progress_size -= total_read_bytes;
1076     if (tcp->min_progress_size > 0) {
1077       // There are still some bytes left to be read before we can signal
1078       // the read as complete. Append the bytes read so far into
1079       // last_read_buffer which serves as a staging buffer. Return false
1080       // to indicate tcp_handle_read needs to be scheduled again.
1081       grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1082                                    &tcp->last_read_buffer);
1083       return false;
1084     } else {
1085       // The required number of bytes have been read. Append the bytes
1086       // read in this round into last_read_buffer. Then swap last_read_buffer
1087       // and incoming_buffer. Now incoming buffer contains all the bytes
1088       // read since the start of the last tcp_read operation. last_read_buffer
1089       // would contain any spare space left in the incoming buffer. This
1090       // space will be used in the next tcp_read operation.
1091       tcp->min_progress_size = 1;
1092       grpc_slice_buffer_move_first(tcp->incoming_buffer, total_read_bytes,
1093                                    &tcp->last_read_buffer);
1094       grpc_slice_buffer_swap(&tcp->last_read_buffer, tcp->incoming_buffer);
1095       return true;
1096     }
1097   }
1098   if (total_read_bytes < tcp->incoming_buffer->length) {
1099     grpc_slice_buffer_trim_end(tcp->incoming_buffer,
1100                                tcp->incoming_buffer->length - total_read_bytes,
1101                                &tcp->last_read_buffer);
1102   }
1103   return true;
1104 }
1105 
1106 static void maybe_make_read_slices(grpc_tcp* tcp)
1107     ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
1108   static const int kBigAlloc = 64 * 1024;
1109   static const int kSmallAlloc = 8 * 1024;
1110   if (tcp->incoming_buffer->length <
1111       std::max<size_t>(tcp->min_progress_size, 1)) {
1112     size_t allocate_length = tcp->min_progress_size;
1113     const size_t target_length = static_cast<size_t>(tcp->target_length);
1114     // If memory pressure is low and we think there will be more than
1115     // min_progress_size bytes to read, allocate a bit more.
1116     const bool low_memory_pressure =
1117         tcp->memory_owner.GetPressureInfo().pressure_control_value < 0.8;
1118     if (low_memory_pressure && target_length > allocate_length) {
1119       allocate_length = target_length;
1120     }
1121     int extra_wanted = std::max<int>(
1122         1, allocate_length - static_cast<int>(tcp->incoming_buffer->length));
1123     if (extra_wanted >=
1124         (low_memory_pressure ? kSmallAlloc * 3 / 2 : kBigAlloc)) {
1125       while (extra_wanted > 0) {
1126         extra_wanted -= kBigAlloc;
1127         grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1128                                       tcp->memory_owner.MakeSlice(kBigAlloc));
1129         grpc_core::global_stats().IncrementTcpReadAlloc64k();
1130       }
1131     } else {
1132       while (extra_wanted > 0) {
1133         extra_wanted -= kSmallAlloc;
1134         grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
1135                                       tcp->memory_owner.MakeSlice(kSmallAlloc));
1136         grpc_core::global_stats().IncrementTcpReadAlloc8k();
1137       }
1138     }
1139     maybe_post_reclaimer(tcp);
1140   }
1141 }
1142 
1143 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
1144   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1145   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1146     gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
1147             grpc_core::StatusToString(error).c_str());
1148   }
1149   tcp->read_mu.Lock();
1150   grpc_error_handle tcp_read_error;
1151   if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) {
1152     maybe_make_read_slices(tcp);
1153     if (!tcp_do_read(tcp, &tcp_read_error)) {
1154       // Maybe update rcv lowat value based on the number of bytes read in this
1155       // round.
1156       update_rcvlowat(tcp);
1157       tcp->read_mu.Unlock();
1158       // We've consumed the edge, request a new one
1159       notify_on_read(tcp);
1160       return;
1161     }
1162     tcp_trace_read(tcp, tcp_read_error);
1163   } else {
1164     if (!tcp->memory_owner.is_valid() && error.ok()) {
1165       tcp_read_error =
1166           tcp_annotate_error(absl::InternalError("Socket closed"), tcp);
1167     } else {
1168       tcp_read_error = error;
1169     }
1170     grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
1171     grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
1172   }
1173   // update_rcvlowat needs to be called at the end of the current read
1174   // operation to ensure the right SO_RCVLOWAT value is set for the next read.
1175   // Otherwise the next endpoint read operation may get stuck indefinitely
1176   // because the previously set rcv lowat value will persist and the socket may
1177   // erroneously be considered not ready for read.
1178   update_rcvlowat(tcp);
1179   grpc_closure* cb = tcp->read_cb;
1180   tcp->read_cb = nullptr;
1181   tcp->incoming_buffer = nullptr;
1182   tcp->read_mu.Unlock();
1183   grpc_core::Closure::Run(DEBUG_LOCATION, cb, tcp_read_error);
1184   TCP_UNREF(tcp, "read");
1185 }
1186 
1187 static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
1188                      grpc_closure* cb, bool urgent, int min_progress_size) {
1189   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1190   GPR_ASSERT(tcp->read_cb == nullptr);
1191   tcp->read_cb = cb;
1192   tcp->read_mu.Lock();
1193   tcp->incoming_buffer = incoming_buffer;
1194   tcp->min_progress_size = grpc_core::IsTcpFrameSizeTuningEnabled()
1195                                ? std::max(min_progress_size, 1)
1196                                : 1;
1197   grpc_slice_buffer_reset_and_unref(incoming_buffer);
1198   grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
1199   TCP_REF(tcp, "read");
1200   if (tcp->is_first_read) {
1201     tcp->read_mu.Unlock();
1202     // Endpoint read called for the very first time. Register read callback with
1203     // the polling engine
1204     tcp->is_first_read = false;
1205     notify_on_read(tcp);
1206   } else if (!urgent && tcp->inq == 0) {
1207     tcp->read_mu.Unlock();
1208     // Upper layer asked to read more but we know there is no pending data
1209     // to read from previous reads. So, wait for POLLIN.
1210     //
1211     notify_on_read(tcp);
1212   } else {
1213     tcp->read_mu.Unlock();
1214     // Not the first time. We may or may not have more bytes available. In any
1215     // case call tcp->read_done_closure (i.e. tcp_handle_read()), which does the
1216     // right thing (i.e. calls tcp_do_read(), which either reads the available
1217     // bytes or calls notify_on_read() to be notified when new bytes become
1218     // available).
1219     grpc_core::Closure::Run(DEBUG_LOCATION, &tcp->read_done_closure,
1220                             absl::OkStatus());
1221   }
1222 }
1223 
1224 // A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
1225 // of bytes sent.
1226 ssize_t tcp_send(int fd, const struct msghdr* msg, int* saved_errno,
1227                  int additional_flags = 0) {
1228   ssize_t sent_length;
1229   do {
1230     // TODO(klempner): Cork if this is a partial write
1231     grpc_core::global_stats().IncrementSyscallWrite();
1232     sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
1233   } while (sent_length < 0 && (*saved_errno = errno) == EINTR);
1234   return sent_length;
1235 }
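// Usage note: EINTR is retried inside the loop above; any other failure is
// reported through the negative return value together with *saved_errno, so
// callers on the write path can tell EAGAIN and ENOBUFS apart from hard errors.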
1236 
1237 /// This is to be called if outgoing_buffer_arg is not null. On Linux platforms,
1238 /// this will call sendmsg with socket options set to collect timestamps inside
1239 /// the kernel. On return, sent_length is set to the return value of the sendmsg
1240 /// call. Returns false if setting the socket options failed. This is not
1241 /// implemented for non-Linux platforms currently, and crashes out.
1242 ///
1243 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1244                                       size_t sending_length,
1245                                       ssize_t* sent_length, int* saved_errno,
1246                                       int additional_flags = 0);
1247 
1248 /// The callback function to be invoked when we get an error on the socket.
1249 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error_handle error);
1250 
1251 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1252     grpc_tcp* tcp, grpc_slice_buffer* buf);
1253 
1254 #ifdef GRPC_LINUX_ERRQUEUE
1255 static bool process_errors(grpc_tcp* tcp);
1256 
1257 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1258     grpc_tcp* tcp, grpc_slice_buffer* buf) {
1259   TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1260   const bool use_zerocopy =
1261       tcp->tcp_zerocopy_send_ctx.enabled() &&
1262       tcp->tcp_zerocopy_send_ctx.threshold_bytes() < buf->length;
1263   if (use_zerocopy) {
1264     zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1265     if (zerocopy_send_record == nullptr) {
1266       process_errors(tcp);
1267       zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
1268     }
1269     if (zerocopy_send_record != nullptr) {
1270       zerocopy_send_record->PrepareForSends(buf);
1271       GPR_DEBUG_ASSERT(buf->count == 0);
1272       GPR_DEBUG_ASSERT(buf->length == 0);
1273       tcp->outgoing_byte_idx = 0;
1274       tcp->outgoing_buffer = nullptr;
1275     }
1276   }
1277   return zerocopy_send_record;
1278 }
1279 
1280 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {
1281   tcp->tcp_zerocopy_send_ctx.Shutdown();
1282   while (!tcp->tcp_zerocopy_send_ctx.AllSendRecordsEmpty()) {
1283     process_errors(tcp);
1284   }
1285 }
1286 
1287 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1288                                       size_t sending_length,
1289                                       ssize_t* sent_length, int* saved_errno,
1290                                       int additional_flags) {
1291   if (!tcp->socket_ts_enabled) {
1292     uint32_t opt = grpc_core::kTimestampingSocketOptions;
1293     if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
1294                    static_cast<void*>(&opt), sizeof(opt)) != 0) {
1295       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1296         gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
1297       }
1298       return false;
1299     }
1300     tcp->bytes_counter = -1;
1301     tcp->socket_ts_enabled = true;
1302   }
1303   // Set control message to indicate that you want timestamps.
1304   union {
1305     char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
1306     struct cmsghdr align;
1307   } u;
1308   cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
1309   cmsg->cmsg_level = SOL_SOCKET;
1310   cmsg->cmsg_type = SO_TIMESTAMPING;
1311   cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
1312   *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
1313       grpc_core::kTimestampingRecordingOptions;
1314   msg->msg_control = u.cmsg_buf;
1315   msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));
1316 
1317   // If there was an error on sendmsg the logic in tcp_flush will handle it.
1318   ssize_t length = tcp_send(tcp->fd, msg, saved_errno, additional_flags);
1319   *sent_length = length;
1320   // Only save timestamps if all the bytes were taken by sendmsg.
1321   if (sending_length == static_cast<size_t>(length)) {
1322     tcp->tb_list.AddNewEntry(static_cast<uint32_t>(tcp->bytes_counter + length),
1323                              tcp->fd, tcp->outgoing_buffer_arg);
1324     tcp->outgoing_buffer_arg = nullptr;
1325   }
1326   return true;
1327 }
1328 
1329 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1330                                             TcpZerocopySendRecord* record,
1331                                             uint32_t seq, const char* tag);
1332 // Reads \a cmsg to process zerocopy control messages.
1333 static void process_zerocopy(grpc_tcp* tcp, struct cmsghdr* cmsg) {
1334   GPR_DEBUG_ASSERT(cmsg);
1335   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(cmsg));
1336   GPR_DEBUG_ASSERT(serr->ee_errno == 0);
1337   GPR_DEBUG_ASSERT(serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY);
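       // For zerocopy completions, ee_info and ee_data carry the inclusive range
       // of send sequence numbers that the kernel has finished with.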
1338   const uint32_t lo = serr->ee_info;
1339   const uint32_t hi = serr->ee_data;
1340   for (uint32_t seq = lo; seq <= hi; ++seq) {
1341     // TODO(arjunroy): It's likely that lo and hi refer to zerocopy sequence
1342     // numbers that are generated by a single call to grpc_endpoint_write; ie.
1343     // we can batch the unref operation. So, check if record is the same for
1344     // both; if so, batch the unref/put.
1345     TcpZerocopySendRecord* record =
1346         tcp->tcp_zerocopy_send_ctx.ReleaseSendRecord(seq);
1347     GPR_DEBUG_ASSERT(record);
1348     UnrefMaybePutZerocopySendRecord(tcp, record, seq, "CALLBACK RCVD");
1349   }
1350   if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterFree()) {
1351     grpc_fd_set_writable(tcp->em_fd);
1352   }
1353 }
1354 
1355 // Whether the cmsg received from the error queue is at the IP (IPv4) or IPv6
1355 // level.
1356 static bool CmsgIsIpLevel(const cmsghdr& cmsg) {
1357   return (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR) ||
1358          (cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR);
1359 }
1360 
1361 static bool CmsgIsZeroCopy(const cmsghdr& cmsg) {
1362   if (!CmsgIsIpLevel(cmsg)) {
1363     return false;
1364   }
1365   auto serr = reinterpret_cast<const sock_extended_err*> CMSG_DATA(&cmsg);
1366   return serr->ee_errno == 0 && serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY;
1367 }
1368 
1369 /// Reads \a cmsg to derive timestamps from the control messages. If a valid
1370 /// timestamp is found, the traced buffer list is updated with this timestamp.
1371 /// The caller of this function should be looping on the control messages found
1372 /// in \a msg. \a cmsg should point to the control message that the caller wants
1373 /// processed.
1374 /// On return, a pointer to a control message is returned. On the next
1375 /// iteration, CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg.
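     /// A minimal caller sketch (the real loop lives in process_errors below):
     ///   for (auto* c = CMSG_FIRSTHDR(&msg); c != nullptr && c->cmsg_len > 0;
     ///        c = CMSG_NXTHDR(&msg, c)) {
     ///     if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_TIMESTAMPING) {
     ///       c = process_timestamp(tcp, &msg, c);
     ///     }
     ///   }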
1376 struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
1377                                   struct cmsghdr* cmsg) {
1378   auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
1379   cmsghdr* opt_stats = nullptr;
1380   if (next_cmsg == nullptr) {
1381     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1382       gpr_log(GPR_ERROR, "Received timestamp without extended error");
1383     }
1384     return cmsg;
1385   }
1386 
1387   // Check if next_cmsg is an OPT_STATS msg
1388   if (next_cmsg->cmsg_level == SOL_SOCKET &&
1389       next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
1390     opt_stats = next_cmsg;
1391     next_cmsg = CMSG_NXTHDR(msg, opt_stats);
1392     if (next_cmsg == nullptr) {
1393       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1394         gpr_log(GPR_ERROR, "Received timestamp without extended error");
1395       }
1396       return opt_stats;
1397     }
1398   }
1399 
1400   if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
1401       !(next_cmsg->cmsg_type == IP_RECVERR ||
1402         next_cmsg->cmsg_type == IPV6_RECVERR)) {
1403     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1404       gpr_log(GPR_ERROR, "Unexpected control message");
1405     }
1406     return cmsg;
1407   }
1408 
1409   auto tss =
1410       reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
1411   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
1412   if (serr->ee_errno != ENOMSG ||
1413       serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
1414     gpr_log(GPR_ERROR, "Unexpected control message");
1415     return cmsg;
1416   }
1417   tcp->tb_list.ProcessTimestamp(serr, opt_stats, tss);
1418   return next_cmsg;
1419 }
1420 
1421 /// On Linux platforms, reads the socket's error queue and processes error
1422 /// messages from the queue.
1423 ///
1424 static bool process_errors(grpc_tcp* tcp) {
1425   bool processed_err = false;
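       // Only control messages are read from the error queue, so an empty iovec
       // suffices; no payload data is expected.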
1426   struct iovec iov;
1427   iov.iov_base = nullptr;
1428   iov.iov_len = 0;
1429   struct msghdr msg;
1430   msg.msg_name = nullptr;
1431   msg.msg_namelen = 0;
1432   msg.msg_iov = &iov;
1433   msg.msg_iovlen = 0;
1434   msg.msg_flags = 0;
1435   // Allocate enough space so that we don't need to keep increasing this as
1436   // the size of OPT_STATS increases.
1437   constexpr size_t cmsg_alloc_space =
1438       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
1439       CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
1440       CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
1441   // Allocate aligned space for cmsgs received along with timestamps
1442   union {
1443     char rbuf[cmsg_alloc_space];
1444     struct cmsghdr align;
1445   } aligned_buf;
1446   msg.msg_control = aligned_buf.rbuf;
1447   int r, saved_errno;
1448   while (true) {
1449     msg.msg_controllen = sizeof(aligned_buf.rbuf);
1450     do {
1451       r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
1452       saved_errno = errno;
1453     } while (r < 0 && saved_errno == EINTR);
1454 
1455     if (r == -1 && saved_errno == EAGAIN) {
1456       return processed_err;  // No more errors to process
1457     }
1458     if (r == -1) {
1459       LogCommonIOErrors("recvmsg(MSG_ERRQUEUE)", saved_errno);
1460       grpc_core::global_stats().IncrementMsgErrqueueErrorCount();
1461       return processed_err;
1462     }
1463     if (GPR_UNLIKELY((msg.msg_flags & MSG_CTRUNC) != 0)) {
1464       gpr_log(GPR_ERROR, "Error message was truncated.");
1465     }
1466 
1467     if (msg.msg_controllen == 0) {
1468       // There was no control message found. It was probably spurious.
1469       return processed_err;
1470     }
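         // Track whether this batch contained at least one control message we
         // recognize; if none were seen, stop polling the error queue.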
1471     bool seen = false;
1472     for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
1473          cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1474       if (CmsgIsZeroCopy(*cmsg)) {
1475         process_zerocopy(tcp, cmsg);
1476         seen = true;
1477         processed_err = true;
1478       } else if (cmsg->cmsg_level == SOL_SOCKET &&
1479                  cmsg->cmsg_type == SCM_TIMESTAMPING) {
1480         cmsg = process_timestamp(tcp, &msg, cmsg);
1481         seen = true;
1482         processed_err = true;
1483       } else {
1484         // Got a control message that is not a timestamp or zerocopy. Don't know
1485         // how to handle this.
1486         if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1487           gpr_log(GPR_INFO,
1488                   "unknown control message cmsg_level:%d cmsg_type:%d",
1489                   cmsg->cmsg_level, cmsg->cmsg_type);
1490         }
1491         return processed_err;
1492       }
1493     }
1494     if (!seen) {
1495       return processed_err;
1496     }
1497   }
1498 }
1499 
1500 static void tcp_handle_error(void* arg /* grpc_tcp */,
1501                              grpc_error_handle error) {
1502   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1503   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1504     gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
1505             grpc_core::StatusToString(error).c_str());
1506   }
1507 
1508   if (!error.ok() ||
1509       static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
1510     // We aren't going to register for error notifications anymore, so it is
1511     // safe to unref.
1512     TCP_UNREF(tcp, "error-tracking");
1513     return;
1514   }
1515 
1516   // We are still interested in collecting timestamps, so let's try reading
1517   // them.
1518   bool processed = process_errors(tcp);
1519   // This might not be a timestamps error. Set the read and write closures to
1520   // be ready.
1521   if (!processed) {
1522     grpc_fd_set_readable(tcp->em_fd);
1523     grpc_fd_set_writable(tcp->em_fd);
1524   }
1525   grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
1526 }
1527 
1528 #else   // GRPC_LINUX_ERRQUEUE
1529 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1530     grpc_tcp* /*tcp*/, grpc_slice_buffer* /*buf*/) {
1531   return nullptr;
1532 }
1533 
1534 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* /*tcp*/) {}
1535 
1536 static bool tcp_write_with_timestamps(grpc_tcp* /*tcp*/, struct msghdr* /*msg*/,
1537                                       size_t /*sending_length*/,
1538                                       ssize_t* /*sent_length*/,
1539                                       int* /* saved_errno */,
1540                                       int /*additional_flags*/) {
1541   gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
1542   GPR_ASSERT(0);
1543   return false;
1544 }
1545 
1546 static void tcp_handle_error(void* /*arg*/ /* grpc_tcp */,
1547                              grpc_error_handle /*error*/) {
1548   gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
1549   GPR_ASSERT(0);
1550 }
1551 #endif  // GRPC_LINUX_ERRQUEUE
1552 
1553 // If outgoing_buffer_arg is set, shuts down the traced buffer list early so
1554 // that any release operations needed can be performed on the arg.
1555 void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
1556   if (tcp->outgoing_buffer_arg) {
1557     tcp->tb_list.Shutdown(tcp->outgoing_buffer_arg,
1558                           GRPC_ERROR_CREATE("TracedBuffer list shutdown"));
1559     tcp->outgoing_buffer_arg = nullptr;
1560   }
1561 }
1562 
1563 #if defined(IOV_MAX) && IOV_MAX < 260
1564 #define MAX_WRITE_IOVEC IOV_MAX
1565 #else
1566 #define MAX_WRITE_IOVEC 260
1567 #endif
1568 msg_iovlen_type TcpZerocopySendRecord::PopulateIovs(size_t* unwind_slice_idx,
1569                                                     size_t* unwind_byte_idx,
1570                                                     size_t* sending_length,
1571                                                     iovec* iov) {
1572   msg_iovlen_type iov_size;
1573   *unwind_slice_idx = out_offset_.slice_idx;
1574   *unwind_byte_idx = out_offset_.byte_idx;
1575   for (iov_size = 0;
1576        out_offset_.slice_idx != buf_.count && iov_size != MAX_WRITE_IOVEC;
1577        iov_size++) {
1578     iov[iov_size].iov_base =
1579         GRPC_SLICE_START_PTR(buf_.slices[out_offset_.slice_idx]) +
1580         out_offset_.byte_idx;
1581     iov[iov_size].iov_len =
1582         GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]) -
1583         out_offset_.byte_idx;
1584     *sending_length += iov[iov_size].iov_len;
1585     ++(out_offset_.slice_idx);
1586     out_offset_.byte_idx = 0;
1587   }
1588   GPR_DEBUG_ASSERT(iov_size > 0);
1589   return iov_size;
1590 }
1591 
1592 void TcpZerocopySendRecord::UpdateOffsetForBytesSent(size_t sending_length,
1593                                                      size_t actually_sent) {
1594   size_t trailing = sending_length - actually_sent;
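       // Walk backwards over the slices we attempted to send and position the
       // offset at the first byte that sendmsg did not accept.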
1595   while (trailing > 0) {
1596     size_t slice_length;
1597     out_offset_.slice_idx--;
1598     slice_length = GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]);
1599     if (slice_length > trailing) {
1600       out_offset_.byte_idx = slice_length - trailing;
1601       break;
1602     } else {
1603       trailing -= slice_length;
1604     }
1605   }
1606 }
1607 
1608 // returns true if done, false if pending; if returning true, *error is set
1609 static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1610                                   grpc_error_handle* error) {
1611   msg_iovlen_type iov_size;
1612   ssize_t sent_length = 0;
1613   size_t sending_length;
1614   size_t unwind_slice_idx;
1615   size_t unwind_byte_idx;
1616   bool tried_sending_message;
1617   int saved_errno;
1618   msghdr msg;
1619   // iov consumes a large amount of stack space. Keep it as the last item on
1620   // the stack to improve locality, since in most cases we expect only its
1621   // first elements to be populated.
1622   iovec iov[MAX_WRITE_IOVEC];
1623   while (true) {
1624     sending_length = 0;
1625     iov_size = record->PopulateIovs(&unwind_slice_idx, &unwind_byte_idx,
1626                                     &sending_length, iov);
1627     msg.msg_name = nullptr;
1628     msg.msg_namelen = 0;
1629     msg.msg_iov = iov;
1630     msg.msg_iovlen = iov_size;
1631     msg.msg_flags = 0;
1632     tried_sending_message = false;
1633     // Before calling sendmsg (with or without timestamps): we
1634     // take a single ref on the zerocopy send record.
1635     tcp->tcp_zerocopy_send_ctx.NoteSend(record);
1636     saved_errno = 0;
1637     if (tcp->outgoing_buffer_arg != nullptr) {
1638       if (!tcp->ts_capable ||
1639           !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1640                                      &saved_errno, MSG_ZEROCOPY)) {
1641         // We could not set socket options to collect Fathom timestamps.
1642         // Fall back to writing without timestamps.
1643         tcp->ts_capable = false;
1644         tcp_shutdown_buffer_list(tcp);
1645       } else {
1646         tried_sending_message = true;
1647       }
1648     }
1649     if (!tried_sending_message) {
1650       msg.msg_control = nullptr;
1651       msg.msg_controllen = 0;
1652       grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
1653       grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
1654       sent_length = tcp_send(tcp->fd, &msg, &saved_errno, MSG_ZEROCOPY);
1655     }
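         // ENOBUFS means the kernel ran out of optmem for this zerocopy send.
         // Update the zerocopy context's state accordingly and, if it indicates
         // the fd should be marked writable, do so.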
1656     if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterSend(
1657             saved_errno == ENOBUFS)) {
1658       grpc_fd_set_writable(tcp->em_fd);
1659     }
1660     if (sent_length < 0) {
1661       if (saved_errno != EAGAIN) {
1662         LogCommonIOErrors("sendmsg", saved_errno);
1663       }
1664       // If this particular send failed, drop ref taken earlier in this method.
1665       tcp->tcp_zerocopy_send_ctx.UndoSend();
1666       if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
1667         record->UnwindIfThrottled(unwind_slice_idx, unwind_byte_idx);
1668         return false;
1669       } else if (saved_errno == EPIPE) {
1670         *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1671         tcp_shutdown_buffer_list(tcp);
1672         return true;
1673       } else {
1674         *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1675         tcp_shutdown_buffer_list(tcp);
1676         return true;
1677       }
1678     }
1679     grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
1680     tcp->bytes_counter += sent_length;
1681     record->UpdateOffsetForBytesSent(sending_length,
1682                                      static_cast<size_t>(sent_length));
1683     if (record->AllSlicesSent()) {
1684       *error = absl::OkStatus();
1685       return true;
1686     }
1687   }
1688 }
1689 
1690 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1691                                             TcpZerocopySendRecord* record,
1692                                             uint32_t /*seq*/,
1693                                             const char* /*tag*/) {
1694   if (record->Unref()) {
1695     tcp->tcp_zerocopy_send_ctx.PutSendRecord(record);
1696   }
1697 }
1698 
1699 static bool tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1700                                grpc_error_handle* error) {
1701   bool done = do_tcp_flush_zerocopy(tcp, record, error);
1702   if (done) {
1703     // Either we encountered an error, or we successfully sent all the bytes.
1704     // In either case, we're done with this record.
1705     UnrefMaybePutZerocopySendRecord(tcp, record, 0, "flush_done");
1706   }
1707   return done;
1708 }
1709 
1710 static bool tcp_flush(grpc_tcp* tcp, grpc_error_handle* error) {
1711   struct msghdr msg;
1712   struct iovec iov[MAX_WRITE_IOVEC];
1713   msg_iovlen_type iov_size;
1714   ssize_t sent_length = 0;
1715   size_t sending_length;
1716   size_t trailing;
1717   size_t unwind_slice_idx;
1718   size_t unwind_byte_idx;
1719   int saved_errno;
1720 
1721   // We always start at zero, because we eagerly unref and trim the slice
1722   // buffer as we write
1723   size_t outgoing_slice_idx = 0;
1724 
1725   while (true) {
1726     sending_length = 0;
1727     unwind_slice_idx = outgoing_slice_idx;
1728     unwind_byte_idx = tcp->outgoing_byte_idx;
1729     for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
1730                        iov_size != MAX_WRITE_IOVEC;
1731          iov_size++) {
1732       iov[iov_size].iov_base =
1733           GRPC_SLICE_START_PTR(
1734               tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
1735           tcp->outgoing_byte_idx;
1736       iov[iov_size].iov_len =
1737           GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
1738           tcp->outgoing_byte_idx;
1739       sending_length += iov[iov_size].iov_len;
1740       outgoing_slice_idx++;
1741       tcp->outgoing_byte_idx = 0;
1742     }
1743     GPR_ASSERT(iov_size > 0);
1744 
1745     msg.msg_name = nullptr;
1746     msg.msg_namelen = 0;
1747     msg.msg_iov = iov;
1748     msg.msg_iovlen = iov_size;
1749     msg.msg_flags = 0;
1750     bool tried_sending_message = false;
1751     saved_errno = 0;
1752     if (tcp->outgoing_buffer_arg != nullptr) {
1753       if (!tcp->ts_capable ||
1754           !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1755                                      &saved_errno)) {
1756         // We could not set socket options to collect Fathom timestamps.
1757         // Fall back to writing without timestamps.
1758         tcp->ts_capable = false;
1759         tcp_shutdown_buffer_list(tcp);
1760       } else {
1761         tried_sending_message = true;
1762       }
1763     }
1764     if (!tried_sending_message) {
1765       msg.msg_control = nullptr;
1766       msg.msg_controllen = 0;
1767 
1768       grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
1769       grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
1770 
1771       sent_length = tcp_send(tcp->fd, &msg, &saved_errno);
1772     }
1773 
1774     if (sent_length < 0) {
1775       if (saved_errno == EAGAIN || saved_errno == ENOBUFS) {
1776         tcp->outgoing_byte_idx = unwind_byte_idx;
1777         // unref all and forget about all slices that have been written to this
1778         // point
1779         for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
1780           grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
1781         }
1782         return false;
1783       } else if (saved_errno == EPIPE) {
1784         *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1785         grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1786         tcp_shutdown_buffer_list(tcp);
1787         return true;
1788       } else {
1789         *error = tcp_annotate_error(GRPC_OS_ERROR(saved_errno, "sendmsg"), tcp);
1790         grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1791         tcp_shutdown_buffer_list(tcp);
1792         return true;
1793       }
1794     }
1795 
1796     GPR_ASSERT(tcp->outgoing_byte_idx == 0);
1797     grpc_core::EventLog::Append("tcp-write-outstanding", -sent_length);
1798     tcp->bytes_counter += sent_length;
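         // sendmsg may have accepted only part of the data. Walk backwards over
         // the slices to find where the next flush attempt should resume.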
1799     trailing = sending_length - static_cast<size_t>(sent_length);
1800     while (trailing > 0) {
1801       size_t slice_length;
1802 
1803       outgoing_slice_idx--;
1804       slice_length =
1805           GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
1806       if (slice_length > trailing) {
1807         tcp->outgoing_byte_idx = slice_length - trailing;
1808         break;
1809       } else {
1810         trailing -= slice_length;
1811       }
1812     }
1813     if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
1814       *error = absl::OkStatus();
1815       grpc_slice_buffer_reset_and_unref(tcp->outgoing_buffer);
1816       return true;
1817     }
1818   }
1819 }
1820 
1821 static void tcp_handle_write(void* arg /* grpc_tcp */,
1822                              grpc_error_handle error) {
1823   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1824   grpc_closure* cb;
1825 
1826   if (!error.ok()) {
1827     cb = tcp->write_cb;
1828     tcp->write_cb = nullptr;
1829     if (tcp->current_zerocopy_send != nullptr) {
1830       UnrefMaybePutZerocopySendRecord(tcp, tcp->current_zerocopy_send, 0,
1831                                       "handle_write_err");
1832       tcp->current_zerocopy_send = nullptr;
1833     }
1834     grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1835     TCP_UNREF(tcp, "write");
1836     return;
1837   }
1838   bool flush_result =
1839       tcp->current_zerocopy_send != nullptr
1840           ? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
1841           : tcp_flush(tcp, &error);
1842   if (!flush_result) {
1843     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1844       gpr_log(GPR_INFO, "write: delayed");
1845     }
1846     notify_on_write(tcp);
1847     // tcp_flush does not populate error if it has returned false.
1848     GPR_DEBUG_ASSERT(error.ok());
1849   } else {
1850     cb = tcp->write_cb;
1851     tcp->write_cb = nullptr;
1852     tcp->current_zerocopy_send = nullptr;
1853     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1854       gpr_log(GPR_INFO, "write: %s", grpc_core::StatusToString(error).c_str());
1855     }
1856     // No need to take a ref on error since tcp_flush provides a ref.
1857     grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1858     TCP_UNREF(tcp, "write");
1859   }
1860 }
1861 
1862 static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
1863                       grpc_closure* cb, void* arg, int /*max_frame_size*/) {
1864   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1865   grpc_error_handle error;
1866   TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1867 
1868   grpc_core::EventLog::Append("tcp-write-outstanding", buf->length);
1869 
1870   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1871     size_t i;
1872 
1873     for (i = 0; i < buf->count; i++) {
1874       gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string.c_str());
1875       if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
1876         char* data =
1877             grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
1878         gpr_log(GPR_DEBUG, "WRITE DATA: %s", data);
1879         gpr_free(data);
1880       }
1881     }
1882   }
1883 
1884   GPR_ASSERT(tcp->write_cb == nullptr);
1885   GPR_DEBUG_ASSERT(tcp->current_zerocopy_send == nullptr);
1886 
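       // An empty write completes immediately: report an EOF error if the fd has
       // already been shut down, and success otherwise.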
1887   if (buf->length == 0) {
1888     grpc_core::Closure::Run(
1889         DEBUG_LOCATION, cb,
1890         grpc_fd_is_shutdown(tcp->em_fd)
1891             ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
1892             : absl::OkStatus());
1893     tcp_shutdown_buffer_list(tcp);
1894     return;
1895   }
1896 
1897   zerocopy_send_record = tcp_get_send_zerocopy_record(tcp, buf);
1898   if (zerocopy_send_record == nullptr) {
1899     // Either not enough bytes, or couldn't allocate a zerocopy context.
1900     tcp->outgoing_buffer = buf;
1901     tcp->outgoing_byte_idx = 0;
1902   }
1903   tcp->outgoing_buffer_arg = arg;
1904   if (arg) {
1905     GPR_ASSERT(grpc_event_engine_can_track_errors());
1906   }
1907 
1908   bool flush_result =
1909       zerocopy_send_record != nullptr
1910           ? tcp_flush_zerocopy(tcp, zerocopy_send_record, &error)
1911           : tcp_flush(tcp, &error);
1912   if (!flush_result) {
1913     TCP_REF(tcp, "write");
1914     tcp->write_cb = cb;
1915     tcp->current_zerocopy_send = zerocopy_send_record;
1916     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1917       gpr_log(GPR_INFO, "write: delayed");
1918     }
1919     notify_on_write(tcp);
1920   } else {
1921     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1922       gpr_log(GPR_INFO, "write: %s", grpc_core::StatusToString(error).c_str());
1923     }
1924     grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1925   }
1926 }
1927 
1928 static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
1929   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1930   grpc_pollset_add_fd(pollset, tcp->em_fd);
1931 }
1932 
1933 static void tcp_add_to_pollset_set(grpc_endpoint* ep,
1934                                    grpc_pollset_set* pollset_set) {
1935   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1936   grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
1937 }
1938 
1939 static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
1940                                         grpc_pollset_set* pollset_set) {
1941   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1942   grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
1943 }
1944 
1945 static absl::string_view tcp_get_peer(grpc_endpoint* ep) {
1946   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1947   return tcp->peer_string;
1948 }
1949 
1950 static absl::string_view tcp_get_local_address(grpc_endpoint* ep) {
1951   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1952   return tcp->local_address;
1953 }
1954 
1955 static int tcp_get_fd(grpc_endpoint* ep) {
1956   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1957   return tcp->fd;
1958 }
1959 
1960 static bool tcp_can_track_err(grpc_endpoint* ep) {
1961   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1962   if (!grpc_event_engine_can_track_errors()) {
1963     return false;
1964   }
1965   struct sockaddr addr;
1966   socklen_t len = sizeof(addr);
1967   if (getsockname(tcp->fd, &addr, &len) < 0) {
1968     return false;
1969   }
1970   return addr.sa_family == AF_INET || addr.sa_family == AF_INET6;
1971 }
1972 
1973 static const grpc_endpoint_vtable vtable = {tcp_read,
1974                                             tcp_write,
1975                                             tcp_add_to_pollset,
1976                                             tcp_add_to_pollset_set,
1977                                             tcp_delete_from_pollset_set,
1978                                             tcp_shutdown,
1979                                             tcp_destroy,
1980                                             tcp_get_peer,
1981                                             tcp_get_local_address,
1982                                             tcp_get_fd,
1983                                             tcp_can_track_err};
1984 
1985 grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
1986                                const grpc_core::PosixTcpOptions& options,
1987                                absl::string_view peer_string) {
1988   grpc_tcp* tcp = new grpc_tcp(options);
1989   tcp->base.vtable = &vtable;
1990   tcp->peer_string = std::string(peer_string);
1991   tcp->fd = grpc_fd_wrapped_fd(em_fd);
1992   GPR_ASSERT(options.resource_quota != nullptr);
1993   tcp->memory_owner =
1994       options.resource_quota->memory_quota()->CreateMemoryOwner();
1995   tcp->self_reservation = tcp->memory_owner.MakeReservation(sizeof(grpc_tcp));
1996   grpc_resolved_address resolved_local_addr;
1997   memset(&resolved_local_addr, 0, sizeof(resolved_local_addr));
1998   resolved_local_addr.len = sizeof(resolved_local_addr.addr);
1999   absl::StatusOr<std::string> addr_uri;
2000   if (getsockname(tcp->fd,
2001                   reinterpret_cast<sockaddr*>(resolved_local_addr.addr),
2002                   &resolved_local_addr.len) < 0 ||
2003       !(addr_uri = grpc_sockaddr_to_uri(&resolved_local_addr)).ok()) {
2004     tcp->local_address = "";
2005   } else {
2006     tcp->local_address = addr_uri.value();
2007   }
2008   tcp->read_cb = nullptr;
2009   tcp->write_cb = nullptr;
2010   tcp->current_zerocopy_send = nullptr;
2011   tcp->release_fd_cb = nullptr;
2012   tcp->release_fd = nullptr;
2013   tcp->target_length = static_cast<double>(options.tcp_read_chunk_size);
2014   tcp->bytes_read_this_round = 0;
2015   // Will be set to false by the very first endpoint read function
2016   tcp->is_first_read = true;
2017   tcp->bytes_counter = -1;
2018   tcp->socket_ts_enabled = false;
2019   tcp->ts_capable = true;
2020   tcp->outgoing_buffer_arg = nullptr;
2021   tcp->min_progress_size = 1;
2022   if (options.tcp_tx_zero_copy_enabled &&
2023       !tcp->tcp_zerocopy_send_ctx.memory_limited()) {
2024 #ifdef GRPC_LINUX_ERRQUEUE
2025     const int enable = 1;
2026     auto err =
2027         setsockopt(tcp->fd, SOL_SOCKET, SO_ZEROCOPY, &enable, sizeof(enable));
2028     if (err == 0) {
2029       tcp->tcp_zerocopy_send_ctx.set_enabled(true);
2030     } else {
2031       gpr_log(GPR_ERROR, "Failed to set zerocopy options on the socket.");
2032     }
2033 #endif
2034   }
2035   // paired with unref in grpc_tcp_destroy
2036   new (&tcp->refcount) grpc_core::RefCount(
2037       1, GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) ? "tcp" : nullptr);
2038   gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
2039   tcp->em_fd = em_fd;
2040   grpc_slice_buffer_init(&tcp->last_read_buffer);
2041   GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
2042                     grpc_schedule_on_exec_ctx);
2043   if (grpc_event_engine_run_in_background()) {
2044     // If there is a polling engine always running in the background, there is
2045     // no need to run the backup poller.
2046     GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
2047                       grpc_schedule_on_exec_ctx);
2048   } else {
2049     GRPC_CLOSURE_INIT(&tcp->write_done_closure,
2050                       tcp_drop_uncovered_then_handle_write, tcp,
2051                       grpc_schedule_on_exec_ctx);
2052   }
2053   // Always assume there is something on the queue to read.
2054   tcp->inq = 1;
2055 #ifdef GRPC_HAVE_TCP_INQ
2056   int one = 1;
2057   if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
2058     tcp->inq_capable = true;
2059   } else {
2060     gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
2061     tcp->inq_capable = false;
2062   }
2063 #else
2064   tcp->inq_capable = false;
2065 #endif  // GRPC_HAVE_TCP_INQ
2066   // Start being notified on errors if event engine can track errors.
2067   if (grpc_event_engine_can_track_errors()) {
2068     // Grab a ref to tcp so that we can safely access the tcp struct when
2069     // processing errors. We unref when we no longer want to track errors
2070     // separately.
2071     TCP_REF(tcp, "error-tracking");
2072     gpr_atm_rel_store(&tcp->stop_error_notification, 0);
2073     GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
2074                       grpc_schedule_on_exec_ctx);
2075     grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
2076   }
2077 
2078   return &tcp->base;
2079 }
2080 
2081 int grpc_tcp_fd(grpc_endpoint* ep) {
2082   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
2083   GPR_ASSERT(ep->vtable == &vtable);
2084   return grpc_fd_wrapped_fd(tcp->em_fd);
2085 }
2086 
2087 void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
2088                                      grpc_closure* done) {
2089   if (grpc_event_engine::experimental::grpc_is_event_engine_endpoint(ep)) {
2090     return grpc_event_engine::experimental::
2091         grpc_event_engine_endpoint_destroy_and_release_fd(ep, fd, done);
2092   }
2093   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
2094   GPR_ASSERT(ep->vtable == &vtable);
2095   tcp->release_fd = fd;
2096   tcp->release_fd_cb = done;
2097   grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
2098   if (grpc_event_engine_can_track_errors()) {
2099     // Stop error notifications.
2100     ZerocopyDisableAndWaitForRemaining(tcp);
2101     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
2102     grpc_fd_set_error(tcp->em_fd);
2103   }
2104   tcp->read_mu.Lock();
2105   tcp->memory_owner.Reset();
2106   tcp->read_mu.Unlock();
2107   TCP_UNREF(tcp, "destroy");
2108 }
2109 
2110 void grpc_tcp_posix_init() { g_backup_poller_mu = new grpc_core::Mutex; }
2111 
2112 void grpc_tcp_posix_shutdown() {
2113   delete g_backup_poller_mu;
2114   g_backup_poller_mu = nullptr;
2115 }
2116 
2117 #endif  // GRPC_POSIX_SOCKET_TCP
2118