1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #include <grpc/support/port_platform.h>
20 
21 #include "src/core/lib/iomgr/port.h"
22 
23 #ifdef GRPC_POSIX_SOCKET_TCP
24 
25 #include "src/core/lib/iomgr/tcp_posix.h"
26 
27 #include <errno.h>
28 #include <limits.h>
29 #include <netinet/in.h>
30 #include <netinet/tcp.h>
31 #include <stdbool.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <sys/socket.h>
36 #include <sys/types.h>
37 #include <unistd.h>
38 #include <algorithm>
39 #include <unordered_map>
40 
41 #include <grpc/slice.h>
42 #include <grpc/support/alloc.h>
43 #include <grpc/support/log.h>
44 #include <grpc/support/string_util.h>
45 #include <grpc/support/sync.h>
46 #include <grpc/support/time.h>
47 
48 #include "src/core/lib/channel/channel_args.h"
49 #include "src/core/lib/debug/stats.h"
50 #include "src/core/lib/debug/trace.h"
51 #include "src/core/lib/gpr/string.h"
52 #include "src/core/lib/gpr/useful.h"
53 #include "src/core/lib/gprpp/sync.h"
54 #include "src/core/lib/iomgr/buffer_list.h"
55 #include "src/core/lib/iomgr/ev_posix.h"
56 #include "src/core/lib/iomgr/executor.h"
57 #include "src/core/lib/iomgr/socket_utils_posix.h"
58 #include "src/core/lib/profiling/timers.h"
59 #include "src/core/lib/slice/slice_internal.h"
60 #include "src/core/lib/slice/slice_string_helpers.h"
61 
62 #ifndef SOL_TCP
63 #define SOL_TCP IPPROTO_TCP
64 #endif
65 
66 #ifndef TCP_INQ
67 #define TCP_INQ 36
68 #define TCP_CM_INQ TCP_INQ
69 #endif
70 
71 #ifdef GRPC_HAVE_MSG_NOSIGNAL
72 #define SENDMSG_FLAGS MSG_NOSIGNAL
73 #else
74 #define SENDMSG_FLAGS 0
75 #endif
76 
77 // TCP zero copy sendmsg flag.
78 // NB: We define this here as a fallback in case we're using an older set of
79 // library headers that has not defined MSG_ZEROCOPY. Since this constant is
80 // part of the kernel, we are guaranteed it will never change/disagree so
81 // defining it here is safe.
82 #ifndef MSG_ZEROCOPY
83 #define MSG_ZEROCOPY 0x4000000
84 #endif
85 
86 #ifdef GRPC_MSG_IOVLEN_TYPE
87 typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
88 #else
89 typedef size_t msg_iovlen_type;
90 #endif
91 
92 extern grpc_core::TraceFlag grpc_tcp_trace;
93 
94 namespace grpc_core {
95 
96 class TcpZerocopySendRecord {
97  public:
98   TcpZerocopySendRecord() { grpc_slice_buffer_init(&buf_); }
99 
100   ~TcpZerocopySendRecord() {
101     AssertEmpty();
102     grpc_slice_buffer_destroy_internal(&buf_);
103   }
104 
105   // Given the slices that we wish to send, and the current offset into the
106   //   slice buffer (indicating which have already been sent), populate an iovec
107   //   array that will be used for a zerocopy enabled sendmsg().
108   msg_iovlen_type PopulateIovs(size_t* unwind_slice_idx,
109                                size_t* unwind_byte_idx, size_t* sending_length,
110                                iovec* iov);
111 
112   // A sendmsg() may not be able to send the bytes that we requested at this
113   // time, returning EAGAIN (possibly due to backpressure). In this case,
114   // unwind the offset into the slice buffer so we retry sending these bytes.
115   void UnwindIfThrottled(size_t unwind_slice_idx, size_t unwind_byte_idx) {
116     out_offset_.byte_idx = unwind_byte_idx;
117     out_offset_.slice_idx = unwind_slice_idx;
118   }
119 
120   // Update the offset into the slice buffer based on how much we wanted to send
121   // vs. what sendmsg() actually sent (which may be lower, possibly due to
122   // backpressure).
123   void UpdateOffsetForBytesSent(size_t sending_length, size_t actually_sent);
124 
125   // Indicates whether all underlying data has been sent or not.
126   bool AllSlicesSent() { return out_offset_.slice_idx == buf_.count; }
127 
128   // Reset this structure for a new tcp_write() with zerocopy.
129   void PrepareForSends(grpc_slice_buffer* slices_to_send) {
130     AssertEmpty();
131     out_offset_.slice_idx = 0;
132     out_offset_.byte_idx = 0;
133     grpc_slice_buffer_swap(slices_to_send, &buf_);
134     Ref();
135   }
136 
137   // References: 1 reference per sendmsg(), and 1 for the tcp_write().
138   void Ref() { ref_.FetchAdd(1, MemoryOrder::RELAXED); }
139 
140   // Unref: called when we get an error queue notification for a sendmsg(), if a
141   //  sendmsg() failed or when tcp_write() is done.
142   bool Unref() {
143     const intptr_t prior = ref_.FetchSub(1, MemoryOrder::ACQ_REL);
144     GPR_DEBUG_ASSERT(prior > 0);
145     if (prior == 1) {
146       AllSendsComplete();
147       return true;
148     }
149     return false;
150   }
151 
152  private:
153   struct OutgoingOffset {
154     size_t slice_idx = 0;
155     size_t byte_idx = 0;
156   };
157 
158   void AssertEmpty() {
159     GPR_DEBUG_ASSERT(buf_.count == 0);
160     GPR_DEBUG_ASSERT(buf_.length == 0);
161     GPR_DEBUG_ASSERT(ref_.Load(MemoryOrder::RELAXED) == 0);
162   }
163 
164   // When all sendmsg() calls associated with this tcp_write() have been
165   // completed (ie. we have received the notifications for each sequence number
166   // for each sendmsg()) and all reference counts have been dropped, drop our
167   // reference to the underlying data since we no longer need it.
168   void AllSendsComplete() {
169     GPR_DEBUG_ASSERT(ref_.Load(MemoryOrder::RELAXED) == 0);
170     grpc_slice_buffer_reset_and_unref_internal(&buf_);
171   }
172 
173   grpc_slice_buffer buf_;
174   Atomic<intptr_t> ref_;
175   OutgoingOffset out_offset_;
176 };
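// Illustrative sketch (not in the original source, not compiled): how a
// record's reference count is expected to evolve across one zerocopy
// tcp_write() that needs two sendmsg() calls, per the comments above.
#if 0
void TcpZerocopySendRecordLifecycleSketch(TcpZerocopySendRecord* record,
                                          grpc_slice_buffer* slices) {
  record->PrepareForSends(slices);  // takes the ref held for the tcp_write()
  record->Ref();                    // ref for the 1st sendmsg()
  record->Ref();                    // ref for the 2nd sendmsg()
  // ... later, one error-queue notification arrives per sendmsg() ...
  GPR_ASSERT(!record->Unref());  // 1st notification: other refs remain
  GPR_ASSERT(!record->Unref());  // 2nd notification: tcp_write() ref remains
  GPR_ASSERT(record->Unref());   // tcp_write() done: last ref, buffers released
}
#endif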
177 
178 class TcpZerocopySendCtx {
179  public:
180   static constexpr int kDefaultMaxSends = 4;
181   static constexpr size_t kDefaultSendBytesThreshold = 16 * 1024;  // 16KB
182 
183   TcpZerocopySendCtx(int max_sends = kDefaultMaxSends,
184                      size_t send_bytes_threshold = kDefaultSendBytesThreshold)
185       : max_sends_(max_sends),
186         free_send_records_size_(max_sends),
187         threshold_bytes_(send_bytes_threshold) {
188     send_records_ = static_cast<TcpZerocopySendRecord*>(
189         gpr_malloc(max_sends * sizeof(*send_records_)));
190     free_send_records_ = static_cast<TcpZerocopySendRecord**>(
191         gpr_malloc(max_sends * sizeof(*free_send_records_)));
192     if (send_records_ == nullptr || free_send_records_ == nullptr) {
193       gpr_free(send_records_);
194       gpr_free(free_send_records_);
195       gpr_log(GPR_INFO, "Disabling TCP TX zerocopy due to memory pressure.\n");
196       memory_limited_ = true;
197     } else {
198       for (int idx = 0; idx < max_sends_; ++idx) {
199         new (send_records_ + idx) TcpZerocopySendRecord();
200         free_send_records_[idx] = send_records_ + idx;
201       }
202     }
203   }
204 
205   ~TcpZerocopySendCtx() {
206     if (send_records_ != nullptr) {
207       for (int idx = 0; idx < max_sends_; ++idx) {
208         send_records_[idx].~TcpZerocopySendRecord();
209       }
210     }
211     gpr_free(send_records_);
212     gpr_free(free_send_records_);
213   }
214 
215   // True if we were unable to allocate the various bookkeeping structures at
216   // transport initialization time. If memory limited, we do not zerocopy.
217   bool memory_limited() const { return memory_limited_; }
218 
219   // TCP send zerocopy maintains an implicit sequence number for every
220   // successful sendmsg() with zerocopy enabled; the kernel later gives us an
221   // error queue notification with this sequence number indicating that the
222   // underlying data buffers that we sent can now be released. Once that
223   // notification is received, we can release the buffers associated with this
224   // zerocopy send record. Here, we associate the sequence number with the data
225   // buffers that were sent with the corresponding call to sendmsg().
226   void NoteSend(TcpZerocopySendRecord* record) {
227     record->Ref();
228     AssociateSeqWithSendRecord(last_send_, record);
229     ++last_send_;
230   }
231 
232   // If sendmsg() actually failed, though, we need to revert the sequence number
233   // that we speculatively bumped before calling sendmsg(). Note that we bump
234   // this sequence number and perform relevant bookkeeping (see: NoteSend())
235   // *before* calling sendmsg() since, if we called it *after* sendmsg(), then
236   // there is a possible race with the release notification which could occur on
237   // another thread before we do the necessary bookkeeping. Hence, calling
238   // NoteSend() *before* sendmsg() and implementing an undo function is needed.
239   void UndoSend() {
240     --last_send_;
241     if (ReleaseSendRecord(last_send_)->Unref()) {
242       // We should still be holding the ref taken by tcp_write().
243       GPR_DEBUG_ASSERT(0);
244     }
245   }
246 
247   // Simply associate this send record (and the underlying sent data buffers)
248   // with the implicit sequence number for this zerocopy sendmsg().
249   void AssociateSeqWithSendRecord(uint32_t seq, TcpZerocopySendRecord* record) {
250     MutexLock guard(&lock_);
251     ctx_lookup_.emplace(seq, record);
252   }
253 
254   // Get a send record for a send that we wish to do with zerocopy.
255   TcpZerocopySendRecord* GetSendRecord() {
256     MutexLock guard(&lock_);
257     return TryGetSendRecordLocked();
258   }
259 
260   // A given send record corresponds to a single tcp_write() with zerocopy
261   // enabled. This can result in several sendmsg() calls to flush all of the
262   // data to wire. Each sendmsg() takes a reference on the
263   // TcpZerocopySendRecord, and corresponds to a single sequence number.
264   // ReleaseSendRecord releases a reference on TcpZerocopySendRecord for a
265   // single sequence number. This is called either when the relevant error
266   // queue notification (saying that we can discard the underlying buffers for
267   // this sendmsg()) is received from the kernel, or when the sendmsg() itself
268   // was unsuccessful to begin with.
269   TcpZerocopySendRecord* ReleaseSendRecord(uint32_t seq) {
270     MutexLock guard(&lock_);
271     return ReleaseSendRecordLocked(seq);
272   }
273 
274   // After all the references to a TcpZerocopySendRecord are released, we can
275   // add it back to the pool (of size max_sends_). Note that we can only have
276   // max_sends_ tcp_write() instances with zerocopy enabled in flight at the
277   // same time.
278   void PutSendRecord(TcpZerocopySendRecord* record) {
279     GPR_DEBUG_ASSERT(record >= send_records_ &&
280                      record < send_records_ + max_sends_);
281     MutexLock guard(&lock_);
282     PutSendRecordLocked(record);
283   }
284 
285   // Indicate that we are disposing of this zerocopy context. This indicator
286   // will prevent new zerocopy writes from being issued.
287   void Shutdown() { shutdown_.Store(true, MemoryOrder::RELEASE); }
288 
289   // Indicates that there are no inflight tcp_write() instances with zerocopy
290   // enabled.
291   bool AllSendRecordsEmpty() {
292     MutexLock guard(&lock_);
293     return free_send_records_size_ == max_sends_;
294   }
295 
296   bool enabled() const { return enabled_; }
297 
298   void set_enabled(bool enabled) {
299     GPR_DEBUG_ASSERT(!enabled || !memory_limited());
300     enabled_ = enabled;
301   }
302 
303   // Only use zerocopy if we are sending at least this many bytes. The
304   // additional overhead of reading the error queue for notifications means that
305   // zerocopy is not useful for small transfers.
306   size_t threshold_bytes() const { return threshold_bytes_; }
307 
308  private:
309   TcpZerocopySendRecord* ReleaseSendRecordLocked(uint32_t seq) {
310     auto iter = ctx_lookup_.find(seq);
311     GPR_DEBUG_ASSERT(iter != ctx_lookup_.end());
312     TcpZerocopySendRecord* record = iter->second;
313     ctx_lookup_.erase(iter);
314     return record;
315   }
316 
317   TcpZerocopySendRecord* TryGetSendRecordLocked() {
318     if (shutdown_.Load(MemoryOrder::ACQUIRE)) {
319       return nullptr;
320     }
321     if (free_send_records_size_ == 0) {
322       return nullptr;
323     }
324     free_send_records_size_--;
325     return free_send_records_[free_send_records_size_];
326   }
327 
328   void PutSendRecordLocked(TcpZerocopySendRecord* record) {
329     GPR_DEBUG_ASSERT(free_send_records_size_ < max_sends_);
330     free_send_records_[free_send_records_size_] = record;
331     free_send_records_size_++;
332   }
333 
334   TcpZerocopySendRecord* send_records_;
335   TcpZerocopySendRecord** free_send_records_;
336   int max_sends_;
337   int free_send_records_size_;
338   Mutex lock_;
339   uint32_t last_send_ = 0;
340   Atomic<bool> shutdown_;
341   bool enabled_ = false;
342   size_t threshold_bytes_ = kDefaultSendBytesThreshold;
343   std::unordered_map<uint32_t, TcpZerocopySendRecord*> ctx_lookup_;
344   bool memory_limited_ = false;
345 };
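// Illustrative sketch (not in the original source, not compiled): the
// sequence-number protocol described above, with hypothetical locals `ctx`,
// `fd`, `msg` and `seq`. NoteSend() must run *before* sendmsg() so that a
// completion notification racing on another thread finds the bookkeeping in
// place; UndoSend() reverts it if sendmsg() fails.
#if 0
bool ZerocopySendSketch(TcpZerocopySendCtx* ctx, int fd, msghdr* msg,
                        grpc_slice_buffer* slices) {
  TcpZerocopySendRecord* record = ctx->GetSendRecord();
  if (record == nullptr) return false;  // pool exhausted or shut down
  record->PrepareForSends(slices);
  ctx->NoteSend(record);
  if (sendmsg(fd, msg, MSG_ZEROCOPY) < 0) {
    ctx->UndoSend();
    return false;
  }
  // When the kernel's error-queue notification for sequence number `seq`
  // arrives, the buffers for that sendmsg() may be released:
  //   TcpZerocopySendRecord* done = ctx->ReleaseSendRecord(seq);
  //   if (done->Unref()) ctx->PutSendRecord(done);
  return true;
}
#endif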
346 
347 }  // namespace grpc_core
348 
349 using grpc_core::TcpZerocopySendCtx;
350 using grpc_core::TcpZerocopySendRecord;
351 
352 namespace {
353 struct grpc_tcp {
354   grpc_endpoint base;
355   grpc_fd* em_fd;
356   int fd;
357   /* Used by the endpoint read function to distinguish the very first read call
358    * from the rest */
359   bool is_first_read;
360   double target_length;
361   double bytes_read_this_round;
362   grpc_core::RefCount refcount;
363   gpr_atm shutdown_count;
364 
365   int min_read_chunk_size;
366   int max_read_chunk_size;
367 
368   /* garbage after the last read */
369   grpc_slice_buffer last_read_buffer;
370 
371   grpc_slice_buffer* incoming_buffer;
372   int inq;          /* bytes pending on the socket from the last read. */
373   bool inq_capable; /* cache whether kernel supports inq */
374 
375   grpc_slice_buffer* outgoing_buffer;
376   /* byte within outgoing_buffer->slices[0] to write next */
377   size_t outgoing_byte_idx;
378 
379   grpc_closure* read_cb;
380   grpc_closure* write_cb;
381   grpc_closure* release_fd_cb;
382   int* release_fd;
383 
384   grpc_closure read_done_closure;
385   grpc_closure write_done_closure;
386   grpc_closure error_closure;
387 
388   char* peer_string;
389 
390   grpc_resource_user* resource_user;
391   grpc_resource_user_slice_allocator slice_allocator;
392 
393   grpc_core::TracedBuffer* tb_head; /* List of traced buffers */
394   gpr_mu tb_mu; /* Lock for access to list of traced buffers */
395 
396   /* grpc_endpoint_write takes an argument which if non-null means that the
397    * transport layer wants the TCP layer to collect timestamps for this write.
398    * This arg is forwarded to the timestamps callback function when the ACK
399    * timestamp is received from the kernel. This arg is a (void *) which allows
400    * users of this API to pass in a pointer to any kind of structure. This
401    * structure could actually be a tag or any book-keeping object that the user
402    * can use to distinguish between different traced writes. The only
403    * requirement from the TCP endpoint layer is that this arg should be non-null
404    * if the user wants timestamps for the write. */
405   void* outgoing_buffer_arg;
406   /* A counter which starts at 0. It is initialized the first time the socket
407    * options for collecting timestamps are set, and is incremented with each
408    * byte sent. */
409   int bytes_counter;
410   bool socket_ts_enabled; /* True if timestamping options are set on the socket
411                            */
412   bool ts_capable;        /* Cache whether we can set timestamping options */
413   gpr_atm stop_error_notification; /* Set to 1 if we do not want to be notified
414                                       on errors anymore */
415   TcpZerocopySendCtx tcp_zerocopy_send_ctx;
416   TcpZerocopySendRecord* current_zerocopy_send = nullptr;
417 };
418 
419 struct backup_poller {
420   gpr_mu* pollset_mu;
421   grpc_closure run_poller;
422 };
423 
424 }  // namespace
425 
426 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp);
427 
428 #define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
429 
430 static gpr_atm g_uncovered_notifications_pending;
431 static gpr_atm g_backup_poller; /* backup_poller* */
432 
433 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
434 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
435 static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
436                                                  grpc_error* error);
437 
438 static void done_poller(void* bp, grpc_error* /*error_ignored*/) {
439   backup_poller* p = static_cast<backup_poller*>(bp);
440   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
441     gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
442   }
443   grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
444   gpr_free(p);
445 }
446 
447 static void run_poller(void* bp, grpc_error* /*error_ignored*/) {
448   backup_poller* p = static_cast<backup_poller*>(bp);
449   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
450     gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
451   }
452   gpr_mu_lock(p->pollset_mu);
453   grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
454   GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
455   GRPC_LOG_IF_ERROR(
456       "backup_poller:pollset_work",
457       grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
458   gpr_mu_unlock(p->pollset_mu);
459   /* last "uncovered" notification is the ref that keeps us polling, if we get
460    * there try a cas to release it */
461   if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
462       gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
463     gpr_mu_lock(p->pollset_mu);
464     bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
465     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
466       gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
467     }
468     gpr_mu_unlock(p->pollset_mu);
469     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
470       gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
471     }
472     grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
473                           GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
474                                             grpc_schedule_on_exec_ctx));
475   } else {
476     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
477       gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
478     }
479     grpc_core::Executor::Run(&p->run_poller, GRPC_ERROR_NONE,
480                              grpc_core::ExecutorType::DEFAULT,
481                              grpc_core::ExecutorJobType::LONG);
482   }
483 }
484 
485 static void drop_uncovered(grpc_tcp* /*tcp*/) {
486   backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
487   gpr_atm old_count =
488       gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
489   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
490     gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
491             static_cast<int>(old_count), static_cast<int>(old_count) - 1);
492   }
493   GPR_ASSERT(old_count != 1);
494 }
495 
496 // The gRPC API considers a Write operation to be done the moment it clears
497 // ‘flow control’, i.e., not necessarily sent on the wire. This means that the
498 // application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
499 // manner when its `Write()` API is acked.
500 //
501 // We need to ensure that the fd is 'covered' (i.e. being monitored by some
502 // polling thread so that progress is made); hence we add it to a backup poller here.
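// Roughly: g_uncovered_notifications_pending counts the outstanding "uncovered"
// write notifications plus one extra reference held by the backup poller
// itself. cover_self() increments it, drop_uncovered() decrements it, and
// run_poller() shuts the poller down once only its own reference remains.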
503 static void cover_self(grpc_tcp* tcp) {
504   backup_poller* p;
505   gpr_atm old_count =
506       gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
507   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
508     gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
509             static_cast<int>(old_count), 2 + static_cast<int>(old_count));
510   }
511   if (old_count == 0) {
512     GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
513     p = static_cast<backup_poller*>(
514         gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
515     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
516       gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
517     }
518     grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
519     gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
520     grpc_core::Executor::Run(
521         GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
522         GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
523         grpc_core::ExecutorJobType::LONG);
524   } else {
525     while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) ==
526            nullptr) {
527       // spin waiting for backup poller
528     }
529   }
530   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
531     gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
532   }
533   grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
534   if (old_count != 0) {
535     drop_uncovered(tcp);
536   }
537 }
538 
539 static void notify_on_read(grpc_tcp* tcp) {
540   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
541     gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
542   }
543   grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
544 }
545 
546 static void notify_on_write(grpc_tcp* tcp) {
547   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
548     gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
549   }
550   if (!grpc_event_engine_run_in_background()) {
551     cover_self(tcp);
552   }
553   grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
554 }
555 
556 static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
557   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
558     gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
559   }
560   drop_uncovered(static_cast<grpc_tcp*>(arg));
561   tcp_handle_write(arg, error);
562 }
563 
564 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
565   tcp->bytes_read_this_round += static_cast<double>(bytes);
566 }
567 
568 static void finish_estimate(grpc_tcp* tcp) {
569   /* If we read >80% of the target buffer in one read loop, increase the size
570      of the target buffer to either the amount read, or twice its previous
571      value */
572   if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
573     tcp->target_length =
574         GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round);
575   } else {
576     tcp->target_length =
577         0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
578   }
579   tcp->bytes_read_this_round = 0;
580 }
581 
582 static size_t get_target_read_size(grpc_tcp* tcp) {
583   grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
584   double pressure = grpc_resource_quota_get_memory_pressure(rq);
585   double target =
586       tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
587   size_t sz = ((static_cast<size_t> GPR_CLAMP(target, tcp->min_read_chunk_size,
588                                               tcp->max_read_chunk_size)) +
589                255) &
590               ~static_cast<size_t>(255);
591   /* don't use more than 1/16th of the overall resource quota for a single read
592    * alloc */
593   size_t rqmax = grpc_resource_quota_peek_size(rq);
594   if (sz > rqmax / 16 && rqmax > 1024) {
595     sz = rqmax / 16;
596   }
597   return sz;
598 }
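/* Worked example (illustrative): with target_length = 8192 and 7000 bytes read
 * in one loop, 7000 > 0.8 * 8192, so the target jumps to
 * max(2 * 8192, 7000) = 16384. With only 1000 bytes read instead, it decays to
 * 0.99 * 8192 + 0.01 * 1000 = 8120.08. A target of 8120.08 under memory
 * pressure 0.9 is scaled by (1.0 - 0.9) / 0.2 = 0.5 to ~4060, clamped to
 * [min_read_chunk_size, max_read_chunk_size], and rounded up to a multiple of
 * 256, giving 4096 (still subject to the 1/16th-of-quota cap above). */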
599 
600 static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
601   return grpc_error_set_str(
602       grpc_error_set_int(
603           grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
604           /* All tcp errors are marked with UNAVAILABLE so that the application
605            * may choose to retry. */
606           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
607       GRPC_ERROR_STR_TARGET_ADDRESS,
608       grpc_slice_from_copied_string(tcp->peer_string));
609 }
610 
611 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
612 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
613 
614 static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
615   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
616   ZerocopyDisableAndWaitForRemaining(tcp);
617   grpc_fd_shutdown(tcp->em_fd, why);
618   grpc_resource_user_shutdown(tcp->resource_user);
619 }
620 
621 static void tcp_free(grpc_tcp* tcp) {
622   grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
623                  "tcp_unref_orphan");
624   grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
625   grpc_resource_user_unref(tcp->resource_user);
626   gpr_free(tcp->peer_string);
627   /* The lock is not really necessary here, since all refs have been released */
628   gpr_mu_lock(&tcp->tb_mu);
629   grpc_core::TracedBuffer::Shutdown(
630       &tcp->tb_head, tcp->outgoing_buffer_arg,
631       GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
632   gpr_mu_unlock(&tcp->tb_mu);
633   tcp->outgoing_buffer_arg = nullptr;
634   gpr_mu_destroy(&tcp->tb_mu);
635   tcp->tcp_zerocopy_send_ctx.~TcpZerocopySendCtx();
636   gpr_free(tcp);
637 }
638 
639 #ifndef NDEBUG
640 #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
641 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
642 static void tcp_unref(grpc_tcp* tcp, const char* reason,
643                       const grpc_core::DebugLocation& debug_location) {
644   if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
645     tcp_free(tcp);
646   }
647 }
648 
649 static void tcp_ref(grpc_tcp* tcp, const char* reason,
650                     const grpc_core::DebugLocation& debug_location) {
651   tcp->refcount.Ref(debug_location, reason);
652 }
653 #else
654 #define TCP_UNREF(tcp, reason) tcp_unref((tcp))
655 #define TCP_REF(tcp, reason) tcp_ref((tcp))
656 static void tcp_unref(grpc_tcp* tcp) {
657   if (GPR_UNLIKELY(tcp->refcount.Unref())) {
658     tcp_free(tcp);
659   }
660 }
661 
662 static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
663 #endif
664 
665 static void tcp_destroy(grpc_endpoint* ep) {
666   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
667   grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
668   if (grpc_event_engine_can_track_errors()) {
669     ZerocopyDisableAndWaitForRemaining(tcp);
670     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
671     grpc_fd_set_error(tcp->em_fd);
672   }
673   TCP_UNREF(tcp, "destroy");
674 }
675 
676 static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
677   grpc_closure* cb = tcp->read_cb;
678 
679   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
680     gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
681     size_t i;
682     const char* str = grpc_error_string(error);
683     gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp, tcp->peer_string, str);
684 
685     if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
686       for (i = 0; i < tcp->incoming_buffer->count; i++) {
687         char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
688                                      GPR_DUMP_HEX | GPR_DUMP_ASCII);
689         gpr_log(GPR_DEBUG, "DATA: %s", dump);
690         gpr_free(dump);
691       }
692     }
693   }
694 
695   tcp->read_cb = nullptr;
696   tcp->incoming_buffer = nullptr;
697   grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
698 }
699 
700 #define MAX_READ_IOVEC 4
701 static void tcp_do_read(grpc_tcp* tcp) {
702   GPR_TIMER_SCOPE("tcp_do_read", 0);
703   struct msghdr msg;
704   struct iovec iov[MAX_READ_IOVEC];
705   ssize_t read_bytes;
706   size_t total_read_bytes = 0;
707   size_t iov_len =
708       std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
709 #ifdef GRPC_LINUX_ERRQUEUE
710   constexpr size_t cmsg_alloc_space =
711       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
712 #else
713   constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
714 #endif /* GRPC_LINUX_ERRQUEUE */
715   char cmsgbuf[cmsg_alloc_space];
716   for (size_t i = 0; i < iov_len; i++) {
717     iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
718     iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
719   }
720 
721   do {
722     /* Assume there is something on the queue. If we receive TCP_INQ from the
723      * kernel, we will update this value; otherwise, we have to assume there is
724      * always something to read until we get EAGAIN. */
725     tcp->inq = 1;
726 
727     msg.msg_name = nullptr;
728     msg.msg_namelen = 0;
729     msg.msg_iov = iov;
730     msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
731     if (tcp->inq_capable) {
732       msg.msg_control = cmsgbuf;
733       msg.msg_controllen = sizeof(cmsgbuf);
734     } else {
735       msg.msg_control = nullptr;
736       msg.msg_controllen = 0;
737     }
738     msg.msg_flags = 0;
739 
740     GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
741     GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
742 
743     do {
744       GPR_TIMER_SCOPE("recvmsg", 0);
745       GRPC_STATS_INC_SYSCALL_READ();
746       read_bytes = recvmsg(tcp->fd, &msg, 0);
747     } while (read_bytes < 0 && errno == EINTR);
748 
749     /* We have read something in previous reads. We need to deliver those
750      * bytes to the upper layer. */
751     if (read_bytes <= 0 && total_read_bytes > 0) {
752       tcp->inq = 1;
753       break;
754     }
755 
756     if (read_bytes < 0) {
757       /* NB: After calling call_read_cb a parallel call of the read handler may
758        * be running. */
759       if (errno == EAGAIN) {
760         finish_estimate(tcp);
761         tcp->inq = 0;
762         /* We've consumed the edge, request a new one */
763         notify_on_read(tcp);
764       } else {
765         grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
766         call_read_cb(tcp,
767                      tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
768         TCP_UNREF(tcp, "read");
769       }
770       return;
771     }
772     if (read_bytes == 0) {
773       /* 0 read size ==> end of stream
774        *
775        * We may have read something, i.e., total_read_bytes > 0, but
776        * since the connection is closed we will drop the data here, because we
777        * can't call the callback multiple times. */
778       grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
779       call_read_cb(
780           tcp, tcp_annotate_error(
781                    GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
782       TCP_UNREF(tcp, "read");
783       return;
784     }
785 
786     GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
787     add_to_estimate(tcp, static_cast<size_t>(read_bytes));
788     GPR_DEBUG_ASSERT((size_t)read_bytes <=
789                      tcp->incoming_buffer->length - total_read_bytes);
790 
791 #ifdef GRPC_HAVE_TCP_INQ
792     if (tcp->inq_capable) {
793       GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
794       struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
795       for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
796         if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
797             cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
798           tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
799           break;
800         }
801       }
802     }
803 #endif /* GRPC_HAVE_TCP_INQ */
804 
805     total_read_bytes += read_bytes;
806     if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
807       /* We have filled incoming_buffer, and we cannot read any more. */
808       break;
809     }
810 
811     /* We had a partial read, and still have space to read more data.
812      * So, adjust IOVs and try to read more. */
813     size_t remaining = read_bytes;
814     size_t j = 0;
815     for (size_t i = 0; i < iov_len; i++) {
816       if (remaining >= iov[i].iov_len) {
817         remaining -= iov[i].iov_len;
818         continue;
819       }
820       if (remaining > 0) {
821         iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
822         iov[j].iov_len = iov[i].iov_len - remaining;
823         remaining = 0;
824       } else {
825         iov[j].iov_base = iov[i].iov_base;
826         iov[j].iov_len = iov[i].iov_len;
827       }
828       ++j;
829     }
830     iov_len = j;
831   } while (true);
832 
833   if (tcp->inq == 0) {
834     finish_estimate(tcp);
835   }
836 
837   GPR_DEBUG_ASSERT(total_read_bytes > 0);
838   if (total_read_bytes < tcp->incoming_buffer->length) {
839     grpc_slice_buffer_trim_end(tcp->incoming_buffer,
840                                tcp->incoming_buffer->length - total_read_bytes,
841                                &tcp->last_read_buffer);
842   }
843   call_read_cb(tcp, GRPC_ERROR_NONE);
844   TCP_UNREF(tcp, "read");
845 }
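// Illustrative sketch (not in the original source, not compiled): tcp_do_read()
// above only consumes the TCP_CM_INQ control message; the option itself has to
// be enabled on the socket first (done elsewhere, outside this excerpt). On
// Linux 4.18+ that is a one-line setsockopt on a connected socket `fd`:
#if 0
static bool EnableTcpInqSketch(int fd) {
  int one = 1;
  return setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0;
}
#endif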
846 
847 static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
848   grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
849   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
850     gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
851             grpc_error_string(error));
852   }
853   if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
854     grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
855     grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
856     call_read_cb(tcp, GRPC_ERROR_REF(error));
857     TCP_UNREF(tcp, "read");
858   } else {
859     tcp_do_read(tcp);
860   }
861 }
862 
863 static void tcp_continue_read(grpc_tcp* tcp) {
864   size_t target_read_size = get_target_read_size(tcp);
865   /* Wait for allocation only when there is no buffer left. */
866   if (tcp->incoming_buffer->length == 0 &&
867       tcp->incoming_buffer->count < MAX_READ_IOVEC) {
868     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
869       gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
870     }
871     if (GPR_UNLIKELY(!grpc_resource_user_alloc_slices(&tcp->slice_allocator,
872                                                       target_read_size, 1,
873                                                       tcp->incoming_buffer))) {
874       // Wait for allocation.
875       return;
876     }
877   }
878   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
879     gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
880   }
881   tcp_do_read(tcp);
882 }
883 
884 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
885   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
886   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
887     gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
888   }
889 
890   if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
891     grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
892     grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
893     call_read_cb(tcp, GRPC_ERROR_REF(error));
894     TCP_UNREF(tcp, "read");
895   } else {
896     tcp_continue_read(tcp);
897   }
898 }
899 
900 static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
901                      grpc_closure* cb, bool urgent) {
902   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
903   GPR_ASSERT(tcp->read_cb == nullptr);
904   tcp->read_cb = cb;
905   tcp->incoming_buffer = incoming_buffer;
906   grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
907   grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
908   TCP_REF(tcp, "read");
909   if (tcp->is_first_read) {
910     /* Endpoint read called for the very first time. Register read callback with
911      * the polling engine */
912     tcp->is_first_read = false;
913     notify_on_read(tcp);
914   } else if (!urgent && tcp->inq == 0) {
915     /* Upper layer asked to read more but we know there is no pending data
916      * to read from previous reads. So, wait for POLLIN.
917      */
918     notify_on_read(tcp);
919   } else {
920     /* Not the first time. We may or may not have more bytes available. In any
921      * case call tcp->read_done_closure (i.e. tcp_handle_read()), which does the
922      * right thing (i.e. calls tcp_do_read(), which either reads the available
923      * bytes or calls notify_on_read() to be notified when new bytes become
924      * available). */
925     grpc_core::Closure::Run(DEBUG_LOCATION, &tcp->read_done_closure,
926                             GRPC_ERROR_NONE);
927   }
928 }
929 
930 /* A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
931  * of bytes sent. */
932 ssize_t tcp_send(int fd, const struct msghdr* msg, int additional_flags = 0) {
933   GPR_TIMER_SCOPE("sendmsg", 1);
934   ssize_t sent_length;
935   do {
936     /* TODO(klempner): Cork if this is a partial write */
937     GRPC_STATS_INC_SYSCALL_WRITE();
938     sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
939   } while (sent_length < 0 && errno == EINTR);
940   return sent_length;
941 }
942 
943 /** This is to be called if outgoing_buffer_arg is not null. On Linux platforms,
944  * this will call sendmsg with socket options set to collect timestamps inside
945  * the kernel. On return, sent_length is set to the return value of the sendmsg
946  * call. Returns false if setting the socket options failed. This is not
947  * implemented for non-Linux platforms currently, and crashes out.
948  */
949 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
950                                       size_t sending_length,
951                                       ssize_t* sent_length,
952                                       int additional_flags = 0);
953 
954 /** The callback function to be invoked when we get an error on the socket. */
955 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error);
956 
957 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
958     grpc_tcp* tcp, grpc_slice_buffer* buf);
959 
960 #ifdef GRPC_LINUX_ERRQUEUE
961 static bool process_errors(grpc_tcp* tcp);
962 
963 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
964     grpc_tcp* tcp, grpc_slice_buffer* buf) {
965   TcpZerocopySendRecord* zerocopy_send_record = nullptr;
966   const bool use_zerocopy =
967       tcp->tcp_zerocopy_send_ctx.enabled() &&
968       tcp->tcp_zerocopy_send_ctx.threshold_bytes() < buf->length;
969   if (use_zerocopy) {
970     zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
971     if (zerocopy_send_record == nullptr) {
972       process_errors(tcp);
973       zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
974     }
975     if (zerocopy_send_record != nullptr) {
976       zerocopy_send_record->PrepareForSends(buf);
977       GPR_DEBUG_ASSERT(buf->count == 0);
978       GPR_DEBUG_ASSERT(buf->length == 0);
979       tcp->outgoing_byte_idx = 0;
980       tcp->outgoing_buffer = nullptr;
981     }
982   }
983   return zerocopy_send_record;
984 }
985 
986 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {
987   tcp->tcp_zerocopy_send_ctx.Shutdown();
988   while (!tcp->tcp_zerocopy_send_ctx.AllSendRecordsEmpty()) {
989     process_errors(tcp);
990   }
991 }
992 
993 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
994                                       size_t sending_length,
995                                       ssize_t* sent_length,
996                                       int additional_flags) {
997   if (!tcp->socket_ts_enabled) {
998     uint32_t opt = grpc_core::kTimestampingSocketOptions;
999     if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
1000                    static_cast<void*>(&opt), sizeof(opt)) != 0) {
1001       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1002         gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
1003       }
1004       return false;
1005     }
1006     tcp->bytes_counter = -1;
1007     tcp->socket_ts_enabled = true;
1008   }
1009   /* Set control message to indicate that you want timestamps. */
1010   union {
1011     char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
1012     struct cmsghdr align;
1013   } u;
1014   cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
1015   cmsg->cmsg_level = SOL_SOCKET;
1016   cmsg->cmsg_type = SO_TIMESTAMPING;
1017   cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
1018   *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
1019       grpc_core::kTimestampingRecordingOptions;
1020   msg->msg_control = u.cmsg_buf;
1021   msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));
1022 
1023   /* If there was an error on sendmsg the logic in tcp_flush will handle it. */
1024   ssize_t length = tcp_send(tcp->fd, msg, additional_flags);
1025   *sent_length = length;
1026   /* Only save timestamps if all the bytes were taken by sendmsg. */
1027   if (sending_length == static_cast<size_t>(length)) {
1028     gpr_mu_lock(&tcp->tb_mu);
1029     grpc_core::TracedBuffer::AddNewEntry(
1030         &tcp->tb_head, static_cast<uint32_t>(tcp->bytes_counter + length),
1031         tcp->fd, tcp->outgoing_buffer_arg);
1032     gpr_mu_unlock(&tcp->tb_mu);
1033     tcp->outgoing_buffer_arg = nullptr;
1034   }
1035   return true;
1036 }
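// Illustrative sketch (not in the original source, not compiled): the generic
// Linux SO_TIMESTAMPING pattern that the function above relies on. The exact
// flag sets gRPC uses are grpc_core::kTimestampingSocketOptions and
// kTimestampingRecordingOptions (defined elsewhere); the flags below are
// standard <linux/net_tstamp.h> values shown only as an example.
#if 0
static bool EnableTxTimestampsSketch(int fd) {
  uint32_t opt = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_OPT_ID |
                 SOF_TIMESTAMPING_OPT_TSONLY;
  // Completed-send timestamps are then read back from the socket's error queue
  // (see process_errors() / process_timestamp() below).
  return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &opt, sizeof(opt)) == 0;
}
#endif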
1037 
1038 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1039                                             TcpZerocopySendRecord* record,
1040                                             uint32_t seq, const char* tag);
1041 // Reads \a cmsg to process zerocopy control messages.
1042 static void process_zerocopy(grpc_tcp* tcp, struct cmsghdr* cmsg) {
1043   GPR_DEBUG_ASSERT(cmsg);
1044   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(cmsg));
1045   GPR_DEBUG_ASSERT(serr->ee_errno == 0);
1046   GPR_DEBUG_ASSERT(serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY);
1047   const uint32_t lo = serr->ee_info;
1048   const uint32_t hi = serr->ee_data;
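  // The kernel reports completed MSG_ZEROCOPY sends as an inclusive range of
  // sequence numbers: e.g. ee_info = 3 and ee_data = 5 means sends 3, 4 and 5
  // are done and their buffers may be released.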
1049   for (uint32_t seq = lo; seq <= hi; ++seq) {
1050     // TODO(arjunroy): It's likely that lo and hi refer to zerocopy sequence
1051     // numbers that are generated by a single call to grpc_endpoint_write; ie.
1052     // we can batch the unref operation. So, check if record is the same for
1053     // both; if so, batch the unref/put.
1054     TcpZerocopySendRecord* record =
1055         tcp->tcp_zerocopy_send_ctx.ReleaseSendRecord(seq);
1056     GPR_DEBUG_ASSERT(record);
1057     UnrefMaybePutZerocopySendRecord(tcp, record, seq, "CALLBACK RCVD");
1058   }
1059 }
1060 
1061 // Whether the cmsg received from error queue is of the IPv4 or IPv6 levels.
1062 static bool CmsgIsIpLevel(const cmsghdr& cmsg) {
1063   return (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR) ||
1064          (cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR);
1065 }
1066 
1067 static bool CmsgIsZeroCopy(const cmsghdr& cmsg) {
1068   if (!CmsgIsIpLevel(cmsg)) {
1069     return false;
1070   }
1071   auto serr = reinterpret_cast<const sock_extended_err*> CMSG_DATA(&cmsg);
1072   return serr->ee_errno == 0 && serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY;
1073 }
1074 
1075 /** Reads \a cmsg to derive timestamps from the control messages. If a valid
1076  * timestamp is found, the traced buffer list is updated with this timestamp.
1077  * The caller of this function should be looping on the control messages found
1078  * in \a msg. \a cmsg should point to the control message that the caller wants
1079  * processed.
1080  * On return, a pointer to a control message is returned. On the next iteration,
1081  * CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg. */
1082 struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
1083                                   struct cmsghdr* cmsg) {
1084   auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
1085   cmsghdr* opt_stats = nullptr;
1086   if (next_cmsg == nullptr) {
1087     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1088       gpr_log(GPR_ERROR, "Received timestamp without extended error");
1089     }
1090     return cmsg;
1091   }
1092 
1093   /* Check if next_cmsg is an OPT_STATS msg */
1094   if (next_cmsg->cmsg_level == SOL_SOCKET &&
1095       next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
1096     opt_stats = next_cmsg;
1097     next_cmsg = CMSG_NXTHDR(msg, opt_stats);
1098     if (next_cmsg == nullptr) {
1099       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1100         gpr_log(GPR_ERROR, "Received timestamp without extended error");
1101       }
1102       return opt_stats;
1103     }
1104   }
1105 
1106   if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
1107       !(next_cmsg->cmsg_type == IP_RECVERR ||
1108         next_cmsg->cmsg_type == IPV6_RECVERR)) {
1109     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1110       gpr_log(GPR_ERROR, "Unexpected control message");
1111     }
1112     return cmsg;
1113   }
1114 
1115   auto tss =
1116       reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
1117   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
1118   if (serr->ee_errno != ENOMSG ||
1119       serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
1120     gpr_log(GPR_ERROR, "Unexpected control message");
1121     return cmsg;
1122   }
1123   /* The error handling can potentially be done on another thread so we need
1124    * to protect the traced buffer list. A lock free list might be better. Using
1125    * a simple mutex for now. */
1126   gpr_mu_lock(&tcp->tb_mu);
1127   grpc_core::TracedBuffer::ProcessTimestamp(&tcp->tb_head, serr, opt_stats,
1128                                             tss);
1129   gpr_mu_unlock(&tcp->tb_mu);
1130   return next_cmsg;
1131 }
1132 
1133 /** For Linux platforms, reads the socket's error queue and processes error
1134  * messages from the queue.
1135  */
1136 static bool process_errors(grpc_tcp* tcp) {
1137   bool processed_err = false;
1138   struct iovec iov;
1139   iov.iov_base = nullptr;
1140   iov.iov_len = 0;
1141   struct msghdr msg;
1142   msg.msg_name = nullptr;
1143   msg.msg_namelen = 0;
1144   msg.msg_iov = &iov;
1145   msg.msg_iovlen = 0;
1146   msg.msg_flags = 0;
1147   /* Allocate enough space so we don't need to keep increasing this as the
1148    * size of OPT_STATS increases. */
1149   constexpr size_t cmsg_alloc_space =
1150       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
1151       CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
1152       CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
1153   /* Allocate aligned space for cmsgs received along with timestamps */
1154   union {
1155     char rbuf[cmsg_alloc_space];
1156     struct cmsghdr align;
1157   } aligned_buf;
1158   msg.msg_control = aligned_buf.rbuf;
1159   msg.msg_controllen = sizeof(aligned_buf.rbuf);
1160   int r, saved_errno;
1161   while (true) {
1162     do {
1163       r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
1164       saved_errno = errno;
1165     } while (r < 0 && saved_errno == EINTR);
1166 
1167     if (r == -1 && saved_errno == EAGAIN) {
1168       return processed_err; /* No more errors to process */
1169     }
1170     if (r == -1) {
1171       return processed_err;
1172     }
1173     if (GPR_UNLIKELY((msg.msg_flags & MSG_CTRUNC) != 0)) {
1174       gpr_log(GPR_ERROR, "Error message was truncated.");
1175     }
1176 
1177     if (msg.msg_controllen == 0) {
1178       /* There was no control message found. It was probably spurious. */
1179       return processed_err;
1180     }
1181     bool seen = false;
1182     for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
1183          cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1184       if (CmsgIsZeroCopy(*cmsg)) {
1185         process_zerocopy(tcp, cmsg);
1186         seen = true;
1187         processed_err = true;
1188       } else if (cmsg->cmsg_level == SOL_SOCKET &&
1189                  cmsg->cmsg_type == SCM_TIMESTAMPING) {
1190         cmsg = process_timestamp(tcp, &msg, cmsg);
1191         seen = true;
1192         processed_err = true;
1193       } else {
1194         /* Got a control message that is not a timestamp or zerocopy. Don't know
1195          * how to handle this. */
1196         if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1197           gpr_log(GPR_INFO,
1198                   "unknown control message cmsg_level:%d cmsg_type:%d",
1199                   cmsg->cmsg_level, cmsg->cmsg_type);
1200         }
1201         return processed_err;
1202       }
1203     }
1204     if (!seen) {
1205       return processed_err;
1206     }
1207   }
1208 }
1209 
1210 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error* error) {
1211   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1212   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1213     gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp, grpc_error_string(error));
1214   }
1215 
1216   if (error != GRPC_ERROR_NONE ||
1217       static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
1218     /* We aren't going to register to hear on error anymore, so it is safe to
1219      * unref. */
1220     TCP_UNREF(tcp, "error-tracking");
1221     return;
1222   }
1223 
1224   /* We are still interested in collecting timestamps, so let's try reading
1225    * them. */
1226   bool processed = process_errors(tcp);
1227   /* This might not be a timestamps error. Set the read and write closures to
1228    * be ready. */
1229   if (!processed) {
1230     grpc_fd_set_readable(tcp->em_fd);
1231     grpc_fd_set_writable(tcp->em_fd);
1232   }
1233   grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
1234 }
1235 
1236 #else  /* GRPC_LINUX_ERRQUEUE */
1237 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1238     grpc_tcp* tcp, grpc_slice_buffer* buf) {
1239   return nullptr;
1240 }
1241 
1242 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {}
1243 
1244 static bool tcp_write_with_timestamps(grpc_tcp* /*tcp*/, struct msghdr* /*msg*/,
1245                                       size_t /*sending_length*/,
1246                                       ssize_t* /*sent_length*/,
1247                                       int /*additional_flags*/) {
1248   gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
1249   GPR_ASSERT(0);
1250   return false;
1251 }
1252 
1253 static void tcp_handle_error(void* /*arg*/ /* grpc_tcp */,
1254                              grpc_error* /*error*/) {
1255   gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
1256   GPR_ASSERT(0);
1257 }
1258 #endif /* GRPC_LINUX_ERRQUEUE */
1259 
1260 /* If outgoing_buffer_arg is filled, shut down the list early so that any
1261  * release operations needed can be performed on the arg. */
1262 void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
1263   if (tcp->outgoing_buffer_arg) {
1264     gpr_mu_lock(&tcp->tb_mu);
1265     grpc_core::TracedBuffer::Shutdown(
1266         &tcp->tb_head, tcp->outgoing_buffer_arg,
1267         GRPC_ERROR_CREATE_FROM_STATIC_STRING("TracedBuffer list shutdown"));
1268     gpr_mu_unlock(&tcp->tb_mu);
1269     tcp->outgoing_buffer_arg = nullptr;
1270   }
1271 }
1272 
1273 #if defined(IOV_MAX) && IOV_MAX < 1000
1274 #define MAX_WRITE_IOVEC IOV_MAX
1275 #else
1276 #define MAX_WRITE_IOVEC 1000
1277 #endif
1278 msg_iovlen_type TcpZerocopySendRecord::PopulateIovs(size_t* unwind_slice_idx,
1279                                                     size_t* unwind_byte_idx,
1280                                                     size_t* sending_length,
1281                                                     iovec* iov) {
1282   msg_iovlen_type iov_size;
1283   *unwind_slice_idx = out_offset_.slice_idx;
1284   *unwind_byte_idx = out_offset_.byte_idx;
1285   for (iov_size = 0;
1286        out_offset_.slice_idx != buf_.count && iov_size != MAX_WRITE_IOVEC;
1287        iov_size++) {
1288     iov[iov_size].iov_base =
1289         GRPC_SLICE_START_PTR(buf_.slices[out_offset_.slice_idx]) +
1290         out_offset_.byte_idx;
1291     iov[iov_size].iov_len =
1292         GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]) -
1293         out_offset_.byte_idx;
1294     *sending_length += iov[iov_size].iov_len;
1295     ++(out_offset_.slice_idx);
1296     out_offset_.byte_idx = 0;
1297   }
1298   GPR_DEBUG_ASSERT(iov_size > 0);
1299   return iov_size;
1300 }
1301 
1302 void TcpZerocopySendRecord::UpdateOffsetForBytesSent(size_t sending_length,
1303                                                      size_t actually_sent) {
1304   size_t trailing = sending_length - actually_sent;
1305   while (trailing > 0) {
1306     size_t slice_length;
1307     out_offset_.slice_idx--;
1308     slice_length = GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]);
1309     if (slice_length > trailing) {
1310       out_offset_.byte_idx = slice_length - trailing;
1311       break;
1312     } else {
1313       trailing -= slice_length;
1314     }
1315   }
1316 }
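// Worked example (illustrative): if slices of 100, 200 and 300 bytes were
// populated (sending_length = 600) but sendmsg() accepted only 450 bytes,
// trailing = 150; stepping back over the 300-byte slice, 300 > 150, so the
// next attempt resumes at that slice with byte_idx = 300 - 150 = 150.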
1317 
1318 // returns true if done, false if pending; if returning true, *error is set
1319 static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1320                                   grpc_error** error) {
1321   struct msghdr msg;
1322   struct iovec iov[MAX_WRITE_IOVEC];
1323   msg_iovlen_type iov_size;
1324   ssize_t sent_length = 0;
1325   size_t sending_length;
1326   size_t unwind_slice_idx;
1327   size_t unwind_byte_idx;
1328   while (true) {
1329     sending_length = 0;
1330     iov_size = record->PopulateIovs(&unwind_slice_idx, &unwind_byte_idx,
1331                                     &sending_length, iov);
1332     msg.msg_name = nullptr;
1333     msg.msg_namelen = 0;
1334     msg.msg_iov = iov;
1335     msg.msg_iovlen = iov_size;
1336     msg.msg_flags = 0;
1337     bool tried_sending_message = false;
1338     // Before calling sendmsg (with or without timestamps): we
1339     // take a single ref on the zerocopy send record.
1340     tcp->tcp_zerocopy_send_ctx.NoteSend(record);
1341     if (tcp->outgoing_buffer_arg != nullptr) {
1342       if (!tcp->ts_capable ||
1343           !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1344                                      MSG_ZEROCOPY)) {
1345         /* We could not set socket options to collect Fathom timestamps.
1346          * Fallback on writing without timestamps. */
1347         tcp->ts_capable = false;
1348         tcp_shutdown_buffer_list(tcp);
1349       } else {
1350         tried_sending_message = true;
1351       }
1352     }
1353     if (!tried_sending_message) {
1354       msg.msg_control = nullptr;
1355       msg.msg_controllen = 0;
1356       GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
1357       GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
1358       sent_length = tcp_send(tcp->fd, &msg, MSG_ZEROCOPY);
1359     }
1360     if (sent_length < 0) {
1361       // If this particular send failed, drop ref taken earlier in this method.
1362       tcp->tcp_zerocopy_send_ctx.UndoSend();
1363       if (errno == EAGAIN) {
1364         record->UnwindIfThrottled(unwind_slice_idx, unwind_byte_idx);
1365         return false;
1366       } else if (errno == EPIPE) {
1367         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1368         tcp_shutdown_buffer_list(tcp);
1369         return true;
1370       } else {
1371         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1372         tcp_shutdown_buffer_list(tcp);
1373         return true;
1374       }
1375     }
1376     tcp->bytes_counter += sent_length;
1377     record->UpdateOffsetForBytesSent(sending_length,
1378                                      static_cast<size_t>(sent_length));
1379     if (record->AllSlicesSent()) {
1380       *error = GRPC_ERROR_NONE;
1381       return true;
1382     }
1383   }
1384 }
1385 
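// Drops one ref on a zerocopy send record; the last unref hands the record
// back to the TcpZerocopySendCtx via PutSendRecord(). The seq and tag
// arguments are unused in this function.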
static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
                                            TcpZerocopySendRecord* record,
                                            uint32_t seq,
                                            const char* /* tag */) {
  if (record->Unref()) {
    tcp->tcp_zerocopy_send_ctx.PutSendRecord(record);
  }
}

static bool tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
                               grpc_error** error) {
  bool done = do_tcp_flush_zerocopy(tcp, record, error);
  if (done) {
    // Either we encountered an error, or we successfully sent all the bytes.
    // In either case, we're done with this record.
    UnrefMaybePutZerocopySendRecord(tcp, record, 0, "flush_done");
  }
  return done;
}

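// Non-zerocopy flush. Returns true when the outgoing buffer has been fully
// written or a fatal error occurred (in which case *error is set); returns
// false and leaves *error untouched when the socket would block (EAGAIN) and
// the write must resume once the fd is writable again.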
static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
  struct msghdr msg;
  struct iovec iov[MAX_WRITE_IOVEC];
  msg_iovlen_type iov_size;
  ssize_t sent_length = 0;
  size_t sending_length;
  size_t trailing;
  size_t unwind_slice_idx;
  size_t unwind_byte_idx;

  // We always start at zero, because we eagerly unref and trim the slice
  // buffer as we write
  size_t outgoing_slice_idx = 0;

  while (true) {
    sending_length = 0;
    unwind_slice_idx = outgoing_slice_idx;
    unwind_byte_idx = tcp->outgoing_byte_idx;
    for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
                       iov_size != MAX_WRITE_IOVEC;
         iov_size++) {
      iov[iov_size].iov_base =
          GRPC_SLICE_START_PTR(
              tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
          tcp->outgoing_byte_idx;
      iov[iov_size].iov_len =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
          tcp->outgoing_byte_idx;
      sending_length += iov[iov_size].iov_len;
      outgoing_slice_idx++;
      tcp->outgoing_byte_idx = 0;
    }
    GPR_ASSERT(iov_size > 0);

    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iov_size;
    msg.msg_flags = 0;
    bool tried_sending_message = false;
    if (tcp->outgoing_buffer_arg != nullptr) {
      if (!tcp->ts_capable ||
          !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length)) {
        /* We could not set socket options to collect Fathom timestamps.
         * Fallback on writing without timestamps. */
        tcp->ts_capable = false;
        tcp_shutdown_buffer_list(tcp);
      } else {
        tried_sending_message = true;
      }
    }
    if (!tried_sending_message) {
      msg.msg_control = nullptr;
      msg.msg_controllen = 0;

      GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
      GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);

      sent_length = tcp_send(tcp->fd, &msg);
    }

    if (sent_length < 0) {
      if (errno == EAGAIN) {
        tcp->outgoing_byte_idx = unwind_byte_idx;
        // unref all and forget about all slices that have been written to this
        // point
        for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
          grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
        }
        return false;
      } else if (errno == EPIPE) {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      } else {
        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
        tcp_shutdown_buffer_list(tcp);
        return true;
      }
    }

    GPR_ASSERT(tcp->outgoing_byte_idx == 0);
    tcp->bytes_counter += sent_length;
    trailing = sending_length - static_cast<size_t>(sent_length);
    while (trailing > 0) {
      size_t slice_length;

      outgoing_slice_idx--;
      slice_length =
          GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
      if (slice_length > trailing) {
        tcp->outgoing_byte_idx = slice_length - trailing;
        break;
      } else {
        trailing -= slice_length;
      }
    }
    if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
      *error = GRPC_ERROR_NONE;
      grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
      return true;
    }
  }
}

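// Write-ready callback: re-runs the pending (zerocopy or regular) flush and,
// once it completes or fails, invokes the caller's write closure and drops the
// "write" ref taken in tcp_write().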
static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
  grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
  grpc_closure* cb;

  if (error != GRPC_ERROR_NONE) {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    if (tcp->current_zerocopy_send != nullptr) {
      UnrefMaybePutZerocopySendRecord(tcp, tcp->current_zerocopy_send, 0,
                                      "handle_write_err");
      tcp->current_zerocopy_send = nullptr;
    }
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, GRPC_ERROR_REF(error));
    TCP_UNREF(tcp, "write");
    return;
  }

  bool flush_result =
      tcp->current_zerocopy_send != nullptr
          ? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
          : tcp_flush(tcp, &error);
  if (!flush_result) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
    // tcp_flush does not populate error if it has returned false.
    GPR_DEBUG_ASSERT(error == GRPC_ERROR_NONE);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = nullptr;
    tcp->current_zerocopy_send = nullptr;
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    // No need to take a ref on error since tcp_flush provides a ref.
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
    TCP_UNREF(tcp, "write");
  }
}

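// grpc_endpoint write entry point. Tries to flush the slice buffer eagerly;
// if the socket would block, a "write" ref is taken, the closure is parked in
// tcp->write_cb, and notify_on_write() arranges for tcp_handle_write() to
// finish the job.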
static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
                      grpc_closure* cb, void* arg) {
  GPR_TIMER_SCOPE("tcp_write", 0);
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_error* error = GRPC_ERROR_NONE;
  TcpZerocopySendRecord* zerocopy_send_record = nullptr;

  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
    size_t i;

    for (i = 0; i < buf->count; i++) {
      gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string);
      if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
        char* data =
            grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "DATA: %s", data);
        gpr_free(data);
      }
    }
  }

  GPR_ASSERT(tcp->write_cb == nullptr);
  GPR_DEBUG_ASSERT(tcp->current_zerocopy_send == nullptr);

  if (buf->length == 0) {
    grpc_core::Closure::Run(
        DEBUG_LOCATION, cb,
        grpc_fd_is_shutdown(tcp->em_fd)
            ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
                                 tcp)
            : GRPC_ERROR_NONE);
    tcp_shutdown_buffer_list(tcp);
    return;
  }

  zerocopy_send_record = tcp_get_send_zerocopy_record(tcp, buf);
  if (zerocopy_send_record == nullptr) {
    // Either not enough bytes, or couldn't allocate a zerocopy context.
    tcp->outgoing_buffer = buf;
    tcp->outgoing_byte_idx = 0;
  }
  tcp->outgoing_buffer_arg = arg;
  if (arg) {
    GPR_ASSERT(grpc_event_engine_can_track_errors());
  }

  bool flush_result =
      zerocopy_send_record != nullptr
          ? tcp_flush_zerocopy(tcp, zerocopy_send_record, &error)
          : tcp_flush(tcp, &error);
  if (!flush_result) {
    TCP_REF(tcp, "write");
    tcp->write_cb = cb;
    tcp->current_zerocopy_send = zerocopy_send_record;
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      gpr_log(GPR_INFO, "write: delayed");
    }
    notify_on_write(tcp);
  } else {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
      const char* str = grpc_error_string(error);
      gpr_log(GPR_INFO, "write: %s", str);
    }
    grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
  }
}

static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_add_fd(pollset, tcp->em_fd);
}

static void tcp_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
}

static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset_set) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  ZerocopyDisableAndWaitForRemaining(tcp);
  grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
}

static char* tcp_get_peer(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return gpr_strdup(tcp->peer_string);
}

static int tcp_get_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->fd;
}

static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  return tcp->resource_user;
}

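// Errors are only tracked for AF_INET/AF_INET6 sockets, and only when the
// event engine supports error tracking; other address families (e.g.
// unix-domain sockets) return false.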
static bool tcp_can_track_err(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  if (!grpc_event_engine_can_track_errors()) {
    return false;
  }
  struct sockaddr addr;
  socklen_t len = sizeof(addr);
  if (getsockname(tcp->fd, &addr, &len) < 0) {
    return false;
  }
  if (addr.sa_family == AF_INET || addr.sa_family == AF_INET6) {
    return true;
  }
  return false;
}

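// grpc_endpoint vtable wiring for the POSIX TCP endpoint; the read, shutdown
// and destroy entries are defined earlier in this file.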
static const grpc_endpoint_vtable vtable = {tcp_read,
                                            tcp_write,
                                            tcp_add_to_pollset,
                                            tcp_add_to_pollset_set,
                                            tcp_delete_from_pollset_set,
                                            tcp_shutdown,
                                            tcp_destroy,
                                            tcp_get_resource_user,
                                            tcp_get_peer,
                                            tcp_get_fd,
                                            tcp_can_track_err};

#define MAX_CHUNK_SIZE 32 * 1024 * 1024

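// Illustrative usage (a sketch only, not part of this file's API surface;
// assumes a connected non-blocking socket already registered with the polling
// engine, and hypothetical variable names such as `sock`, `args`, `slices`,
// and `on_write_done`):
//
//   grpc_fd* em_fd = grpc_fd_create(sock, "tcp-client", /*track_err=*/true);
//   grpc_endpoint* ep = grpc_tcp_create(em_fd, args, "ipv4:127.0.0.1:443");
//   grpc_endpoint_write(ep, &slices, &on_write_done, /*arg=*/nullptr);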
grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
                               const grpc_channel_args* channel_args,
                               const char* peer_string) {
  static constexpr bool kZerocpTxEnabledDefault = false;
  int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
  int tcp_max_read_chunk_size = 4 * 1024 * 1024;
  int tcp_min_read_chunk_size = 256;
  bool tcp_tx_zerocopy_enabled = kZerocpTxEnabledDefault;
  int tcp_tx_zerocopy_send_bytes_thresh =
      grpc_core::TcpZerocopySendCtx::kDefaultSendBytesThreshold;
  int tcp_tx_zerocopy_max_simult_sends =
      grpc_core::TcpZerocopySendCtx::kDefaultMaxSends;
  grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
  if (channel_args != nullptr) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_min_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
        tcp_max_read_chunk_size =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 ==
                 strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(resource_quota);
        resource_quota =
            grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
                channel_args->args[i].value.pointer.p));
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_TX_ZEROCOPY_ENABLED)) {
        tcp_tx_zerocopy_enabled = grpc_channel_arg_get_bool(
            &channel_args->args[i], kZerocpTxEnabledDefault);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_TX_ZEROCOPY_SEND_BYTES_THRESHOLD)) {
        grpc_integer_options options = {
            grpc_core::TcpZerocopySendCtx::kDefaultSendBytesThreshold, 0,
            INT_MAX};
        tcp_tx_zerocopy_send_bytes_thresh =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_TCP_TX_ZEROCOPY_MAX_SIMULT_SENDS)) {
        grpc_integer_options options = {
            grpc_core::TcpZerocopySendCtx::kDefaultMaxSends, 0, INT_MAX};
        tcp_tx_zerocopy_max_simult_sends =
            grpc_channel_arg_get_integer(&channel_args->args[i], options);
      }
    }
  }

  if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) {
    tcp_min_read_chunk_size = tcp_max_read_chunk_size;
  }
  tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
                                  tcp_max_read_chunk_size);

  grpc_tcp* tcp = static_cast<grpc_tcp*>(gpr_malloc(sizeof(grpc_tcp)));
  tcp->base.vtable = &vtable;
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->fd = grpc_fd_wrapped_fd(em_fd);
  tcp->read_cb = nullptr;
  tcp->write_cb = nullptr;
  tcp->current_zerocopy_send = nullptr;
  tcp->release_fd_cb = nullptr;
  tcp->release_fd = nullptr;
  tcp->incoming_buffer = nullptr;
  tcp->target_length = static_cast<double>(tcp_read_chunk_size);
  tcp->min_read_chunk_size = tcp_min_read_chunk_size;
  tcp->max_read_chunk_size = tcp_max_read_chunk_size;
  tcp->bytes_read_this_round = 0;
  /* Will be set to false by the very first endpoint read function */
  tcp->is_first_read = true;
  tcp->bytes_counter = -1;
  tcp->socket_ts_enabled = false;
  tcp->ts_capable = true;
  tcp->outgoing_buffer_arg = nullptr;
  new (&tcp->tcp_zerocopy_send_ctx) TcpZerocopySendCtx(
      tcp_tx_zerocopy_max_simult_sends, tcp_tx_zerocopy_send_bytes_thresh);
  if (tcp_tx_zerocopy_enabled && !tcp->tcp_zerocopy_send_ctx.memory_limited()) {
#ifdef GRPC_LINUX_ERRQUEUE
    const int enable = 1;
    auto err =
        setsockopt(tcp->fd, SOL_SOCKET, SO_ZEROCOPY, &enable, sizeof(enable));
    if (err == 0) {
      tcp->tcp_zerocopy_send_ctx.set_enabled(true);
    } else {
      gpr_log(GPR_ERROR, "Failed to set zerocopy options on the socket.");
    }
#endif
  }
  /* paired with unref in grpc_tcp_destroy */
  new (&tcp->refcount) grpc_core::RefCount(1, &grpc_tcp_trace);
  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
  tcp->em_fd = em_fd;
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  grpc_resource_user_slice_allocator_init(
      &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
  grpc_resource_quota_unref_internal(resource_quota);
  gpr_mu_init(&tcp->tb_mu);
  tcp->tb_head = nullptr;
  GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                    grpc_schedule_on_exec_ctx);
  if (grpc_event_engine_run_in_background()) {
    // If there is a polling engine always running in the background, there is
    // no need to run the backup poller.
    GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  } else {
    GRPC_CLOSURE_INIT(&tcp->write_done_closure,
                      tcp_drop_uncovered_then_handle_write, tcp,
                      grpc_schedule_on_exec_ctx);
  }
  /* Always assume there is something on the queue to read. */
  tcp->inq = 1;
#ifdef GRPC_HAVE_TCP_INQ
  int one = 1;
  if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
    tcp->inq_capable = true;
  } else {
    gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
    tcp->inq_capable = false;
  }
#else
  tcp->inq_capable = false;
#endif /* GRPC_HAVE_TCP_INQ */
  /* Start being notified on errors if event engine can track errors. */
  if (grpc_event_engine_can_track_errors()) {
    /* Grab a ref to tcp so that we can safely access the tcp struct when
     * processing errors. We unref when we no longer want to track errors
     * separately. */
    TCP_REF(tcp, "error-tracking");
    gpr_atm_rel_store(&tcp->stop_error_notification, 0);
    GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
  }

  return &tcp->base;
}

int grpc_tcp_fd(grpc_endpoint* ep) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  return grpc_fd_wrapped_fd(tcp->em_fd);
}

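// Arranges for the wrapped fd to be handed back through *fd (via
// release_fd/release_fd_cb) instead of being closed when the endpoint is torn
// down; error notification and pending zerocopy sends are stopped first, then
// the "destroy" ref is dropped.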
void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
                                     grpc_closure* done) {
  grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
  GPR_ASSERT(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
  if (grpc_event_engine_can_track_errors()) {
    /* Stop errors notification. */
    ZerocopyDisableAndWaitForRemaining(tcp);
    gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
    grpc_fd_set_error(tcp->em_fd);
  }
  TCP_UNREF(tcp, "destroy");
}

#endif /* GRPC_POSIX_SOCKET_TCP */