1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 #include <grpc/support/port_platform.h>
20 
21 #include "src/core/lib/iomgr/port.h"
22 
23 #ifdef GRPC_POSIX_SOCKET_TCP
24 
25 #include "src/core/lib/iomgr/tcp_posix.h"
26 
27 #include <errno.h>
28 #include <limits.h>
29 #include <netinet/in.h>
30 #include <netinet/tcp.h>
31 #include <stdbool.h>
32 #include <stdio.h>
33 #include <stdlib.h>
34 #include <string.h>
35 #include <sys/socket.h>
36 #include <sys/types.h>
37 #include <unistd.h>
38 #include <algorithm>
39 #include <unordered_map>
40 
41 #include <grpc/slice.h>
42 #include <grpc/support/alloc.h>
43 #include <grpc/support/log.h>
44 #include <grpc/support/string_util.h>
45 #include <grpc/support/sync.h>
46 #include <grpc/support/time.h>
47 
48 #include "src/core/lib/address_utils/sockaddr_utils.h"
49 #include "src/core/lib/channel/channel_args.h"
50 #include "src/core/lib/debug/stats.h"
51 #include "src/core/lib/debug/trace.h"
52 #include "src/core/lib/gpr/string.h"
53 #include "src/core/lib/gpr/useful.h"
54 #include "src/core/lib/gprpp/sync.h"
55 #include "src/core/lib/iomgr/buffer_list.h"
56 #include "src/core/lib/iomgr/ev_posix.h"
57 #include "src/core/lib/iomgr/executor.h"
58 #include "src/core/lib/iomgr/socket_utils_posix.h"
59 #include "src/core/lib/profiling/timers.h"
60 #include "src/core/lib/slice/slice_internal.h"
61 #include "src/core/lib/slice/slice_string_helpers.h"
62 
63 #ifndef SOL_TCP
64 #define SOL_TCP IPPROTO_TCP
65 #endif
66 
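// TCP_INQ fallback definitions.
// NB: defined here in case the kernel headers in use are too old to provide
// them. When the TCP_INQ socket option is enabled, recvmsg() reports the
// number of bytes still queued on the socket via a TCP_CM_INQ control message
// (consumed in tcp_do_read() below).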
67 #ifndef TCP_INQ
68 #define TCP_INQ 36
69 #define TCP_CM_INQ TCP_INQ
70 #endif
71 
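// Pass MSG_NOSIGNAL where available so that writing to a peer that has already
// closed the connection fails with EPIPE instead of raising SIGPIPE.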
72 #ifdef GRPC_HAVE_MSG_NOSIGNAL
73 #define SENDMSG_FLAGS MSG_NOSIGNAL
74 #else
75 #define SENDMSG_FLAGS 0
76 #endif
77 
78 // TCP zero copy sendmsg flag.
79 // NB: We define this here as a fallback in case we're using an older set of
80 // library headers that has not defined MSG_ZEROCOPY. Since this constant is
81 // part of the kernel, we are guaranteed it will never change/disagree so
82 // defining it here is safe.
83 #ifndef MSG_ZEROCOPY
84 #define MSG_ZEROCOPY 0x4000000
85 #endif
86 
87 #ifdef GRPC_MSG_IOVLEN_TYPE
88 typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
89 #else
90 typedef size_t msg_iovlen_type;
91 #endif
92 
93 extern grpc_core::TraceFlag grpc_tcp_trace;
94 
95 namespace grpc_core {
96 
97 class TcpZerocopySendRecord {
98  public:
99   TcpZerocopySendRecord() { grpc_slice_buffer_init(&buf_); }
100 
101   ~TcpZerocopySendRecord() {
102     AssertEmpty();
103     grpc_slice_buffer_destroy_internal(&buf_);
104   }
105 
106   // Given the slices that we wish to send, and the current offset into the
107   //   slice buffer (indicating which have already been sent), populate an iovec
108   //   array that will be used for a zerocopy enabled sendmsg().
109   msg_iovlen_type PopulateIovs(size_t* unwind_slice_idx,
110                                size_t* unwind_byte_idx, size_t* sending_length,
111                                iovec* iov);
112 
113   // A sendmsg() may not be able to send the bytes that we requested at this
114   // time, returning EAGAIN (possibly due to backpressure). In this case,
115   // unwind the offset into the slice buffer so we retry sending these bytes.
116   void UnwindIfThrottled(size_t unwind_slice_idx, size_t unwind_byte_idx) {
117     out_offset_.byte_idx = unwind_byte_idx;
118     out_offset_.slice_idx = unwind_slice_idx;
119   }
120 
121   // Update the offset into the slice buffer based on how much we wanted to send
122   // vs. what sendmsg() actually sent (which may be lower, possibly due to
123   // backpressure).
124   void UpdateOffsetForBytesSent(size_t sending_length, size_t actually_sent);
125 
126   // Indicates whether all underlying data has been sent or not.
127   bool AllSlicesSent() { return out_offset_.slice_idx == buf_.count; }
128 
129   // Reset this structure for a new tcp_write() with zerocopy.
130   void PrepareForSends(grpc_slice_buffer* slices_to_send) {
131     AssertEmpty();
132     out_offset_.slice_idx = 0;
133     out_offset_.byte_idx = 0;
134     grpc_slice_buffer_swap(slices_to_send, &buf_);
135     Ref();
136   }
137 
138   // References: 1 reference per sendmsg(), and 1 for the tcp_write().
139   void Ref() { ref_.FetchAdd(1, MemoryOrder::RELAXED); }
140 
141   // Unref: called when we get an error queue notification for a sendmsg(), if a
142   //  sendmsg() failed or when tcp_write() is done.
143   bool Unref() {
144     const intptr_t prior = ref_.FetchSub(1, MemoryOrder::ACQ_REL);
145     GPR_DEBUG_ASSERT(prior > 0);
146     if (prior == 1) {
147       AllSendsComplete();
148       return true;
149     }
150     return false;
151   }
152 
153  private:
154   struct OutgoingOffset {
155     size_t slice_idx = 0;
156     size_t byte_idx = 0;
157   };
158 
159   void AssertEmpty() {
160     GPR_DEBUG_ASSERT(buf_.count == 0);
161     GPR_DEBUG_ASSERT(buf_.length == 0);
162     GPR_DEBUG_ASSERT(ref_.Load(MemoryOrder::RELAXED) == 0);
163   }
164 
165   // When all sendmsg() calls associated with this tcp_write() have been
166   // completed (i.e., we have received the notifications for each sequence number
167   // for each sendmsg()) and all reference counts have been dropped, drop our
168   // reference to the underlying data since we no longer need it.
169   void AllSendsComplete() {
170     GPR_DEBUG_ASSERT(ref_.Load(MemoryOrder::RELAXED) == 0);
171     grpc_slice_buffer_reset_and_unref_internal(&buf_);
172   }
173 
174   grpc_slice_buffer buf_;
175   Atomic<intptr_t> ref_;
176   OutgoingOffset out_offset_;
177 };
178 
179 class TcpZerocopySendCtx {
180  public:
181   static constexpr int kDefaultMaxSends = 4;
182   static constexpr size_t kDefaultSendBytesThreshold = 16 * 1024;  // 16KB
183 
184   explicit TcpZerocopySendCtx(
185       int max_sends = kDefaultMaxSends,
186       size_t send_bytes_threshold = kDefaultSendBytesThreshold)
187       : max_sends_(max_sends),
188         free_send_records_size_(max_sends),
189         threshold_bytes_(send_bytes_threshold) {
190     send_records_ = static_cast<TcpZerocopySendRecord*>(
191         gpr_malloc(max_sends * sizeof(*send_records_)));
192     free_send_records_ = static_cast<TcpZerocopySendRecord**>(
193         gpr_malloc(max_sends * sizeof(*free_send_records_)));
194     if (send_records_ == nullptr || free_send_records_ == nullptr) {
195       gpr_free(send_records_);
196       gpr_free(free_send_records_);
197       gpr_log(GPR_INFO, "Disabling TCP TX zerocopy due to memory pressure.\n");
198       memory_limited_ = true;
199     } else {
200       for (int idx = 0; idx < max_sends_; ++idx) {
201         new (send_records_ + idx) TcpZerocopySendRecord();
202         free_send_records_[idx] = send_records_ + idx;
203       }
204     }
205   }
206 
207   ~TcpZerocopySendCtx() {
208     if (send_records_ != nullptr) {
209       for (int idx = 0; idx < max_sends_; ++idx) {
210         send_records_[idx].~TcpZerocopySendRecord();
211       }
212     }
213     gpr_free(send_records_);
214     gpr_free(free_send_records_);
215   }
216 
217   // True if we were unable to allocate the various bookkeeping structures at
218   // transport initialization time. If memory limited, we do not zerocopy.
219   bool memory_limited() const { return memory_limited_; }
220 
221   // TCP send zerocopy maintains an implicit sequence number for every
222   // successful sendmsg() with zerocopy enabled; the kernel later gives us an
223   // error queue notification with this sequence number indicating that the
224   // underlying data buffers that we sent can now be released. Once that
225   // notification is received, we can release the buffers associated with this
226   // zerocopy send record. Here, we associate the sequence number with the data
227   // buffers that were sent with the corresponding call to sendmsg().
228   void NoteSend(TcpZerocopySendRecord* record) {
229     record->Ref();
230     AssociateSeqWithSendRecord(last_send_, record);
231     ++last_send_;
232   }
233 
234   // If sendmsg() actually failed, though, we need to revert the sequence number
235   // that we speculatively bumped before calling sendmsg(). Note that we bump
236   // this sequence number and perform relevant bookkeeping (see: NoteSend())
237   // *before* calling sendmsg() since, if we called it *after* sendmsg(), then
238   // there is a possible race with the release notification which could occur on
239   // another thread before we do the necessary bookkeeping. Hence, calling
240   // NoteSend() *before* sendmsg() and implementing an undo function is needed.
241   void UndoSend() {
242     --last_send_;
243     if (ReleaseSendRecord(last_send_)->Unref()) {
244       // We should still be holding the ref taken by tcp_write().
245       GPR_DEBUG_ASSERT(0);
246     }
247   }
248 
249   // Simply associate this send record (and the underlying sent data buffers)
250   // with the implicit sequence number for this zerocopy sendmsg().
251   void AssociateSeqWithSendRecord(uint32_t seq, TcpZerocopySendRecord* record) {
252     MutexLock guard(&lock_);
253     ctx_lookup_.emplace(seq, record);
254   }
255 
256   // Get a send record for a send that we wish to do with zerocopy.
257   TcpZerocopySendRecord* GetSendRecord() {
258     MutexLock guard(&lock_);
259     return TryGetSendRecordLocked();
260   }
261 
262   // A given send record corresponds to a single tcp_write() with zerocopy
263   // enabled. This can result in several sendmsg() calls to flush all of the
264   // data to wire. Each sendmsg() takes a reference on the
265   // TcpZerocopySendRecord, and corresponds to a single sequence number.
266   // ReleaseSendRecord releases a reference on TcpZerocopySendRecord for a
267   // single sequence number. This is called either when the relevant error
268   // queue notification (saying that we can discard the underlying buffers
269   // for this sendmsg()) is received from the kernel, or when the sendmsg()
270   // was unsuccessful to begin with.
271   TcpZerocopySendRecord* ReleaseSendRecord(uint32_t seq) {
272     MutexLock guard(&lock_);
273     return ReleaseSendRecordLocked(seq);
274   }
275 
276   // After all the references to a TcpZerocopySendRecord are released, we can
277   // add it back to the pool (of size max_sends_). Note that we can only have
278   // max_sends_ tcp_write() instances with zerocopy enabled in flight at the
279   // same time.
280   void PutSendRecord(TcpZerocopySendRecord* record) {
281     GPR_DEBUG_ASSERT(record >= send_records_ &&
282                      record < send_records_ + max_sends_);
283     MutexLock guard(&lock_);
284     PutSendRecordLocked(record);
285   }
286 
287   // Indicate that we are disposing of this zerocopy context. This indicator
288   // will prevent new zerocopy writes from being issued.
289   void Shutdown() { shutdown_.Store(true, MemoryOrder::RELEASE); }
290 
291   // Indicates that there are no inflight tcp_write() instances with zerocopy
292   // enabled.
293   bool AllSendRecordsEmpty() {
294     MutexLock guard(&lock_);
295     return free_send_records_size_ == max_sends_;
296   }
297 
298   bool enabled() const { return enabled_; }
299 
300   void set_enabled(bool enabled) {
301     GPR_DEBUG_ASSERT(!enabled || !memory_limited());
302     enabled_ = enabled;
303   }
304 
305   // Only use zerocopy if we are sending at least this many bytes. The
306   // additional overhead of reading the error queue for notifications means that
307   // zerocopy is not useful for small transfers.
308   size_t threshold_bytes() const { return threshold_bytes_; }
309 
310  private:
311   TcpZerocopySendRecord* ReleaseSendRecordLocked(uint32_t seq) {
312     auto iter = ctx_lookup_.find(seq);
313     GPR_DEBUG_ASSERT(iter != ctx_lookup_.end());
314     TcpZerocopySendRecord* record = iter->second;
315     ctx_lookup_.erase(iter);
316     return record;
317   }
318 
319   TcpZerocopySendRecord* TryGetSendRecordLocked() {
320     if (shutdown_.Load(MemoryOrder::ACQUIRE)) {
321       return nullptr;
322     }
323     if (free_send_records_size_ == 0) {
324       return nullptr;
325     }
326     free_send_records_size_--;
327     return free_send_records_[free_send_records_size_];
328   }
329 
330   void PutSendRecordLocked(TcpZerocopySendRecord* record) {
331     GPR_DEBUG_ASSERT(free_send_records_size_ < max_sends_);
332     free_send_records_[free_send_records_size_] = record;
333     free_send_records_size_++;
334   }
335 
336   TcpZerocopySendRecord* send_records_;
337   TcpZerocopySendRecord** free_send_records_;
338   int max_sends_;
339   int free_send_records_size_;
340   Mutex lock_;
341   uint32_t last_send_ = 0;
342   Atomic<bool> shutdown_;
343   bool enabled_ = false;
344   size_t threshold_bytes_ = kDefaultSendBytesThreshold;
345   std::unordered_map<uint32_t, TcpZerocopySendRecord*> ctx_lookup_;
346   bool memory_limited_ = false;
347 };
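// Illustrative sketch (not part of the implementation) of how the classes
// above cooperate for a single zerocopy write; names match the real members,
// but locking and error handling are elided:
//
//   TcpZerocopySendRecord* rec = ctx.GetSendRecord();  // may return nullptr
//   rec->PrepareForSends(slices);   // takes the tcp_write() reference
//   ctx.NoteSend(rec);              // extra ref + sequence number, *before* sendmsg()
//   if (sendmsg(fd, &msg, MSG_ZEROCOPY) < 0) ctx.UndoSend();
//   ...
//   // Later, when the kernel's error-queue notification for that sequence
//   // number arrives (see process_zerocopy() below):
//   if (ctx.ReleaseSendRecord(seq)->Unref()) ctx.PutSendRecord(rec);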
348 
349 }  // namespace grpc_core
350 
351 using grpc_core::TcpZerocopySendCtx;
352 using grpc_core::TcpZerocopySendRecord;
353 
354 namespace {
355 struct grpc_tcp {
356   grpc_tcp(int max_sends, size_t send_bytes_threshold)
357       : tcp_zerocopy_send_ctx(max_sends, send_bytes_threshold) {}
358   grpc_endpoint base;
359   grpc_fd* em_fd;
360   int fd;
361   /* Used by the endpoint read function to distinguish the very first read call
362    * from the rest */
363   bool is_first_read;
364   double target_length;
365   double bytes_read_this_round;
366   grpc_core::RefCount refcount;
367   gpr_atm shutdown_count;
368 
369   int min_read_chunk_size;
370   int max_read_chunk_size;
371 
372   /* garbage after the last read */
373   grpc_slice_buffer last_read_buffer;
374 
375   grpc_slice_buffer* incoming_buffer;
376   int inq;          /* bytes pending on the socket from the last read. */
377   bool inq_capable; /* cache whether kernel supports inq */
378 
379   grpc_slice_buffer* outgoing_buffer;
380   /* byte within outgoing_buffer->slices[0] to write next */
381   size_t outgoing_byte_idx;
382 
383   grpc_closure* read_cb;
384   grpc_closure* write_cb;
385   grpc_closure* release_fd_cb;
386   int* release_fd;
387 
388   grpc_closure read_done_closure;
389   grpc_closure write_done_closure;
390   grpc_closure error_closure;
391 
392   std::string peer_string;
393   std::string local_address;
394 
395   grpc_resource_user* resource_user;
396   grpc_resource_user_slice_allocator slice_allocator;
397 
398   grpc_core::TracedBuffer* tb_head; /* List of traced buffers */
399   gpr_mu tb_mu; /* Lock for access to list of traced buffers */
400 
401   /* grpc_endpoint_write takes an argument which if non-null means that the
402    * transport layer wants the TCP layer to collect timestamps for this write.
403    * This arg is forwarded to the timestamps callback function when the ACK
404    * timestamp is received from the kernel. This arg is a (void *) which allows
405    * users of this API to pass in a pointer to any kind of structure. This
406    * structure could actually be a tag or any book-keeping object that the user
407    * can use to distinguish between different traced writes. The only
408    * requirement from the TCP endpoint layer is that this arg should be non-null
409    * if the user wants timestamps for the write. */
410   void* outgoing_buffer_arg;
411   /* A counter which starts at 0. It is initialized the first time the socket
412    * options for collecting timestamps are set, and is incremented with each
413    * byte sent. */
414   int bytes_counter;
415   bool socket_ts_enabled; /* True if timestamping options are set on the socket
416                            */
417   bool ts_capable;        /* Cache whether we can set timestamping options */
418   gpr_atm stop_error_notification; /* Set to 1 if we do not want to be notified
419                                       on errors anymore */
420   TcpZerocopySendCtx tcp_zerocopy_send_ctx;
421   TcpZerocopySendRecord* current_zerocopy_send = nullptr;
422 };
423 
424 struct backup_poller {
425   gpr_mu* pollset_mu;
426   grpc_closure run_poller;
427 };
428 
429 }  // namespace
430 
431 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp);
432 
433 #define BACKUP_POLLER_POLLSET(b) ((grpc_pollset*)((b) + 1))
434 
435 static gpr_atm g_uncovered_notifications_pending;
436 static gpr_atm g_backup_poller; /* backup_poller* */
437 
438 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
439 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
440 static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
441                                                  grpc_error_handle error);
442 
443 static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
444   backup_poller* p = static_cast<backup_poller*>(bp);
445   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
446     gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
447   }
448   grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
449   gpr_free(p);
450 }
451 
452 static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
453   backup_poller* p = static_cast<backup_poller*>(bp);
454   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
455     gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
456   }
457   gpr_mu_lock(p->pollset_mu);
458   grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
459   GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
460   GRPC_LOG_IF_ERROR(
461       "backup_poller:pollset_work",
462       grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
463   gpr_mu_unlock(p->pollset_mu);
464   /* The last "uncovered" notification is the ref that keeps us polling; if we
465    * get there, try a CAS to release it. */
466   if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
467       gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
468     gpr_mu_lock(p->pollset_mu);
469     bool cas_ok =
470         gpr_atm_full_cas(&g_backup_poller, reinterpret_cast<gpr_atm>(p), 0);
471     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
472       gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
473     }
474     gpr_mu_unlock(p->pollset_mu);
475     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
476       gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
477     }
478     grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
479                           GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
480                                             grpc_schedule_on_exec_ctx));
481   } else {
482     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
483       gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
484     }
485     grpc_core::Executor::Run(&p->run_poller, GRPC_ERROR_NONE,
486                              grpc_core::ExecutorType::DEFAULT,
487                              grpc_core::ExecutorJobType::LONG);
488   }
489 }
490 
491 static void drop_uncovered(grpc_tcp* /*tcp*/) {
492   backup_poller* p =
493       reinterpret_cast<backup_poller*>(gpr_atm_acq_load(&g_backup_poller));
494   gpr_atm old_count =
495       gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
496   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
497     gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
498             static_cast<int>(old_count), static_cast<int>(old_count) - 1);
499   }
500   GPR_ASSERT(old_count != 1);
501 }
502 
503 // gRPC API considers a Write operation to be done the moment it clears ‘flow
504 // control’ i.e., not necessarily sent on the wire. This means that the
505 // application MIGHT not call `grpc_completion_queue_next/pluck` in a timely
506 // manner when its `Write()` API is acked.
507 //
508 // We need to ensure that the fd is 'covered' (i.e being monitored by some
509 // polling thread and progress is made) and hence add it to a backup poller here
510 static void cover_self(grpc_tcp* tcp) {
511   backup_poller* p;
512   gpr_atm old_count =
513       gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
514   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
515     gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
516             static_cast<int>(old_count), 2 + static_cast<int>(old_count));
517   }
518   if (old_count == 0) {
519     GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
520     p = static_cast<backup_poller*>(
521         gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
522     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
523       gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
524     }
525     grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
526     gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
527     grpc_core::Executor::Run(
528         GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
529         GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
530         grpc_core::ExecutorJobType::LONG);
531   } else {
532     while ((p = reinterpret_cast<backup_poller*>(
533                 gpr_atm_acq_load(&g_backup_poller))) == nullptr) {
534       // spin waiting for backup poller
535     }
536   }
537   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
538     gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
539   }
540   grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
541   if (old_count != 0) {
542     drop_uncovered(tcp);
543   }
544 }
545 
546 static void notify_on_read(grpc_tcp* tcp) {
547   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
548     gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
549   }
550   grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
551 }
552 
553 static void notify_on_write(grpc_tcp* tcp) {
554   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
555     gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
556   }
557   if (!grpc_event_engine_run_in_background()) {
558     cover_self(tcp);
559   }
560   grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
561 }
562 
563 static void tcp_drop_uncovered_then_handle_write(void* arg,
564                                                  grpc_error_handle error) {
565   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
566     gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
567             grpc_error_std_string(error).c_str());
568   }
569   drop_uncovered(static_cast<grpc_tcp*>(arg));
570   tcp_handle_write(arg, error);
571 }
572 
573 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
574   tcp->bytes_read_this_round += static_cast<double>(bytes);
575 }
576 
577 static void finish_estimate(grpc_tcp* tcp) {
578   /* If we read >80% of the target buffer in one read loop, increase the size
579      of the target buffer to either the amount read, or twice its previous
580      value */
581   if (tcp->bytes_read_this_round > tcp->target_length * 0.8) {
582     tcp->target_length =
583         GPR_MAX(2 * tcp->target_length, tcp->bytes_read_this_round);
584   } else {
585     tcp->target_length =
586         0.99 * tcp->target_length + 0.01 * tcp->bytes_read_this_round;
587   }
588   tcp->bytes_read_this_round = 0;
589 }
590 
591 static size_t get_target_read_size(grpc_tcp* tcp) {
592   grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
593   double pressure = grpc_resource_quota_get_memory_pressure(rq);
594   double target =
595       tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
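  /* Round the clamped target up to the next multiple of 256 bytes. */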
596   size_t sz = ((static_cast<size_t> GPR_CLAMP(target, tcp->min_read_chunk_size,
597                                               tcp->max_read_chunk_size)) +
598                255) &
599               ~static_cast<size_t>(255);
600   /* don't use more than 1/16th of the overall resource quota for a single read
601    * alloc */
602   size_t rqmax = grpc_resource_quota_peek_size(rq);
603   if (sz > rqmax / 16 && rqmax > 1024) {
604     sz = rqmax / 16;
605   }
606   return sz;
607 }
608 
609 static grpc_error_handle tcp_annotate_error(grpc_error_handle src_error,
610                                             grpc_tcp* tcp) {
611   return grpc_error_set_str(
612       grpc_error_set_int(
613           grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
614           /* All tcp errors are marked with UNAVAILABLE so that application may
615            * choose to retry. */
616           GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
617       GRPC_ERROR_STR_TARGET_ADDRESS,
618       grpc_slice_from_copied_string(tcp->peer_string.c_str()));
619 }
620 
621 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error);
622 static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error_handle error);
623 
624 static void tcp_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
625   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
626   ZerocopyDisableAndWaitForRemaining(tcp);
627   grpc_fd_shutdown(tcp->em_fd, why);
628   grpc_resource_user_shutdown(tcp->resource_user);
629 }
630 
631 static void tcp_free(grpc_tcp* tcp) {
632   grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
633                  "tcp_unref_orphan");
634   grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
635   grpc_resource_user_unref(tcp->resource_user);
636   /* The lock is not really necessary here, since all refs have been released */
637   gpr_mu_lock(&tcp->tb_mu);
638   grpc_core::TracedBuffer::Shutdown(
639       &tcp->tb_head, tcp->outgoing_buffer_arg,
640       GRPC_ERROR_CREATE_FROM_STATIC_STRING("endpoint destroyed"));
641   gpr_mu_unlock(&tcp->tb_mu);
642   tcp->outgoing_buffer_arg = nullptr;
643   gpr_mu_destroy(&tcp->tb_mu);
644   delete tcp;
645 }
646 
647 #ifndef NDEBUG
648 #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), DEBUG_LOCATION)
649 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), DEBUG_LOCATION)
650 static void tcp_unref(grpc_tcp* tcp, const char* reason,
651                       const grpc_core::DebugLocation& debug_location) {
652   if (GPR_UNLIKELY(tcp->refcount.Unref(debug_location, reason))) {
653     tcp_free(tcp);
654   }
655 }
656 
657 static void tcp_ref(grpc_tcp* tcp, const char* reason,
658                     const grpc_core::DebugLocation& debug_location) {
659   tcp->refcount.Ref(debug_location, reason);
660 }
661 #else
662 #define TCP_UNREF(tcp, reason) tcp_unref((tcp))
663 #define TCP_REF(tcp, reason) tcp_ref((tcp))
664 static void tcp_unref(grpc_tcp* tcp) {
665   if (GPR_UNLIKELY(tcp->refcount.Unref())) {
666     tcp_free(tcp);
667   }
668 }
669 
670 static void tcp_ref(grpc_tcp* tcp) { tcp->refcount.Ref(); }
671 #endif
672 
673 static void tcp_destroy(grpc_endpoint* ep) {
674   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
675   grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
676   if (grpc_event_engine_can_track_errors()) {
677     ZerocopyDisableAndWaitForRemaining(tcp);
678     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
679     grpc_fd_set_error(tcp->em_fd);
680   }
681   TCP_UNREF(tcp, "destroy");
682 }
683 
684 static void call_read_cb(grpc_tcp* tcp, grpc_error_handle error) {
685   grpc_closure* cb = tcp->read_cb;
686 
687   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
688     gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
689     size_t i;
690     gpr_log(GPR_INFO, "READ %p (peer=%s) error=%s", tcp,
691             tcp->peer_string.c_str(), grpc_error_std_string(error).c_str());
692     if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
693       for (i = 0; i < tcp->incoming_buffer->count; i++) {
694         char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
695                                      GPR_DUMP_HEX | GPR_DUMP_ASCII);
696         gpr_log(GPR_DEBUG, "DATA: %s", dump);
697         gpr_free(dump);
698       }
699     }
700   }
701 
702   tcp->read_cb = nullptr;
703   tcp->incoming_buffer = nullptr;
704   grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
705 }
706 
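/* Read-path overview (as implemented below): tcp_read() stashes the caller's
 * buffer and closure and either arms notify_on_read() or runs
 * read_done_closure directly; tcp_handle_read() calls tcp_continue_read(),
 * which allocates slices if needed and invokes tcp_do_read(); tcp_do_read()
 * loops on recvmsg() and finally reports the result via call_read_cb(). */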
707 #define MAX_READ_IOVEC 4
708 static void tcp_do_read(grpc_tcp* tcp) {
709   GPR_TIMER_SCOPE("tcp_do_read", 0);
710   struct msghdr msg;
711   struct iovec iov[MAX_READ_IOVEC];
712   ssize_t read_bytes;
713   size_t total_read_bytes = 0;
714   size_t iov_len =
715       std::min<size_t>(MAX_READ_IOVEC, tcp->incoming_buffer->count);
716 #ifdef GRPC_LINUX_ERRQUEUE
717   constexpr size_t cmsg_alloc_space =
718       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) + CMSG_SPACE(sizeof(int));
719 #else
720   constexpr size_t cmsg_alloc_space = 24 /* CMSG_SPACE(sizeof(int)) */;
721 #endif /* GRPC_LINUX_ERRQUEUE */
722   char cmsgbuf[cmsg_alloc_space];
723   for (size_t i = 0; i < iov_len; i++) {
724     iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
725     iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
726   }
727 
728   do {
729     /* Assume there is something on the queue. If we receive TCP_INQ from the
730      * kernel, we will update this value; otherwise, we have to assume there is
731      * always something to read until we get EAGAIN. */
732     tcp->inq = 1;
733 
734     msg.msg_name = nullptr;
735     msg.msg_namelen = 0;
736     msg.msg_iov = iov;
737     msg.msg_iovlen = static_cast<msg_iovlen_type>(iov_len);
738     if (tcp->inq_capable) {
739       msg.msg_control = cmsgbuf;
740       msg.msg_controllen = sizeof(cmsgbuf);
741     } else {
742       msg.msg_control = nullptr;
743       msg.msg_controllen = 0;
744     }
745     msg.msg_flags = 0;
746 
747     GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
748     GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
749 
750     do {
751       GPR_TIMER_SCOPE("recvmsg", 0);
752       GRPC_STATS_INC_SYSCALL_READ();
753       read_bytes = recvmsg(tcp->fd, &msg, 0);
754     } while (read_bytes < 0 && errno == EINTR);
755 
756     /* We have read something in previous reads. We need to deliver those
757      * bytes to the upper layer. */
758     if (read_bytes <= 0 && total_read_bytes > 0) {
759       tcp->inq = 1;
760       break;
761     }
762 
763     if (read_bytes < 0) {
764       /* NB: After calling call_read_cb a parallel call of the read handler may
765        * be running. */
766       if (errno == EAGAIN) {
767         finish_estimate(tcp);
768         tcp->inq = 0;
769         /* We've consumed the edge, request a new one */
770         notify_on_read(tcp);
771       } else {
772         grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
773         call_read_cb(tcp,
774                      tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
775         TCP_UNREF(tcp, "read");
776       }
777       return;
778     }
779     if (read_bytes == 0) {
780       /* 0 read size ==> end of stream
781        *
782        * We may have read something, i.e., total_read_bytes > 0, but
783        * since the connection is closed we will drop the data here, because we
784        * can't call the callback multiple times. */
785       grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
786       call_read_cb(
787           tcp, tcp_annotate_error(
788                    GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
789       TCP_UNREF(tcp, "read");
790       return;
791     }
792 
793     GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
794     add_to_estimate(tcp, static_cast<size_t>(read_bytes));
795     GPR_DEBUG_ASSERT((size_t)read_bytes <=
796                      tcp->incoming_buffer->length - total_read_bytes);
797 
798 #ifdef GRPC_HAVE_TCP_INQ
799     if (tcp->inq_capable) {
800       GPR_DEBUG_ASSERT(!(msg.msg_flags & MSG_CTRUNC));
801       struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
802       for (; cmsg != nullptr; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
803         if (cmsg->cmsg_level == SOL_TCP && cmsg->cmsg_type == TCP_CM_INQ &&
804             cmsg->cmsg_len == CMSG_LEN(sizeof(int))) {
805           tcp->inq = *reinterpret_cast<int*>(CMSG_DATA(cmsg));
806           break;
807         }
808       }
809     }
810 #endif /* GRPC_HAVE_TCP_INQ */
811 
812     total_read_bytes += read_bytes;
813     if (tcp->inq == 0 || total_read_bytes == tcp->incoming_buffer->length) {
814       /* We have filled incoming_buffer, and we cannot read any more. */
815       break;
816     }
817 
818     /* We had a partial read, and still have space to read more data.
819      * So, adjust IOVs and try to read more. */
820     size_t remaining = read_bytes;
821     size_t j = 0;
822     for (size_t i = 0; i < iov_len; i++) {
823       if (remaining >= iov[i].iov_len) {
824         remaining -= iov[i].iov_len;
825         continue;
826       }
827       if (remaining > 0) {
828         iov[j].iov_base = static_cast<char*>(iov[i].iov_base) + remaining;
829         iov[j].iov_len = iov[i].iov_len - remaining;
830         remaining = 0;
831       } else {
832         iov[j].iov_base = iov[i].iov_base;
833         iov[j].iov_len = iov[i].iov_len;
834       }
835       ++j;
836     }
837     iov_len = j;
838   } while (true);
839 
840   if (tcp->inq == 0) {
841     finish_estimate(tcp);
842   }
843 
844   GPR_DEBUG_ASSERT(total_read_bytes > 0);
845   if (total_read_bytes < tcp->incoming_buffer->length) {
846     grpc_slice_buffer_trim_end(tcp->incoming_buffer,
847                                tcp->incoming_buffer->length - total_read_bytes,
848                                &tcp->last_read_buffer);
849   }
850   call_read_cb(tcp, GRPC_ERROR_NONE);
851   TCP_UNREF(tcp, "read");
852 }
853 
854 static void tcp_read_allocation_done(void* tcpp, grpc_error_handle error) {
855   grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
856   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
857     gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
858             grpc_error_std_string(error).c_str());
859   }
860   if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
861     grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
862     grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
863     call_read_cb(tcp, GRPC_ERROR_REF(error));
864     TCP_UNREF(tcp, "read");
865   } else {
866     tcp_do_read(tcp);
867   }
868 }
869 
870 static void tcp_continue_read(grpc_tcp* tcp) {
871   size_t target_read_size = get_target_read_size(tcp);
872   /* Wait for allocation only when there is no buffer left. */
873   if (tcp->incoming_buffer->length == 0 &&
874       tcp->incoming_buffer->count < MAX_READ_IOVEC) {
875     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
876       gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
877     }
878     if (GPR_UNLIKELY(!grpc_resource_user_alloc_slices(&tcp->slice_allocator,
879                                                       target_read_size, 1,
880                                                       tcp->incoming_buffer))) {
881       // Wait for allocation.
882       return;
883     }
884   }
885   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
886     gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
887   }
888   tcp_do_read(tcp);
889 }
890 
891 static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
892   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
893   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
894     gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
895             grpc_error_std_string(error).c_str());
896   }
897 
898   if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
899     grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
900     grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
901     call_read_cb(tcp, GRPC_ERROR_REF(error));
902     TCP_UNREF(tcp, "read");
903   } else {
904     tcp_continue_read(tcp);
905   }
906 }
907 
908 static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
909                      grpc_closure* cb, bool urgent) {
910   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
911   GPR_ASSERT(tcp->read_cb == nullptr);
912   tcp->read_cb = cb;
913   tcp->incoming_buffer = incoming_buffer;
914   grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
915   grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
916   TCP_REF(tcp, "read");
917   if (tcp->is_first_read) {
918     /* Endpoint read called for the very first time. Register read callback with
919      * the polling engine */
920     tcp->is_first_read = false;
921     notify_on_read(tcp);
922   } else if (!urgent && tcp->inq == 0) {
923     /* Upper layer asked to read more but we know there is no pending data
924      * to read from previous reads. So, wait for POLLIN.
925      */
926     notify_on_read(tcp);
927   } else {
928     /* Not the first time. We may or may not have more bytes available. In any
929      * case call tcp->read_done_closure (i.e tcp_handle_read()) which does the
930      * right thing (i.e calls tcp_do_read() which either reads the available
931      * bytes or calls notify_on_read() to be notified when new bytes become
932      * available */
933     grpc_core::Closure::Run(DEBUG_LOCATION, &tcp->read_done_closure,
934                             GRPC_ERROR_NONE);
935   }
936 }
937 
938 /* A wrapper around sendmsg. It sends \a msg over \a fd and returns the number
939  * of bytes sent. */
940 ssize_t tcp_send(int fd, const struct msghdr* msg, int additional_flags = 0) {
941   GPR_TIMER_SCOPE("sendmsg", 1);
942   ssize_t sent_length;
943   do {
944     /* TODO(klempner): Cork if this is a partial write */
945     GRPC_STATS_INC_SYSCALL_WRITE();
946     sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
947   } while (sent_length < 0 && errno == EINTR);
948   return sent_length;
949 }
950 
951 /** This is to be called if outgoing_buffer_arg is not null. On linux platforms,
952  * this will call sendmsg with socket options set to collect timestamps inside
953  * the kernel. On return, sent_length is set to the return value of the sendmsg
954  * call. Returns false if setting the socket options failed. This is not
955  * implemented for non-linux platforms currently, and crashes out.
956  */
957 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
958                                       size_t sending_length,
959                                       ssize_t* sent_length,
960                                       int additional_flags = 0);
961 
962 /** The callback function to be invoked when we get an error on the socket. */
963 static void tcp_handle_error(void* arg /* grpc_tcp */, grpc_error_handle error);
964 
965 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
966     grpc_tcp* tcp, grpc_slice_buffer* buf);
967 
968 #ifdef GRPC_LINUX_ERRQUEUE
969 static bool process_errors(grpc_tcp* tcp);
970 
971 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
972     grpc_tcp* tcp, grpc_slice_buffer* buf) {
973   TcpZerocopySendRecord* zerocopy_send_record = nullptr;
974   const bool use_zerocopy =
975       tcp->tcp_zerocopy_send_ctx.enabled() &&
976       tcp->tcp_zerocopy_send_ctx.threshold_bytes() < buf->length;
977   if (use_zerocopy) {
978     zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
979     if (zerocopy_send_record == nullptr) {
980       process_errors(tcp);
981       zerocopy_send_record = tcp->tcp_zerocopy_send_ctx.GetSendRecord();
982     }
983     if (zerocopy_send_record != nullptr) {
984       zerocopy_send_record->PrepareForSends(buf);
985       GPR_DEBUG_ASSERT(buf->count == 0);
986       GPR_DEBUG_ASSERT(buf->length == 0);
987       tcp->outgoing_byte_idx = 0;
988       tcp->outgoing_buffer = nullptr;
989     }
990   }
991   return zerocopy_send_record;
992 }
993 
994 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* tcp) {
995   tcp->tcp_zerocopy_send_ctx.Shutdown();
996   while (!tcp->tcp_zerocopy_send_ctx.AllSendRecordsEmpty()) {
997     process_errors(tcp);
998   }
999 }
1000 
1001 static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
1002                                       size_t sending_length,
1003                                       ssize_t* sent_length,
1004                                       int additional_flags) {
1005   if (!tcp->socket_ts_enabled) {
1006     uint32_t opt = grpc_core::kTimestampingSocketOptions;
1007     if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
1008                    static_cast<void*>(&opt), sizeof(opt)) != 0) {
1009       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1010         gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
1011       }
1012       return false;
1013     }
1014     tcp->bytes_counter = -1;
1015     tcp->socket_ts_enabled = true;
1016   }
1017   /* Set control message to indicate that you want timestamps. */
1018   union {
1019     char cmsg_buf[CMSG_SPACE(sizeof(uint32_t))];
1020     struct cmsghdr align;
1021   } u;
1022   cmsghdr* cmsg = reinterpret_cast<cmsghdr*>(u.cmsg_buf);
1023   cmsg->cmsg_level = SOL_SOCKET;
1024   cmsg->cmsg_type = SO_TIMESTAMPING;
1025   cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
1026   *reinterpret_cast<int*>(CMSG_DATA(cmsg)) =
1027       grpc_core::kTimestampingRecordingOptions;
1028   msg->msg_control = u.cmsg_buf;
1029   msg->msg_controllen = CMSG_SPACE(sizeof(uint32_t));
1030 
1031   /* If there was an error on sendmsg the logic in tcp_flush will handle it. */
1032   ssize_t length = tcp_send(tcp->fd, msg, additional_flags);
1033   *sent_length = length;
1034   /* Only save timestamps if all the bytes were taken by sendmsg. */
1035   if (sending_length == static_cast<size_t>(length)) {
1036     gpr_mu_lock(&tcp->tb_mu);
1037     grpc_core::TracedBuffer::AddNewEntry(
1038         &tcp->tb_head, static_cast<uint32_t>(tcp->bytes_counter + length),
1039         tcp->fd, tcp->outgoing_buffer_arg);
1040     gpr_mu_unlock(&tcp->tb_mu);
1041     tcp->outgoing_buffer_arg = nullptr;
1042   }
1043   return true;
1044 }
1045 
1046 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1047                                             TcpZerocopySendRecord* record,
1048                                             uint32_t seq, const char* tag);
1049 // Reads \a cmsg to process zerocopy control messages.
1050 static void process_zerocopy(grpc_tcp* tcp, struct cmsghdr* cmsg) {
1051   GPR_DEBUG_ASSERT(cmsg);
1052   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(cmsg));
1053   GPR_DEBUG_ASSERT(serr->ee_errno == 0);
1054   GPR_DEBUG_ASSERT(serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY);
1055   const uint32_t lo = serr->ee_info;
1056   const uint32_t hi = serr->ee_data;
1057   for (uint32_t seq = lo; seq <= hi; ++seq) {
1058     // TODO(arjunroy): It's likely that lo and hi refer to zerocopy sequence
1059     // numbers that are generated by a single call to grpc_endpoint_write; ie.
1060     // we can batch the unref operation. So, check if record is the same for
1061     // both; if so, batch the unref/put.
1062     TcpZerocopySendRecord* record =
1063         tcp->tcp_zerocopy_send_ctx.ReleaseSendRecord(seq);
1064     GPR_DEBUG_ASSERT(record);
1065     UnrefMaybePutZerocopySendRecord(tcp, record, seq, "CALLBACK RCVD");
1066   }
1067 }
1068 
1069 // Whether the cmsg received from error queue is of the IPv4 or IPv6 levels.
1070 static bool CmsgIsIpLevel(const cmsghdr& cmsg) {
1071   return (cmsg.cmsg_level == SOL_IPV6 && cmsg.cmsg_type == IPV6_RECVERR) ||
1072          (cmsg.cmsg_level == SOL_IP && cmsg.cmsg_type == IP_RECVERR);
1073 }
1074 
1075 static bool CmsgIsZeroCopy(const cmsghdr& cmsg) {
1076   if (!CmsgIsIpLevel(cmsg)) {
1077     return false;
1078   }
1079   auto serr = reinterpret_cast<const sock_extended_err*> CMSG_DATA(&cmsg);
1080   return serr->ee_errno == 0 && serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY;
1081 }
1082 
1083 /** Reads \a cmsg to derive timestamps from the control messages. If a valid
1084  * timestamp is found, the traced buffer list is updated with this timestamp.
1085  * The caller of this function should be looping on the control messages found
1086  * in \a msg. \a cmsg should point to the control message that the caller wants
1087  * processed.
1088  * On return, a pointer to a control message is returned. On the next iteration,
1089  * CMSG_NXTHDR(msg, ret_val) should be passed as \a cmsg. */
1090 struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
1091                                   struct cmsghdr* cmsg) {
1092   auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
1093   cmsghdr* opt_stats = nullptr;
1094   if (next_cmsg == nullptr) {
1095     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1096       gpr_log(GPR_ERROR, "Received timestamp without extended error");
1097     }
1098     return cmsg;
1099   }
1100 
1101   /* Check if next_cmsg is an OPT_STATS msg */
1102   if (next_cmsg->cmsg_level == SOL_SOCKET &&
1103       next_cmsg->cmsg_type == SCM_TIMESTAMPING_OPT_STATS) {
1104     opt_stats = next_cmsg;
1105     next_cmsg = CMSG_NXTHDR(msg, opt_stats);
1106     if (next_cmsg == nullptr) {
1107       if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1108         gpr_log(GPR_ERROR, "Received timestamp without extended error");
1109       }
1110       return opt_stats;
1111     }
1112   }
1113 
1114   if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
1115       !(next_cmsg->cmsg_type == IP_RECVERR ||
1116         next_cmsg->cmsg_type == IPV6_RECVERR)) {
1117     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1118       gpr_log(GPR_ERROR, "Unexpected control message");
1119     }
1120     return cmsg;
1121   }
1122 
1123   auto tss =
1124       reinterpret_cast<struct grpc_core::scm_timestamping*>(CMSG_DATA(cmsg));
1125   auto serr = reinterpret_cast<struct sock_extended_err*>(CMSG_DATA(next_cmsg));
1126   if (serr->ee_errno != ENOMSG ||
1127       serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
1128     gpr_log(GPR_ERROR, "Unexpected control message");
1129     return cmsg;
1130   }
1131   /* The error handling can potentially be done on another thread so we need
1132    * to protect the traced buffer list. A lock free list might be better. Using
1133    * a simple mutex for now. */
1134   gpr_mu_lock(&tcp->tb_mu);
1135   grpc_core::TracedBuffer::ProcessTimestamp(&tcp->tb_head, serr, opt_stats,
1136                                             tss);
1137   gpr_mu_unlock(&tcp->tb_mu);
1138   return next_cmsg;
1139 }
1140 
1141 /** For linux platforms, reads the socket's error queue and processes error
1142  * messages from the queue.
1143  */
1144 static bool process_errors(grpc_tcp* tcp) {
1145   bool processed_err = false;
1146   struct iovec iov;
1147   iov.iov_base = nullptr;
1148   iov.iov_len = 0;
1149   struct msghdr msg;
1150   msg.msg_name = nullptr;
1151   msg.msg_namelen = 0;
1152   msg.msg_iov = &iov;
1153   msg.msg_iovlen = 0;
1154   msg.msg_flags = 0;
1155   /* Allocate enough space so we don't need to keep increasing this as the
1156    * size of OPT_STATS increases. */
1157   constexpr size_t cmsg_alloc_space =
1158       CMSG_SPACE(sizeof(grpc_core::scm_timestamping)) +
1159       CMSG_SPACE(sizeof(sock_extended_err) + sizeof(sockaddr_in)) +
1160       CMSG_SPACE(32 * NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)));
1161   /* Allocate aligned space for cmsgs received along with timestamps */
1162   union {
1163     char rbuf[cmsg_alloc_space];
1164     struct cmsghdr align;
1165   } aligned_buf;
1166   msg.msg_control = aligned_buf.rbuf;
1167   msg.msg_controllen = sizeof(aligned_buf.rbuf);
1168   int r, saved_errno;
1169   while (true) {
1170     do {
1171       r = recvmsg(tcp->fd, &msg, MSG_ERRQUEUE);
1172       saved_errno = errno;
1173     } while (r < 0 && saved_errno == EINTR);
1174 
1175     if (r == -1 && saved_errno == EAGAIN) {
1176       return processed_err; /* No more errors to process */
1177     }
1178     if (r == -1) {
1179       return processed_err;
1180     }
1181     if (GPR_UNLIKELY((msg.msg_flags & MSG_CTRUNC) != 0)) {
1182       gpr_log(GPR_ERROR, "Error message was truncated.");
1183     }
1184 
1185     if (msg.msg_controllen == 0) {
1186       /* There was no control message found. It was probably spurious. */
1187       return processed_err;
1188     }
1189     bool seen = false;
1190     for (auto cmsg = CMSG_FIRSTHDR(&msg); cmsg && cmsg->cmsg_len;
1191          cmsg = CMSG_NXTHDR(&msg, cmsg)) {
1192       if (CmsgIsZeroCopy(*cmsg)) {
1193         process_zerocopy(tcp, cmsg);
1194         seen = true;
1195         processed_err = true;
1196       } else if (cmsg->cmsg_level == SOL_SOCKET &&
1197                  cmsg->cmsg_type == SCM_TIMESTAMPING) {
1198         cmsg = process_timestamp(tcp, &msg, cmsg);
1199         seen = true;
1200         processed_err = true;
1201       } else {
1202         /* Got a control message that is not a timestamp or zerocopy. Don't know
1203          * how to handle this. */
1204         if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1205           gpr_log(GPR_INFO,
1206                   "unknown control message cmsg_level:%d cmsg_type:%d",
1207                   cmsg->cmsg_level, cmsg->cmsg_type);
1208         }
1209         return processed_err;
1210       }
1211     }
1212     if (!seen) {
1213       return processed_err;
1214     }
1215   }
1216 }
1217 
1218 static void tcp_handle_error(void* arg /* grpc_tcp */,
1219                              grpc_error_handle error) {
1220   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1221   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1222     gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
1223             grpc_error_std_string(error).c_str());
1224   }
1225 
1226   if (error != GRPC_ERROR_NONE ||
1227       static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
1228     /* We aren't going to register to hear on error anymore, so it is safe to
1229      * unref. */
1230     TCP_UNREF(tcp, "error-tracking");
1231     return;
1232   }
1233 
1234   /* We are still interested in collecting timestamps, so let's try reading
1235    * them. */
1236   bool processed = process_errors(tcp);
1237   /* This might not be a timestamps error. Set the read and write closures to be
1238    * ready. */
1239   if (!processed) {
1240     grpc_fd_set_readable(tcp->em_fd);
1241     grpc_fd_set_writable(tcp->em_fd);
1242   }
1243   grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
1244 }
1245 
1246 #else  /* GRPC_LINUX_ERRQUEUE */
1247 static TcpZerocopySendRecord* tcp_get_send_zerocopy_record(
1248     grpc_tcp* /*tcp*/, grpc_slice_buffer* /*buf*/) {
1249   return nullptr;
1250 }
1251 
1252 static void ZerocopyDisableAndWaitForRemaining(grpc_tcp* /*tcp*/) {}
1253 
1254 static bool tcp_write_with_timestamps(grpc_tcp* /*tcp*/, struct msghdr* /*msg*/,
1255                                       size_t /*sending_length*/,
1256                                       ssize_t* /*sent_length*/,
1257                                       int /*additional_flags*/) {
1258   gpr_log(GPR_ERROR, "Write with timestamps not supported for this platform");
1259   GPR_ASSERT(0);
1260   return false;
1261 }
1262 
1263 static void tcp_handle_error(void* /*arg*/ /* grpc_tcp */,
1264                              grpc_error_handle /*error*/) {
1265   gpr_log(GPR_ERROR, "Error handling is not supported for this platform");
1266   GPR_ASSERT(0);
1267 }
1268 #endif /* GRPC_LINUX_ERRQUEUE */
1269 
1270 /* If outgoing_buffer_arg is filled, shuts down the list early, so that any
1271  * release operations needed can be performed on the arg */
1272 void tcp_shutdown_buffer_list(grpc_tcp* tcp) {
1273   if (tcp->outgoing_buffer_arg) {
1274     gpr_mu_lock(&tcp->tb_mu);
1275     grpc_core::TracedBuffer::Shutdown(
1276         &tcp->tb_head, tcp->outgoing_buffer_arg,
1277         GRPC_ERROR_CREATE_FROM_STATIC_STRING("TracedBuffer list shutdown"));
1278     gpr_mu_unlock(&tcp->tb_mu);
1279     tcp->outgoing_buffer_arg = nullptr;
1280   }
1281 }
1282 
1283 #if defined(IOV_MAX) && IOV_MAX < 1000
1284 #define MAX_WRITE_IOVEC IOV_MAX
1285 #else
1286 #define MAX_WRITE_IOVEC 1000
1287 #endif
1288 msg_iovlen_type TcpZerocopySendRecord::PopulateIovs(size_t* unwind_slice_idx,
1289                                                     size_t* unwind_byte_idx,
1290                                                     size_t* sending_length,
1291                                                     iovec* iov) {
1292   msg_iovlen_type iov_size;
1293   *unwind_slice_idx = out_offset_.slice_idx;
1294   *unwind_byte_idx = out_offset_.byte_idx;
1295   for (iov_size = 0;
1296        out_offset_.slice_idx != buf_.count && iov_size != MAX_WRITE_IOVEC;
1297        iov_size++) {
1298     iov[iov_size].iov_base =
1299         GRPC_SLICE_START_PTR(buf_.slices[out_offset_.slice_idx]) +
1300         out_offset_.byte_idx;
1301     iov[iov_size].iov_len =
1302         GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]) -
1303         out_offset_.byte_idx;
1304     *sending_length += iov[iov_size].iov_len;
1305     ++(out_offset_.slice_idx);
1306     out_offset_.byte_idx = 0;
1307   }
1308   GPR_DEBUG_ASSERT(iov_size > 0);
1309   return iov_size;
1310 }
1311 
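/* PopulateIovs() advances out_offset_ past every slice it packs into the
 * iovec, so after a partial send we walk backwards from that position until
 * the unsent ("trailing") bytes are accounted for, leaving out_offset_ at the
 * first unsent byte. */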
1312 void TcpZerocopySendRecord::UpdateOffsetForBytesSent(size_t sending_length,
1313                                                      size_t actually_sent) {
1314   size_t trailing = sending_length - actually_sent;
1315   while (trailing > 0) {
1316     size_t slice_length;
1317     out_offset_.slice_idx--;
1318     slice_length = GRPC_SLICE_LENGTH(buf_.slices[out_offset_.slice_idx]);
1319     if (slice_length > trailing) {
1320       out_offset_.byte_idx = slice_length - trailing;
1321       break;
1322     } else {
1323       trailing -= slice_length;
1324     }
1325   }
1326 }
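// Worked example (illustrative, not from the original source): suppose
// PopulateIovs() gathered three slices of 100, 200 and 300 bytes
// (sending_length == 600) and sendmsg() accepted only 450 of them. Then
// trailing == 150, so the loop above steps back to the last slice
// (length 300 > 150) and the next flush resumes at byte_idx == 300 - 150 == 150
// within that slice.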
1327 
1328 // returns true if done, false if pending; if returning true, *error is set
1329 static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1330                                   grpc_error_handle* error) {
1331   struct msghdr msg;
1332   struct iovec iov[MAX_WRITE_IOVEC];
1333   msg_iovlen_type iov_size;
1334   ssize_t sent_length = 0;
1335   size_t sending_length;
1336   size_t unwind_slice_idx;
1337   size_t unwind_byte_idx;
1338   while (true) {
1339     sending_length = 0;
1340     iov_size = record->PopulateIovs(&unwind_slice_idx, &unwind_byte_idx,
1341                                     &sending_length, iov);
1342     msg.msg_name = nullptr;
1343     msg.msg_namelen = 0;
1344     msg.msg_iov = iov;
1345     msg.msg_iovlen = iov_size;
1346     msg.msg_flags = 0;
1347     bool tried_sending_message = false;
1348     // Before calling sendmsg (with or without timestamps): we
1349     // take a single ref on the zerocopy send record.
1350     tcp->tcp_zerocopy_send_ctx.NoteSend(record);
1351     if (tcp->outgoing_buffer_arg != nullptr) {
1352       if (!tcp->ts_capable ||
1353           !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length,
1354                                      MSG_ZEROCOPY)) {
1355         /* We could not set socket options to collect Fathom timestamps.
1356          * Fall back to writing without timestamps. */
1357         tcp->ts_capable = false;
1358         tcp_shutdown_buffer_list(tcp);
1359       } else {
1360         tried_sending_message = true;
1361       }
1362     }
1363     if (!tried_sending_message) {
1364       msg.msg_control = nullptr;
1365       msg.msg_controllen = 0;
1366       GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
1367       GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
1368       sent_length = tcp_send(tcp->fd, &msg, MSG_ZEROCOPY);
1369     }
1370     if (sent_length < 0) {
1371       // If this particular send failed, drop ref taken earlier in this method.
1372       tcp->tcp_zerocopy_send_ctx.UndoSend();
1373       if (errno == EAGAIN) {
1374         record->UnwindIfThrottled(unwind_slice_idx, unwind_byte_idx);
1375         return false;
1376       } else if (errno == EPIPE) {
1377         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1378         tcp_shutdown_buffer_list(tcp);
1379         return true;
1380       } else {
1381         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1382         tcp_shutdown_buffer_list(tcp);
1383         return true;
1384       }
1385     }
1386     tcp->bytes_counter += sent_length;
1387     record->UpdateOffsetForBytesSent(sending_length,
1388                                      static_cast<size_t>(sent_length));
1389     if (record->AllSlicesSent()) {
1390       *error = GRPC_ERROR_NONE;
1391       return true;
1392     }
1393   }
1394 }
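// Editor's note (summary, hedged): the reference protocol above is one
// NoteSend() per attempted sendmsg(); UndoSend() drops that ref when the send
// fails outright, while a successful send keeps it until the zerocopy
// completion (reported on the socket error queue and handled by the
// error-queue code earlier in this file) or UnrefMaybePutZerocopySendRecord()
// below releases the record for reuse.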
1395 
1396 static void UnrefMaybePutZerocopySendRecord(grpc_tcp* tcp,
1397                                             TcpZerocopySendRecord* record,
1398                                             uint32_t /*seq*/,
1399                                             const char* /*tag*/) {
1400   if (record->Unref()) {
1401     tcp->tcp_zerocopy_send_ctx.PutSendRecord(record);
1402   }
1403 }
1404 
1405 static bool tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
1406                                grpc_error_handle* error) {
1407   bool done = do_tcp_flush_zerocopy(tcp, record, error);
1408   if (done) {
1409     // Either we encountered an error, or we successfully sent all the bytes.
1410     // In either case, we're done with this record.
1411     UnrefMaybePutZerocopySendRecord(tcp, record, 0, "flush_done");
1412   }
1413   return done;
1414 }
1415 
1416 static bool tcp_flush(grpc_tcp* tcp, grpc_error_handle* error) {
1417   struct msghdr msg;
1418   struct iovec iov[MAX_WRITE_IOVEC];
1419   msg_iovlen_type iov_size;
1420   ssize_t sent_length = 0;
1421   size_t sending_length;
1422   size_t trailing;
1423   size_t unwind_slice_idx;
1424   size_t unwind_byte_idx;
1425 
1426   // We always start at zero, because we eagerly unref and trim the slice
1427   // buffer as we write
1428   size_t outgoing_slice_idx = 0;
1429 
1430   while (true) {
1431     sending_length = 0;
1432     unwind_slice_idx = outgoing_slice_idx;
1433     unwind_byte_idx = tcp->outgoing_byte_idx;
1434     for (iov_size = 0; outgoing_slice_idx != tcp->outgoing_buffer->count &&
1435                        iov_size != MAX_WRITE_IOVEC;
1436          iov_size++) {
1437       iov[iov_size].iov_base =
1438           GRPC_SLICE_START_PTR(
1439               tcp->outgoing_buffer->slices[outgoing_slice_idx]) +
1440           tcp->outgoing_byte_idx;
1441       iov[iov_size].iov_len =
1442           GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]) -
1443           tcp->outgoing_byte_idx;
1444       sending_length += iov[iov_size].iov_len;
1445       outgoing_slice_idx++;
1446       tcp->outgoing_byte_idx = 0;
1447     }
1448     GPR_ASSERT(iov_size > 0);
1449 
1450     msg.msg_name = nullptr;
1451     msg.msg_namelen = 0;
1452     msg.msg_iov = iov;
1453     msg.msg_iovlen = iov_size;
1454     msg.msg_flags = 0;
1455     bool tried_sending_message = false;
1456     if (tcp->outgoing_buffer_arg != nullptr) {
1457       if (!tcp->ts_capable ||
1458           !tcp_write_with_timestamps(tcp, &msg, sending_length, &sent_length)) {
1459         /* We could not set socket options to collect Fathom timestamps.
1460          * Fall back to writing without timestamps. */
1461         tcp->ts_capable = false;
1462         tcp_shutdown_buffer_list(tcp);
1463       } else {
1464         tried_sending_message = true;
1465       }
1466     }
1467     if (!tried_sending_message) {
1468       msg.msg_control = nullptr;
1469       msg.msg_controllen = 0;
1470 
1471       GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
1472       GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
1473 
1474       sent_length = tcp_send(tcp->fd, &msg);
1475     }
1476 
1477     if (sent_length < 0) {
1478       if (errno == EAGAIN) {
1479         tcp->outgoing_byte_idx = unwind_byte_idx;
1480         // Unref and drop every slice that has already been fully written
1481         // up to this point.
1482         for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
1483           grpc_slice_buffer_remove_first(tcp->outgoing_buffer);
1484         }
1485         return false;
1486       } else if (errno == EPIPE) {
1487         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1488         grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
1489         tcp_shutdown_buffer_list(tcp);
1490         return true;
1491       } else {
1492         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
1493         grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
1494         tcp_shutdown_buffer_list(tcp);
1495         return true;
1496       }
1497     }
1498 
1499     GPR_ASSERT(tcp->outgoing_byte_idx == 0);
1500     tcp->bytes_counter += sent_length;
1501     trailing = sending_length - static_cast<size_t>(sent_length);
1502     while (trailing > 0) {
1503       size_t slice_length;
1504 
1505       outgoing_slice_idx--;
1506       slice_length =
1507           GRPC_SLICE_LENGTH(tcp->outgoing_buffer->slices[outgoing_slice_idx]);
1508       if (slice_length > trailing) {
1509         tcp->outgoing_byte_idx = slice_length - trailing;
1510         break;
1511       } else {
1512         trailing -= slice_length;
1513       }
1514     }
1515     if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
1516       *error = GRPC_ERROR_NONE;
1517       grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
1518       return true;
1519     }
1520   }
1521 }
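// Worked example (illustrative, not from the original source): with an
// outgoing_buffer holding slices of 100 and 200 bytes, a sendmsg() that
// accepts 250 bytes leaves trailing == 50, so outgoing_byte_idx becomes 150
// within the second slice and the loop retries from there. If sendmsg()
// instead fails with EAGAIN, the slices fully written in earlier iterations
// are removed from the buffer and tcp_flush() returns false so the caller
// re-arms notify_on_write().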
1522 
1523 static void tcp_handle_write(void* arg /* grpc_tcp */,
1524                              grpc_error_handle error) {
1525   grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
1526   grpc_closure* cb;
1527 
1528   if (error != GRPC_ERROR_NONE) {
1529     cb = tcp->write_cb;
1530     tcp->write_cb = nullptr;
1531     if (tcp->current_zerocopy_send != nullptr) {
1532       UnrefMaybePutZerocopySendRecord(tcp, tcp->current_zerocopy_send, 0,
1533                                       "handle_write_err");
1534       tcp->current_zerocopy_send = nullptr;
1535     }
1536     grpc_core::Closure::Run(DEBUG_LOCATION, cb, GRPC_ERROR_REF(error));
1537     TCP_UNREF(tcp, "write");
1538     return;
1539   }
1540 
1541   bool flush_result =
1542       tcp->current_zerocopy_send != nullptr
1543           ? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
1544           : tcp_flush(tcp, &error);
1545   if (!flush_result) {
1546     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1547       gpr_log(GPR_INFO, "write: delayed");
1548     }
1549     notify_on_write(tcp);
1550     // tcp_flush does not populate error if it has returned false.
1551     GPR_DEBUG_ASSERT(error == GRPC_ERROR_NONE);
1552   } else {
1553     cb = tcp->write_cb;
1554     tcp->write_cb = nullptr;
1555     tcp->current_zerocopy_send = nullptr;
1556     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1557       gpr_log(GPR_INFO, "write: %s", grpc_error_std_string(error).c_str());
1558     }
1559     // No need to take a ref on error since tcp_flush provides a ref.
1560     grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1561     TCP_UNREF(tcp, "write");
1562   }
1563 }
1564 
1565 static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
1566                       grpc_closure* cb, void* arg) {
1567   GPR_TIMER_SCOPE("tcp_write", 0);
1568   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1569   grpc_error_handle error = GRPC_ERROR_NONE;
1570   TcpZerocopySendRecord* zerocopy_send_record = nullptr;
1571 
1572   if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1573     size_t i;
1574 
1575     for (i = 0; i < buf->count; i++) {
1576       gpr_log(GPR_INFO, "WRITE %p (peer=%s)", tcp, tcp->peer_string.c_str());
1577       if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
1578         char* data =
1579             grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
1580         gpr_log(GPR_DEBUG, "DATA: %s", data);
1581         gpr_free(data);
1582       }
1583     }
1584   }
1585 
1586   GPR_ASSERT(tcp->write_cb == nullptr);
1587   GPR_DEBUG_ASSERT(tcp->current_zerocopy_send == nullptr);
1588 
1589   if (buf->length == 0) {
1590     grpc_core::Closure::Run(
1591         DEBUG_LOCATION, cb,
1592         grpc_fd_is_shutdown(tcp->em_fd)
1593             ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
1594                                  tcp)
1595             : GRPC_ERROR_NONE);
1596     tcp_shutdown_buffer_list(tcp);
1597     return;
1598   }
1599 
1600   zerocopy_send_record = tcp_get_send_zerocopy_record(tcp, buf);
1601   if (zerocopy_send_record == nullptr) {
1602     // Either not enough bytes, or couldn't allocate a zerocopy context.
1603     tcp->outgoing_buffer = buf;
1604     tcp->outgoing_byte_idx = 0;
1605   }
1606   tcp->outgoing_buffer_arg = arg;
1607   if (arg) {
1608     GPR_ASSERT(grpc_event_engine_can_track_errors());
1609   }
1610 
1611   bool flush_result =
1612       zerocopy_send_record != nullptr
1613           ? tcp_flush_zerocopy(tcp, zerocopy_send_record, &error)
1614           : tcp_flush(tcp, &error);
1615   if (!flush_result) {
1616     TCP_REF(tcp, "write");
1617     tcp->write_cb = cb;
1618     tcp->current_zerocopy_send = zerocopy_send_record;
1619     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1620       gpr_log(GPR_INFO, "write: delayed");
1621     }
1622     notify_on_write(tcp);
1623   } else {
1624     if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
1625       gpr_log(GPR_INFO, "write: %s", grpc_error_std_string(error).c_str());
1626     }
1627     grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
1628   }
1629 }
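// Illustrative usage sketch (not part of the original file; assumes `ep` came
// from grpc_tcp_create(), `on_write_done` is a grpc_closure* owned by the
// caller, and the code runs inside a grpc_core::ExecCtx):
//
//   grpc_slice_buffer out;
//   grpc_slice_buffer_init(&out);
//   grpc_slice_buffer_add(&out, grpc_slice_from_copied_string("hello"));
//   // Dispatches to tcp_write() above through the endpoint vtable.
//   grpc_endpoint_write(ep, &out, on_write_done, /*arg=*/nullptr);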
1630 
1631 static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
1632   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1633   grpc_pollset_add_fd(pollset, tcp->em_fd);
1634 }
1635 
1636 static void tcp_add_to_pollset_set(grpc_endpoint* ep,
1637                                    grpc_pollset_set* pollset_set) {
1638   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1639   grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
1640 }
1641 
1642 static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
1643                                         grpc_pollset_set* pollset_set) {
1644   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1645   grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
1646 }
1647 
1648 static absl::string_view tcp_get_peer(grpc_endpoint* ep) {
1649   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1650   return tcp->peer_string;
1651 }
1652 
1653 static absl::string_view tcp_get_local_address(grpc_endpoint* ep) {
1654   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1655   return tcp->local_address;
1656 }
1657 
1658 static int tcp_get_fd(grpc_endpoint* ep) {
1659   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1660   return tcp->fd;
1661 }
1662 
1663 static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
1664   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1665   return tcp->resource_user;
1666 }
1667 
1668 static bool tcp_can_track_err(grpc_endpoint* ep) {
1669   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1670   if (!grpc_event_engine_can_track_errors()) {
1671     return false;
1672   }
1673   struct sockaddr addr;
1674   socklen_t len = sizeof(addr);
1675   if (getsockname(tcp->fd, &addr, &len) < 0) {
1676     return false;
1677   }
1678   return addr.sa_family == AF_INET || addr.sa_family == AF_INET6;
1679 }
1680 
1681 static const grpc_endpoint_vtable vtable = {tcp_read,
1682                                             tcp_write,
1683                                             tcp_add_to_pollset,
1684                                             tcp_add_to_pollset_set,
1685                                             tcp_delete_from_pollset_set,
1686                                             tcp_shutdown,
1687                                             tcp_destroy,
1688                                             tcp_get_resource_user,
1689                                             tcp_get_peer,
1690                                             tcp_get_local_address,
1691                                             tcp_get_fd,
1692                                             tcp_can_track_err};
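// Editor's note: this is a positional aggregate initializer, so the entries
// above must stay in the same order as the members of grpc_endpoint_vtable
// (read, write, add_to_pollset, add_to_pollset_set, delete_from_pollset_set,
// shutdown, destroy, get_resource_user, get_peer, get_local_address, get_fd,
// can_track_err).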
1693 
1694 #define MAX_CHUNK_SIZE (32 * 1024 * 1024)
1695 
1696 grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
1697                                const grpc_channel_args* channel_args,
1698                                const char* peer_string) {
1699   static constexpr bool kZerocpTxEnabledDefault = false;
1700   int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
1701   int tcp_max_read_chunk_size = 4 * 1024 * 1024;
1702   int tcp_min_read_chunk_size = 256;
1703   bool tcp_tx_zerocopy_enabled = kZerocpTxEnabledDefault;
1704   int tcp_tx_zerocopy_send_bytes_thresh =
1705       grpc_core::TcpZerocopySendCtx::kDefaultSendBytesThreshold;
1706   int tcp_tx_zerocopy_max_simult_sends =
1707       grpc_core::TcpZerocopySendCtx::kDefaultMaxSends;
1708   grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
1709   if (channel_args != nullptr) {
1710     for (size_t i = 0; i < channel_args->num_args; i++) {
1711       if (0 ==
1712           strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
1713         grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
1714         tcp_read_chunk_size =
1715             grpc_channel_arg_get_integer(&channel_args->args[i], options);
1716       } else if (0 == strcmp(channel_args->args[i].key,
1717                              GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE)) {
1718         grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
1719         tcp_min_read_chunk_size =
1720             grpc_channel_arg_get_integer(&channel_args->args[i], options);
1721       } else if (0 == strcmp(channel_args->args[i].key,
1722                              GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE)) {
1723         grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
1724         tcp_max_read_chunk_size =
1725             grpc_channel_arg_get_integer(&channel_args->args[i], options);
1726       } else if (0 ==
1727                  strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
1728         grpc_resource_quota_unref_internal(resource_quota);
1729         resource_quota =
1730             grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
1731                 channel_args->args[i].value.pointer.p));
1732       } else if (0 == strcmp(channel_args->args[i].key,
1733                              GRPC_ARG_TCP_TX_ZEROCOPY_ENABLED)) {
1734         tcp_tx_zerocopy_enabled = grpc_channel_arg_get_bool(
1735             &channel_args->args[i], kZerocpTxEnabledDefault);
1736       } else if (0 == strcmp(channel_args->args[i].key,
1737                              GRPC_ARG_TCP_TX_ZEROCOPY_SEND_BYTES_THRESHOLD)) {
1738         grpc_integer_options options = {
1739             grpc_core::TcpZerocopySendCtx::kDefaultSendBytesThreshold, 0,
1740             INT_MAX};
1741         tcp_tx_zerocopy_send_bytes_thresh =
1742             grpc_channel_arg_get_integer(&channel_args->args[i], options);
1743       } else if (0 == strcmp(channel_args->args[i].key,
1744                              GRPC_ARG_TCP_TX_ZEROCOPY_MAX_SIMULT_SENDS)) {
1745         grpc_integer_options options = {
1746             grpc_core::TcpZerocopySendCtx::kDefaultMaxSends, 0, INT_MAX};
1747         tcp_tx_zerocopy_max_simult_sends =
1748             grpc_channel_arg_get_integer(&channel_args->args[i], options);
1749       }
1750     }
1751   }
1752 
1753   if (tcp_min_read_chunk_size > tcp_max_read_chunk_size) {
1754     tcp_min_read_chunk_size = tcp_max_read_chunk_size;
1755   }
1756   tcp_read_chunk_size = GPR_CLAMP(tcp_read_chunk_size, tcp_min_read_chunk_size,
1757                                   tcp_max_read_chunk_size);
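  // For example, with the defaults above (min 256 bytes, max 4 MiB), a
  // requested chunk size of 8 MiB is clamped down to 4 MiB and a request of
  // 64 bytes is raised to 256.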
1758 
1759   grpc_tcp* tcp = new grpc_tcp(tcp_tx_zerocopy_max_simult_sends,
1760                                tcp_tx_zerocopy_send_bytes_thresh);
1761   tcp->base.vtable = &vtable;
1762   tcp->peer_string = peer_string;
1763   tcp->fd = grpc_fd_wrapped_fd(em_fd);
1764   grpc_resolved_address resolved_local_addr;
1765   memset(&resolved_local_addr, 0, sizeof(resolved_local_addr));
1766   resolved_local_addr.len = sizeof(resolved_local_addr.addr);
1767   if (getsockname(tcp->fd,
1768                   reinterpret_cast<sockaddr*>(resolved_local_addr.addr),
1769                   &resolved_local_addr.len) < 0) {
1770     tcp->local_address = "";
1771   } else {
1772     tcp->local_address = grpc_sockaddr_to_uri(&resolved_local_addr);
1773   }
1774   tcp->read_cb = nullptr;
1775   tcp->write_cb = nullptr;
1776   tcp->current_zerocopy_send = nullptr;
1777   tcp->release_fd_cb = nullptr;
1778   tcp->release_fd = nullptr;
1779   tcp->incoming_buffer = nullptr;
1780   tcp->target_length = static_cast<double>(tcp_read_chunk_size);
1781   tcp->min_read_chunk_size = tcp_min_read_chunk_size;
1782   tcp->max_read_chunk_size = tcp_max_read_chunk_size;
1783   tcp->bytes_read_this_round = 0;
1784   /* Will be set to false by the very first endpoint read function */
1785   tcp->is_first_read = true;
1786   tcp->bytes_counter = -1;
1787   tcp->socket_ts_enabled = false;
1788   tcp->ts_capable = true;
1789   tcp->outgoing_buffer_arg = nullptr;
1790   if (tcp_tx_zerocopy_enabled && !tcp->tcp_zerocopy_send_ctx.memory_limited()) {
1791 #ifdef GRPC_LINUX_ERRQUEUE
1792     const int enable = 1;
1793     auto err =
1794         setsockopt(tcp->fd, SOL_SOCKET, SO_ZEROCOPY, &enable, sizeof(enable));
1795     if (err == 0) {
1796       tcp->tcp_zerocopy_send_ctx.set_enabled(true);
1797     } else {
1798       gpr_log(GPR_ERROR, "Failed to set zerocopy options on the socket.");
1799     }
1800 #endif
1801   }
1802   /* paired with unref in grpc_tcp_destroy */
1803   new (&tcp->refcount) grpc_core::RefCount(
1804       1, GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) ? "tcp" : nullptr);
1805   gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
1806   tcp->em_fd = em_fd;
1807   grpc_slice_buffer_init(&tcp->last_read_buffer);
1808   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
1809   grpc_resource_user_slice_allocator_init(
1810       &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
1811   grpc_resource_quota_unref_internal(resource_quota);
1812   gpr_mu_init(&tcp->tb_mu);
1813   tcp->tb_head = nullptr;
1814   GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
1815                     grpc_schedule_on_exec_ctx);
1816   if (grpc_event_engine_run_in_background()) {
1817     // If there is a polling engine always running in the background, there is
1818     // no need to run the backup poller.
1819     GRPC_CLOSURE_INIT(&tcp->write_done_closure, tcp_handle_write, tcp,
1820                       grpc_schedule_on_exec_ctx);
1821   } else {
1822     GRPC_CLOSURE_INIT(&tcp->write_done_closure,
1823                       tcp_drop_uncovered_then_handle_write, tcp,
1824                       grpc_schedule_on_exec_ctx);
1825   }
1826   /* Always assume there is something on the queue to read. */
1827   tcp->inq = 1;
1828 #ifdef GRPC_HAVE_TCP_INQ
1829   int one = 1;
1830   if (setsockopt(tcp->fd, SOL_TCP, TCP_INQ, &one, sizeof(one)) == 0) {
1831     tcp->inq_capable = true;
1832   } else {
1833     gpr_log(GPR_DEBUG, "cannot set inq fd=%d errno=%d", tcp->fd, errno);
1834     tcp->inq_capable = false;
1835   }
1836 #else
1837   tcp->inq_capable = false;
1838 #endif /* GRPC_HAVE_TCP_INQ */
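  // Editor's note (hedged): TCP_INQ is a Linux socket option (kernel 4.18+)
  // that makes recvmsg() attach a TCP_CM_INQ control message carrying the
  // number of bytes still queued in the receive buffer; the read path earlier
  // in this file uses that hint to size the next read. If the setsockopt()
  // above fails, inq_capable stays false and the hint is simply not used.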
1839   /* Start being notified on errors if event engine can track errors. */
1840   if (grpc_event_engine_can_track_errors()) {
1841     /* Grab a ref to tcp so that we can safely access the tcp struct when
1842      * processing errors. We unref when we no longer want to track errors
1843      * separately. */
1844     TCP_REF(tcp, "error-tracking");
1845     gpr_atm_rel_store(&tcp->stop_error_notification, 0);
1846     GRPC_CLOSURE_INIT(&tcp->error_closure, tcp_handle_error, tcp,
1847                       grpc_schedule_on_exec_ctx);
1848     grpc_fd_notify_on_error(tcp->em_fd, &tcp->error_closure);
1849   }
1850 
1851   return &tcp->base;
1852 }
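// Illustrative usage sketch (not part of the original file; assumes `em_fd`
// wraps a connected socket already registered with the polling engine). TX
// zerocopy is off by default and is enabled through the channel args parsed
// above, e.g.:
//
//   grpc_arg zerocopy_arg = grpc_channel_arg_integer_create(
//       const_cast<char*>(GRPC_ARG_TCP_TX_ZEROCOPY_ENABLED), 1);
//   grpc_channel_args args = {1, &zerocopy_arg};
//   grpc_endpoint* ep = grpc_tcp_create(em_fd, &args, "ipv4:127.0.0.1:4433");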
1853 
1854 int grpc_tcp_fd(grpc_endpoint* ep) {
1855   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1856   GPR_ASSERT(ep->vtable == &vtable);
1857   return grpc_fd_wrapped_fd(tcp->em_fd);
1858 }
1859 
1860 void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
1861                                      grpc_closure* done) {
1862   grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
1863   GPR_ASSERT(ep->vtable == &vtable);
1864   tcp->release_fd = fd;
1865   tcp->release_fd_cb = done;
1866   grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
1867   if (grpc_event_engine_can_track_errors()) {
1868     /* Stop errors notification. */
1869     ZerocopyDisableAndWaitForRemaining(tcp);
1870     gpr_atm_no_barrier_store(&tcp->stop_error_notification, true);
1871     grpc_fd_set_error(tcp->em_fd);
1872   }
1873   TCP_UNREF(tcp, "destroy");
1874 }
1875 
1876 #endif /* GRPC_POSIX_SOCKET_TCP */
1877