//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include "absl/time/time.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/util/notification.h"
#include "src/core/util/time.h"

// This test won't work except with posix sockets enabled
#ifdef GRPC_POSIX_SOCKET_TCP

#include <errno.h>
#include <fcntl.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/time.h>
#include <gtest/gtest.h>
#include <limits.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "src/core/lib/event_engine/channel_args_endpoint_config.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/event_engine/posix.h"
#include "src/core/lib/event_engine/shim.h"
#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/event_engine_shims/endpoint.h"
#include "src/core/lib/iomgr/sockaddr_posix.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/util/crash.h"
#include "src/core/util/useful.h"
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/test_util/test_config.h"

static gpr_mu* g_mu;
static grpc_pollset* g_pollset;

static constexpr int64_t kDeadlineMillis = 20000;

//
// General test notes:

// All tests which write data into a socket write i%256 into byte i, which is
// verified by readers.

// In general there are a few interesting things to vary which may lead to
// exercising different codepaths in an implementation:
// 1. Total amount of data written to the socket
// 2. Size of slice allocations
// 3. Amount of data we read from or write to the socket at once

// The tests here tend to parameterize these where applicable.

//

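// Creates a connected AF_UNIX socket pair and puts both ends into
// non-blocking mode.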
static void create_sockets(int sv[2]) {
  int flags;
  CHECK_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, sv), 0);
  flags = fcntl(sv[0], F_GETFL, 0);
  CHECK_EQ(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK), 0);
  flags = fcntl(sv[1], F_GETFL, 0);
  CHECK_EQ(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK), 0);
}

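// Writes the repeating i%256 byte pattern into fd until the kernel socket
// buffer is full (write() fails with EAGAIN); returns the total bytes written.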
static ssize_t fill_socket(int fd) {
  ssize_t write_bytes;
  ssize_t total_bytes = 0;
  int i;
  unsigned char buf[256];
  for (i = 0; i < 256; ++i) {
    buf[i] = static_cast<uint8_t>(i);
  }
  do {
    write_bytes = write(fd, buf, 256);
    if (write_bytes > 0) {
      total_bytes += write_bytes;
    }
  } while (write_bytes >= 0 || errno == EINTR);
  CHECK(errno == EAGAIN);
  return total_bytes;
}

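// Writes up to `bytes` bytes of the i%256 pattern to fd, stopping early if the
// socket buffer fills up; returns the number of bytes actually written.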
static size_t fill_socket_partial(int fd, size_t bytes) {
  ssize_t write_bytes;
  size_t total_bytes = 0;
  unsigned char* buf = static_cast<unsigned char*>(gpr_malloc(bytes));
  unsigned i;
  for (i = 0; i < bytes; ++i) {
    buf[i] = static_cast<uint8_t>(i % 256);
  }

  do {
    write_bytes = write(fd, buf + total_bytes, bytes - total_bytes);
    if (write_bytes > 0) {
      total_bytes += static_cast<size_t>(write_bytes);
    }
  } while ((write_bytes >= 0 || errno == EINTR) && bytes > total_bytes);

  gpr_free(buf);

  return total_bytes;
}

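// Per-read-test state shared between the test driver and read_cb: the
// endpoint under test, progress so far, and the total bytes expected.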
struct read_socket_state {
  grpc_endpoint* ep;
  size_t min_progress_size;
  size_t read_bytes;
  size_t target_read_bytes;
  grpc_slice_buffer incoming;
  grpc_closure read_cb;
};

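// Verifies that the slices carry the expected i%256 pattern (continuing from
// *current_data) and returns the total number of bytes they contain.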
static size_t count_slices(grpc_slice* slices, size_t nslices,
                           int* current_data) {
  size_t num_bytes = 0;
  unsigned i, j;
  unsigned char* buf;
  for (i = 0; i < nslices; ++i) {
    buf = GRPC_SLICE_START_PTR(slices[i]);
    for (j = 0; j < GRPC_SLICE_LENGTH(slices[i]); ++j) {
      CHECK(buf[j] == *current_data);
      *current_data = (*current_data + 1) % 256;
    }
    num_bytes += GRPC_SLICE_LENGTH(slices[i]);
  }
  return num_bytes;
}

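// Read-completion callback: validates the bytes received so far and either
// kicks the pollset (when the target has been reached) or issues another read.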
static void read_cb(void* user_data, grpc_error_handle error) {
  struct read_socket_state* state =
      static_cast<struct read_socket_state*>(user_data);
  size_t read_bytes;
  int current_data;

  CHECK_OK(error);

  gpr_mu_lock(g_mu);
  current_data = state->read_bytes % 256;
  // The number of bytes read each time this callback is invoked must be >=
  // the min_progress_size.
  if (grpc_core::IsTcpFrameSizeTuningEnabled()) {
    CHECK(state->min_progress_size <= state->incoming.length);
  }
  read_bytes = count_slices(state->incoming.slices, state->incoming.count,
                            &current_data);
  state->read_bytes += read_bytes;
  LOG(INFO) << "Read " << read_bytes << " bytes of "
            << state->target_read_bytes;
  if (state->read_bytes >= state->target_read_bytes) {
    CHECK(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, nullptr)));
    gpr_mu_unlock(g_mu);
  } else {
    gpr_mu_unlock(g_mu);
    state->min_progress_size = state->target_read_bytes - state->read_bytes;
    grpc_endpoint_read(state->ep, &state->incoming, &state->read_cb,
                       /*urgent=*/false, state->min_progress_size);
  }
}

// Write to a socket, then read from it using the grpc_tcp API.
static void read_test(size_t num_bytes, size_t slice_size,
                      int min_progress_size) {
  int sv[2];
  grpc_endpoint* ep;
  struct read_socket_state state;
  size_t written_bytes;
  grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_milliseconds_to_deadline(kDeadlineMillis));
  grpc_core::ExecCtx exec_ctx;

  LOG(INFO) << "Read test of size " << num_bytes << ", slice size "
            << slice_size;

  create_sockets(sv);

  grpc_arg a[2];
  a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
  a[0].type = GRPC_ARG_INTEGER;
  a[0].value.integer = static_cast<int>(slice_size);
  a[1].key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  a[1].type = GRPC_ARG_POINTER;
  a[1].value.pointer.p = grpc_resource_quota_create("test");
  a[1].value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
  ep = grpc_tcp_create(
      grpc_fd_create(sv[1], "read_test", false),
      TcpOptionsFromEndpointConfig(
          grpc_event_engine::experimental::ChannelArgsEndpointConfig(
              grpc_core::ChannelArgs::FromC(&args))),
      "test");
  grpc_endpoint_add_to_pollset(ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  LOG(INFO) << "Wrote " << written_bytes << " bytes";

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  state.min_progress_size =
      std::min(min_progress_size, static_cast<int>(written_bytes));
  grpc_slice_buffer_init(&state.incoming);
  GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(ep, &state.incoming, &state.read_cb, /*urgent=*/false,
                     /*min_progress_size=*/state.min_progress_size);
  grpc_core::ExecCtx::Get()->Flush();
  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker* worker = nullptr;
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
    gpr_mu_unlock(g_mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(g_mu);
  }
  CHECK(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(ep);
  grpc_resource_quota_unref(
      static_cast<grpc_resource_quota*>(a[1].value.pointer.p));
}

// Write to a socket until it fills up, then read from it using the grpc_tcp
// API.
static void large_read_test(size_t slice_size, int min_progress_size) {
  int sv[2];
  grpc_endpoint* ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_milliseconds_to_deadline(kDeadlineMillis));
  grpc_core::ExecCtx exec_ctx;

  LOG(INFO) << "Start large read test, slice size " << slice_size;

  create_sockets(sv);

  grpc_arg a[2];
  a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
  a[0].type = GRPC_ARG_INTEGER;
  a[0].value.integer = static_cast<int>(slice_size);
  a[1].key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  a[1].type = GRPC_ARG_POINTER;
  a[1].value.pointer.p = grpc_resource_quota_create("test");
  a[1].value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
  ep = grpc_tcp_create(
      grpc_fd_create(sv[1], "large_read_test", false),
      TcpOptionsFromEndpointConfig(
          grpc_event_engine::experimental::ChannelArgsEndpointConfig(
              grpc_core::ChannelArgs::FromC(&args))),
      "test");
  grpc_endpoint_add_to_pollset(ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  LOG(INFO) << "Wrote " << written_bytes << " bytes";

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = static_cast<size_t>(written_bytes);
  state.min_progress_size =
      std::min(min_progress_size, static_cast<int>(written_bytes));
  grpc_slice_buffer_init(&state.incoming);
  GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(ep, &state.incoming, &state.read_cb, /*urgent=*/false,
                     /*min_progress_size=*/state.min_progress_size);
  grpc_core::ExecCtx::Get()->Flush();
  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker* worker = nullptr;
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
    gpr_mu_unlock(g_mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(g_mu);
  }
  CHECK(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(ep);
  grpc_resource_quota_unref(
      static_cast<grpc_resource_quota*>(a[1].value.pointer.p));
}

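// State shared between write_done and the test driver: the endpoint under
// test and a flag signalling that the asynchronous write has completed.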
struct write_socket_state {
  grpc_endpoint* ep;
  int write_done;
};

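// Allocates enough slices of at most slice_size bytes to hold num_bytes,
// fills them with the running i%256 pattern, and reports the slice count.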
static grpc_slice* allocate_blocks(size_t num_bytes, size_t slice_size,
                                   size_t* num_blocks, uint8_t* current_data) {
  size_t nslices =
      (num_bytes / slice_size) + (num_bytes % slice_size ? 1u : 0u);
  grpc_slice* slices =
      static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice) * nslices));
  size_t num_bytes_left = num_bytes;
  unsigned i, j;
  unsigned char* buf;
  *num_blocks = nslices;

  for (i = 0; i < nslices; ++i) {
    slices[i] = grpc_slice_malloc(slice_size > num_bytes_left ? num_bytes_left
                                                              : slice_size);
    num_bytes_left -= GRPC_SLICE_LENGTH(slices[i]);
    buf = GRPC_SLICE_START_PTR(slices[i]);
    for (j = 0; j < GRPC_SLICE_LENGTH(slices[i]); ++j) {
      buf[j] = *current_data;
      (*current_data)++;
    }
  }
  CHECK_EQ(num_bytes_left, 0);
  return slices;
}

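// Write-completion callback: marks the write as done and kicks the pollset so
// the test driver can observe the state change.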
static void write_done(void* user_data /* write_socket_state */,
                       grpc_error_handle error) {
  CHECK_OK(error);
  struct write_socket_state* state =
      static_cast<struct write_socket_state*>(user_data);
  gpr_mu_lock(g_mu);
  state->write_done = 1;
  CHECK(
      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
  gpr_mu_unlock(g_mu);
}

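// Temporarily puts fd into blocking mode and reads num_bytes from it in
// chunks of at most read_size, verifying the i%256 pattern and running the
// pollset between reads.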
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
  unsigned char* buf = static_cast<unsigned char*>(gpr_malloc(read_size));
  ssize_t bytes_read;
  size_t bytes_left = num_bytes;
  int flags;
  int current = 0;
  int i;
  grpc_core::ExecCtx exec_ctx;

  flags = fcntl(fd, F_GETFL, 0);
  CHECK_EQ(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK), 0);

  for (;;) {
    grpc_pollset_worker* worker = nullptr;
    gpr_mu_lock(g_mu);
    CHECK(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(g_pollset, &worker,
                          grpc_core::Timestamp::FromTimespecRoundUp(
                              grpc_timeout_milliseconds_to_deadline(10)))));
    gpr_mu_unlock(g_mu);

    do {
      bytes_read =
          read(fd, buf, bytes_left > read_size ? read_size : bytes_left);
    } while (bytes_read < 0 && errno == EINTR);
    CHECK_GE(bytes_read, 0);
    for (i = 0; i < bytes_read; ++i) {
      CHECK(buf[i] == current);
      current = (current + 1) % 256;
    }
    bytes_left -= static_cast<size_t>(bytes_read);
    if (bytes_left == 0) break;
  }
  flags = fcntl(fd, F_GETFL, 0);
  CHECK_EQ(fcntl(fd, F_SETFL, flags | O_NONBLOCK), 0);

  gpr_free(buf);
}

// Write to a socket using the grpc_tcp API, then drain it directly.
// Note that if the write does not complete immediately we need to drain the
// socket in parallel with the read.
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint* ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice* slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_milliseconds_to_deadline(kDeadlineMillis));
  grpc_core::ExecCtx exec_ctx;

  LOG(INFO) << "Start write test with " << num_bytes << " bytes, slice size "
            << slice_size;

  create_sockets(sv);

  grpc_arg a[2];
  a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
  a[0].type = GRPC_ARG_INTEGER;
  a[0].value.integer = static_cast<int>(slice_size);
  a[1].key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  a[1].type = GRPC_ARG_POINTER;
  a[1].value.pointer.p = grpc_resource_quota_create("test");
  a[1].value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
  ep = grpc_tcp_create(
      grpc_fd_create(sv[1], "write_test", false),
      TcpOptionsFromEndpointConfig(
          grpc_event_engine::experimental::ChannelArgsEndpointConfig(
              grpc_core::ChannelArgs::FromC(&args))),
      "test");
  grpc_endpoint_add_to_pollset(ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  GRPC_CLOSURE_INIT(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(ep, &outgoing, &write_done_closure, nullptr,
                      /*max_frame_size=*/INT_MAX);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  exec_ctx.Flush();
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker* worker = nullptr;
    if (state.write_done) {
      break;
    }
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
    gpr_mu_unlock(g_mu);
    exec_ctx.Flush();
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
  grpc_resource_quota_unref(
      static_cast<grpc_resource_quota*>(a[1].value.pointer.p));
}

struct release_fd_arg {
  std::atomic<int> fd_released_done{0};
  grpc_core::Notification notify;
};

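// Callback invoked once grpc_tcp_destroy_and_release_fd has handed the fd
// back to the caller; records completion and notifies the waiting test.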
void on_fd_released(void* arg, grpc_error_handle /*errors*/) {
  release_fd_arg* rel_fd = static_cast<release_fd_arg*>(arg);
  rel_fd->fd_released_done = 1;
  rel_fd->notify.Notify();
}

// Do a read_test, then release fd and try to read/write again. Verify that
// grpc_tcp_fd() is available before the fd is released.
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint* ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_milliseconds_to_deadline(kDeadlineMillis));
  grpc_core::ExecCtx exec_ctx;
  grpc_closure fd_released_cb;
  release_fd_arg rel_fd;
  GRPC_CLOSURE_INIT(&fd_released_cb, &on_fd_released, &rel_fd,
                    grpc_schedule_on_exec_ctx);

  LOG(INFO) << "Release fd read_test of size " << num_bytes << ", slice size "
            << slice_size;

  create_sockets(sv);

  grpc_arg a[2];
  a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
  a[0].type = GRPC_ARG_INTEGER;
  a[0].value.integer = static_cast<int>(slice_size);
  a[1].key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  a[1].type = GRPC_ARG_POINTER;
  a[1].value.pointer.p = grpc_resource_quota_create("test");
  a[1].value.pointer.vtable = grpc_resource_quota_arg_vtable();
  auto memory_quota = std::make_unique<grpc_core::MemoryQuota>("bar");
  grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
  if (grpc_event_engine::experimental::UseEventEngineListener()) {
    // Create an event engine wrapped endpoint to test release_fd operations.
    auto eeep =
        reinterpret_cast<
            grpc_event_engine::experimental::PosixEventEngineWithFdSupport*>(
            grpc_event_engine::experimental::GetDefaultEventEngine().get())
            ->CreatePosixEndpointFromFd(
                sv[1],
                grpc_event_engine::experimental::ChannelArgsEndpointConfig(
                    grpc_core::ChannelArgs::FromC(&args)),
                memory_quota->CreateMemoryAllocator("test"));
    ep = grpc_event_engine::experimental::grpc_event_engine_endpoint_create(
        std::move(eeep));
  } else {
    ep = grpc_tcp_create(
        grpc_fd_create(sv[1], "read_test", false),
        TcpOptionsFromEndpointConfig(
            grpc_event_engine::experimental::ChannelArgsEndpointConfig(
                grpc_core::ChannelArgs::FromC(&args))),
        "test");
    CHECK(grpc_tcp_fd(ep) == sv[1]);
    CHECK_GE(sv[1], 0);
  }
  grpc_endpoint_add_to_pollset(ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  LOG(INFO) << "Wrote " << written_bytes << " bytes";

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  state.min_progress_size = 1;
  grpc_slice_buffer_init(&state.incoming);
  GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(ep, &state.incoming, &state.read_cb, /*urgent=*/false,
                     /*min_progress_size=*/state.min_progress_size);
  grpc_core::ExecCtx::Get()->Flush();
  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker* worker = nullptr;
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
    VLOG(2) << "wakeup: read=" << state.read_bytes
            << " target=" << state.target_read_bytes;
    gpr_mu_unlock(g_mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(g_mu);
  }
  CHECK(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy(&state.incoming);
  grpc_tcp_destroy_and_release_fd(ep, &fd, &fd_released_cb);
  grpc_core::ExecCtx::Get()->Flush();
  rel_fd.notify.WaitForNotificationWithTimeout(absl::Seconds(20));
  CHECK_EQ(rel_fd.fd_released_done, 1);
  CHECK(fd == sv[1]);
  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
  grpc_resource_quota_unref(
      static_cast<grpc_resource_quota*>(a[1].value.pointer.p));
}

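// Runs the read/write/release-fd tests across a range of transfer sizes,
// slice sizes, and min_progress_size values to exercise different codepaths.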
void run_tests(void) {
  size_t i = 0;
  for (int i = 1; i <= 8192; i = i * 2) {
    read_test(100, 8192, i);
    read_test(10000, 8192, i);
    read_test(10000, 137, i);
    read_test(10000, 1, i);
    large_read_test(8192, i);
    large_read_test(1, i);
  }
  write_test(100, 8192);
  write_test(100, 1);
  write_test(100000, 8192);
  write_test(100000, 1);
  write_test(100000, 137);

  for (i = 1; i < 1000; i = std::max(i + 1, i * 5 / 4)) {
    write_test(40320, i);
  }

  release_fd_test(100, 8192);
}

static void clean_up(void) {}

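// Builds the grpc_endpoint_tests fixture: a socketpair wrapped in client and
// server grpc_tcp endpoints, both registered with the shared pollset.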
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_core::ExecCtx exec_ctx;

  create_sockets(sv);
  grpc_arg a[2];
  a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
  a[0].type = GRPC_ARG_INTEGER;
  a[0].value.integer = static_cast<int>(slice_size);
  a[1].key = const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA);
  a[1].type = GRPC_ARG_POINTER;
  a[1].value.pointer.p = grpc_resource_quota_create("test");
  a[1].value.pointer.vtable = grpc_resource_quota_arg_vtable();
  grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
  f.client_ep = grpc_tcp_create(
      grpc_fd_create(sv[0], "fixture:client", false),
      TcpOptionsFromEndpointConfig(
          grpc_event_engine::experimental::ChannelArgsEndpointConfig(
              grpc_core::ChannelArgs::FromC(&args))),
      "test");
  f.server_ep = grpc_tcp_create(
      grpc_fd_create(sv[1], "fixture:server", false),
      TcpOptionsFromEndpointConfig(
          grpc_event_engine::experimental::ChannelArgsEndpointConfig(
              grpc_core::ChannelArgs::FromC(&args))),
      "test");
  grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);
  grpc_endpoint_add_to_pollset(f.server_ep, g_pollset);
  grpc_resource_quota_unref(
      static_cast<grpc_resource_quota*>(a[1].value.pointer.p));

  return f;
}

static grpc_endpoint_test_config configs[] = {
    {"tcp/tcp_socketpair", create_fixture_tcp_socketpair, clean_up},
};

static void destroy_pollset(void* p, grpc_error_handle /*error*/) {
  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
}

int main(int argc, char** argv) {
  grpc_closure destroyed;
  ::testing::InitGoogleTest(&argc, argv);
  grpc::testing::TestEnvironment env(&argc, argv);
  grpc_init();
  {
    grpc_core::ExecCtx exec_ctx;
    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
    grpc_pollset_init(g_pollset, &g_mu);
    grpc_endpoint_tests(configs[0], g_pollset, g_mu);
    run_tests();
    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                      grpc_schedule_on_exec_ctx);
    grpc_pollset_shutdown(g_pollset, &destroyed);

    grpc_core::ExecCtx::Get()->Flush();
  }
  grpc_shutdown();
  gpr_free(g_pollset);

  return 0;
}

#else  // GRPC_POSIX_SOCKET_TCP

int main(int argc, char** argv) { return 1; }

#endif  // GRPC_POSIX_SOCKET_TCP