//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include "test/core/iomgr/endpoint_tests.h"

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/time.h>
#include <limits.h>
#include <stdbool.h>
#include <sys/types.h>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/util/crash.h"
#include "src/core/util/time.h"
#include "src/core/util/useful.h"
#include "test/core/test_util/test_config.h"
//
// General test notes:

// All tests which write data into an endpoint write i%256 into byte i, which
// is verified by readers.
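// For example, the first write produced by a test carries the bytes
// {0x00, 0x01, ..., 0xff, 0x00, 0x01, ...}, and a reader that has already
// verified N bytes expects the next byte to be N % 256, regardless of how the
// data was sliced or batched in transit.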

// In general there are a few interesting parameters to vary, each of which
// may exercise different codepaths in an implementation:
// 1. Total amount of data written to the endpoint
// 2. Size of slice allocations
// 3. Amount of data we read from or write to the endpoint at once

// The tests here parameterize these where applicable.

//
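// A grpc_endpoint_test_config wires a concrete endpoint implementation into
// this suite. A minimal sketch (create_my_fixture / clean_up_my_fixture are
// hypothetical helpers; the fixture must supply a connected client_ep /
// server_ep pair):
//
//   static grpc_endpoint_test_config config = {
//       "my_endpoint", create_my_fixture, clean_up_my_fixture};
//   grpc_endpoint_tests(config, pollset, mu);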

static gpr_mu* g_mu;
static grpc_pollset* g_pollset;

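// Verifies that `slices` carry the expected rolling i%256 byte pattern,
// continuing from *current_data, and returns the number of bytes checked.
// *current_data is advanced so verification can resume on the next read.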
size_t count_slices(grpc_slice* slices, size_t nslices, int* current_data) {
  size_t num_bytes = 0;
  size_t i;
  size_t j;
  unsigned char* buf;
  for (i = 0; i < nslices; ++i) {
    buf = GRPC_SLICE_START_PTR(slices[i]);
    for (j = 0; j < GRPC_SLICE_LENGTH(slices[i]); ++j) {
      CHECK(buf[j] == *current_data);
      *current_data = (*current_data + 1) % 256;
    }
    num_bytes += GRPC_SLICE_LENGTH(slices[i]);
  }
  return num_bytes;
}

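// Logs the test/config pair being run and asks the config to build a fresh
// fixture (a connected pair of endpoints) with the given slice size.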
static grpc_endpoint_test_fixture begin_test(grpc_endpoint_test_config config,
                                             const char* test_name,
                                             size_t slice_size) {
  LOG(INFO) << test_name << "/" << config.name;
  return config.create_fixture(slice_size);
}

static void end_test(grpc_endpoint_test_config config) { config.clean_up(); }

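// Allocates enough slices of at most slice_size bytes each to hold num_bytes,
// and fills them with the rolling byte pattern starting at *current_data. The
// caller takes ownership of the returned array (freed with gpr_free).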
static grpc_slice* allocate_blocks(size_t num_bytes, size_t slice_size,
                                   size_t* num_blocks, uint8_t* current_data) {
  size_t nslices = (num_bytes / slice_size) + (num_bytes % slice_size ? 1 : 0);
  grpc_slice* slices =
      static_cast<grpc_slice*>(gpr_malloc(sizeof(grpc_slice) * nslices));
  size_t num_bytes_left = num_bytes;
  size_t i;
  size_t j;
  unsigned char* buf;
  *num_blocks = nslices;

  for (i = 0; i < nslices; ++i) {
    slices[i] = grpc_slice_malloc(slice_size > num_bytes_left ? num_bytes_left
                                                              : slice_size);
    num_bytes_left -= GRPC_SLICE_LENGTH(slices[i]);
    buf = GRPC_SLICE_START_PTR(slices[i]);
    for (j = 0; j < GRPC_SLICE_LENGTH(slices[i]); ++j) {
      buf[j] = *current_data;
      (*current_data)++;
    }
  }
  CHECK_EQ(num_bytes_left, 0u);
  return slices;
}

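// Shared state for the bidirectional read/write test below. The write side
// pushes target_bytes bytes through write_ep while the read side drains and
// verifies them from read_ep; read_done/write_done are set (under g_mu) to
// 1 on error or 2 on success once each side finishes.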
struct read_and_write_test_state {
  grpc_core::Mutex ep_mu;  // Guards read_ep and write_ep.
  grpc_endpoint* read_ep;
  grpc_endpoint* write_ep;
  size_t target_bytes;
  size_t bytes_read;
  size_t current_write_size;
  size_t bytes_written;
  int current_read_data;
  uint8_t current_write_data;
  int read_done;
  int write_done;
  int max_write_frame_size;
  grpc_slice_buffer incoming;
  grpc_slice_buffer outgoing;
  grpc_closure done_read;
  grpc_closure done_write;
  grpc_closure read_scheduler;
  grpc_closure write_scheduler;
};

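// Issues the next read if more data is still expected and the read endpoint
// has not been destroyed; otherwise records completion and kicks the pollset
// so the main thread can observe read_done.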
static void read_scheduler(void* data, grpc_error_handle error) {
  struct read_and_write_test_state* state =
      static_cast<struct read_and_write_test_state*>(data);
  if (error.ok() && state->bytes_read < state->target_bytes) {
    grpc_core::MutexLock lock(&state->ep_mu);
    if (state->read_ep != nullptr) {
      grpc_endpoint_read(state->read_ep, &state->incoming, &state->done_read,
                         /*urgent=*/false, /*min_progress_size=*/1);
      return;
    }
  }
  VLOG(2) << "Read handler done";
  gpr_mu_lock(g_mu);
  state->read_done = 1 + error.ok();
  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr));
  gpr_mu_unlock(g_mu);
}

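// Completion callback for grpc_endpoint_read: tallies and verifies the bytes
// that just arrived, then defers the next read to read_scheduler.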
static void read_and_write_test_read_handler(void* data,
                                             grpc_error_handle error) {
  struct read_and_write_test_state* state =
      static_cast<struct read_and_write_test_state*>(data);
  if (error.ok()) {
    state->bytes_read +=
        count_slices(state->incoming.slices, state->incoming.count,
                     &state->current_read_data);
  }
  // We perform many reads one after another. If grpc_endpoint_read and the
  // read_handler are both run inline, we might end up growing the stack
  // beyond the limit. Schedule the read on ExecCtx to avoid this.
  grpc_core::ExecCtx::Run(DEBUG_LOCATION, &state->read_scheduler,
                          std::move(error));
}

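// Issues the next write if there is still data to send and the write endpoint
// has not been destroyed; otherwise records completion and kicks the pollset
// so the main thread can observe write_done.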
static void write_scheduler(void* data, grpc_error_handle error) {
  struct read_and_write_test_state* state =
      static_cast<struct read_and_write_test_state*>(data);
  if (error.ok() && state->current_write_size != 0) {
    grpc_core::MutexLock lock(&state->ep_mu);
    if (state->write_ep != nullptr) {
      grpc_endpoint_write(state->write_ep, &state->outgoing, &state->done_write,
                          nullptr,
                          /*max_frame_size=*/state->max_write_frame_size);
      return;
    }
  }
  VLOG(2) << "Write handler done";
  gpr_mu_lock(g_mu);
  state->write_done = 1 + error.ok();
  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr));
  gpr_mu_unlock(g_mu);
}

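// Completion callback for grpc_endpoint_write: accounts for the bytes just
// sent, prepares the next batch of patterned slices (in 8192-byte chunks),
// then defers the next write to write_scheduler.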
static void read_and_write_test_write_handler(void* data,
                                              grpc_error_handle error) {
  struct read_and_write_test_state* state =
      static_cast<struct read_and_write_test_state*>(data);
  if (error.ok()) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      size_t nslices;
      grpc_slice* slices =
          allocate_blocks(state->current_write_size, 8192, &nslices,
                          &state->current_write_data);
      grpc_slice_buffer_reset_and_unref(&state->outgoing);
      grpc_slice_buffer_addn(&state->outgoing, slices, nslices);
      gpr_free(slices);
    }
  }
  // We perform many writes one after another. If grpc_endpoint_write and
  // the write_handler are both run inline, we might end up growing the
  // stack beyond the limit. Schedule the write on ExecCtx to avoid this.
  grpc_core::ExecCtx::Run(DEBUG_LOCATION, &state->write_scheduler,
                          std::move(error));
}

// Do both reading and writing using the grpc_endpoint API.
// This also includes a test of the shutdown behavior.
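// In the shutdown variant, both endpoints are destroyed while a read and a
// write are still pending, so those callbacks are expected to complete with
// a non-OK status (read_done/write_done record 1 for the error path, 2 for
// success).
//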
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int max_write_frame_size,
                                bool shutdown) {
  struct read_and_write_test_state state;
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);
  grpc_core::ExecCtx exec_ctx;
  auto deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_seconds_to_deadline(300));
  VLOG(2) << "num_bytes=" << num_bytes << " write_size=" << write_size
          << " slice_size=" << slice_size << " shutdown=" << shutdown;

  if (shutdown) {
    LOG(INFO) << "Start read and write shutdown test";
  } else {
    LOG(INFO) << "Start read and write test with " << num_bytes
              << " bytes, slice size " << slice_size;
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.max_write_frame_size = max_write_frame_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;
  GRPC_CLOSURE_INIT(&state.read_scheduler, read_scheduler, &state,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&state.done_read, read_and_write_test_read_handler, &state,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&state.write_scheduler, write_scheduler, &state,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&state.done_write, read_and_write_test_write_handler,
                    &state, grpc_schedule_on_exec_ctx);
  grpc_slice_buffer_init(&state.outgoing);
  grpc_slice_buffer_init(&state.incoming);

  // Get started by pretending an initial write completed.
  // NOTE: Sets up initial conditions so we can have the same write handler
  // for the first iteration as for later iterations. It does the right thing
  // even when bytes_written is unsigned.
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, absl::OkStatus());
  grpc_core::ExecCtx::Get()->Flush();

  grpc_endpoint_read(state.read_ep, &state.incoming, &state.done_read,
                     /*urgent=*/false, /*min_progress_size=*/1);
  if (shutdown) {
    grpc_core::MutexLock lock(&state.ep_mu);
    VLOG(2) << "shutdown read";
    grpc_endpoint_destroy(state.read_ep);
    state.read_ep = nullptr;
    VLOG(2) << "shutdown write";
    grpc_endpoint_destroy(state.write_ep);
    state.write_ep = nullptr;
  }
  grpc_core::ExecCtx::Get()->Flush();

  gpr_mu_lock(g_mu);
  while (!state.read_done || !state.write_done) {
    grpc_pollset_worker* worker = nullptr;
    CHECK(grpc_core::Timestamp::Now() < deadline);
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
  }
  gpr_mu_unlock(g_mu);
  grpc_core::ExecCtx::Get()->Flush();

  end_test(config);
  grpc_slice_buffer_destroy(&state.outgoing);
  grpc_slice_buffer_destroy(&state.incoming);
  if (!shutdown) {
    grpc_endpoint_destroy(state.read_ep);
    grpc_endpoint_destroy(state.write_ep);
  }
}

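// Closure that increments *arg for every invocation that carries a non-OK
// status, kicking the pollset so wait_for_fail_count can re-check.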
static void inc_on_failure(void* arg, grpc_error_handle error) {
  gpr_mu_lock(g_mu);
  *static_cast<int*>(arg) += (!error.ok());
  CHECK(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, nullptr)));
  gpr_mu_unlock(g_mu);
}

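// Polls (for up to 10 seconds) until *fail_count reaches want_fail_count,
// then asserts that exactly that many failures were observed.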
static void wait_for_fail_count(int* fail_count, int want_fail_count) {
  grpc_core::ExecCtx::Get()->Flush();
  gpr_mu_lock(g_mu);
  grpc_core::Timestamp deadline = grpc_core::Timestamp::FromTimespecRoundUp(
      grpc_timeout_seconds_to_deadline(10));
  while (grpc_core::Timestamp::Now() < deadline &&
         *fail_count < want_fail_count) {
    grpc_pollset_worker* worker = nullptr;
    CHECK(GRPC_LOG_IF_ERROR("pollset_work",
                            grpc_pollset_work(g_pollset, &worker, deadline)));
    gpr_mu_unlock(g_mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(g_mu);
  }
  CHECK(*fail_count == want_fail_count);
  gpr_mu_unlock(g_mu);
}

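// Entry point for the suite: installs the shared pollset/mutex, runs the
// large-transfer and shutdown variants across a sweep of
// max_write_frame_size values (1, 10, ..., 10000), then a sweep of small,
// equal write/slice/frame sizes from 1 up to 1000.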
void grpc_endpoint_tests(grpc_endpoint_test_config config,
                         grpc_pollset* pollset, gpr_mu* mu) {
  g_pollset = pollset;
  g_mu = mu;
  for (int i = 1; i <= 10000; i = i * 10) {
    read_and_write_test(config, 10000000, 100000, 8192, i, false);
    read_and_write_test(config, 1000000, 100000, 1, i, false);
    read_and_write_test(config, 100000000, 100000, 1, i, true);
  }
  for (size_t i = 1; i < 1000; i = std::max(i + 1, i * 5 / 4)) {
    read_and_write_test(config, 40320, i, i, i, false);
  }
  g_pollset = nullptr;
  g_mu = nullptr;
}
