/* SPDX-License-Identifier: MIT */
/*
 * Description: generic tests for io_uring drain io
 *
 * The main idea is to randomly generate different types of sqes to
 * challenge the drain logic. There are some restrictions for the
 * generated sqes, details in the io_uring mailing list:
 * https://lore.kernel.org/io-uring/39a49b4c-27c2-1035-b250-51daeccaab9b@linux.alibaba.com/
 *
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <poll.h>

#include "liburing.h"
#include "helpers.h"

enum {
	multi,
	single,
	nop,
	cancel,
	op_last,
};

struct sqe_info {
	__u8 op;
	unsigned flags;
};

#define max_entry 50

/*
 * sqe_flags: combinations of sqe flags
 * multi_sqes: records the user_data/index of all the multishot sqes
 * cnt: how many entries there are in multi_sqes
 *      we can leverage the multi_sqes array for cancellation: we randomly
 *      pick an entry in multi_sqes when forming a cancellation sqe
 * multi_cap: cap on the number of multishot sqes
 */
static const unsigned sqe_flags[4] = {
	0,
	IOSQE_IO_LINK,
	IOSQE_IO_DRAIN,
	IOSQE_IO_LINK | IOSQE_IO_DRAIN
};
static int multi_sqes[max_entry], cnt = 0;
static int multi_cap = max_entry / 5;

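/*
 * Pipe helpers: write_pipe() writes a 3-byte token to the write end
 * (retrying on EINTR) so that a poll on the read end becomes ready;
 * read_pipe() drains those bytes again so the pipe can be reused.
 */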
static int write_pipe(int pipe, char *str)
{
	int ret;
	do {
		errno = 0;
		ret = write(pipe, str, 3);
	} while (ret == -1 && errno == EINTR);
	return ret;
}

static void read_pipe(int pipe)
{
	char str[4] = {0};
	int ret;

	ret = read(pipe, &str, 3);
	if (ret < 0)
		perror("read");
}

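/*
 * Make a pending poll request on p[0] ready: write to the pipe, give the
 * kernel a moment to post the completion, reap any pending events, then
 * drain the pipe so a multishot poll can trigger again later.
 */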
static int trigger_event(struct io_uring *ring, int p[])
{
	int ret;
	if ((ret = write_pipe(p[1], "foo")) != 3) {
		fprintf(stderr, "bad write return %d\n", ret);
		return 1;
	}
	usleep(1000);
	io_uring_get_events(ring);
	read_pipe(p[0]);
	return 0;
}

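/*
 * Prepare one sqe according to the generated opcode: a multishot or
 * oneshot poll on the given pipe fd, a nop, or a poll_remove keyed by
 * the user_data of an earlier multishot poll, then apply the sqe flags.
 */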
static void io_uring_sqe_prep(int op, struct io_uring_sqe *sqe,
			      unsigned sqe_flags, int arg)
{
	switch (op) {
	case multi:
		io_uring_prep_poll_add(sqe, arg, POLLIN);
		sqe->len |= IORING_POLL_ADD_MULTI;
		break;
	case single:
		io_uring_prep_poll_add(sqe, arg, POLLIN);
		break;
	case nop:
		io_uring_prep_nop(sqe);
		break;
	case cancel:
		io_uring_prep_poll_remove(sqe, arg);
		break;
	}
	sqe->flags = sqe_flags;
}

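/*
 * Randomly pick a flag combination for an sqe. A drain sqe is only
 * generated while no multishot sqe is outstanding, a cancel sqe never
 * carries drain/link flags, and a multishot sqe never starts a link.
 */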
static __u8 generate_flags(int sqe_op)
{
	__u8 flags = 0;
	/*
	 * a drain sqe must only be issued after all previous multishot
	 * sqes have been canceled
	 */
	do {
		flags = sqe_flags[rand() % 4];
	} while ((flags & IOSQE_IO_DRAIN) && cnt);

	/*
	 * a cancel req cannot have drain or link flags
	 */
	if (sqe_op == cancel) {
		flags &= ~(IOSQE_IO_DRAIN | IOSQE_IO_LINK);
	}
	/*
	 * avoid the case below:
	 * sqe0(multishot, link)->sqe1(nop, link)->sqe2(nop)->sqe3(cancel_sqe0)
	 * sqe3 may execute before sqe0 so that sqe0 isn't canceled
	 */
	if (sqe_op == multi)
		flags &= ~IOSQE_IO_LINK;

	return flags;
}

/*
 * function to generate the opcode of an sqe
 * several restrictions here:
 * - cancel all the previous multishot sqes as soon as possible when
 *   we reach the high watermark
 * - ensure there is some multishot sqe when generating a cancel sqe
 * - ensure a cancel/multishot sqe is not in a link chain
 * - ensure the number of multishot sqes doesn't exceed multi_cap
 * - don't generate multishot sqes after the high watermark
 */
static int generate_opcode(int i, int pre_flags)
{
	int sqe_op;
	int high_watermark = max_entry - max_entry / 5;
	bool retry0 = false, retry1 = false, retry2 = false;

	if ((i >= high_watermark) && cnt) {
		sqe_op = cancel;
	} else {
		do {
			sqe_op = rand() % op_last;
			retry0 = (sqe_op == cancel) && (!cnt || (pre_flags & IOSQE_IO_LINK));
			retry1 = (sqe_op == multi) && ((multi_cap - 1 < 0) || i >= high_watermark);
			retry2 = (sqe_op == multi) && (pre_flags & IOSQE_IO_LINK);
		} while (retry0 || retry1 || retry2);
	}

	if (sqe_op == multi)
		multi_cap--;
	return sqe_op;
}

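/*
 * Bookkeeping for outstanding multishot sqes: add_multishot_sqe() records
 * the index (== user_data) of a newly generated multishot poll, and
 * remove_multishot_sqe() picks a random recorded entry, removes it by
 * swapping in the last element, and returns it as the cancel target.
 */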
static inline void add_multishot_sqe(int index)
{
	multi_sqes[cnt++] = index;
}

static int remove_multishot_sqe(void)
{
	int rem_index = rand() % cnt;
	int ret;

	ret = multi_sqes[rem_index];
	multi_sqes[rem_index] = multi_sqes[cnt - 1];
	cnt--;

	return ret;
}

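/*
 * Randomized drain test: fill the ring with a mix of multishot polls,
 * oneshot polls, nops and poll cancellations carrying random link/drain
 * flags, trigger the poll events, then walk the reaped completions and
 * verify that every drain sqe completed only after all sqes submitted
 * before it had fully completed.
 */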
static int test_generic_drain(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe[max_entry];
	struct sqe_info si[max_entry];
	int cqe_data[max_entry << 1], cqe_res[max_entry << 1];
	int i, j, ret, arg = 0;
	int pipes[max_entry][2];
	int pre_flags = 0;

	for (i = 0; i < max_entry; i++) {
		if (pipe(pipes[i]) != 0) {
			perror("pipe");
			return 1;
		}
	}

	srand((unsigned)time(NULL));
	for (i = 0; i < max_entry; i++) {
		sqe[i] = io_uring_get_sqe(ring);
		if (!sqe[i]) {
			printf("get sqe failed\n");
			goto err;
		}

		int sqe_op = generate_opcode(i, pre_flags);
		__u8 flags = generate_flags(sqe_op);

		if (sqe_op == cancel)
			arg = remove_multishot_sqe();
		if (sqe_op == multi || sqe_op == single)
			arg = pipes[i][0];
		io_uring_sqe_prep(sqe_op, sqe[i], flags, arg);
		sqe[i]->user_data = i;
		si[i].op = sqe_op;
		si[i].flags = flags;
		pre_flags = flags;
		if (sqe_op == multi)
			add_multishot_sqe(i);
	}

	ret = io_uring_submit(ring);
	if (ret < 0) {
		printf("sqe submit failed\n");
		goto err;
	} else if (ret < max_entry) {
		printf("Submitted only %d\n", ret);
		goto err;
	}

	sleep(1);
	// TODO: randomize event triggering order
	for (i = 0; i < max_entry; i++) {
		if (si[i].op != multi && si[i].op != single)
			continue;

		if (trigger_event(ring, pipes[i]))
			goto err;
	}
	sleep(1);
	i = 0;
	while (!io_uring_peek_cqe(ring, &cqe)) {
		cqe_data[i] = cqe->user_data;
		cqe_res[i++] = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	}

	/*
	 * compl_bits is a bitmap that records completions.
	 * e.g. if sqe[0], sqe[1], sqe[2] have fully completed,
	 * then compl_bits is 000...00111b
	 */
	unsigned long long compl_bits = 0;
	for (j = 0; j < i; j++) {
		int index = cqe_data[j];
		if ((si[index].flags & IOSQE_IO_DRAIN) && index) {
			if ((~compl_bits) & ((1ULL << index) - 1)) {
				printf("drain failed\n");
				goto err;
			}
		}
		/*
		 * for multishot sqes, record them only when they are canceled
		 */
		if ((si[index].op != multi) || (cqe_res[j] == -ECANCELED))
			compl_bits |= (1ULL << index);
	}

	return 0;
err:
	return 1;
}

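/*
 * Deterministic drain test: arm a multishot poll and a oneshot poll,
 * trigger the multishot twice and the oneshot once, then submit a
 * poll_remove for the multishot followed by a drained nop. Six CQEs are
 * expected in total, and the drained nop (user_data 3) must be the last
 * one to complete.
 */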
static int test_simple_drain(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe[2];
	int i, ret;
	int pipe1[2], pipe2[2];

	if (pipe(pipe1) != 0 || pipe(pipe2) != 0) {
		perror("pipe");
		return 1;
	}

	for (i = 0; i < 2; i++) {
		sqe[i] = io_uring_get_sqe(ring);
		if (!sqe[i]) {
			printf("get sqe failed\n");
			goto err;
		}
	}

	io_uring_prep_poll_multishot(sqe[0], pipe1[0], POLLIN);
	sqe[0]->user_data = 0;

	io_uring_prep_poll_add(sqe[1], pipe2[0], POLLIN);
	sqe[1]->user_data = 1;

	/*
	 * This test relies on multishot poll to trigger events continually.
	 * However, with IORING_SETUP_DEFER_TASKRUN this only happens when
	 * triggered with a get_events. Hence we sprinkle get_events whenever
	 * there might be work to process in order to get the same result.
	 */
	ret = io_uring_submit_and_get_events(ring);
	if (ret < 0) {
		printf("sqe submit failed\n");
		goto err;
	} else if (ret < 2) {
		printf("Submitted only %d\n", ret);
		goto err;
	}

	for (i = 0; i < 2; i++) {
		if (trigger_event(ring, pipe1))
			goto err;
	}
	if (trigger_event(ring, pipe2))
		goto err;

	for (i = 0; i < 2; i++) {
		sqe[i] = io_uring_get_sqe(ring);
		if (!sqe[i]) {
			printf("get sqe failed\n");
			goto err;
		}
	}

	io_uring_prep_poll_remove(sqe[0], 0);
	sqe[0]->user_data = 2;

	io_uring_prep_nop(sqe[1]);
	sqe[1]->flags |= IOSQE_IO_DRAIN;
	sqe[1]->user_data = 3;

	ret = io_uring_submit(ring);
	if (ret < 0) {
		printf("sqe submit failed\n");
		goto err;
	} else if (ret < 2) {
		printf("Submitted only %d\n", ret);
		goto err;
	}

	for (i = 0; i < 6; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0) {
			printf("wait completion %d\n", ret);
			goto err;
		}
		if ((i == 5) && (cqe->user_data != 3))
			goto err;
		io_uring_cqe_seen(ring, cqe);
	}

	close(pipe1[0]);
	close(pipe1[1]);
	close(pipe2[0]);
	close(pipe2[1]);
	return 0;
err:
	return 1;
}

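/*
 * Run both drain tests a few times on one ring, optionally set up with
 * IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN.
 */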
static int test(bool defer_taskrun)
{
	struct io_uring ring;
	int i, ret;
	unsigned int flags = 0;

	if (defer_taskrun)
		flags = IORING_SETUP_SINGLE_ISSUER |
			IORING_SETUP_DEFER_TASKRUN;

	ret = io_uring_queue_init(1024, &ring, flags);
	if (ret) {
		printf("ring setup failed\n");
		return T_EXIT_FAIL;
	}

	for (i = 0; i < 5; i++) {
		ret = test_simple_drain(&ring);
		if (ret) {
			fprintf(stderr, "test_simple_drain failed\n");
			return T_EXIT_FAIL;
		}
	}

	for (i = 0; i < 5; i++) {
		ret = test_generic_drain(&ring);
		if (ret) {
			fprintf(stderr, "test_generic_drain failed\n");
			return T_EXIT_FAIL;
		}
	}

	io_uring_queue_exit(&ring);

	return T_EXIT_PASS;
}

int main(int argc, char *argv[])
{
	int ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	ret = test(false);
	if (ret != T_EXIT_PASS) {
		fprintf(stderr, "%s: test(false) failed\n", argv[0]);
		return ret;
	}

	if (t_probe_defer_taskrun()) {
		ret = test(true);
		if (ret != T_EXIT_PASS) {
			fprintf(stderr, "%s: test(true) failed\n", argv[0]);
			return ret;
		}
	}

	return ret;
}