1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Description: Basic IO cancel test
4 */
5 #include <errno.h>
6 #include <stdio.h>
7 #include <unistd.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <fcntl.h>
11 #include <sys/types.h>
12 #include <sys/time.h>
13 #include <sys/wait.h>
14 #include <poll.h>
15
16 #include "helpers.h"
17 #include "liburing.h"
18
/* total size of the test file IO is issued against */
#define FILE_SIZE (128 * 1024)
/* size of each individual IO */
#define BS 4096
/* number of requests (and iovecs) issued per round */
#define BUFFERS (FILE_SIZE / BS)

/* BUFFERS iovecs of BS bytes each, allocated in main() via t_create_buffers() */
static struct iovec *vecs;
24
/*
 * Return the elapsed time between 's' and 'e' in microseconds.
 */
static unsigned long long utime_since(const struct timeval *s,
				      const struct timeval *e)
{
	long long secs = e->tv_sec - s->tv_sec;
	long long micros = e->tv_usec - s->tv_usec;

	/* borrow one second when the microsecond delta went negative */
	if (secs > 0 && micros < 0) {
		secs -= 1;
		micros += 1000000;
	}

	return secs * 1000000 + micros;
}
40
/*
 * Return the number of microseconds elapsed since the time stored in 'tv'.
 */
static unsigned long long utime_since_now(struct timeval *tv)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return utime_since(tv, &now);
}
48
start_io(struct io_uring * ring,int fd,int do_write)49 static int start_io(struct io_uring *ring, int fd, int do_write)
50 {
51 struct io_uring_sqe *sqe;
52 int i, ret;
53
54 for (i = 0; i < BUFFERS; i++) {
55 off_t offset;
56
57 sqe = io_uring_get_sqe(ring);
58 if (!sqe) {
59 fprintf(stderr, "sqe get failed\n");
60 goto err;
61 }
62 offset = BS * (rand() % BUFFERS);
63 if (do_write) {
64 io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
65 } else {
66 io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
67 }
68 sqe->user_data = i + 1;
69 }
70
71 ret = io_uring_submit(ring);
72 if (ret != BUFFERS) {
73 fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
74 goto err;
75 }
76
77 return 0;
78 err:
79 return 1;
80 }
81
wait_io(struct io_uring * ring,unsigned nr_io,int do_partial)82 static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
83 {
84 struct io_uring_cqe *cqe;
85 int i, ret;
86
87 for (i = 0; i < nr_io; i++) {
88 ret = io_uring_wait_cqe(ring, &cqe);
89 if (ret) {
90 fprintf(stderr, "wait_cqe=%d\n", ret);
91 goto err;
92 }
93 if (do_partial && cqe->user_data) {
94 if (!(cqe->user_data & 1)) {
95 if (cqe->res != BS) {
96 fprintf(stderr, "IO %d wasn't cancelled but got error %d\n", (unsigned) cqe->user_data, cqe->res);
97 goto err;
98 }
99 }
100 }
101 io_uring_cqe_seen(ring, cqe);
102 }
103 return 0;
104 err:
105 return 1;
106
107 }
108
do_io(struct io_uring * ring,int fd,int do_write)109 static int do_io(struct io_uring *ring, int fd, int do_write)
110 {
111 if (start_io(ring, fd, do_write))
112 return 1;
113 if (wait_io(ring, BUFFERS, 0))
114 return 1;
115 return 0;
116 }
117
start_cancel(struct io_uring * ring,int do_partial,int async_cancel)118 static int start_cancel(struct io_uring *ring, int do_partial, int async_cancel)
119 {
120 struct io_uring_sqe *sqe;
121 int i, ret, submitted = 0;
122
123 for (i = 0; i < BUFFERS; i++) {
124 if (do_partial && (i & 1))
125 continue;
126 sqe = io_uring_get_sqe(ring);
127 if (!sqe) {
128 fprintf(stderr, "sqe get failed\n");
129 goto err;
130 }
131 io_uring_prep_cancel64(sqe, i + 1, 0);
132 if (async_cancel)
133 sqe->flags |= IOSQE_ASYNC;
134 sqe->user_data = 0;
135 submitted++;
136 }
137
138 ret = io_uring_submit(ring);
139 if (ret != submitted) {
140 fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
141 goto err;
142 }
143 return 0;
144 err:
145 return 1;
146 }
147
148 /*
149 * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
150 * the submitted IO. This is done to verify that cancelling one piece of IO doesn't
151 * impact others.
152 */
test_io_cancel(const char * file,int do_write,int do_partial,int async_cancel)153 static int test_io_cancel(const char *file, int do_write, int do_partial,
154 int async_cancel)
155 {
156 struct io_uring ring;
157 struct timeval start_tv;
158 unsigned long usecs;
159 unsigned to_wait;
160 int fd, ret;
161
162 fd = open(file, O_RDWR | O_DIRECT);
163 if (fd < 0) {
164 perror("file open");
165 goto err;
166 }
167
168 ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
169 if (ret) {
170 fprintf(stderr, "ring create failed: %d\n", ret);
171 goto err;
172 }
173
174 if (do_io(&ring, fd, do_write))
175 goto err;
176 gettimeofday(&start_tv, NULL);
177 if (do_io(&ring, fd, do_write))
178 goto err;
179 usecs = utime_since_now(&start_tv);
180
181 if (start_io(&ring, fd, do_write))
182 goto err;
183 /* sleep for 1/3 of the total time, to allow some to start/complete */
184 usleep(usecs / 3);
185 if (start_cancel(&ring, do_partial, async_cancel))
186 goto err;
187 to_wait = BUFFERS;
188 if (do_partial)
189 to_wait += BUFFERS / 2;
190 else
191 to_wait += BUFFERS;
192 if (wait_io(&ring, to_wait, do_partial))
193 goto err;
194
195 io_uring_queue_exit(&ring);
196 close(fd);
197 return 0;
198 err:
199 if (fd != -1)
200 close(fd);
201 return 1;
202 }
203
/*
 * Submit a poll-backed read on one ring and try to cancel it from a second,
 * unrelated ring. The cancel must complete with -ENOENT: cancellations only
 * match requests submitted on the same ring.
 */
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };

	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	/* read from an empty pipe on ring1; IOSQE_ASYNC punts it to io-wq
	 * so it stays in flight (no data is ever written to the pipe) */
	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_cancel64(sqe, 1, 0);
	sqe->user_data = 2;

	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* the cancel on ring2 must not find user_data=1, it lives on ring1 */
	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);

	/* the read on ring1 must still be pending: expect a timeout here */
	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got cancelled or wait failed\n");
		return 1;
	}
	/* NOTE(review): assumes wait_cqe_timeout leaves cqe NULL on -ETIME,
	 * making this cqe_seen() a no-op — verify against liburing */
	io_uring_cqe_seen(&ring1, cqe);

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}
283
/*
 * Submit an async read on a pipe, fork, and have the child issue the cancel
 * on the shared ring. Verifies that a request submitted by the parent can be
 * cancelled from the forked child (the io_uring instance is shared via the
 * inherited fd and MAP_SHARED ring mappings).
 */
static int test_cancel_req_across_fork(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, i, fds[2];
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	/* read from an empty pipe; IOSQE_ASYNC parks it in io-wq so it
	 * remains in flight for the child to cancel */
	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		/* child: cancel the parent's read, then reap both CQEs */
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			return 1;
		}
		io_uring_prep_cancel64(sqe, 1, 0);
		sqe->user_data = 2;

		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			return 1;
		}

		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				return 1;
			}
			switch (cqe->user_data) {
			case 1:
				/* the read: interrupted or cancelled is fine */
				if (cqe->res != -EINTR &&
				    cqe->res != -ECANCELED) {
					fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			case 2:
				/* the cancel: success, or -EALREADY if the
				 * target had already started executing */
				if (cqe->res != -EALREADY && cqe->res) {
					fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			default:
				fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
				exit(1);
			}

			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		/* parent: the child's exit status carries the verdict */
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
384
/*
 * Child submits a poll linked to a timeout, plus an independent timeout,
 * then exits with all three still in flight. The child's exit must cancel
 * its in-flight requests: the parent expects -ECANCELED for the poll (1)
 * and the request linked behind it (2), while the standalone timeout (3)
 * still fires normally with -ETIME.
 */
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		/* poll on the ring fd itself; never becomes readable here */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;

		/* second member of the link chain, behind the poll */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;

		/* standalone timeout, not part of any link */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;

		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "io_uring_submit() failed %s, ret %i\n", __FUNCTION__, ret);
			exit(1);
		}
		/* exit with the requests in flight to trigger cancel-on-exit */
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	/* CQ ring mapping is shared with the exited child; reap all three */
	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}
456
test_sqpoll_cancel_iowq_requests(void)457 static int test_sqpoll_cancel_iowq_requests(void)
458 {
459 struct io_uring ring;
460 struct io_uring_sqe *sqe;
461 int ret, fds[2];
462 char buffer[16];
463
464 ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
465 if (ret) {
466 fprintf(stderr, "ring create failed: %d\n", ret);
467 return 1;
468 }
469 if (pipe(fds)) {
470 perror("pipe");
471 return 1;
472 }
473 /* pin both pipe ends via io-wq */
474 sqe = io_uring_get_sqe(&ring);
475 io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
476 sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
477 sqe->user_data = 1;
478
479 sqe = io_uring_get_sqe(&ring);
480 io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
481 sqe->flags |= IOSQE_ASYNC;
482 sqe->user_data = 2;
483 ret = io_uring_submit(&ring);
484 if (ret != 2) {
485 fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
486 return 1;
487 }
488
489 /* wait for sqpoll to kick in and submit before exit */
490 sleep(1);
491 io_uring_queue_exit(&ring);
492
493 /* close the write end, so if ring is cancelled properly read() fails*/
494 close(fds[1]);
495 ret = read(fds[0], buffer, 10);
496 close(fds[0]);
497 return 0;
498 }
499
main(int argc,char * argv[])500 int main(int argc, char *argv[])
501 {
502 const char *fname = ".io-cancel-test";
503 int i, ret;
504
505 if (argc > 1)
506 return 0;
507
508 if (test_dont_cancel_another_ring()) {
509 fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
510 return 1;
511 }
512
513 if (test_cancel_req_across_fork()) {
514 fprintf(stderr, "test_cancel_req_across_fork() failed\n");
515 return 1;
516 }
517
518 if (test_cancel_inflight_exit()) {
519 fprintf(stderr, "test_cancel_inflight_exit() failed\n");
520 return 1;
521 }
522
523 if (test_sqpoll_cancel_iowq_requests()) {
524 fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
525 return 1;
526 }
527
528 t_create_file(fname, FILE_SIZE);
529
530 vecs = t_create_buffers(BUFFERS, BS);
531
532 for (i = 0; i < 8; i++) {
533 int write = (i & 1) != 0;
534 int partial = (i & 2) != 0;
535 int async = (i & 4) != 0;
536
537 ret = test_io_cancel(fname, write, partial, async);
538 if (ret) {
539 fprintf(stderr, "test_io_cancel %d %d %d failed\n",
540 write, partial, async);
541 goto err;
542 }
543 }
544
545 unlink(fname);
546 return 0;
547 err:
548 unlink(fname);
549 return 1;
550 }
551