/* SPDX-License-Identifier: MIT */
/*
 * Description: Basic IO cancel test
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <poll.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(128 * 1024)
#define BS		4096
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;

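/* Microseconds elapsed between timevals 's' and 'e' */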
static unsigned long long utime_since(const struct timeval *s,
				      const struct timeval *e)
{
	long long sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= 1000000;
	return sec + usec;
}

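/* Microseconds elapsed from 'tv' until now */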
static unsigned long long utime_since_now(struct timeval *tv)
{
	struct timeval end;

	gettimeofday(&end, NULL);
	return utime_since(tv, &end);
}

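/*
 * Queue BUFFERS reads (or writes, if 'do_write' is set) of BS bytes each at
 * random block-aligned offsets, tagging each request with user_data i + 1.
 */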
static int start_io(struct io_uring *ring, int fd, int do_write)
{
	struct io_uring_sqe *sqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		off_t offset;

		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		offset = BS * (rand() % BUFFERS);
		if (do_write) {
			io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
		} else {
			io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
		}
		sqe->user_data = i + 1;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	return 0;
err:
	return 1;
}

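/*
 * Reap 'nr_io' completions. With 'do_partial' set, requests with even
 * user_data were never canceled and must have completed the full BS bytes.
 */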
static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
{
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < nr_io; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (do_partial && cqe->user_data) {
			if (!(cqe->user_data & 1)) {
				if (cqe->res != BS) {
					fprintf(stderr, "IO %u wasn't canceled but got error %d\n",
						(unsigned) cqe->user_data, cqe->res);
					goto err;
				}
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
err:
	return 1;
}

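/* One full round of IO: submit and wait for every buffer to complete */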
static int do_io(struct io_uring *ring, int fd, int do_write)
{
	if (start_io(ring, fd, do_write))
		return 1;
	if (wait_io(ring, BUFFERS, 0))
		return 1;
	return 0;
}

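/*
 * Queue cancel requests for the in-flight IO (every other request when
 * 'do_partial' is set), optionally forcing the cancels themselves to run
 * async via IOSQE_ASYNC.
 */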
static int start_cancel(struct io_uring *ring, int do_partial, int async_cancel)
{
	struct io_uring_sqe *sqe;
	int i, ret, submitted = 0;

	for (i = 0; i < BUFFERS; i++) {
		if (do_partial && (i & 1))
			continue;
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_cancel64(sqe, i + 1, 0);
		if (async_cancel)
			sqe->flags |= IOSQE_ASYNC;
		sqe->user_data = 0;
		submitted++;
	}

	ret = io_uring_submit(ring);
	if (ret != submitted) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
		goto err;
	}
	return 0;
err:
	return 1;
}

/*
 * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
 * the submitted IO. This is done to verify that canceling one piece of IO
 * doesn't impact others.
 */
static int test_io_cancel(const char *file, int do_write, int do_partial,
			  int async_cancel)
{
	struct io_uring ring;
	struct timeval start_tv;
	unsigned long usecs;
	unsigned to_wait;
	int fd, ret;

	fd = open(file, O_RDWR | O_DIRECT);
	if (fd < 0) {
		if (errno == EINVAL)
			return T_EXIT_SKIP;
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	if (do_io(&ring, fd, do_write))
		goto err;
	gettimeofday(&start_tv, NULL);
	if (do_io(&ring, fd, do_write))
		goto err;
	usecs = utime_since_now(&start_tv);

	if (start_io(&ring, fd, do_write))
		goto err;
	/* sleep for 1/3 of the total time, to allow some to start/complete */
	usleep(usecs / 3);
	if (start_cancel(&ring, do_partial, async_cancel))
		goto err;
	to_wait = BUFFERS;
	if (do_partial)
		to_wait += BUFFERS / 2;
	else
		to_wait += BUFFERS;
	if (wait_io(&ring, to_wait, do_partial))
		goto err;

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}

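/*
 * A cancel submitted on one ring must not be able to touch a request that
 * was issued on a different ring: expect -ENOENT from the cancel and the
 * original read to stay in flight.
 */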
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };

	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_cancel64(sqe, 1, 0);
	sqe->user_data = 2;

	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);

	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got canceled or wait failed\n");
		return 1;
	}
	io_uring_cqe_seen(&ring1, cqe);

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}

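/*
 * Submit a read on the parent's ring, then have a forked child cancel it
 * over the same (inherited) ring and reap both completions.
 */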
static int test_cancel_req_across_fork(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, i, fds[2];
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			return 1;
		}
		io_uring_prep_cancel64(sqe, 1, 0);
		sqe->user_data = 2;

		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			return 1;
		}

		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				return 1;
			}
			switch (cqe->user_data) {
			case 1:
				if (cqe->res != -EINTR &&
				    cqe->res != -ECANCELED) {
					fprintf(stderr, "user_data %i res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			case 2:
				if (cqe->res != -EALREADY && cqe->res) {
					fprintf(stderr, "user_data %i res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			default:
				fprintf(stderr, "user_data %i res %i\n",
					(unsigned)cqe->user_data, cqe->res);
				exit(1);
			}

			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		int wstatus;
		pid_t childpid;

		do {
			childpid = waitpid(p, &wstatus, 0);
		} while (childpid == (pid_t)-1 && errno == EINTR);

		if (childpid == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}

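/*
 * A child queues a linked poll+timeout pair plus a standalone timeout and
 * then exits; the parent checks that the poll and its linked timeout end in
 * -ECANCELED while the standalone timeout still fires with -ETIME.
 */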
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;

		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "%s: io_uring_submit() failed, ret %i\n",
				__FUNCTION__, ret);
			exit(1);
		}
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "user_data %i res %i\n",
				(unsigned)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

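/*
 * Queue a blocking pipe read (and a linked write) on an SQPOLL ring so they
 * end up on io-wq, then exit the ring while they are in flight; ring
 * teardown must cancel the io-wq requests rather than leave them stuck.
 */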
static int test_sqpoll_cancel_iowq_requests(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int ret, fds[2];
	char buffer[16];

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	/* pin both pipe ends via io-wq */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 2;
	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
		return 1;
	}

	/* wait for sqpoll to kick in and submit before exit */
	sleep(1);
	io_uring_queue_exit(&ring);

	/* close the write end, so if the ring is canceled properly read() fails */
	close(fds[1]);
	ret = read(fds[0], buffer, 10);
	close(fds[0]);
	return 0;
}

int main(int argc, char *argv[])
{
	const char *fname = ".io-cancel-test";
	int i, ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	if (test_dont_cancel_another_ring()) {
		fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_req_across_fork()) {
		fprintf(stderr, "test_cancel_req_across_fork() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_inflight_exit()) {
		fprintf(stderr, "test_cancel_inflight_exit() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_sqpoll_cancel_iowq_requests()) {
		fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
		return T_EXIT_FAIL;
	}

	t_create_file(fname, FILE_SIZE);

	vecs = t_create_buffers(BUFFERS, BS);

	for (i = 0; i < 8; i++) {
		int write = (i & 1) != 0;
		int partial = (i & 2) != 0;
		int async = (i & 4) != 0;

		ret = test_io_cancel(fname, write, partial, async);
		if (ret == T_EXIT_FAIL) {
			fprintf(stderr, "test_io_cancel %d %d %d failed\n",
				write, partial, async);
			goto err;
		}
	}

	unlink(fname);
	return T_EXIT_PASS;
err:
	unlink(fname);
	return T_EXIT_FAIL;
}