/* SPDX-License-Identifier: MIT */
/*
 * Description: Basic IO cancel test
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <poll.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(128 * 1024)
#define BS		4096
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;

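/*
 * Queue BUFFERS reads or writes, each using its own buffer at a random
 * BS-aligned offset in the file. user_data is set to i + 1 so individual
 * requests can be targeted by cancel requests later.
 */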
static int start_io(struct io_uring *ring, int fd, int do_write)
{
	struct io_uring_sqe *sqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		off_t offset;

		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		offset = BS * (rand() % BUFFERS);
		if (do_write) {
			io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
		} else {
			io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
		}
		sqe->user_data = i + 1;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	return 0;
err:
	return 1;
}

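/*
 * Reap nr_io completions. With do_partial set, cancels were only issued
 * for the odd user_data values, so any completion with a non-zero, even
 * user_data must have finished with a full BS-sized result.
 */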
static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
{
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < nr_io; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (do_partial && cqe->user_data) {
			if (!(cqe->user_data & 1)) {
				if (cqe->res != BS) {
					fprintf(stderr, "IO %u wasn't canceled but got error %d\n",
						(unsigned) cqe->user_data, cqe->res);
					goto err;
				}
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
err:
	return 1;
}

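/* Run one full set of IO to completion */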
static int do_io(struct io_uring *ring, int fd, int do_write)
{
	if (start_io(ring, fd, do_write))
		return 1;
	if (wait_io(ring, BUFFERS, 0))
		return 1;
	return 0;
}

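/*
 * Issue cancel requests for the previously submitted IO. With do_partial
 * set, only every other request (the odd user_data values) is targeted.
 * The cancel CQEs themselves carry user_data == 0.
 */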
static int start_cancel(struct io_uring *ring, int do_partial, int async_cancel)
{
	struct io_uring_sqe *sqe;
	int i, ret, submitted = 0;

	for (i = 0; i < BUFFERS; i++) {
		if (do_partial && (i & 1))
			continue;
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_cancel64(sqe, i + 1, 0);
		if (async_cancel)
			sqe->flags |= IOSQE_ASYNC;
		sqe->user_data = 0;
		submitted++;
	}

	ret = io_uring_submit(ring);
	if (ret != submitted) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
		goto err;
	}
	return 0;
err:
	return 1;
}

/*
 * Test cancels. If 'do_partial' is set, then we only attempt to cancel half
 * of the submitted IO. This is done to verify that canceling one piece of IO
 * doesn't impact others.
 */
static int test_io_cancel(const char *file, int do_write, int do_partial,
			  int async_cancel)
{
	struct io_uring ring;
	struct timeval start_tv;
	unsigned long usecs;
	unsigned to_wait;
	int fd, ret;

	fd = open(file, O_RDWR | O_DIRECT);
	if (fd < 0) {
		if (errno == EINVAL)
			return T_EXIT_SKIP;
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	if (do_io(&ring, fd, do_write))
		goto err;
	gettimeofday(&start_tv, NULL);
	if (do_io(&ring, fd, do_write))
		goto err;
	usecs = utime_since_now(&start_tv);

	if (start_io(&ring, fd, do_write))
		goto err;
	/* sleep for 1/3 of the total time, to allow some to start/complete */
	usleep(usecs / 3);
	if (start_cancel(&ring, do_partial, async_cancel))
		goto err;
	to_wait = BUFFERS;
	if (do_partial)
		to_wait += BUFFERS / 2;
	else
		to_wait += BUFFERS;
	if (wait_io(&ring, to_wait, do_partial))
		goto err;

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}

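/*
 * Start a read on ring1 that won't complete, then try to cancel it by
 * user_data from ring2. The cancel must fail with -ENOENT, and the read
 * on ring1 must still be pending afterwards.
 */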
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };

	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_cancel64(sqe, 1, 0);
	sqe->user_data = 2;

	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);
	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got canceled or wait failed\n");
		return 1;
	}
	/* on -ETIME no CQE was posted to ring1, so there is nothing to mark seen */

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}

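/*
 * Fork with a read request in flight and cancel it from the child. The
 * ring is shared across fork, so the cancel must find the request: the
 * read completes with -EINTR or -ECANCELED, the cancel with 0 or -EALREADY.
 */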
static int test_cancel_req_across_fork(void)
263 {
264 	struct io_uring ring;
265 	struct io_uring_cqe *cqe;
266 	struct io_uring_sqe *sqe;
267 	char buffer[128];
268 	int ret, i, fds[2];
269 	pid_t p;
270 
271 	ret = io_uring_queue_init(8, &ring, 0);
272 	if (ret) {
273 		fprintf(stderr, "ring create failed: %d\n", ret);
274 		return 1;
275 	}
276 	if (pipe(fds)) {
277 		perror("pipe");
278 		return 1;
279 	}
280 	sqe = io_uring_get_sqe(&ring);
281 	if (!sqe) {
282 		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
283 		return 1;
284 	}
285 	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
286 	sqe->flags |= IOSQE_ASYNC;
287 	sqe->user_data = 1;
288 
289 	ret = io_uring_submit(&ring);
290 	if (ret != 1) {
291 		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
292 		return 1;
293 	}
294 
295 	p = fork();
296 	if (p == -1) {
297 		fprintf(stderr, "fork() failed\n");
298 		return 1;
299 	}
300 
	if (p == 0) {
		/* child: always exit() on failure so we never fall back into
		 * the caller's test flow */
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			exit(1);
		}
		io_uring_prep_cancel64(sqe, 1, 0);
		sqe->user_data = 2;

		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			exit(1);
		}

		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				exit(1);
			}
			switch (cqe->user_data) {
			case 1:
				if (cqe->res != -EINTR &&
				    cqe->res != -ECANCELED) {
					fprintf(stderr, "user_data %u res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			case 2:
				if (cqe->res != -EALREADY && cqe->res) {
					fprintf(stderr, "user_data %u res %i\n",
						(unsigned)cqe->user_data, cqe->res);
					exit(1);
				}
				break;
			default:
				fprintf(stderr, "user_data %u res %i\n",
					(unsigned)cqe->user_data, cqe->res);
				exit(1);
			}

			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		int wstatus;
		pid_t childpid;

		do {
			childpid = waitpid(p, &wstatus, 0);
		} while (childpid == (pid_t)-1 && errno == EINTR);

		if (childpid == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}

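/*
 * Have a forked child exit with requests still in flight: a poll with a
 * timeout linked behind it, plus a standalone timeout. The child's exit
 * must cancel the poll and the timeout linked to it (-ECANCELED), while
 * the standalone timeout still expires normally (-ETIME).
 */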
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;

		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "%s: io_uring_submit() failed, ret %i\n",
				__FUNCTION__, ret);
			exit(1);
		}
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "user_data %u res %i\n",
				(unsigned)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

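/*
 * With SQPOLL, park an async read/write pair on the pipe in io-wq, then
 * exit the ring while they are still in flight. Ring teardown must cancel
 * the io-wq requests rather than leave them stuck on the pipe.
 */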
static int test_sqpoll_cancel_iowq_requests(void)
445 {
446 	struct io_uring ring;
447 	struct io_uring_sqe *sqe;
448 	int ret, fds[2];
449 	char buffer[16];
450 
451 	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
452 	if (ret) {
453 		fprintf(stderr, "ring create failed: %d\n", ret);
454 		return 1;
455 	}
456 	if (pipe(fds)) {
457 		perror("pipe");
458 		return 1;
459 	}
460 	/* pin both pipe ends via io-wq */
461 	sqe = io_uring_get_sqe(&ring);
462 	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
463 	sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
464 	sqe->user_data = 1;
465 
466 	sqe = io_uring_get_sqe(&ring);
467 	io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
468 	sqe->flags |= IOSQE_ASYNC;
469 	sqe->user_data = 2;
470 	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
		return 1;
	}

	/* wait for sqpoll to kick in and submit before exit */
	sleep(1);
	io_uring_queue_exit(&ring);

	/* close the write end, so that if the ring canceled properly, read() fails */
	close(fds[1]);
	ret = read(fds[0], buffer, 10);
	close(fds[0]);
	return 0;
}

int main(int argc, char *argv[])
{
	const char *fname = ".io-cancel-test";
	int i, ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	if (test_dont_cancel_another_ring()) {
		fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_req_across_fork()) {
		fprintf(stderr, "test_cancel_req_across_fork() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_cancel_inflight_exit()) {
		fprintf(stderr, "test_cancel_inflight_exit() failed\n");
		return T_EXIT_FAIL;
	}

	if (test_sqpoll_cancel_iowq_requests()) {
		fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
		return T_EXIT_FAIL;
	}

	t_create_file(fname, FILE_SIZE);

	vecs = t_create_buffers(BUFFERS, BS);

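	/*
	 * Run test_io_cancel() over all eight combinations: bit 0 selects
	 * write vs read, bit 1 partial vs full cancel, bit 2 async cancel.
	 */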
	for (i = 0; i < 8; i++) {
		int write = (i & 1) != 0;
		int partial = (i & 2) != 0;
		int async = (i & 4) != 0;

		ret = test_io_cancel(fname, write, partial, async);
		if (ret == T_EXIT_FAIL) {
			fprintf(stderr, "test_io_cancel %d %d %d failed\n",
				write, partial, async);
			goto err;
		}
	}

	unlink(fname);
	return T_EXIT_PASS;
err:
	unlink(fname);
	return T_EXIT_FAIL;
}