/* SPDX-License-Identifier: MIT */
/*
 * Description: Basic IO cancel test
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/poll.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(128 * 1024)
#define BS		4096
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;

static unsigned long long utime_since(const struct timeval *s,
				      const struct timeval *e)
{
	long long sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= 1000000;
	return sec + usec;
}

static unsigned long long utime_since_now(struct timeval *tv)
{
	struct timeval end;

	gettimeofday(&end, NULL);
	return utime_since(tv, &end);
}

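/*
 * Queue BUFFERS one-block reads or writes at random block-aligned offsets
 * in the test file. Each request gets user_data = i + 1, so completions
 * stay distinguishable from the cancel requests (which use user_data 0).
 */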
static int start_io(struct io_uring *ring, int fd, int do_write)
{
	struct io_uring_sqe *sqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		off_t offset;

		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		offset = BS * (rand() % BUFFERS);
		if (do_write) {
			io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
		} else {
			io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
		}
		sqe->user_data = i + 1;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	return 0;
err:
	return 1;
}

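/*
 * Reap 'nr_io' completions. With 'do_partial' set, only every other request
 * was targeted for cancellation, so any completion with an even user_data
 * (an IO that was left alone) must have transferred a full block.
 */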
static int wait_io(struct io_uring *ring, unsigned nr_io, int do_partial)
{
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < nr_io; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (do_partial && cqe->user_data) {
			if (!(cqe->user_data & 1)) {
				if (cqe->res != BS) {
					fprintf(stderr, "IO %d wasn't cancelled but got error %d\n", (unsigned) cqe->user_data, cqe->res);
					goto err;
				}
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
err:
	return 1;
}

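/* Submit one full round of IO and wait for all of it to complete. */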
static int do_io(struct io_uring *ring, int fd, int do_write)
{
	if (start_io(ring, fd, do_write))
		return 1;
	if (wait_io(ring, BUFFERS, 0))
		return 1;
	return 0;
}

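/*
 * Issue async cancel requests for the in-flight IO, keyed by the user_data
 * each request was submitted with. With 'do_partial' set, only every other
 * request is targeted. The cancel SQEs themselves use user_data = 0 so
 * wait_io() can tell them apart from the original IO.
 */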
static int start_cancel(struct io_uring *ring, int do_partial)
{
	struct io_uring_sqe *sqe;
	int i, ret, submitted = 0;

	for (i = 0; i < BUFFERS; i++) {
		if (do_partial && (i & 1))
			continue;
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_cancel(sqe, (void *) (unsigned long) (i + 1), 0);
		sqe->user_data = 0;
		submitted++;
	}

	ret = io_uring_submit(ring);
	if (ret != submitted) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, submitted);
		goto err;
	}
	return 0;
err:
	return 1;
}

/*
 * Test cancels. If 'do_partial' is set, then we only attempt to cancel half of
 * the submitted IO. This is done to verify that cancelling one piece of IO
 * doesn't impact others.
 */
static int test_io_cancel(const char *file, int do_write, int do_partial)
{
	struct io_uring ring;
	struct timeval start_tv;
	unsigned long usecs;
	unsigned to_wait;
	int fd, ret;

	fd = open(file, O_RDWR | O_DIRECT);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(4 * BUFFERS, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	/* run IO twice, timing the second round to size the cancel sleep below */
	if (do_io(&ring, fd, do_write))
		goto err;
	gettimeofday(&start_tv, NULL);
	if (do_io(&ring, fd, do_write))
		goto err;
	usecs = utime_since_now(&start_tv);

	if (start_io(&ring, fd, do_write))
		goto err;
	/* sleep for 1/3 of the total time, to allow some to start/complete */
	usleep(usecs / 3);
	if (start_cancel(&ring, do_partial))
		goto err;
	to_wait = BUFFERS;
	if (do_partial)
		to_wait += BUFFERS / 2;
	else
		to_wait += BUFFERS;
	if (wait_io(&ring, to_wait, do_partial))
		goto err;

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}

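/*
 * A cancel issued on one ring must not touch requests pending on another
 * ring: the cancel on ring2 should complete with -ENOENT, and the read
 * submitted on ring1 should still be in flight afterwards.
 */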
static int test_dont_cancel_another_ring(void)
{
	struct io_uring ring1, ring2;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, fds[2];
	struct __kernel_timespec ts = { .tv_sec = 0, .tv_nsec = 100000000, };

	ret = io_uring_queue_init(8, &ring1, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	ret = io_uring_queue_init(8, &ring2, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring1);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring1);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	/* make sure it doesn't cancel requests of the other ctx */
	sqe = io_uring_get_sqe(&ring2);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_cancel(sqe, (void *) (unsigned long)1, 0);
	sqe->user_data = 2;

	ret = io_uring_submit(&ring2);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	ret = io_uring_wait_cqe(&ring2, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->user_data != 2 || cqe->res != -ENOENT) {
		fprintf(stderr, "error: cqe %i: res=%i, but expected -ENOENT\n",
			(int)cqe->user_data, (int)cqe->res);
		return 1;
	}
	io_uring_cqe_seen(&ring2, cqe);

	ret = io_uring_wait_cqe_timeout(&ring1, &cqe, &ts);
	if (ret != -ETIME) {
		fprintf(stderr, "read got cancelled or wait failed\n");
		return 1;
	}
	io_uring_cqe_seen(&ring1, cqe);

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring1);
	io_uring_queue_exit(&ring2);
	return 0;
}

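/*
 * Submit a pipe read on the ring, then fork and have the child issue the
 * cancel. The child expects the read to complete with -EINTR and the
 * cancel with -EALREADY.
 */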
static int test_cancel_req_across_fork(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	char buffer[128];
	int ret, i, fds[2];
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	if (!sqe) {
		fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
		return 1;
	}
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 1;

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
		return 1;
	}

	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "%s: failed to get sqe\n", __FUNCTION__);
			return 1;
		}
		io_uring_prep_cancel(sqe, (void *) (unsigned long)1, 0);
		sqe->user_data = 2;

		ret = io_uring_submit(&ring);
		if (ret != 1) {
			fprintf(stderr, "%s: got %d, wanted 1\n", __FUNCTION__, ret);
			return 1;
		}

		for (i = 0; i < 2; ++i) {
			ret = io_uring_wait_cqe(&ring, &cqe);
			if (ret) {
				fprintf(stderr, "wait_cqe=%d\n", ret);
				return 1;
			}
			if ((cqe->user_data == 1 && cqe->res != -EINTR) ||
			    (cqe->user_data == 2 && cqe->res != -EALREADY)) {
				fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
				exit(1);
			}

			io_uring_cqe_seen(&ring, cqe);
		}
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}

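/*
 * Fork a child that submits a poll linked to a timeout, plus a standalone
 * timeout, and then exits with all three still in flight. The parent
 * expects the poll and its linked timeout to complete with -ECANCELED and
 * the standalone timeout with -ETIME.
 */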
static int test_cancel_inflight_exit(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0, };
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int ret, i;
	pid_t p;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	p = fork();
	if (p == -1) {
		fprintf(stderr, "fork() failed\n");
		return 1;
	}

	if (p == 0) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_poll_add(sqe, ring.ring_fd, POLLIN);
		sqe->user_data = 1;
		sqe->flags |= IOSQE_IO_LINK;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 2;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_timeout(sqe, &ts, 0, 0);
		sqe->user_data = 3;

		ret = io_uring_submit(&ring);
		if (ret != 3) {
			fprintf(stderr, "io_uring_submit() failed %s, ret %i\n", __FUNCTION__, ret);
			exit(1);
		}
		exit(0);
	} else {
		int wstatus;

		if (waitpid(p, &wstatus, 0) == (pid_t)-1) {
			perror("waitpid()");
			return 1;
		}
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus)) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return 1;
		}
	}

	for (i = 0; i < 3; ++i) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if ((cqe->user_data == 1 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 2 && cqe->res != -ECANCELED) ||
		    (cqe->user_data == 3 && cqe->res != -ETIME)) {
			fprintf(stderr, "%i %i\n", (int)cqe->user_data, cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}

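/*
 * With an SQPOLL ring, force a pipe read and write out to io-wq via
 * IOSQE_ASYNC and tear the ring down while they are still pending, to
 * exercise cancellation of io-wq requests on ring exit.
 */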
static int test_sqpoll_cancel_iowq_requests(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int ret, fds[2];
	char buffer[16];

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}
	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}
	/* pin both pipe ends via io-wq */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC | IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, fds[1], buffer, 10, 0);
	sqe->flags |= IOSQE_ASYNC;
	sqe->user_data = 2;
	ret = io_uring_submit(&ring);
	if (ret != 2) {
		fprintf(stderr, "%s: got %d, wanted 2\n", __FUNCTION__, ret);
		return 1;
	}

	/* wait for sqpoll to kick in and submit before exit */
	sleep(1);
	io_uring_queue_exit(&ring);

	/* close the write end, so if the ring is cancelled properly read() fails */
	close(fds[1]);
	ret = read(fds[0], buffer, 10);
	close(fds[0]);
	return 0;
}

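/*
 * Run the cross-ring, fork, exit and SQPOLL cancel tests, then exercise
 * read/write cancellation against a temporary file in all four
 * combinations of do_write and do_partial.
 */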
int main(int argc, char *argv[])
{
	int i, ret;

	if (argc > 1)
		return 0;

	if (test_dont_cancel_another_ring()) {
		fprintf(stderr, "test_dont_cancel_another_ring() failed\n");
		return 1;
	}

	if (test_cancel_req_across_fork()) {
		fprintf(stderr, "test_cancel_req_across_fork() failed\n");
		return 1;
	}

	if (test_cancel_inflight_exit()) {
		fprintf(stderr, "test_cancel_inflight_exit() failed\n");
		return 1;
	}

	if (test_sqpoll_cancel_iowq_requests()) {
		fprintf(stderr, "test_sqpoll_cancel_iowq_requests() failed\n");
		return 1;
	}

	t_create_file(".basic-rw", FILE_SIZE);

	vecs = t_create_buffers(BUFFERS, BS);

	for (i = 0; i < 4; i++) {
		int v1 = (i & 1) != 0;	/* do_write */
		int v2 = (i & 2) != 0;	/* do_partial */

		ret = test_io_cancel(".basic-rw", v1, v2);
		if (ret) {
			fprintf(stderr, "test_io_cancel %d %d failed\n", v1, v2);
			goto err;
		}
	}

	unlink(".basic-rw");
	return 0;
err:
	unlink(".basic-rw");
	return 1;
}