• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Check that IORING_OP_ACCEPT works, and send some data across to verify we
4  * didn't get a junk fd.
5  */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <signal.h>
#include <assert.h>
#include <limits.h>

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/un.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include "helpers.h"
#include "liburing.h"
25 
26 #define MAX_FDS 32
27 #define NOP_USER_DATA (1LLU << 50)
28 #define INITIAL_USER_DATA 1000
29 
/*
 * Set when the kernel reports accept (or multishot accept) as
 * unsupported, so later sub-tests can skip instead of failing.
 */
static int no_accept;
static int no_accept_multi;

/*
 * Buffer plus the iovec describing it, used for the send/recv
 * verification pass after a connection has been accepted.
 */
struct data {
	char buf[128];
	struct iovec iov;
};
37 
/* Per-run configuration handed to test()/test_loop(). */
struct accept_test_args {
	int accept_should_error;	/* expect the accept itself to fail */
	bool fixed;			/* use fixed (registered) file slots */
	bool nonblock;			/* listening socket is O_NONBLOCK */
	bool queue_accept_before_connect; /* arm accept SQEs before connecting */
	bool multishot;			/* use multishot accept */
	int extra_loops;		/* additional accept iterations */
	bool overflow;			/* force CQ overflow during the run */
};
47 
/* Close the first @nr descriptors held in @fds. */
static void close_fds(int fds[], int nr)
{
	int idx = 0;

	while (idx < nr) {
		close(fds[idx]);
		idx++;
	}
}
55 
/*
 * Close the client-side fds and, unless the server side used fixed
 * file slots (which are not real fds in this process), the server-side
 * fds as well.
 */
static void close_sock_fds(int s_fd[], int c_fd[], int nr, bool fixed)
{
	int i;

	if (!fixed) {
		for (i = 0; i < nr; i++)
			close(s_fd[i]);
	}
	for (i = 0; i < nr; i++)
		close(c_fd[i]);
}
62 
queue_send(struct io_uring * ring,int fd)63 static void queue_send(struct io_uring *ring, int fd)
64 {
65 	struct io_uring_sqe *sqe;
66 	struct data *d;
67 
68 	d = t_malloc(sizeof(*d));
69 	d->iov.iov_base = d->buf;
70 	d->iov.iov_len = sizeof(d->buf);
71 
72 	sqe = io_uring_get_sqe(ring);
73 	io_uring_prep_writev(sqe, fd, &d->iov, 1, 0);
74 	sqe->user_data = 1;
75 }
76 
queue_recv(struct io_uring * ring,int fd,bool fixed)77 static void queue_recv(struct io_uring *ring, int fd, bool fixed)
78 {
79 	struct io_uring_sqe *sqe;
80 	struct data *d;
81 
82 	d = t_malloc(sizeof(*d));
83 	d->iov.iov_base = d->buf;
84 	d->iov.iov_len = sizeof(d->buf);
85 
86 	sqe = io_uring_get_sqe(ring);
87 	io_uring_prep_readv(sqe, fd, &d->iov, 1, 0);
88 	sqe->user_data = 2;
89 	if (fixed)
90 		sqe->flags |= IOSQE_FIXED_FILE;
91 }
92 
/*
 * Arm and submit a single multishot accept on @fd, tagged with @idx as
 * user_data. With @fixed, the direct variant is used so completions
 * land in automatically allocated fixed-file slots.
 */
static void queue_accept_multishot(struct io_uring *ring, int fd,
				   int idx, bool fixed)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	int submitted;

	if (fixed)
		io_uring_prep_multishot_accept_direct(sqe, fd, NULL, NULL, 0);
	else
		io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);

	io_uring_sqe_set_data64(sqe, idx);
	submitted = io_uring_submit(ring);
	assert(submitted != -1);
}
110 
queue_accept_conn(struct io_uring * ring,int fd,struct accept_test_args args)111 static void queue_accept_conn(struct io_uring *ring, int fd,
112 			      struct accept_test_args args)
113 {
114 	struct io_uring_sqe *sqe;
115 	int ret;
116 	int fixed_idx = args.fixed ? 0 : -1;
117 	int count = 1 + args.extra_loops;
118 
119 	if (args.multishot) {
120 		queue_accept_multishot(ring, fd, INITIAL_USER_DATA, args.fixed);
121 		return;
122 	}
123 
124 	while (count--) {
125 		sqe = io_uring_get_sqe(ring);
126 		if (fixed_idx < 0) {
127 			io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
128 		} else {
129 			io_uring_prep_accept_direct(sqe, fd, NULL, NULL,
130 						    0, fixed_idx);
131 		}
132 		ret = io_uring_submit(ring);
133 		assert(ret != -1);
134 	}
135 }
136 
/*
 * Reap one accept completion from @ring and return its result: the
 * accepted fd (or, for fixed files, the slot index), or a negative
 * errno. NOP completions queued by cause_overflow() are skipped.
 * In multishot mode (*multishot non-zero), re-arms the accept with a
 * bumped user_data tag once the kernel drops IORING_CQE_F_MORE.
 */
static int accept_conn(struct io_uring *ring, int fixed_idx, int *multishot, int fd)
{
	struct io_uring_cqe *pcqe;
	struct io_uring_cqe cqe;
	int ret;

	/* copy the CQE out so it can be marked seen before inspection */
	do {
		ret = io_uring_wait_cqe(ring, &pcqe);
		assert(!ret);
		cqe = *pcqe;
		io_uring_cqe_seen(ring, pcqe);
	} while (cqe.user_data == NOP_USER_DATA);

	if (*multishot) {
		if (!(cqe.flags & IORING_CQE_F_MORE)) {
			/* kernel ended the multishot run: bump tag, re-arm */
			(*multishot)++;
			queue_accept_multishot(ring, fd, *multishot, fixed_idx == 0);
		} else {
			if (cqe.user_data != *multishot) {
				fprintf(stderr, "received multishot after told done!\n");
				return -ECANCELED;
			}
		}
	}

	ret = cqe.res;

	if (fixed_idx >= 0) {
		if (ret > 0) {
			/*
			 * NOTE(review): this tests the pointer, not
			 * *multishot; every caller passes a valid pointer, so
			 * the branch never fires — possibly meant !*multishot
			 * (direct single-shot accept should complete with 0,
			 * not a positive fd). Confirm intent.
			 */
			if (!multishot) {
				close(ret);
				return -EINVAL;
			}
		} else if (!ret) {
			/* direct accept completes with 0; report the slot */
			ret = fixed_idx;
		}
	}
	return ret;
}
176 
start_accept_listen(struct sockaddr_in * addr,int port_off,int extra_flags)177 static int start_accept_listen(struct sockaddr_in *addr, int port_off,
178 			       int extra_flags)
179 {
180 	int fd, ret;
181 
182 	fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC | extra_flags,
183 		    IPPROTO_TCP);
184 
185 	int32_t val = 1;
186 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
187 	assert(ret != -1);
188 	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
189 	assert(ret != -1);
190 
191 	struct sockaddr_in laddr;
192 
193 	if (!addr)
194 		addr = &laddr;
195 
196 	addr->sin_family = AF_INET;
197 	addr->sin_addr.s_addr = inet_addr("127.0.0.1");
198 	ret = t_bind_ephemeral_port(fd, addr);
199 	assert(!ret);
200 	ret = listen(fd, 128);
201 	assert(ret != -1);
202 
203 	return fd;
204 }
205 
/*
 * Create a TCP client socket and start connecting it to @addr. The
 * socket is flipped to O_NONBLOCK so connect() returns immediately
 * (asserted to return -1, i.e. in progress), then restored to blocking
 * mode before being handed back.
 */
static int set_client_fd(struct sockaddr_in *addr)
{
	int32_t one = 1;
	int32_t fl;
	int fd, ret;

	fd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);

	ret = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
	assert(ret != -1);

	fl = fcntl(fd, F_GETFL, 0);
	assert(fl != -1);
	ret = fcntl(fd, F_SETFL, fl | O_NONBLOCK);
	assert(ret != -1);

	ret = connect(fd, (struct sockaddr *)addr, sizeof(*addr));
	assert(ret == -1);

	fl = fcntl(fd, F_GETFL, 0);
	assert(fl != -1);
	ret = fcntl(fd, F_SETFL, fl & ~O_NONBLOCK);
	assert(ret != -1);

	return fd;
}
236 
cause_overflow(struct io_uring * ring)237 static void cause_overflow(struct io_uring *ring)
238 {
239 	int i, ret;
240 
241 	for (i = 0; i < ring->cq.ring_entries; i++) {
242 		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
243 
244 		io_uring_prep_nop(sqe);
245 		io_uring_sqe_set_data64(sqe, NOP_USER_DATA);
246 		ret = io_uring_submit(ring);
247 		assert(ret != -1);
248 	}
249 
250 }
251 
clear_overflow(struct io_uring * ring)252 static void clear_overflow(struct io_uring *ring)
253 {
254 	struct io_uring_cqe *cqe;
255 
256 	while (!io_uring_peek_cqe(ring, &cqe)) {
257 		if (cqe->user_data != NOP_USER_DATA)
258 			break;
259 		io_uring_cqe_seen(ring, cqe);
260 	}
261 }
262 
/*
 * One full accept round: connect nr_fds clients to @recv_s0, reap the
 * accept completions, then (single-shot only) push 128 bytes across
 * the first connection and verify both ends complete. Returns
 * T_EXIT_PASS or T_EXIT_FAIL; "accept unsupported" is signalled to
 * callers via the no_accept/no_accept_multi globals.
 */
static int test_loop(struct io_uring *ring,
		     struct accept_test_args args,
		     int recv_s0,
		     struct sockaddr_in *addr)
{
	struct io_uring_cqe *cqe;
	uint32_t head, count = 0;
	int i, ret, s_fd[MAX_FDS], c_fd[MAX_FDS], done = 0;
	bool fixed = args.fixed;
	bool multishot = args.multishot;
	uint32_t multishot_mask = 0;
	int nr_fds = multishot ? MAX_FDS : 1;
	int multishot_idx = multishot ? INITIAL_USER_DATA : 0;
	int err_ret = T_EXIT_FAIL;

	if (args.overflow)
		cause_overflow(ring);

	for (i = 0; i < nr_fds; i++) {
		c_fd[i] = set_client_fd(addr);
		/* drain the NOPs halfway through to exercise overflow flush */
		if (args.overflow && i == nr_fds / 2)
			clear_overflow(ring);
	}

	if (!args.queue_accept_before_connect)
		queue_accept_conn(ring, recv_s0, args);

	for (i = 0; i < nr_fds; i++) {
		s_fd[i] = accept_conn(ring, fixed ? 0 : -1, &multishot_idx, recv_s0);
		if (s_fd[i] == -EINVAL) {
			if (args.accept_should_error)
				goto out;
			fprintf(stdout,
				"%s %s Accept not supported, skipping\n",
				fixed ? "Fixed" : "",
				multishot ? "Multishot" : "");
			if (multishot)
				no_accept_multi = 1;
			else
				no_accept = 1;
			/*
			 * NOTE(review): this assignment is never read — the
			 * out label returns T_EXIT_PASS unconditionally;
			 * callers detect the skip via the no_accept* flags.
			 */
			ret = T_EXIT_SKIP;
			goto out;
		} else if (s_fd[i] < 0) {
			if (args.accept_should_error &&
			    (s_fd[i] == -EBADF || s_fd[i] == -EINVAL))
				goto out;
			fprintf(stderr, "%s %s Accept[%d] got %d\n",
				fixed ? "Fixed" : "",
				multishot ? "Multishot" : "",
				i, s_fd[i]);
			goto err;
		} else if (s_fd[i] == 195 && args.overflow) {
			/*
			 * NOTE(review): 195 is presumably a sentinel fd value
			 * indicating mishandled overflow — confirm where it
			 * comes from before relying on it.
			 */
			fprintf(stderr, "Broken overflow handling\n");
			goto err;
		}

		if (multishot && fixed) {
			if (s_fd[i] >= MAX_FDS) {
				fprintf(stderr,
					"Fixed Multishot Accept[%d] got outbound index: %d\n",
					i, s_fd[i]);
				goto err;
			}
			/*
			 * for fixed multishot accept test, the file slots
			 * allocated are [0, 32), this means we finally end up
			 * with each bit of a u32 being 1.
			 */
			multishot_mask |= (1U << s_fd[i]);
		}
	}

	if (multishot) {
		if (fixed && (~multishot_mask != 0U)) {
			fprintf(stderr, "Fixed Multishot Accept misses events\n");
			goto err;
		}
		goto out;
	}

	/* single-shot: verify the accepted fd actually moves data */
	queue_send(ring, c_fd[0]);
	queue_recv(ring, s_fd[0], fixed);

	ret = io_uring_submit_and_wait(ring, 2);
	assert(ret != -1);

	while (count < 2) {
		io_uring_for_each_cqe(ring, head, cqe) {
			if (cqe->res < 0) {
				fprintf(stderr, "Got cqe res %d, user_data %i\n",
						cqe->res, (int)cqe->user_data);
				done = 1;
				break;
			}
			assert(cqe->res == 128);
			count++;
		}

		assert(count <= 2);
		io_uring_cq_advance(ring, count);
		if (done)
			goto err;
	}

out:
	close_sock_fds(s_fd, c_fd, nr_fds, fixed);
	return T_EXIT_PASS;
err:
	close_sock_fds(s_fd, c_fd, nr_fds, fixed);
	return err_ret;
}
374 
/*
 * Run test_loop() 1 + extra_loops times against a freshly created
 * listening socket configured per @args, then close the socket.
 */
static int test(struct io_uring *ring, struct accept_test_args args)
{
	struct sockaddr_in addr;
	int rounds = 1 + args.extra_loops;
	int loop, ret = 0;
	int32_t recv_s0;

	recv_s0 = start_accept_listen(&addr, 0,
				      args.nonblock ? SOCK_NONBLOCK : 0);
	if (args.queue_accept_before_connect)
		queue_accept_conn(ring, recv_s0, args);

	for (loop = 0; loop < rounds; loop++) {
		ret = test_loop(ring, args, recv_s0, &addr);
		if (ret)
			break;
	}

	close(recv_s0);
	return ret;
}
393 
/*
 * SIGALRM handler for test_accept_pending_on_exit(): the alarm firing
 * while the accept is still pending is the success path, so exit 0.
 */
static void sig_alrm(int sig)
{
	exit(0);
}
398 
test_accept_pending_on_exit(void)399 static int test_accept_pending_on_exit(void)
400 {
401 	struct io_uring m_io_uring;
402 	struct io_uring_cqe *cqe;
403 	struct io_uring_sqe *sqe;
404 	int fd, ret;
405 
406 	ret = io_uring_queue_init(32, &m_io_uring, 0);
407 	assert(ret >= 0);
408 
409 	fd = start_accept_listen(NULL, 0, 0);
410 
411 	sqe = io_uring_get_sqe(&m_io_uring);
412 	io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
413 	ret = io_uring_submit(&m_io_uring);
414 	assert(ret != -1);
415 
416 	signal(SIGALRM, sig_alrm);
417 	alarm(1);
418 	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
419 	assert(!ret);
420 	io_uring_cqe_seen(&m_io_uring, cqe);
421 
422 	io_uring_queue_exit(&m_io_uring);
423 	return 0;
424 }
425 
/* Parameters for test_accept_many(). */
struct test_accept_many_args {
	unsigned int usecs;	/* optional delay before reaping completions */
	bool nonblock;		/* listening sockets are O_NONBLOCK */
	bool single_sock;	/* all accepts target a single socket */
	bool close_fds;		/* close listening sockets while accepts pend */
};
432 
433 /*
434  * Test issue many accepts and see if we handle cancelation on exit
435  */
/*
 * Issue many accept requests (against one or many listening sockets)
 * and verify they are all canceled when the ring is torn down. A
 * lowered RLIMIT_NPROC is in effect while the requests are in flight.
 * NOTE(review): RLIMIT_NPROC limits processes/threads, not open files
 * — presumably intended to constrain async worker creation; confirm
 * against the kernel behavior this is meant to stress.
 */
static int test_accept_many(struct test_accept_many_args args)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	unsigned long cur_lim;
	struct rlimit rlim;
	int *fds, i, ret;
	unsigned int nr = 128;
	int nr_socks = args.single_sock ? 1 : nr;

	if (getrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("getrlimit");
		return 1;
	}

	/* remember the current limit so it can be restored on exit */
	cur_lim = rlim.rlim_cur;
	rlim.rlim_cur = nr / 4;

	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	ret = io_uring_queue_init(2 * nr, &m_io_uring, 0);
	assert(ret >= 0);

	fds = t_calloc(nr_socks, sizeof(int));

	for (i = 0; i < nr_socks; i++)
		fds[i] = start_accept_listen(NULL, i,
					     args.nonblock ? SOCK_NONBLOCK : 0);

	for (i = 0; i < nr; i++) {
		int sock_idx = args.single_sock ? 0 : i;
		sqe = io_uring_get_sqe(&m_io_uring);
		io_uring_prep_accept(sqe, fds[sock_idx], NULL, NULL, 0);
		sqe->user_data = 1 + i;
		ret = io_uring_submit(&m_io_uring);
		assert(ret == 1);
	}

	if (args.usecs)
		usleep(args.usecs);

	if (args.close_fds)
		for (i = 0; i < nr_socks; i++)
			close(fds[i]);

	/* any completion that did post must be a cancellation */
	for (i = 0; i < nr; i++) {
		if (io_uring_peek_cqe(&m_io_uring, &cqe))
			break;
		if (cqe->res != -ECANCELED) {
			fprintf(stderr, "Expected cqe to be canceled %d\n", cqe->res);
			ret = 1;
			goto out;
		}
		io_uring_cqe_seen(&m_io_uring, cqe);
	}
	ret = 0;
out:
	/* restore the original limit before returning */
	rlim.rlim_cur = cur_lim;
	if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	free(fds);
	io_uring_queue_exit(&m_io_uring);
	return ret;
}
507 
test_accept_cancel(unsigned usecs,unsigned int nr,bool multishot)508 static int test_accept_cancel(unsigned usecs, unsigned int nr, bool multishot)
509 {
510 	struct io_uring m_io_uring;
511 	struct io_uring_cqe *cqe;
512 	struct io_uring_sqe *sqe;
513 	int fd, i, ret;
514 
515 	if (multishot && no_accept_multi)
516 		return T_EXIT_SKIP;
517 
518 	ret = io_uring_queue_init(32, &m_io_uring, 0);
519 	assert(ret >= 0);
520 
521 	fd = start_accept_listen(NULL, 0, 0);
522 
523 	for (i = 1; i <= nr; i++) {
524 		sqe = io_uring_get_sqe(&m_io_uring);
525 		if (!multishot)
526 			io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
527 		else
528 			io_uring_prep_multishot_accept(sqe, fd, NULL, NULL, 0);
529 		sqe->user_data = i;
530 		ret = io_uring_submit(&m_io_uring);
531 		assert(ret == 1);
532 	}
533 
534 	if (usecs)
535 		usleep(usecs);
536 
537 	for (i = 1; i <= nr; i++) {
538 		sqe = io_uring_get_sqe(&m_io_uring);
539 		io_uring_prep_cancel64(sqe, i, 0);
540 		sqe->user_data = nr + i;
541 		ret = io_uring_submit(&m_io_uring);
542 		assert(ret == 1);
543 	}
544 	for (i = 0; i < nr * 2; i++) {
545 		ret = io_uring_wait_cqe(&m_io_uring, &cqe);
546 		assert(!ret);
547 		/*
548 		 * Two cases here:
549 		 *
550 		 * 1) We cancel the accept4() before it got started, we should
551 		 *    get '0' for the cancel request and '-ECANCELED' for the
552 		 *    accept request.
553 		 * 2) We cancel the accept4() after it's already running, we
554 		 *    should get '-EALREADY' for the cancel request and
555 		 *    '-EINTR' for the accept request.
556 		 */
557 		if (cqe->user_data == 0) {
558 			fprintf(stderr, "unexpected 0 user data\n");
559 			goto err;
560 		} else if (cqe->user_data <= nr) {
561 			/* no multishot */
562 			if (cqe->res == -EINVAL)
563 				return T_EXIT_SKIP;
564 			if (cqe->res != -EINTR && cqe->res != -ECANCELED) {
565 				fprintf(stderr, "Cancelled accept got %d\n", cqe->res);
566 				goto err;
567 			}
568 		} else if (cqe->user_data <= nr * 2) {
569 			if (cqe->res != -EALREADY && cqe->res != 0) {
570 				fprintf(stderr, "Cancel got %d\n", cqe->res);
571 				goto err;
572 			}
573 		}
574 		io_uring_cqe_seen(&m_io_uring, cqe);
575 	}
576 
577 	io_uring_queue_exit(&m_io_uring);
578 	close(fd);
579 	return 0;
580 err:
581 	io_uring_queue_exit(&m_io_uring);
582 	close(fd);
583 	return 1;
584 }
585 
test_accept(int count,bool before)586 static int test_accept(int count, bool before)
587 {
588 	struct io_uring m_io_uring;
589 	int ret;
590 	struct accept_test_args args = {
591 		.queue_accept_before_connect = before,
592 		.extra_loops = count - 1
593 	};
594 
595 	ret = io_uring_queue_init(32, &m_io_uring, 0);
596 	assert(ret >= 0);
597 	ret = test(&m_io_uring, args);
598 	io_uring_queue_exit(&m_io_uring);
599 	return ret;
600 }
601 
test_multishot_accept(int count,bool before,bool overflow)602 static int test_multishot_accept(int count, bool before, bool overflow)
603 {
604 	struct io_uring m_io_uring;
605 	int ret;
606 	struct accept_test_args args = {
607 		.queue_accept_before_connect = before,
608 		.multishot = true,
609 		.extra_loops = count - 1,
610 		.overflow = overflow
611 	};
612 
613 	if (no_accept_multi)
614 		return T_EXIT_SKIP;
615 
616 	ret = io_uring_queue_init(MAX_FDS + 10, &m_io_uring, 0);
617 	assert(ret >= 0);
618 	ret = test(&m_io_uring, args);
619 	io_uring_queue_exit(&m_io_uring);
620 	return ret;
621 }
622 
/*
 * Multishot accept in direct mode must use IORING_FILE_INDEX_ALLOC;
 * an explicit file_index is expected to be rejected with -EINVAL.
 */
static int test_accept_multishot_wrong_arg(void)
{
	struct io_uring m_io_uring;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	int fd, ret;

	ret = io_uring_queue_init(4, &m_io_uring, 0);
	assert(ret >= 0);

	fd = start_accept_listen(NULL, 0, 0);

	sqe = io_uring_get_sqe(&m_io_uring);
	io_uring_prep_multishot_accept_direct(sqe, fd, NULL, NULL, 0);
	/* deliberately invalid: fixed slot index instead of ALLOC */
	sqe->file_index = 1;
	ret = io_uring_submit(&m_io_uring);
	assert(ret == 1);

	ret = io_uring_wait_cqe(&m_io_uring, &cqe);
	assert(!ret);
	if (cqe->res != -EINVAL) {
		fprintf(stderr, "file index should be IORING_FILE_INDEX_ALLOC \
				if its accept in multishot direct mode\n");
		goto err;
	}
	io_uring_cqe_seen(&m_io_uring, cqe);

	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 0;
err:
	io_uring_queue_exit(&m_io_uring);
	close(fd);
	return 1;
}
658 
659 
test_accept_nonblock(bool queue_before_connect,int count)660 static int test_accept_nonblock(bool queue_before_connect, int count)
661 {
662 	struct io_uring m_io_uring;
663 	int ret;
664 	struct accept_test_args args = {
665 		.nonblock = true,
666 		.queue_accept_before_connect = queue_before_connect,
667 		.extra_loops = count - 1
668 	};
669 
670 	ret = io_uring_queue_init(32, &m_io_uring, 0);
671 	assert(ret >= 0);
672 	ret = test(&m_io_uring, args);
673 	io_uring_queue_exit(&m_io_uring);
674 	return ret;
675 }
676 
test_accept_fixed(void)677 static int test_accept_fixed(void)
678 {
679 	struct io_uring m_io_uring;
680 	int ret, fd = -1;
681 	struct accept_test_args args = {
682 		.fixed = true
683 	};
684 
685 	ret = io_uring_queue_init(32, &m_io_uring, 0);
686 	assert(ret >= 0);
687 	ret = io_uring_register_files(&m_io_uring, &fd, 1);
688 	if (ret) {
689 		/* kernel doesn't support sparse registered files, skip */
690 		if (ret == -EBADF || ret == -EINVAL)
691 			return T_EXIT_SKIP;
692 		return T_EXIT_FAIL;
693 	}
694 	ret = test(&m_io_uring, args);
695 	io_uring_queue_exit(&m_io_uring);
696 	return ret;
697 }
698 
test_multishot_fixed_accept(void)699 static int test_multishot_fixed_accept(void)
700 {
701 	struct io_uring m_io_uring;
702 	int ret, fd[MAX_FDS];
703 	struct accept_test_args args = {
704 		.fixed = true,
705 		.multishot = true
706 	};
707 
708 	if (no_accept_multi)
709 		return T_EXIT_SKIP;
710 
711 	memset(fd, -1, sizeof(fd));
712 	ret = io_uring_queue_init(MAX_FDS + 10, &m_io_uring, 0);
713 	assert(ret >= 0);
714 	ret = io_uring_register_files(&m_io_uring, fd, MAX_FDS);
715 	if (ret) {
716 		/* kernel doesn't support sparse registered files, skip */
717 		if (ret == -EBADF || ret == -EINVAL)
718 			return T_EXIT_SKIP;
719 		return T_EXIT_FAIL;
720 	}
721 	ret = test(&m_io_uring, args);
722 	io_uring_queue_exit(&m_io_uring);
723 	return ret;
724 }
725 
test_accept_sqpoll(void)726 static int test_accept_sqpoll(void)
727 {
728 	struct io_uring m_io_uring;
729 	struct io_uring_params p = { };
730 	int ret;
731 	struct accept_test_args args = { };
732 
733 	p.flags = IORING_SETUP_SQPOLL;
734 	ret = t_create_ring_params(32, &m_io_uring, &p);
735 	if (ret == T_SETUP_SKIP)
736 		return 0;
737 	else if (ret < 0)
738 		return ret;
739 
740 	args.accept_should_error = 1;
741 	if (p.features & IORING_FEAT_SQPOLL_NONFIXED)
742 		args.accept_should_error = 0;
743 
744 	ret = test(&m_io_uring, args);
745 	io_uring_queue_exit(&m_io_uring);
746 	return ret;
747 }
748 
main(int argc,char * argv[])749 int main(int argc, char *argv[])
750 {
751 	int ret;
752 
753 	if (argc > 1)
754 		return T_EXIT_SKIP;
755 
756 	ret = test_accept(1, false);
757 	if (ret == T_EXIT_FAIL) {
758 		fprintf(stderr, "test_accept failed\n");
759 		return ret;
760 	}
761 	if (no_accept)
762 		return T_EXIT_SKIP;
763 
764 	ret = test_accept(2, false);
765 	if (ret == T_EXIT_FAIL) {
766 		fprintf(stderr, "test_accept(2) failed\n");
767 		return ret;
768 	}
769 
770 	ret = test_accept(2, true);
771 	if (ret == T_EXIT_FAIL) {
772 		fprintf(stderr, "test_accept(2, true) failed\n");
773 		return ret;
774 	}
775 
776 	ret = test_accept_nonblock(false, 1);
777 	if (ret == T_EXIT_FAIL) {
778 		fprintf(stderr, "test_accept_nonblock failed\n");
779 		return ret;
780 	}
781 
782 	ret = test_accept_nonblock(true, 1);
783 	if (ret == T_EXIT_FAIL) {
784 		fprintf(stderr, "test_accept_nonblock(before, 1) failed\n");
785 		return ret;
786 	}
787 
788 	ret = test_accept_nonblock(true, 3);
789 	if (ret == T_EXIT_FAIL) {
790 		fprintf(stderr, "test_accept_nonblock(before,3) failed\n");
791 		return ret;
792 	}
793 
794 	ret = test_accept_fixed();
795 	if (ret == T_EXIT_FAIL) {
796 		fprintf(stderr, "test_accept_fixed failed\n");
797 		return ret;
798 	}
799 
800 	ret = test_multishot_fixed_accept();
801 	if (ret == T_EXIT_FAIL) {
802 		fprintf(stderr, "test_multishot_fixed_accept failed\n");
803 		return ret;
804 	}
805 
806 	ret = test_accept_multishot_wrong_arg();
807 	if (ret == T_EXIT_FAIL) {
808 		fprintf(stderr, "test_accept_multishot_wrong_arg failed\n");
809 		return ret;
810 	}
811 
812 	ret = test_accept_sqpoll();
813 	if (ret == T_EXIT_FAIL) {
814 		fprintf(stderr, "test_accept_sqpoll failed\n");
815 		return ret;
816 	}
817 
818 	ret = test_accept_cancel(0, 1, false);
819 	if (ret == T_EXIT_FAIL) {
820 		fprintf(stderr, "test_accept_cancel nodelay failed\n");
821 		return ret;
822 	}
823 
824 	ret = test_accept_cancel(10000, 1, false);
825 	if (ret == T_EXIT_FAIL) {
826 		fprintf(stderr, "test_accept_cancel delay failed\n");
827 		return ret;
828 	}
829 
830 	ret = test_accept_cancel(0, 4, false);
831 	if (ret == T_EXIT_FAIL) {
832 		fprintf(stderr, "test_accept_cancel nodelay failed\n");
833 		return ret;
834 	}
835 
836 	ret = test_accept_cancel(10000, 4, false);
837 	if (ret == T_EXIT_FAIL) {
838 		fprintf(stderr, "test_accept_cancel delay failed\n");
839 		return ret;
840 	}
841 
842 	ret = test_accept_cancel(0, 1, true);
843 	if (ret == T_EXIT_FAIL) {
844 		fprintf(stderr, "test_accept_cancel multishot nodelay failed\n");
845 		return ret;
846 	}
847 
848 	ret = test_accept_cancel(10000, 1, true);
849 	if (ret == T_EXIT_FAIL) {
850 		fprintf(stderr, "test_accept_cancel multishot delay failed\n");
851 		return ret;
852 	}
853 
854 	ret = test_accept_cancel(0, 4, true);
855 	if (ret == T_EXIT_FAIL) {
856 		fprintf(stderr, "test_accept_cancel multishot nodelay failed\n");
857 		return ret;
858 	}
859 
860 	ret = test_accept_cancel(10000, 4, true);
861 	if (ret == T_EXIT_FAIL) {
862 		fprintf(stderr, "test_accept_cancel multishot delay failed\n");
863 		return ret;
864 	}
865 
866 	ret = test_multishot_accept(1, true, true);
867 	if (ret == T_EXIT_FAIL) {
868 		fprintf(stderr, "test_multishot_accept(1, false, true) failed\n");
869 		return ret;
870 	}
871 
872 	ret = test_multishot_accept(1, false, false);
873 	if (ret == T_EXIT_FAIL) {
874 		fprintf(stderr, "test_multishot_accept(1, false, false) failed\n");
875 		return ret;
876 	}
877 
878 	ret = test_multishot_accept(1, true, false);
879 	if (ret == T_EXIT_FAIL) {
880 		fprintf(stderr, "test_multishot_accept(1, true, false) failed\n");
881 		return ret;
882 	}
883 
884 	ret = test_accept_many((struct test_accept_many_args) {});
885 	if (ret == T_EXIT_FAIL) {
886 		fprintf(stderr, "test_accept_many failed\n");
887 		return ret;
888 	}
889 
890 	ret = test_accept_many((struct test_accept_many_args) {
891 				.usecs = 100000 });
892 	if (ret == T_EXIT_FAIL) {
893 		fprintf(stderr, "test_accept_many(sleep) failed\n");
894 		return ret;
895 	}
896 
897 	ret = test_accept_many((struct test_accept_many_args) {
898 				.nonblock = true });
899 	if (ret == T_EXIT_FAIL) {
900 		fprintf(stderr, "test_accept_many(nonblock) failed\n");
901 		return ret;
902 	}
903 
904 	ret = test_accept_many((struct test_accept_many_args) {
905 				.nonblock = true,
906 				.single_sock = true,
907 				.close_fds = true });
908 	if (ret == T_EXIT_FAIL) {
909 		fprintf(stderr, "test_accept_many(nonblock,close) failed\n");
910 		return ret;
911 	}
912 
913 	ret = test_accept_pending_on_exit();
914 	if (ret == T_EXIT_FAIL) {
915 		fprintf(stderr, "test_accept_pending_on_exit failed\n");
916 		return ret;
917 	}
918 	return T_EXIT_PASS;
919 }
920