// SPDX-License-Identifier: MIT
/*
 * Description: test IORING_SETUP_DEFER_TASKRUN, which defers running
 * completion task work until the submitting task enters the kernel.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* for O_DIRECT */
#endif
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <signal.h>
#include <poll.h>
#include <assert.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>

#include "liburing.h"
#include "test.h"
#include "helpers.h"

#define EXEC_FILENAME ".defer-taskrun"
#define EXEC_FILESIZE (1U<<20)

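/*
 * Return true if @fd becomes readable within @time milliseconds; can_read()
 * checks without blocking. Used to observe whether the ring's registered
 * eventfd has been signalled without entering the kernel ourselves.
 */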
static bool can_read_t(int fd, int time)
{
	int ret;
	struct pollfd p = {
		.fd = fd,
		.events = POLLIN,
	};

	ret = poll(&p, 1, time);

	return ret == 1;
}

static bool can_read(int fd)
{
	return can_read_t(fd, 0);
}

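/* Drain the 8-byte counter so the eventfd can be polled again. */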
static void eventfd_clear(int fd)
{
	uint64_t val;
	int ret;

	assert(can_read(fd));
	ret = read(fd, &val, 8);
	assert(ret == 8);
}

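/* Signal the eventfd by adding 1 to its counter. */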
static void eventfd_trigger(int fd)
{
	uint64_t val = 1;
	int ret;

	ret = write(fd, &val, sizeof(val));
	assert(ret == sizeof(val));
}

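/*
 * Fail the enclosing test function with a file:line diagnostic when the
 * condition does not hold.
 */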
#define CHECK(x)								\
do {										\
	if (!(x)) {								\
		fprintf(stderr, "%s:%d %s failed\n", __FILE__, __LINE__, #x);	\
		return -1;							\
	}									\
} while (0)

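/*
 * With IORING_SETUP_DEFER_TASKRUN, completion task work runs only when the
 * submitting task enters the kernel. Verify that the registered eventfd is
 * signalled for completions, but that the CQE of a deferred completion
 * only becomes visible after io_uring_get_events().
 */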
static int test_eventfd(void)
{
	struct io_uring ring;
	int ret;
	int fda, fdb;
	struct io_uring_cqe *cqe;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN);
	if (ret)
		return ret;

	fda = eventfd(0, EFD_NONBLOCK);
	fdb = eventfd(0, EFD_NONBLOCK);

	CHECK(fda >= 0 && fdb >= 0);

	ret = io_uring_register_eventfd(&ring, fda);
	if (ret)
		return ret;

	CHECK(!can_read(fda));
	CHECK(!can_read(fdb));

	io_uring_prep_poll_add(io_uring_get_sqe(&ring), fdb, POLLIN);
	io_uring_submit(&ring);
	CHECK(!can_read(fda)); /* poll should not have completed */

	io_uring_prep_nop(io_uring_get_sqe(&ring));
	io_uring_submit(&ring);
	CHECK(can_read(fda)); /* nop should have completed */

	CHECK(io_uring_peek_cqe(&ring, &cqe) == 0);
	CHECK(cqe->res == 0);
	io_uring_cqe_seen(&ring, cqe);
	eventfd_clear(fda);

	eventfd_trigger(fdb);
	/* can take time due to call_rcu */
	CHECK(can_read_t(fda, 1000));

	/* should not have processed the cqe yet */
	CHECK(io_uring_cq_ready(&ring) == 0);

	io_uring_get_events(&ring);
	CHECK(io_uring_cq_ready(&ring) == 1);

	io_uring_queue_exit(&ring);
	return 0;
}

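/*
 * Worker thread: enabling an R_DISABLED, SINGLE_ISSUER ring binds this
 * thread as the ring's only legal issuer; the read it submits is still in
 * flight when the thread exits.
 */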
struct thread_data {
	struct io_uring ring;
	int efd;
	char buff[8];
};

static void *thread(void *t)
{
	struct thread_data *td = t;

	io_uring_enable_rings(&td->ring);
	io_uring_prep_read(io_uring_get_sqe(&td->ring), td->efd, td->buff,
			   sizeof(td->buff), 0);
	io_uring_submit(&td->ring);

	return NULL;
}

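/*
 * A disabled ring must return -EBADFD, and once the issuer thread has
 * exited, any other task touching the ring must get -EEXIST.
 */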
static int test_thread_shutdown(void)
{
	pthread_t t1;
	int ret;
	struct thread_data td;
	struct io_uring_cqe *cqe;
	uint64_t val = 1;

	ret = io_uring_queue_init(8, &td.ring, IORING_SETUP_SINGLE_ISSUER |
					       IORING_SETUP_DEFER_TASKRUN |
					       IORING_SETUP_R_DISABLED);
	if (ret)
		return ret;

	CHECK(io_uring_get_events(&td.ring) == -EBADFD);

	td.efd = eventfd(0, 0);
	CHECK(td.efd >= 0);

	CHECK(pthread_create(&t1, NULL, thread, &td) == 0);
	CHECK(pthread_join(t1, NULL) == 0);

	CHECK(io_uring_get_events(&td.ring) == -EEXIST);

	CHECK(write(td.efd, &val, sizeof(val)) == sizeof(val));
	CHECK(io_uring_wait_cqe(&td.ring, &cqe) == -EEXIST);

	close(td.efd);
	io_uring_queue_exit(&td.ring);
	return 0;
}

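/*
 * Fork a child that leaves an O_DIRECT read in flight across execve().
 * The exec must not hang on, or be corrupted by, the pending deferred
 * completion. The re-exec'd image exits straight away via the argc > 2
 * check in main().
 */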
static int test_exec(const char *filename)
{
	int ret;
	int fd;
	struct io_uring ring;
	pid_t fork_pid;
	static char * const new_argv[] = {"1", "2", "3", NULL};
	static char * const new_env[] = {NULL};
	char *buff;

	fork_pid = fork();
	CHECK(fork_pid >= 0);
	if (fork_pid > 0) {
		int wstatus;

		CHECK(waitpid(fork_pid, &wstatus, 0) != (pid_t)-1);
		if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) == T_EXIT_FAIL) {
			fprintf(stderr, "child failed %i\n", WEXITSTATUS(wstatus));
			return -1;
		}
		return T_EXIT_PASS;
	}

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN);
	if (ret)
		return ret;

	if (filename) {
		fd = open(filename, O_RDONLY | O_DIRECT);
		if (fd < 0 && (errno == EINVAL || errno == EPERM || errno == EACCES))
			return T_EXIT_SKIP;
	} else {
		t_create_file(EXEC_FILENAME, EXEC_FILESIZE);
		fd = open(EXEC_FILENAME, O_RDONLY | O_DIRECT);
		if (fd < 0 && (errno == EINVAL || errno == EPERM || errno == EACCES)) {
			unlink(EXEC_FILENAME);
			return T_EXIT_SKIP;
		}
		unlink(EXEC_FILENAME);
	}

	/* O_DIRECT reads require a suitably aligned buffer */
	CHECK(posix_memalign((void **)&buff, 4096, EXEC_FILESIZE) == 0);
	CHECK(buff);

	CHECK(fd >= 0);
	io_uring_prep_read(io_uring_get_sqe(&ring), fd, buff, EXEC_FILESIZE, 0);
	io_uring_submit(&ring);
	ret = execve("/proc/self/exe", new_argv, new_env);
	/* if we get here, execve itself failed */
	fprintf(stderr, "execve failed %d\n", ret);
	return T_EXIT_FAIL;
}

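/*
 * With IORING_SETUP_TASKRUN_FLAG the kernel sets IORING_SQ_TASKRUN in the
 * SQ ring flags while completion task work is pending, and liburing uses
 * it to decide that a peek is worth entering the kernel for.
 */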
static int test_flag(void)
{
	struct io_uring ring;
	int ret;
	int fd;
	struct io_uring_cqe *cqe;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN |
					    IORING_SETUP_TASKRUN_FLAG);
	CHECK(!ret);

	fd = eventfd(0, EFD_NONBLOCK);
	CHECK(fd >= 0);

	io_uring_prep_poll_add(io_uring_get_sqe(&ring), fd, POLLIN);
	io_uring_submit(&ring);
	CHECK(!can_read(fd)); /* poll should not have completed */

	eventfd_trigger(fd);
	CHECK(can_read(fd));

	/* should not have processed the poll cqe yet */
	CHECK(io_uring_cq_ready(&ring) == 0);

	/* flag should be set */
	CHECK(IO_URING_READ_ONCE(*ring.sq.kflags) & IORING_SQ_TASKRUN);

	/*
	 * Specifically peek: no CQE has been posted yet, but because the
	 * flag is set, liburing should enter the kernel and get more.
	 */
	ret = io_uring_peek_cqe(&ring, &cqe);

	CHECK(ret == 0 && cqe);
	CHECK(!(IO_URING_READ_ONCE(*ring.sq.kflags) & IORING_SQ_TASKRUN));

	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
}

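/*
 * io_uring_queue_exit() must flush pending deferred task work: the recv
 * below has completed in the kernel by exit time, so tearing the ring
 * down has to deliver the received byte into buff.
 */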
static int test_ring_shutdown(void)
{
	struct io_uring ring;
	int ret;
	int fd[2];
	char buff = '\0';
	char send = 'X';

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN |
					    IORING_SETUP_TASKRUN_FLAG);
	CHECK(!ret);

	ret = t_create_socket_pair(fd, true);
	CHECK(!ret);

	io_uring_prep_recv(io_uring_get_sqe(&ring), fd[0], &buff, 1, 0);
	io_uring_submit(&ring);

	ret = write(fd[1], &send, 1);
	CHECK(ret == 1);

	/* should not have processed the recv cqe yet */
	CHECK(io_uring_cq_ready(&ring) == 0);
	io_uring_queue_exit(&ring);

	/* task work should have been processed by now */
	CHECK(buff == 'X');

	return 0;
}

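/*
 * A write flagged IOSQE_IO_DRAIN must still complete in full under
 * DEFER_TASKRUN; clobbering the iovec array after submit additionally
 * checks (assuming drained requests copy their iovecs at submission
 * time) that the kernel no longer reads the user copy.
 */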
static int test_drain(void)
{
	struct io_uring ring;
	int ret, i, fd[2];
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iovecs[128];
	char buff[ARRAY_SIZE(iovecs)];

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN |
					    IORING_SETUP_TASKRUN_FLAG);
	CHECK(!ret);

	for (i = 0; i < ARRAY_SIZE(iovecs); i++) {
		iovecs[i].iov_base = &buff[i];
		iovecs[i].iov_len = 1;
	}

	ret = t_create_socket_pair(fd, true);
	CHECK(!ret);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_writev(sqe, fd[1], &iovecs[0], ARRAY_SIZE(iovecs), 0);
	sqe->flags |= IOSQE_IO_DRAIN;
	io_uring_submit(&ring);

	/* clobber the iovecs; the write must no longer rely on them */
	for (i = 0; i < ARRAY_SIZE(iovecs); i++)
		iovecs[i].iov_base = NULL;

	CHECK(io_uring_wait_cqe(&ring, &cqe) == 0);
	CHECK(cqe->res == 128);

	close(fd[0]);
	close(fd[1]);
	io_uring_queue_exit(&ring);
	return 0;
}

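/*
 * Note: test_exec() re-executes this binary with three arguments, so the
 * argc > 2 path below makes the re-exec'd child exit immediately.
 */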
int main(int argc, char *argv[])
{
	int ret;
	const char *filename = NULL;

	if (argc > 2)
		return T_EXIT_SKIP;
	if (argc == 2) {
		/* This test exposes interesting behaviour with a null-blk
		 * device configured like:
		 * $ modprobe null-blk completion_nsec=100000000 irqmode=2
		 * and then run with $ defer-taskrun.t /dev/nullb0
		 */
		filename = argv[1];
	}

	if (!t_probe_defer_taskrun())
		return T_EXIT_SKIP;

	ret = test_thread_shutdown();
	if (ret) {
		fprintf(stderr, "test_thread_shutdown failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_exec(filename);
	if (ret == T_EXIT_FAIL) {
		fprintf(stderr, "test_exec failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_eventfd();
	if (ret) {
		fprintf(stderr, "eventfd failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_flag();
	if (ret) {
		fprintf(stderr, "flag failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_ring_shutdown();
	if (ret) {
		fprintf(stderr, "test_ring_shutdown failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_drain();
	if (ret) {
		fprintf(stderr, "test_drain failed\n");
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}