• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Description: basic read/write tests for io_uring passthrough commands
4  */
5 #include <errno.h>
6 #include <stdio.h>
7 #include <unistd.h>
8 #include <stdlib.h>
9 #include <string.h>
10 
11 #include "helpers.h"
12 #include "liburing.h"
13 #include "../src/syscall.h"
14 #include "nvme.h"
15 
/* Total bytes of test data per pass */
#define FILE_SIZE	(256 * 1024)
/* Bytes issued per passthrough command */
#define BS		8192
/* Number of iovec buffers covering FILE_SIZE */
#define BUFFERS		(FILE_SIZE / BS)

/* BUFFERS buffers of BS bytes each, allocated once in main() */
static struct iovec *vecs;
/* Set on first failed completion; remaining tests are then skipped */
static int no_pt;
22 
23 /*
24  * Each offset in the file has the ((test_case / 2) * FILE_SIZE)
25  * + (offset / sizeof(int)) stored for every
26  * sizeof(int) address.
27  */
verify_buf(int tc,void * buf,off_t off)28 static int verify_buf(int tc, void *buf, off_t off)
29 {
30 	int i, u_in_buf = BS / sizeof(unsigned int);
31 	unsigned int *ptr;
32 
33 	off /= sizeof(unsigned int);
34 	off += (tc / 2) * FILE_SIZE;
35 	ptr = buf;
36 	for (i = 0; i < u_in_buf; i++) {
37 		if (off != *ptr) {
38 			fprintf(stderr, "Found %u, wanted %llu\n", *ptr,
39 					(unsigned long long) off);
40 			return 1;
41 		}
42 		ptr++;
43 		off++;
44 	}
45 
46 	return 0;
47 }
48 
fill_pattern(int tc)49 static int fill_pattern(int tc)
50 {
51 	unsigned int val, *ptr;
52 	int i, j;
53 	int u_in_buf = BS / sizeof(val);
54 
55 	val = (tc / 2) * FILE_SIZE;
56 	for (i = 0; i < BUFFERS; i++) {
57 		ptr = vecs[i].iov_base;
58 		for (j = 0; j < u_in_buf; j++) {
59 			*ptr = val;
60 			val++;
61 			ptr++;
62 		}
63 	}
64 
65 	return 0;
66 }
67 
/*
 * Core pass: queue BUFFERS NVMe passthrough read or write commands on
 * 'ring' against 'file' and reap the completions.  Writes first lay down
 * a tc-derived pattern; reads verify each completed buffer against it.
 * sqthread selects registered files (SQPOLL rings), fixed selects
 * registered buffers, nonvec selects the non-vectored command variants.
 * Returns 0 on success or skip, 1 on error.
 */
static int __test_io(const char *file, struct io_uring *ring, int tc, int read,
		     int sqthread, int fixed, int nonvec)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int open_flags;
	int do_fixed;
	int i, ret, fd = -1;
	off_t offset;
	__u64 slba;
	__u32 nlb;

	if (read)
		open_flags = O_RDONLY;
	else
		open_flags = O_WRONLY;

	if (fixed) {
		ret = t_register_buffers(ring, vecs, BUFFERS);
		if (ret == T_SETUP_SKIP)
			return 0;
		if (ret != T_SETUP_OK) {
			fprintf(stderr, "buffer reg failed: %d\n", ret);
			goto err;
		}
	}

	fd = open(file, open_flags);
	if (fd < 0) {
		/* no permission on the device is a skip, not a failure */
		if (errno == EACCES || errno == EPERM)
			return T_EXIT_SKIP;
		perror("file open");
		goto err;
	}

	if (sqthread) {
		ret = io_uring_register_files(ring, &fd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed: %d\n", ret);
			goto err;
		}
	}

	if (!read)
		fill_pattern(tc);

	offset = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		if (read) {
			int use_fd = fd;

			do_fixed = fixed;

			/* with SQPOLL, use registered file slot 0 */
			if (sqthread)
				use_fd = 0;
			/* alternate fixed/non-fixed buffers every other sqe */
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else if (nonvec) {
				io_uring_prep_read(sqe, use_fd, vecs[i].iov_base,
							vecs[i].iov_len, offset);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else {
				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
								offset);
				sqe->cmd_op = NVME_URING_CMD_IO_VEC;
			}
		} else {
			/* write side mirrors the read side above */
			int use_fd = fd;

			do_fixed = fixed;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else if (nonvec) {
				io_uring_prep_write(sqe, use_fd, vecs[i].iov_base,
							vecs[i].iov_len, offset);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else {
				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
								offset);
				sqe->cmd_op = NVME_URING_CMD_IO_VEC;
			}
		}
		/*
		 * The prep helpers above only fill in buffer details; the
		 * actual operation submitted is a passthrough command.
		 */
		sqe->opcode = IORING_OP_URING_CMD;
		if (do_fixed)
			sqe->uring_cmd_flags |= IORING_URING_CMD_FIXED;
		/* stash offset and buffer index for completion-side checks */
		sqe->user_data = ((uint64_t)offset << 32) | i;
		if (sqthread)
			sqe->flags |= IOSQE_FIXED_FILE;

		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		memset(cmd, 0, sizeof(struct nvme_uring_cmd));

		cmd->opcode = read ? nvme_cmd_read : nvme_cmd_write;

		slba = offset >> lba_shift;
		/* nlb is a zero-based LBA count */
		nlb = (BS >> lba_shift) - 1;

		/* cdw10 and cdw11 represent starting lba */
		cmd->cdw10 = slba & 0xffffffff;
		cmd->cdw11 = slba >> 32;
		/* cdw12 represent number of lba's for read/write */
		cmd->cdw12 = nlb;
		if (do_fixed || nonvec) {
			/* non-vectored: addr/data_len describe the raw buffer */
			cmd->addr = (__u64)(uintptr_t)vecs[i].iov_base;
			cmd->data_len = vecs[i].iov_len;
		} else {
			/* vectored: addr is the iovec array, data_len its count */
			cmd->addr = (__u64)(uintptr_t)&vecs[i];
			cmd->data_len = 1;
		}
		cmd->nsid = nsid;

		offset += BS;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->res != 0) {
			/*
			 * First failed completion is taken to mean the device
			 * does not support passthrough: flag it and skip.
			 * Any later failure is a real error.
			 */
			if (!no_pt) {
				no_pt = 1;
				goto skip;
			}
			fprintf(stderr, "cqe res %d, wanted 0\n", cqe->res);
			goto err;
		}
		io_uring_cqe_seen(ring, cqe);
		if (read) {
			/* recover index/offset packed into user_data above */
			int index = cqe->user_data & 0xffffffff;
			void *buf = vecs[index].iov_base;
			off_t voff = cqe->user_data >> 32;

			if (verify_buf(tc, buf, voff))
				goto err;
		}
	}

	if (fixed) {
		ret = io_uring_unregister_buffers(ring);
		if (ret) {
			fprintf(stderr, "buffer unreg failed: %d\n", ret);
			goto err;
		}
	}
	if (sqthread) {
		ret = io_uring_unregister_files(ring);
		if (ret) {
			fprintf(stderr, "file unreg failed: %d\n", ret);
			goto err;
		}
	}

skip:
	/*
	 * NOTE(review): the skip path leaves buffers/files registered; the
	 * caller tears down the whole ring, so this looks intentional.
	 */
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
255 
test_io(const char * file,int tc,int read,int sqthread,int fixed,int nonvec)256 static int test_io(const char *file, int tc, int read, int sqthread,
257 		   int fixed, int nonvec)
258 {
259 	struct io_uring ring;
260 	int ret, ring_flags = 0;
261 
262 	ring_flags |= IORING_SETUP_SQE128;
263 	ring_flags |= IORING_SETUP_CQE32;
264 
265 	if (sqthread)
266 		ring_flags |= IORING_SETUP_SQPOLL;
267 
268 	ret = t_create_ring(64, &ring, ring_flags);
269 	if (ret == T_SETUP_SKIP)
270 		return 0;
271 	if (ret != T_SETUP_OK) {
272 		if (ret == -EINVAL) {
273 			no_pt = 1;
274 			return T_SETUP_SKIP;
275 		}
276 		fprintf(stderr, "ring create failed: %d\n", ret);
277 		return 1;
278 	}
279 
280 	ret = __test_io(file, &ring, tc, read, sqthread, fixed, nonvec);
281 	io_uring_queue_exit(&ring);
282 
283 	return ret;
284 }
285 
286 /*
287  * Send a passthrough command that nvme will fail during submission.
288  * This comes handy for testing error handling.
289  */
test_invalid_passthru_submit(const char * file)290 static int test_invalid_passthru_submit(const char *file)
291 {
292 	struct io_uring ring;
293 	int fd, ret, ring_flags, open_flags;
294 	struct io_uring_cqe *cqe;
295 	struct io_uring_sqe *sqe;
296 	struct nvme_uring_cmd *cmd;
297 
298 	ring_flags = IORING_SETUP_CQE32 | IORING_SETUP_SQE128;
299 
300 	ret = t_create_ring(1, &ring, ring_flags);
301 	if (ret != T_SETUP_OK) {
302 		fprintf(stderr, "ring create failed: %d\n", ret);
303 		return 1;
304 	}
305 
306 	open_flags = O_RDONLY;
307 	fd = open(file, open_flags);
308 	if (fd < 0) {
309 		perror("file open");
310 		goto err;
311 	}
312 
313 	sqe = io_uring_get_sqe(&ring);
314 	io_uring_prep_read(sqe, fd, vecs[0].iov_base, vecs[0].iov_len, 0);
315 	sqe->cmd_op = NVME_URING_CMD_IO;
316 	sqe->opcode = IORING_OP_URING_CMD;
317 	sqe->user_data = 1;
318 	cmd = (struct nvme_uring_cmd *)sqe->cmd;
319 	memset(cmd, 0, sizeof(struct nvme_uring_cmd));
320 	cmd->opcode = nvme_cmd_read;
321 	cmd->addr = (__u64)(uintptr_t)&vecs[0].iov_base;
322 	cmd->data_len = vecs[0].iov_len;
323 	/* populate wrong nsid to force failure */
324 	cmd->nsid = nsid + 1;
325 
326 	ret = io_uring_submit(&ring);
327 	if (ret != 1) {
328 		fprintf(stderr, "submit got %d, wanted %d\n", ret, 1);
329 		goto err;
330 	}
331 	ret = io_uring_wait_cqe(&ring, &cqe);
332 	if (ret) {
333 		fprintf(stderr, "wait_cqe=%d\n", ret);
334 		goto err;
335 	}
336 	if (cqe->res == 0) {
337 		fprintf(stderr, "cqe res %d, wanted failure\n", cqe->res);
338 		goto err;
339 	}
340 	io_uring_cqe_seen(&ring, cqe);
341 	close(fd);
342 	io_uring_queue_exit(&ring);
343 	return 0;
344 err:
345 	if (fd != -1)
346 		close(fd);
347 	io_uring_queue_exit(&ring);
348 	return 1;
349 }
350 
351 /*
352  * if we are polling io_uring_submit needs to always enter the
353  * kernel to fetch events
354  */
test_io_uring_submit_enters(const char * file)355 static int test_io_uring_submit_enters(const char *file)
356 {
357 	struct io_uring ring;
358 	int fd, i, ret, ring_flags, open_flags;
359 	unsigned head;
360 	struct io_uring_cqe *cqe;
361 	struct nvme_uring_cmd *cmd;
362 	struct io_uring_sqe *sqe;
363 
364 	ring_flags = IORING_SETUP_IOPOLL;
365 	ring_flags |= IORING_SETUP_SQE128;
366 	ring_flags |= IORING_SETUP_CQE32;
367 
368 	ret = io_uring_queue_init(64, &ring, ring_flags);
369 	if (ret) {
370 		fprintf(stderr, "ring create failed: %d\n", ret);
371 		return 1;
372 	}
373 
374 	open_flags = O_WRONLY;
375 	fd = open(file, open_flags);
376 	if (fd < 0) {
377 		perror("file open");
378 		goto err;
379 	}
380 
381 	for (i = 0; i < BUFFERS; i++) {
382 		off_t offset = BS * (rand() % BUFFERS);
383 		__u64 slba;
384 		__u32 nlb;
385 
386 		sqe = io_uring_get_sqe(&ring);
387 		io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
388 		sqe->user_data = i;
389 		sqe->opcode = IORING_OP_URING_CMD;
390 		sqe->cmd_op = NVME_URING_CMD_IO;
391 		cmd = (struct nvme_uring_cmd *)sqe->cmd;
392 		memset(cmd, 0, sizeof(struct nvme_uring_cmd));
393 
394 		slba = offset >> lba_shift;
395 		nlb = (BS >> lba_shift) - 1;
396 
397 		cmd->opcode = nvme_cmd_read;
398 		cmd->cdw10 = slba & 0xffffffff;
399 		cmd->cdw11 = slba >> 32;
400 		cmd->cdw12 = nlb;
401 		cmd->addr = (__u64)(uintptr_t)&vecs[i];
402 		cmd->data_len = 1;
403 		cmd->nsid = nsid;
404 	}
405 
406 	/* submit manually to avoid adding IORING_ENTER_GETEVENTS */
407 	ret = __sys_io_uring_enter(ring.ring_fd, __io_uring_flush_sq(&ring), 0,
408 						0, NULL);
409 	if (ret < 0)
410 		goto err;
411 
412 	for (i = 0; i < 500; i++) {
413 		ret = io_uring_submit(&ring);
414 		if (ret != 0) {
415 			fprintf(stderr, "still had %d sqes to submit\n", ret);
416 			goto err;
417 		}
418 
419 		io_uring_for_each_cqe(&ring, head, cqe) {
420 			if (cqe->res == -EOPNOTSUPP)
421 				fprintf(stdout, "Device doesn't support polled IO\n");
422 			goto ok;
423 		}
424 		usleep(10000);
425 	}
426 err:
427 	ret = 1;
428 	if (fd != -1)
429 		close(fd);
430 
431 ok:
432 	io_uring_queue_exit(&ring);
433 	return ret;
434 }
435 
main(int argc,char * argv[])436 int main(int argc, char *argv[])
437 {
438 	int i, ret;
439 	char *fname;
440 
441 	if (argc < 2)
442 		return T_EXIT_SKIP;
443 
444 	fname = argv[1];
445 	ret = nvme_get_info(fname);
446 
447 	if (ret)
448 		return T_EXIT_SKIP;
449 
450 	vecs = t_create_buffers(BUFFERS, BS);
451 
452 	for (i = 0; i < 16; i++) {
453 		int read = (i & 1) != 0;
454 		int sqthread = (i & 2) != 0;
455 		int fixed = (i & 4) != 0;
456 		int nonvec = (i & 8) != 0;
457 
458 		ret = test_io(fname, i, read, sqthread, fixed, nonvec);
459 		if (no_pt)
460 			break;
461 		if (ret) {
462 			fprintf(stderr, "test_io failed %d/%d/%d/%d\n",
463 				read, sqthread, fixed, nonvec);
464 			goto err;
465 		}
466 	}
467 
468 	if (no_pt)
469 		return T_EXIT_SKIP;
470 
471 	ret = test_io_uring_submit_enters(fname);
472 	if (ret) {
473 		fprintf(stderr, "test_io_uring_submit_enters failed\n");
474 		goto err;
475 	}
476 
477 	ret = test_invalid_passthru_submit(fname);
478 	if (ret) {
479 		fprintf(stderr, "test_invalid_passthru_submit failed\n");
480 		goto err;
481 	}
482 
483 	return T_EXIT_PASS;
484 err:
485 	return T_EXIT_FAIL;
486 }
487