/* SPDX-License-Identifier: MIT */
/*
 * Description: basic read/write tests for io_uring passthrough commands
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>

#include "helpers.h"
#include "liburing.h"
#include "../src/syscall.h"
#include "nvme.h"

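/* 256 KiB of I/O per pass, issued as 32 buffers of 8 KiB each */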
#define FILE_SIZE	(256 * 1024)
#define BS		8192
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;
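/* set once passthrough looks unsupported; remaining tests are then skipped */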
static int no_pt;

/*
 * Each sizeof(unsigned int) chunk of the file stores the value
 * ((test_case / 2) * FILE_SIZE) + (offset / sizeof(unsigned int)),
 * where offset is the chunk's byte offset within the file.
 */
static int verify_buf(int tc, void *buf, off_t off)
{
	int i, u_in_buf = BS / sizeof(unsigned int);
	unsigned int *ptr;

	off /= sizeof(unsigned int);
	off += (tc / 2) * FILE_SIZE;
	ptr = buf;
	for (i = 0; i < u_in_buf; i++) {
		if (off != *ptr) {
			fprintf(stderr, "Found %u, wanted %llu\n", *ptr,
					(unsigned long long) off);
			return 1;
		}
		ptr++;
		off++;
	}

	return 0;
}

static int fill_pattern(int tc)
{
	unsigned int val, *ptr;
	int i, j;
	int u_in_buf = BS / sizeof(val);

	val = (tc / 2) * FILE_SIZE;
	for (i = 0; i < BUFFERS; i++) {
		ptr = vecs[i].iov_base;
		for (j = 0; j < u_in_buf; j++) {
			*ptr = val;
			val++;
			ptr++;
		}
	}

	return 0;
}

static int __test_io(const char *file, struct io_uring *ring, int tc, int read,
		     int sqthread, int fixed, int nonvec)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int open_flags;
	int do_fixed;
	int i, ret, fd = -1;
	off_t offset;
	__u64 slba;
	__u32 nlb;

	if (read)
		open_flags = O_RDONLY;
	else
		open_flags = O_WRONLY;

	if (fixed) {
		ret = t_register_buffers(ring, vecs, BUFFERS);
		if (ret == T_SETUP_SKIP)
			return 0;
		if (ret != T_SETUP_OK) {
			fprintf(stderr, "buffer reg failed: %d\n", ret);
			goto err;
		}
	}

	fd = open(file, open_flags);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	if (sqthread) {
		ret = io_uring_register_files(ring, &fd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed: %d\n", ret);
			goto err;
		}
	}

	if (!read)
		fill_pattern(tc);

	offset = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		if (read) {
			int use_fd = fd;

			do_fixed = fixed;

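			/*
			 * In SQPOLL mode, use the file registered at index 0
			 * (IOSQE_FIXED_FILE is set below). Odd-numbered
			 * requests drop back to non-fixed buffers so both
			 * paths get exercised.
			 */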
			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else if (nonvec) {
				io_uring_prep_read(sqe, use_fd, vecs[i].iov_base,
							vecs[i].iov_len, offset);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else {
				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
								offset);
				sqe->cmd_op = NVME_URING_CMD_IO_VEC;
			}
		} else {
			int use_fd = fd;

			do_fixed = fixed;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else if (nonvec) {
				io_uring_prep_write(sqe, use_fd, vecs[i].iov_base,
							vecs[i].iov_len, offset);
				sqe->cmd_op = NVME_URING_CMD_IO;
			} else {
				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
								offset);
				sqe->cmd_op = NVME_URING_CMD_IO_VEC;
			}
		}
		sqe->opcode = IORING_OP_URING_CMD;
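		/* pack the file offset in the upper 32 bits, buffer index in the lower */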
		sqe->user_data = ((uint64_t)offset << 32) | i;
		if (sqthread)
			sqe->flags |= IOSQE_FIXED_FILE;

		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		memset(cmd, 0, sizeof(struct nvme_uring_cmd));

		cmd->opcode = read ? nvme_cmd_read : nvme_cmd_write;

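		/* nlb is zero-based: a value of N means N + 1 logical blocks */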
		slba = offset >> lba_shift;
		nlb = (BS >> lba_shift) - 1;

		/* cdw10 and cdw11 hold the lower and upper 32 bits of the starting LBA */
		cmd->cdw10 = slba & 0xffffffff;
		cmd->cdw11 = slba >> 32;
		/* cdw12 holds the number of LBAs for the read/write */
		cmd->cdw12 = nlb;
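		/*
		 * Non-vectored commands pass the data buffer and its byte
		 * length via addr/data_len; vectored commands pass the iovec
		 * array and the iovec count instead.
		 */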
		if (do_fixed || nonvec) {
			cmd->addr = (__u64)(uintptr_t)vecs[i].iov_base;
			cmd->data_len = vecs[i].iov_len;
		} else {
			cmd->addr = (__u64)(uintptr_t)&vecs[i];
			cmd->data_len = 1;
		}
		cmd->nsid = nsid;

		offset += BS;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
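		/* treat the first failed passthrough CQE as "unsupported" and skip */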
		if (cqe->res != 0) {
			if (!no_pt) {
				no_pt = 1;
				goto skip;
			}
			fprintf(stderr, "cqe res %d, wanted 0\n", cqe->res);
			goto err;
		}
		io_uring_cqe_seen(ring, cqe);
		if (read) {
			int index = cqe->user_data & 0xffffffff;
			void *buf = vecs[index].iov_base;
			off_t voff = cqe->user_data >> 32;

			if (verify_buf(tc, buf, voff))
				goto err;
		}
	}

	if (fixed) {
		ret = io_uring_unregister_buffers(ring);
		if (ret) {
			fprintf(stderr, "buffer unreg failed: %d\n", ret);
			goto err;
		}
	}
	if (sqthread) {
		ret = io_uring_unregister_files(ring);
		if (ret) {
			fprintf(stderr, "file unreg failed: %d\n", ret);
			goto err;
		}
	}

skip:
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}

static int test_io(const char *file, int tc, int read, int sqthread,
		   int fixed, int nonvec)
{
	struct io_uring ring;
	int ret, ring_flags = 0;

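	/* passthrough commands need the 128-byte SQE and 32-byte CQE ring layouts */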
	ring_flags |= IORING_SETUP_SQE128;
	ring_flags |= IORING_SETUP_CQE32;

	if (sqthread)
		ring_flags |= IORING_SETUP_SQPOLL;

	ret = t_create_ring(64, &ring, ring_flags);
	if (ret == T_SETUP_SKIP)
		return 0;
	if (ret != T_SETUP_OK) {
		if (ret == -EINVAL) {
			no_pt = 1;
			return T_SETUP_SKIP;
		}
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = __test_io(file, &ring, tc, read, sqthread, fixed, nonvec);
	io_uring_queue_exit(&ring);

	return ret;
}

/*
 * Send a passthrough command that nvme will fail during submission.
 * This comes in handy for testing error handling.
 */
static int test_invalid_passthru_submit(const char *file)
{
	struct io_uring ring;
	int fd, ret, ring_flags, open_flags;
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct nvme_uring_cmd *cmd;

	ring_flags = IORING_SETUP_CQE32 | IORING_SETUP_SQE128;

	ret = t_create_ring(1, &ring, ring_flags);
	if (ret != T_SETUP_OK) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	open_flags = O_RDONLY;
	fd = open(file, open_flags);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, vecs[0].iov_base, vecs[0].iov_len, 0);
	sqe->cmd_op = NVME_URING_CMD_IO;
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = 1;
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	memset(cmd, 0, sizeof(struct nvme_uring_cmd));
	cmd->opcode = nvme_cmd_read;
	/* non-vectored command, so addr is the data buffer itself */
	cmd->addr = (__u64)(uintptr_t)vecs[0].iov_base;
	cmd->data_len = vecs[0].iov_len;
	/* populate wrong nsid to force failure */
	cmd->nsid = nsid + 1;

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, 1);
		goto err;
	}
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		goto err;
	}
	if (cqe->res == 0) {
		fprintf(stderr, "cqe res %d, wanted failure\n", cqe->res);
		goto err;
	}
	io_uring_cqe_seen(&ring, cqe);
	close(fd);
	io_uring_queue_exit(&ring);
	return 0;
err:
	if (fd != -1)
		close(fd);
	io_uring_queue_exit(&ring);
	return 1;
}

/*
 * If we are polling, io_uring_submit() needs to always enter the
 * kernel to fetch events.
 */
static int test_io_uring_submit_enters(const char *file)
{
	struct io_uring ring;
	int fd, i, ret, ring_flags, open_flags;
	unsigned head;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	ring_flags = IORING_SETUP_IOPOLL;
	ring_flags |= IORING_SETUP_SQE128;
	ring_flags |= IORING_SETUP_CQE32;

	ret = io_uring_queue_init(64, &ring, ring_flags);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	open_flags = O_WRONLY;
	fd = open(file, open_flags);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		off_t offset = BS * (rand() % BUFFERS);
		__u64 slba;
		__u32 nlb;

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
		sqe->user_data = i;
		sqe->opcode = IORING_OP_URING_CMD;
		/* addr/data_len below carry an iovec array, so use the vectored opcode */
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		memset(cmd, 0, sizeof(struct nvme_uring_cmd));

		slba = offset >> lba_shift;
		nlb = (BS >> lba_shift) - 1;

		cmd->opcode = nvme_cmd_read;
		cmd->cdw10 = slba & 0xffffffff;
		cmd->cdw11 = slba >> 32;
		cmd->cdw12 = nlb;
		cmd->addr = (__u64)(uintptr_t)&vecs[i];
		cmd->data_len = 1;
		cmd->nsid = nsid;
	}

	/* submit manually to avoid adding IORING_ENTER_GETEVENTS */
	ret = __sys_io_uring_enter(ring.ring_fd, __io_uring_flush_sq(&ring), 0,
						0, NULL);
	if (ret < 0)
		goto err;

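	/* poll for up to ~5s (500 * 10ms) for a completion to surface */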
	for (i = 0; i < 500; i++) {
		ret = io_uring_submit(&ring);
		if (ret != 0) {
			fprintf(stderr, "still had %d sqes to submit\n", ret);
			goto err;
		}

		io_uring_for_each_cqe(&ring, head, cqe) {
			if (cqe->res == -EOPNOTSUPP)
				fprintf(stdout, "Device doesn't support polled IO\n");
			goto ok;
		}
		usleep(10000);
	}
err:
	ret = 1;
	if (fd != -1)
		close(fd);

ok:
	io_uring_queue_exit(&ring);
	return ret;
}

int main(int argc, char *argv[])
{
	int i, ret;
	char *fname;

	if (argc < 2)
		return T_EXIT_SKIP;

	fname = argv[1];
	ret = nvme_get_info(fname);

	if (ret)
		return T_EXIT_SKIP;

	vecs = t_create_buffers(BUFFERS, BS);

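	/* run every combination of read/sqthread/fixed/nonvec */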
	for (i = 0; i < 16; i++) {
		int read = (i & 1) != 0;
		int sqthread = (i & 2) != 0;
		int fixed = (i & 4) != 0;
		int nonvec = (i & 8) != 0;

		ret = test_io(fname, i, read, sqthread, fixed, nonvec);
		if (no_pt)
			break;
		if (ret) {
			fprintf(stderr, "test_io failed %d/%d/%d/%d\n",
				read, sqthread, fixed, nonvec);
			goto err;
		}
	}

	if (no_pt)
		return T_EXIT_SKIP;

	ret = test_io_uring_submit_enters(fname);
	if (ret) {
		fprintf(stderr, "test_io_uring_submit_enters failed\n");
		goto err;
	}

	ret = test_invalid_passthru_submit(fname);
	if (ret) {
		fprintf(stderr, "test_invalid_passthru_submit failed\n");
		goto err;
	}

	return T_EXIT_PASS;
err:
	return T_EXIT_FAIL;
}