#ifndef LIB_URING_H
#define LIB_URING_H

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/uio.h>
#include <signal.h>
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
#include <linux/swab.h>
#include "barrier.h"

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};
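
/*
 * A hedged illustration, not part of the API proper: the k-prefixed
 * fields point into the kernel-mapped rings, so a consumer indexes the
 * CQ ring with the mask the kernel exports:
 *
 *	unsigned head = *cq->khead;
 *	struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *
 * sqe_head and sqe_tail track SQEs handed out by io_uring_get_sqe()
 * but not yet submitted to the kernel.
 */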

/*
 * System calls
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);
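
/*
 * A minimal sketch of calling the raw setup syscall directly; the
 * library interface below wraps this, and error handling is elided:
 *
 *	struct io_uring_params p;
 *	int fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	fd = io_uring_setup(128, &p);
 *	if (fd < 0)
 *		return fd;
 */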

/*
 * Library interface
 */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
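
/*
 * Typical flow, sketched under the assumption that fd, buf, and len
 * are provided by the application (error handling elided):
 *
 *	struct io_uring ring;
 *	struct io_uring_sqe *sqe;
 *	struct io_uring_cqe *cqe;
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 *
 * cqe->res holds the byte count on success, or a negative error.
 */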

/*
 * Must be called once the cqe returned by io_uring_{peek,wait}_cqe()
 * has been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head; the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}
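
/*
 * Sketch of tagging a request and recovering the tag at completion;
 * struct req here is a hypothetical application type:
 *
 *	struct req *r = calloc(1, sizeof(*r));
 *
 *	io_uring_sqe_set_data(sqe, r);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);
 *	r = io_uring_cqe_get_data(cqe);
 */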

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    off_t offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = op;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}
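
/*
 * The *_fixed variants operate on buffers previously registered via
 * io_uring_register() with IORING_REGISTER_BUFFERS; sqe->buf_index
 * then selects the registered buffer. A hedged sketch (buf and len
 * are assumptions):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	io_uring_register(ring.ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, 0);
 *	sqe->buf_index = 0;
 */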

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	sqe->poll_events = poll_mask;
}
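
/*
 * Sketch of a one-shot poll for readability on a file descriptor;
 * POLLIN comes from <poll.h>, and sockfd is an assumption:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, sockfd, POLLIN);
 *	io_uring_submit(&ring);
 *
 * The completion's cqe->res carries the signaled event mask.
 */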

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_REMOVE;
	sqe->addr = (unsigned long) user_data;
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FSYNC;
	sqe->fd = fd;
	sqe->fsync_flags = fsync_flags;
}
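
/*
 * Sketch of queueing an fsync; IORING_FSYNC_DATASYNC from the uapi
 * header requests fdatasync-like behavior:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */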

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}

#ifdef __cplusplus
}
#endif

#endif