/* SPDX-License-Identifier: MIT */
/*
 * gcc -Wall -O2 -D_GNU_SOURCE -o io_uring-cp io_uring-cp.c -luring
 */
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include "liburing.h"

#define QD	64
#define BS	(32*1024)
20
static int infd, outfd;	/* source and destination fds, opened in main() */

/*
 * Per-request bookkeeping, carried through the ring as the SQE/CQE
 * user data. The I/O buffer is allocated immediately after this struct
 * in the same malloc() block (see queue_read(): iov_base = data + 1).
 */
struct io_data {
	int read;			/* 1 = read from infd, 0 = write to outfd */
	off_t first_offset, offset;	/* original offset; current offset after short I/O */
	size_t first_len;		/* original length, reused when requeuing as a write */
	struct iovec iov;		/* current base/len; advanced on short read/write */
};
29
/*
 * Initialize an io_uring instance with the given SQ depth.
 * Returns 0 on success, -1 on failure (message printed to stderr).
 */
static int setup_context(unsigned entries, struct io_uring *ring)
{
	int rc = io_uring_queue_init(entries, ring, 0);

	if (rc >= 0)
		return 0;

	fprintf(stderr, "queue_init: %s\n", strerror(-rc));
	return -1;
}
42
get_file_size(int fd,off_t * size)43 static int get_file_size(int fd, off_t *size)
44 {
45 struct stat st;
46
47 if (fstat(fd, &st) < 0)
48 return -1;
49 if (S_ISREG(st.st_mode)) {
50 *size = st.st_size;
51 return 0;
52 } else if (S_ISBLK(st.st_mode)) {
53 unsigned long long bytes;
54
55 if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
56 return -1;
57
58 *size = bytes;
59 return 0;
60 }
61
62 return -1;
63 }
64
queue_prepped(struct io_uring * ring,struct io_data * data)65 static void queue_prepped(struct io_uring *ring, struct io_data *data)
66 {
67 struct io_uring_sqe *sqe;
68
69 sqe = io_uring_get_sqe(ring);
70 assert(sqe);
71
72 if (data->read)
73 io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
74 else
75 io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
76
77 io_uring_sqe_set_data(sqe, data);
78 }
79
/*
 * Allocate an io_data (header plus data buffer in one allocation) and
 * prep a read of @size bytes from infd at @offset. The SQE is prepped
 * but not submitted; the caller batches submission.
 * Returns 0 on success, 1 if allocation fails or the SQ ring is full.
 */
static int queue_read(struct io_uring *ring, off_t size, off_t offset)
{
	struct io_data *data = malloc(size + sizeof(*data));
	struct io_uring_sqe *sqe;

	if (!data)
		return 1;

	sqe = io_uring_get_sqe(ring);
	if (!sqe) {
		free(data);
		return 1;
	}

	data->read = 1;
	data->first_offset = offset;
	data->offset = offset;
	data->first_len = size;
	data->iov.iov_base = data + 1;	/* buffer sits right after the header */
	data->iov.iov_len = size;

	io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
	io_uring_sqe_set_data(sqe, data);
	return 0;
}
106
queue_write(struct io_uring * ring,struct io_data * data)107 static void queue_write(struct io_uring *ring, struct io_data *data)
108 {
109 data->read = 0;
110 data->offset = data->first_offset;
111
112 data->iov.iov_base = data + 1;
113 data->iov.iov_len = data->first_len;
114
115 queue_prepped(ring, data);
116 io_uring_submit(ring);
117 }
118
/*
 * Copy @insize bytes from infd to outfd with a pipeline of up to QD
 * in-flight requests. Reads are queued in BS-sized chunks; each
 * completed read is turned into the corresponding write. Short and
 * EAGAIN completions are requeued in place.
 * Returns 0 on success, 1 on any hard error.
 */
static int copy_file(struct io_uring *ring, off_t insize)
{
	unsigned long reads, writes;	/* in-flight counts; reads + writes <= QD */
	struct io_uring_cqe *cqe;
	off_t write_left, offset;	/* write_left: bytes not yet handed to a write */
	int ret;

	write_left = insize;
	writes = reads = offset = 0;

	/* insize counts bytes not yet queued for read; loop until both drained */
	while (insize || write_left) {
		int had_reads, got_comp;

		/*
		 * Queue up as many reads as we can
		 */
		had_reads = reads;
		while (insize) {
			off_t this_size = insize;

			if (reads + writes >= QD)
				break;
			if (this_size > BS)
				this_size = BS;
			else if (!this_size)
				break;

			if (queue_read(ring, this_size, offset))
				break;

			insize -= this_size;
			offset += this_size;
			reads++;
		}

		/* Only submit when this pass actually queued new reads. */
		if (had_reads != reads) {
			ret = io_uring_submit(ring);
			if (ret < 0) {
				fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
				break;
			}
		}

		/*
		 * Queue is full at this point. Find at least one completion.
		 */
		got_comp = 0;
		while (write_left) {
			struct io_data *data;

			/*
			 * Block for the first completion, then drain any
			 * further ready CQEs without blocking via peek.
			 */
			if (!got_comp) {
				ret = io_uring_wait_cqe(ring, &cqe);
				got_comp = 1;
			} else {
				ret = io_uring_peek_cqe(ring, &cqe);
				if (ret == -EAGAIN) {
					/* nothing ready - not an error */
					cqe = NULL;
					ret = 0;
				}
			}
			if (ret < 0) {
				fprintf(stderr, "io_uring_peek_cqe: %s\n",
							strerror(-ret));
				return 1;
			}
			if (!cqe)
				break;

			data = io_uring_cqe_get_data(cqe);
			if (cqe->res < 0) {
				if (cqe->res == -EAGAIN) {
					/* transient: resubmit the same request */
					queue_prepped(ring, data);
					io_uring_cqe_seen(ring, cqe);
					continue;
				}
				fprintf(stderr, "cqe failed: %s\n",
						strerror(-cqe->res));
				return 1;
			} else if (cqe->res != data->iov.iov_len) {
				/* Short read/write, adjust and requeue */
				data->iov.iov_base += cqe->res;
				data->iov.iov_len -= cqe->res;
				data->offset += cqe->res;
				queue_prepped(ring, data);
				io_uring_cqe_seen(ring, cqe);
				continue;
			}

			/*
			 * All done. if write, nothing else to do. if read,
			 * queue up corresponding write.
			 */
			if (data->read) {
				/* read complete: issue the mirror write */
				queue_write(ring, data);
				write_left -= data->first_len;
				reads--;
				writes++;
			} else {
				/* write complete: io_data's job is finished */
				free(data);
				writes--;
			}
			io_uring_cqe_seen(ring, cqe);
		}
	}

	/* wait out pending writes */
	while (writes) {
		struct io_data *data;

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "write res=%d\n", cqe->res);
			return 1;
		}
		data = io_uring_cqe_get_data(cqe);
		free(data);
		writes--;
		io_uring_cqe_seen(ring, cqe);
	}

	return 0;
}
245
main(int argc,char * argv[])246 int main(int argc, char *argv[])
247 {
248 struct io_uring ring;
249 off_t insize;
250 int ret;
251
252 if (argc < 3) {
253 printf("%s: infile outfile\n", argv[0]);
254 return 1;
255 }
256
257 infd = open(argv[1], O_RDONLY);
258 if (infd < 0) {
259 perror("open infile");
260 return 1;
261 }
262 outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
263 if (outfd < 0) {
264 perror("open outfile");
265 return 1;
266 }
267
268 if (setup_context(QD, &ring))
269 return 1;
270 if (get_file_size(infd, &insize))
271 return 1;
272
273 ret = copy_file(&ring, insize);
274
275 close(infd);
276 close(outfd);
277 io_uring_queue_exit(&ring);
278 return ret;
279 }
280