/*
 * libaio engine
 *
 * IO engine using the Linux native aio interface.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <libaio.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"

static int fio_libaio_commit(struct thread_data *td);

struct libaio_data {
	io_context_t aio_ctx;
	struct io_event *aio_events;
	struct iocb **iocbs;
	struct io_u **io_us;

	/*
	 * Basic ring buffer. 'head' is incremented in _queue(), and
	 * 'tail' is incremented in _commit(). We keep 'queued' so
	 * that we know if the ring is full or empty, when
	 * 'head' == 'tail'. 'entries' is the ring size, and
	 * 'is_pow2' is just an optimization to use AND instead of
	 * modulus to get the remainder on ring increment.
	 */
	int is_pow2;
	unsigned int entries;
	unsigned int queued;
	unsigned int head;
	unsigned int tail;
};

struct libaio_options {
	void *pad;
	unsigned int userspace_reap;
};

static struct fio_option options[] = {
	{
		.name	= "userspace_reap",
		.lname	= "Libaio userspace reaping",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct libaio_options, userspace_reap),
		.help	= "Use alternative user-space reap implementation",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_LIBAIO,
	},
	{
		.name	= NULL,
	},
};
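
/*
 * Illustrative only: a minimal fio job that exercises this engine and
 * the option above might look like this (values are examples, not
 * defaults; the filename is hypothetical):
 *
 *   [randread]
 *   ioengine=libaio
 *   iodepth=32
 *   userspace_reap=1
 *   rw=randread
 *   filename=/dev/nullb0
 */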

static inline void ring_inc(struct libaio_data *ld, unsigned int *val,
			    unsigned int add)
{
	if (ld->is_pow2)
		*val = (*val + add) & (ld->entries - 1);
	else
		*val = (*val + add) % ld->entries;
}
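
/*
 * Worked example (illustrative only): with entries = 8, a power of two,
 * advancing an index of 7 by 1 computes (7 + 1) & 7 == 0, wrapping to
 * the start of the ring with a single AND. With entries = 6, advancing
 * an index of 5 by 1 takes the modulus path instead: (5 + 1) % 6 == 0.
 */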

static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_READ)
		io_prep_pread(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		io_prep_pwrite(&io_u->iocb, f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (ddir_sync(io_u->ddir))
		io_prep_fsync(&io_u->iocb, f->fd);

	return 0;
}

static struct io_u *fio_libaio_event(struct thread_data *td, int event)
{
	struct libaio_data *ld = td->io_ops_data;
	struct io_event *ev;
	struct io_u *io_u;

	ev = ld->aio_events + event;
	io_u = container_of(ev->obj, struct io_u, iocb);

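	/*
	 * Added commentary: the kernel reports bytes transferred in
	 * 'res' on success, or a negated errno on failure. A value
	 * larger than the requested xfer_buflen can therefore only be
	 * a wrapped negative error, which is why it is negated back
	 * into io_u->error below; anything smaller is a short transfer
	 * and becomes the residual byte count.
	 */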
	if (ev->res != io_u->xfer_buflen) {
		if (ev->res > io_u->xfer_buflen)
			io_u->error = -ev->res;
		else
			io_u->resid = io_u->xfer_buflen - ev->res;
	} else
		io_u->error = 0;

	return io_u;
}

struct aio_ring {
	unsigned id;		 /** kernel internal index number */
	unsigned nr;		 /** number of io_events */
	unsigned head;
	unsigned tail;

	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/** size of aio_ring */

	struct io_event events[0];
};

#define AIO_RING_MAGIC	0xa10a10a1
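
/*
 * Added commentary: with the Linux aio ABI, io_context_t is the
 * user-space address of the completion ring that io_setup() maps into
 * the process, so it can be cast to the layout above. Checking 'magic'
 * against AIO_RING_MAGIC guards against kernels that do not expose
 * this layout.
 */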

static int user_io_getevents(io_context_t aio_ctx, unsigned int max,
			     struct io_event *events)
{
	long i = 0;
	unsigned head;
	struct aio_ring *ring = (struct aio_ring*) aio_ctx;

	while (i < max) {
		head = ring->head;

		if (head == ring->tail) {
			/* There are no more completions */
			break;
		} else {
			/* There is another completion to reap */
			events[i] = ring->events[head];
			read_barrier();
			ring->head = (head + 1) % ring->nr;
			i++;
		}
	}

	return i;
}
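
/*
 * Added commentary: this reaps completions by consuming the shared
 * ring directly, avoiding an io_getevents() syscall. The
 * read_barrier() is there so the event is copied out of the slot
 * before the new head is published to the kernel. It assumes a single
 * reaper per context; fio drives each aio context from one thread,
 * which satisfies that.
 */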

static int fio_libaio_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct libaio_data *ld = td->io_ops_data;
	struct libaio_options *o = td->eo;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct timespec __lt, *lt = NULL;
	int r, events = 0;

	if (t) {
		__lt = *t;
		lt = &__lt;
	}

	do {
		if (o->userspace_reap == 1
		    && actual_min == 0
		    && ((struct aio_ring *)(ld->aio_ctx))->magic
				== AIO_RING_MAGIC) {
			r = user_io_getevents(ld->aio_ctx, max,
				ld->aio_events + events);
		} else {
			r = io_getevents(ld->aio_ctx, actual_min,
				max, ld->aio_events + events, lt);
		}
		if (r > 0)
			events += r;
		else if ((min && r == 0) || r == -EAGAIN) {
			fio_libaio_commit(td);
			usleep(100);
		} else if (r != -EINTR)
			break;
	} while (events < min);

	return r < 0 ? r : events;
}

static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (ld->queued == td->o.iodepth)
		return FIO_Q_BUSY;

	/*
	 * fsync is tricky, since it can fail and we need to do it
	 * serialized with other I/O. The reason is that Linux doesn't
	 * support aio fsync yet. So return busy for the case where we
	 * have pending I/O, to let fio complete those first.
	 */
	if (ddir_sync(io_u->ddir)) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_sync(td, io_u);
		return FIO_Q_COMPLETED;
	}

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	}

	ld->iocbs[ld->head] = &io_u->iocb;
	ld->io_us[ld->head] = io_u;
	ring_inc(ld, &ld->head, 1);
	ld->queued++;
	return FIO_Q_QUEUED;
}

static void fio_libaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}

static int fio_libaio_commit(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;
	struct iocb **iocbs;
	struct io_u **io_us;
	struct timeval tv;
	int ret, wait_start = 0;

	if (!ld->queued)
		return 0;

	do {
		long nr = ld->queued;

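		/*
		 * Added commentary: clamp the batch to the slots left
		 * before the ring wraps, since io_submit() needs a
		 * contiguous iocb array. E.g. with entries = 16,
		 * tail = 12 and queued = 10, the first pass submits 4
		 * iocbs, the tail wraps to 0, and the next pass
		 * submits the remaining 6.
		 */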
		nr = min((unsigned int) nr, ld->entries - ld->tail);
		io_us = ld->io_us + ld->tail;
		iocbs = ld->iocbs + ld->tail;

		ret = io_submit(ld->aio_ctx, nr, iocbs);
		if (ret > 0) {
			fio_libaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ring_inc(ld, &ld->tail, ret);
			ret = 0;
			wait_start = 0;
		} else if (ret == -EINTR || !ret) {
			if (!ret)
				io_u_mark_submit(td, ret);
			wait_start = 0;
			continue;
		} else if (ret == -EAGAIN) {
			/*
			 * If we get EAGAIN, we should break out without
			 * error and let the upper layer reap some
			 * events for us. If we have no queued IO, we
			 * must loop here. If we loop for more than 30s,
			 * just error out, something must be buggy in the
			 * IO path.
			 */
			if (ld->queued) {
				ret = 0;
				break;
			}
			if (!wait_start) {
				fio_gettime(&tv, NULL);
				wait_start = 1;
			} else if (mtime_since_now(&tv) > 30000) {
				log_err("fio: aio appears to be stalled, giving up\n");
				break;
			}
			usleep(1);
			continue;
		} else if (ret == -ENOMEM) {
			/*
			 * If we get -ENOMEM, reap events if we can. If
			 * we cannot, treat it as a fatal event since there's
			 * nothing we can do about it.
			 */
			if (ld->queued)
				ret = 0;
			break;
		} else
			break;
	} while (ld->queued);

	return ret;
}

static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
{
	struct libaio_data *ld = td->io_ops_data;

	return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
}

static void fio_libaio_cleanup(struct thread_data *td)
{
	struct libaio_data *ld = td->io_ops_data;

	if (ld) {
		/*
		 * Work-around to avoid huge RCU stalls at exit time. If we
		 * don't do this here, then it'll be torn down by exit_aio().
		 * But for that case we can parallelize the freeing, thus
		 * speeding it up a lot.
		 */
		if (!(td->flags & TD_F_CHILD))
			io_destroy(ld->aio_ctx);
		free(ld->aio_events);
		free(ld->iocbs);
		free(ld->io_us);
		free(ld);
	}
}

static int fio_libaio_init(struct thread_data *td)
{
	struct libaio_options *o = td->eo;
	struct libaio_data *ld;
	int err = 0;

	ld = calloc(1, sizeof(*ld));

	/*
	 * First try passing in INT_MAX for the queue depth, since we
	 * don't care about the user ring. If that fails with -EINVAL,
	 * the kernel is too old and we need to ask for the exact depth.
	 */
	if (!o->userspace_reap)
		err = io_queue_init(INT_MAX, &ld->aio_ctx);
	if (o->userspace_reap || err == -EINVAL)
		err = io_queue_init(td->o.iodepth, &ld->aio_ctx);
	if (err) {
		td_verror(td, -err, "io_queue_init");
		log_err("fio: check /proc/sys/fs/aio-max-nr\n");
		free(ld);
		return 1;
	}
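
	/*
	 * Added note (illustrative only): io_queue_init() can also
	 * fail when the system-wide aio context limit is exhausted;
	 * raising it may help, e.g.:
	 *
	 *   # sysctl -w fs.aio-max-nr=1048576
	 */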

	ld->entries = td->o.iodepth;
	ld->is_pow2 = is_power_of_2(ld->entries);
	ld->aio_events = calloc(ld->entries, sizeof(struct io_event));
	ld->iocbs = calloc(ld->entries, sizeof(struct iocb *));
	ld->io_us = calloc(ld->entries, sizeof(struct io_u *));

	td->io_ops_data = ld;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name			= "libaio",
	.version		= FIO_IOOPS_VERSION,
	.init			= fio_libaio_init,
	.prep			= fio_libaio_prep,
	.queue			= fio_libaio_queue,
	.commit			= fio_libaio_commit,
	.cancel			= fio_libaio_cancel,
	.getevents		= fio_libaio_getevents,
	.event			= fio_libaio_event,
	.cleanup		= fio_libaio_cleanup,
	.open_file		= generic_open_file,
	.close_file		= generic_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct libaio_options),
};

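/*
 * Added commentary: fio_init and fio_exit are fio's wrappers for the
 * GCC constructor/destructor attributes (see fio.h), so the engine
 * registers itself automatically when the fio binary loads.
 */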
static void fio_init fio_libaio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_libaio_unregister(void)
{
	unregister_ioengine(&ioengine);
}