1 #include <unistd.h>
2 #include <fcntl.h>
3 #include <string.h>
4 #include <signal.h>
5 #include <time.h>
6 #include <assert.h>
7 
8 #include "fio.h"
9 #include "hash.h"
10 #include "verify.h"
11 #include "trim.h"
12 #include "lib/rand.h"
13 #include "lib/axmap.h"
14 #include "err.h"
15 
16 struct io_completion_data {
17 	int nr;				/* input */
18 
19 	int error;			/* output */
20 	uint64_t bytes_done[DDIR_RWDIR_CNT];	/* output */
21 	struct timeval time;		/* output */
22 };
23 
24 /*
25  * The ->io_axmap contains a map of blocks we have or have not done io
26  * to yet. Used to make sure we cover the entire range in a fair fashion.
27  */
28 static int random_map_free(struct fio_file *f, const uint64_t block)
29 {
30 	return !axmap_isset(f->io_axmap, block);
31 }
32 
33 /*
34  * Mark a given offset as used in the map.
35  */
36 static void mark_random_map(struct thread_data *td, struct io_u *io_u)
37 {
38 	unsigned int min_bs = td->o.rw_min_bs;
39 	struct fio_file *f = io_u->file;
40 	unsigned int nr_blocks;
41 	uint64_t block;
42 
43 	block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
44 	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
45 
46 	if (!(io_u->flags & IO_U_F_BUSY_OK))
47 		nr_blocks = axmap_set_nr(f->io_axmap, block, nr_blocks);
48 
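	/* if fewer blocks could be marked than the io spans, trim the io to the marked range */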
49 	if ((nr_blocks * min_bs) < io_u->buflen)
50 		io_u->buflen = nr_blocks * min_bs;
51 }
52 
53 static uint64_t last_block(struct thread_data *td, struct fio_file *f,
54 			   enum fio_ddir ddir)
55 {
56 	uint64_t max_blocks;
57 	uint64_t max_size;
58 
59 	assert(ddir_rw(ddir));
60 
61 	/*
62 	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
63 	 */
64 	max_size = f->io_size;
65 	if (max_size > f->real_file_size)
66 		max_size = f->real_file_size;
67 
68 	if (td->o.zone_range)
69 		max_size = td->o.zone_range;
70 
71 	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
72 	if (!max_blocks)
73 		return 0;
74 
75 	return max_blocks;
76 }
77 
78 struct rand_off {
79 	struct flist_head list;
80 	uint64_t off;
81 };
82 
83 static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
84 				  enum fio_ddir ddir, uint64_t *b)
85 {
86 	uint64_t r, lastb;
87 
88 	lastb = last_block(td, f, ddir);
89 	if (!lastb)
90 		return 1;
91 
92 	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
93 		uint64_t rmax;
94 
95 		rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
96 
97 		if (td->o.use_os_rand) {
98 			rmax = OS_RAND_MAX;
99 			r = os_random_long(&td->random_state);
100 		} else {
101 			rmax = FRAND_MAX;
102 			r = __rand(&td->__random_state);
103 		}
104 
105 		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
106 
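		/* scale the raw random value uniformly into the block range [0, lastb) */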
107 		*b = lastb * (r / ((uint64_t) rmax + 1.0));
108 	} else {
109 		uint64_t off = 0;
110 
111 		if (lfsr_next(&f->lfsr, &off, lastb))
112 			return 1;
113 
114 		*b = off;
115 	}
116 
117 	/*
118 	 * if we are not maintaining a random map, we are done.
119 	 */
120 	if (!file_randommap(td, f))
121 		goto ret;
122 
123 	/*
124 	 * calculate map offset and check if it's free
125 	 */
126 	if (random_map_free(f, *b))
127 		goto ret;
128 
129 	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
130 						(unsigned long long) *b);
131 
132 	*b = axmap_next_free(f->io_axmap, *b);
133 	if (*b == (uint64_t) -1ULL)
134 		return 1;
135 ret:
136 	return 0;
137 }
138 
139 static int __get_next_rand_offset_zipf(struct thread_data *td,
140 				       struct fio_file *f, enum fio_ddir ddir,
141 				       uint64_t *b)
142 {
143 	*b = zipf_next(&f->zipf);
144 	return 0;
145 }
146 
147 static int __get_next_rand_offset_pareto(struct thread_data *td,
148 					 struct fio_file *f, enum fio_ddir ddir,
149 					 uint64_t *b)
150 {
151 	*b = pareto_next(&f->zipf);
152 	return 0;
153 }
154 
155 static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
156 {
157 	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
158 	struct rand_off *r2 = flist_entry(b, struct rand_off, list);
159 
160 	return r1->off - r2->off;
161 }
162 
163 static int get_off_from_method(struct thread_data *td, struct fio_file *f,
164 			       enum fio_ddir ddir, uint64_t *b)
165 {
166 	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
167 		return __get_next_rand_offset(td, f, ddir, b);
168 	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
169 		return __get_next_rand_offset_zipf(td, f, ddir, b);
170 	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
171 		return __get_next_rand_offset_pareto(td, f, ddir, b);
172 
173 	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
174 	return 1;
175 }
176 
177 /*
178  * Sort the reads for a verify phase in batches of verifysort_nr, if
179  * specified.
180  */
181 static inline int should_sort_io(struct thread_data *td)
182 {
183 	if (!td->o.verifysort_nr || !td->o.do_verify)
184 		return 0;
185 	if (!td_random(td))
186 		return 0;
187 	if (td->runstate != TD_VERIFYING)
188 		return 0;
189 	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
190 		return 0;
191 
192 	return 1;
193 }
194 
195 static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
196 {
197 	unsigned int v;
198 	unsigned long r;
199 
200 	if (td->o.perc_rand[ddir] == 100)
201 		return 1;
202 
203 	if (td->o.use_os_rand) {
204 		r = os_random_long(&td->seq_rand_state[ddir]);
205 		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
206 	} else {
207 		r = __rand(&td->__seq_rand_state[ddir]);
208 		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
209 	}
210 
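	/* v is uniform in [1, 100]; go random if it falls within the configured percentage */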
211 	return v <= td->o.perc_rand[ddir];
212 }
213 
214 static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
215 				enum fio_ddir ddir, uint64_t *b)
216 {
217 	struct rand_off *r;
218 	int i, ret = 1;
219 
220 	if (!should_sort_io(td))
221 		return get_off_from_method(td, f, ddir, b);
222 
223 	if (!flist_empty(&td->next_rand_list)) {
224 		struct rand_off *r;
225 fetch:
226 		r = flist_entry(td->next_rand_list.next, struct rand_off, list);
227 		flist_del(&r->list);
228 		*b = r->off;
229 		free(r);
230 		return 0;
231 	}
232 
233 	for (i = 0; i < td->o.verifysort_nr; i++) {
234 		r = malloc(sizeof(*r));
235 
236 		ret = get_off_from_method(td, f, ddir, &r->off);
237 		if (ret) {
238 			free(r);
239 			break;
240 		}
241 
242 		flist_add(&r->list, &td->next_rand_list);
243 	}
244 
245 	if (ret && !i)
246 		return ret;
247 
248 	assert(!flist_empty(&td->next_rand_list));
249 	flist_sort(NULL, &td->next_rand_list, flist_cmp);
250 	goto fetch;
251 }
252 
253 static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
254 			       enum fio_ddir ddir, uint64_t *b)
255 {
256 	if (!get_next_rand_offset(td, f, ddir, b))
257 		return 0;
258 
259 	if (td->o.time_based) {
260 		fio_file_reset(td, f);
261 		if (!get_next_rand_offset(td, f, ddir, b))
262 			return 0;
263 	}
264 
265 	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
266 			f->file_name, (unsigned long long) f->last_pos,
267 			(unsigned long long) f->real_file_size);
268 	return 1;
269 }
270 
271 static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
272 			       enum fio_ddir ddir, uint64_t *offset)
273 {
274 	assert(ddir_rw(ddir));
275 
276 	if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
277 		f->last_pos = f->last_pos - f->io_size;
278 
279 	if (f->last_pos < f->real_file_size) {
280 		uint64_t pos;
281 
282 		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
283 			f->last_pos = f->real_file_size;
284 
285 		pos = f->last_pos - f->file_offset;
286 		if (pos)
287 			pos += td->o.ddir_seq_add;
288 
289 		*offset = pos;
290 		return 0;
291 	}
292 
293 	return 1;
294 }
295 
296 static int get_next_block(struct thread_data *td, struct io_u *io_u,
297 			  enum fio_ddir ddir, int rw_seq,
298 			  unsigned int *is_random)
299 {
300 	struct fio_file *f = io_u->file;
301 	uint64_t b, offset;
302 	int ret;
303 
304 	assert(ddir_rw(ddir));
305 
306 	b = offset = -1ULL;
307 
308 	if (rw_seq) {
309 		if (td_random(td)) {
310 			if (should_do_random(td, ddir)) {
311 				ret = get_next_rand_block(td, f, ddir, &b);
312 				*is_random = 1;
313 			} else {
314 				*is_random = 0;
315 				io_u->flags |= IO_U_F_BUSY_OK;
316 				ret = get_next_seq_offset(td, f, ddir, &offset);
317 				if (ret)
318 					ret = get_next_rand_block(td, f, ddir, &b);
319 			}
320 		} else {
321 			*is_random = 0;
322 			ret = get_next_seq_offset(td, f, ddir, &offset);
323 		}
324 	} else {
325 		io_u->flags |= IO_U_F_BUSY_OK;
326 		*is_random = 0;
327 
328 		if (td->o.rw_seq == RW_SEQ_SEQ) {
329 			ret = get_next_seq_offset(td, f, ddir, &offset);
330 			if (ret) {
331 				ret = get_next_rand_block(td, f, ddir, &b);
332 				*is_random = 0;
333 			}
334 		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
335 			if (f->last_start != -1ULL)
336 				offset = f->last_start - f->file_offset;
337 			else
338 				offset = 0;
339 			ret = 0;
340 		} else {
341 			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
342 			ret = 1;
343 		}
344 	}
345 
346 	if (!ret) {
347 		if (offset != -1ULL)
348 			io_u->offset = offset;
349 		else if (b != -1ULL)
350 			io_u->offset = b * td->o.ba[ddir];
351 		else {
352 			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
353 			ret = 1;
354 		}
355 	}
356 
357 	return ret;
358 }
359 
360 /*
361  * For random io, generate a random new block and see if it's used. Repeat
362  * until we find a free one. For sequential io, just return the end of
363  * the last io issued.
364  */
365 static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
366 			     unsigned int *is_random)
367 {
368 	struct fio_file *f = io_u->file;
369 	enum fio_ddir ddir = io_u->ddir;
370 	int rw_seq_hit = 0;
371 
372 	assert(ddir_rw(ddir));
373 
374 	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
375 		rw_seq_hit = 1;
376 		td->ddir_seq_nr = td->o.ddir_seq_nr;
377 	}
378 
379 	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
380 		return 1;
381 
382 	if (io_u->offset >= f->io_size) {
383 		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
384 					(unsigned long long) io_u->offset,
385 					(unsigned long long) f->io_size);
386 		return 1;
387 	}
388 
389 	io_u->offset += f->file_offset;
390 	if (io_u->offset >= f->real_file_size) {
391 		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
392 					(unsigned long long) io_u->offset,
393 					(unsigned long long) f->real_file_size);
394 		return 1;
395 	}
396 
397 	return 0;
398 }
399 
400 static int get_next_offset(struct thread_data *td, struct io_u *io_u,
401 			   unsigned int *is_random)
402 {
403 	if (td->flags & TD_F_PROFILE_OPS) {
404 		struct prof_io_ops *ops = &td->prof_io_ops;
405 
406 		if (ops->fill_io_u_off)
407 			return ops->fill_io_u_off(td, io_u, is_random);
408 	}
409 
410 	return __get_next_offset(td, io_u, is_random);
411 }
412 
413 static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
414 			    unsigned int buflen)
415 {
416 	struct fio_file *f = io_u->file;
417 
418 	return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
419 }
420 
421 static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
422 				      unsigned int is_random)
423 {
424 	int ddir = io_u->ddir;
425 	unsigned int buflen = 0;
426 	unsigned int minbs, maxbs;
427 	unsigned long r, rand_max;
428 
429 	assert(ddir_rw(ddir));
430 
431 	if (td->o.bs_is_seq_rand)
432 		ddir = is_random ? DDIR_WRITE: DDIR_READ;
433 
434 	minbs = td->o.min_bs[ddir];
435 	maxbs = td->o.max_bs[ddir];
436 
437 	if (minbs == maxbs)
438 		return minbs;
439 
440 	/*
441 	 * If we can't satisfy the min block size from here, then fail
442 	 */
443 	if (!io_u_fits(td, io_u, minbs))
444 		return 0;
445 
446 	if (td->o.use_os_rand)
447 		rand_max = OS_RAND_MAX;
448 	else
449 		rand_max = FRAND_MAX;
450 
451 	do {
452 		if (td->o.use_os_rand)
453 			r = os_random_long(&td->bsrange_state);
454 		else
455 			r = __rand(&td->__bsrange_state);
456 
457 		if (!td->o.bssplit_nr[ddir]) {
458 			buflen = 1 + (unsigned int) ((double) maxbs *
459 					(r / (rand_max + 1.0)));
460 			if (buflen < minbs)
461 				buflen = minbs;
462 		} else {
463 			long perc = 0;
464 			unsigned int i;
465 
466 			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
467 				struct bssplit *bsp = &td->o.bssplit[ddir][i];
468 
469 				buflen = bsp->bs;
470 				perc += bsp->perc;
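				/* pick this bs once the cumulative split percentage covers r */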
471 				if ((r <= ((rand_max / 100L) * perc)) &&
472 				    io_u_fits(td, io_u, buflen))
473 					break;
474 			}
475 		}
476 
477 		if (td->o.do_verify && td->o.verify != VERIFY_NONE)
478 			buflen = (buflen + td->o.verify_interval - 1) &
479 				~(td->o.verify_interval - 1);
480 
481 		if (!td->o.bs_unaligned && is_power_of_2(minbs))
482 			buflen = (buflen + minbs - 1) & ~(minbs - 1);
483 
484 	} while (!io_u_fits(td, io_u, buflen));
485 
486 	return buflen;
487 }
488 
489 static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
490 				    unsigned int is_random)
491 {
492 	if (td->flags & TD_F_PROFILE_OPS) {
493 		struct prof_io_ops *ops = &td->prof_io_ops;
494 
495 		if (ops->fill_io_u_size)
496 			return ops->fill_io_u_size(td, io_u, is_random);
497 	}
498 
499 	return __get_next_buflen(td, io_u, is_random);
500 }
501 
502 static void set_rwmix_bytes(struct thread_data *td)
503 {
504 	unsigned int diff;
505 
506 	/*
507 	 * we do time or byte based switch. this is needed because
508 	 * buffered writes may issue a lot quicker than they complete,
509 	 * whereas reads do not.
510 	 */
511 	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
512 	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
513 }
514 
515 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
516 {
517 	unsigned int v;
518 	unsigned long r;
519 
520 	if (td->o.use_os_rand) {
521 		r = os_random_long(&td->rwmix_state);
522 		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
523 	} else {
524 		r = __rand(&td->__rwmix_state);
525 		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
526 	}
527 
528 	if (v <= td->o.rwmix[DDIR_READ])
529 		return DDIR_READ;
530 
531 	return DDIR_WRITE;
532 }
533 
534 void io_u_quiesce(struct thread_data *td)
535 {
536 	/*
537 	 * We are going to sleep; ensure that we flush anything pending so as
538 	 * not to skew our latency numbers.
539 	 *
540 	 * Changed to only monitor 'in flight' requests here instead of the
541 	 * td->cur_depth, b/c td->cur_depth does not accurately represent
542 	 * io's that have been actually submitted to an async engine,
543 	 * and cur_depth is meaningless for sync engines.
544 	 */
545 	while (td->io_u_in_flight) {
546 		int fio_unused ret;
547 
548 		ret = io_u_queued_complete(td, 1, NULL);
549 	}
550 }
551 
552 static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
553 {
554 	enum fio_ddir odir = ddir ^ 1;
555 	struct timeval t;
556 	long usec;
557 
558 	assert(ddir_rw(ddir));
559 
560 	if (td->rate_pending_usleep[ddir] <= 0)
561 		return ddir;
562 
563 	/*
564 	 * We have too much pending sleep in this direction. See if we
565 	 * should switch.
566 	 */
567 	if (td_rw(td) && td->o.rwmix[odir]) {
568 		/*
569 		 * Other direction does not have too much pending, switch
570 		 */
571 		if (td->rate_pending_usleep[odir] < 100000)
572 			return odir;
573 
574 		/*
575 		 * Both directions have pending sleep. Sleep the minimum time
576 		 * and deduct from both.
577 		 */
578 		if (td->rate_pending_usleep[ddir] <=
579 			td->rate_pending_usleep[odir]) {
580 			usec = td->rate_pending_usleep[ddir];
581 		} else {
582 			usec = td->rate_pending_usleep[odir];
583 			ddir = odir;
584 		}
585 	} else
586 		usec = td->rate_pending_usleep[ddir];
587 
588 	io_u_quiesce(td);
589 
590 	fio_gettime(&t, NULL);
591 	usec_sleep(td, usec);
592 	usec = utime_since_now(&t);
593 
594 	td->rate_pending_usleep[ddir] -= usec;
595 
596 	odir = ddir ^ 1;
597 	if (td_rw(td) && __should_check_rate(td, odir))
598 		td->rate_pending_usleep[odir] -= usec;
599 
600 	if (ddir_trim(ddir))
601 		return ddir;
602 
603 	return ddir;
604 }
605 
606 /*
607  * Return the data direction for the next io_u. If the job is a
608  * mixed read/write workload, check the rwmix cycle and switch if
609  * necessary.
610  */
611 static enum fio_ddir get_rw_ddir(struct thread_data *td)
612 {
613 	enum fio_ddir ddir;
614 
615 	/*
616 	 * see if it's time to fsync
617 	 */
618 	if (td->o.fsync_blocks &&
619 	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
620 	     td->io_issues[DDIR_WRITE] && should_fsync(td))
621 		return DDIR_SYNC;
622 
623 	/*
624 	 * see if it's time to fdatasync
625 	 */
626 	if (td->o.fdatasync_blocks &&
627 	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
628 	     td->io_issues[DDIR_WRITE] && should_fsync(td))
629 		return DDIR_DATASYNC;
630 
631 	/*
632 	 * see if it's time to sync_file_range
633 	 */
634 	if (td->sync_file_range_nr &&
635 	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
636 	     td->io_issues[DDIR_WRITE] && should_fsync(td))
637 		return DDIR_SYNC_FILE_RANGE;
638 
639 	if (td_rw(td)) {
640 		/*
641 		 * Check if it's time to seed a new data direction.
642 		 */
643 		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
644 			/*
645 			 * Put a top limit on how many bytes we do for
646 			 * one data direction, to avoid overflowing the
647 			 * ranges too much
648 			 */
649 			ddir = get_rand_ddir(td);
650 
651 			if (ddir != td->rwmix_ddir)
652 				set_rwmix_bytes(td);
653 
654 			td->rwmix_ddir = ddir;
655 		}
656 		ddir = td->rwmix_ddir;
657 	} else if (td_read(td))
658 		ddir = DDIR_READ;
659 	else if (td_write(td))
660 		ddir = DDIR_WRITE;
661 	else
662 		ddir = DDIR_TRIM;
663 
664 	td->rwmix_ddir = rate_ddir(td, ddir);
665 	return td->rwmix_ddir;
666 }
667 
668 static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
669 {
670 	io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);
671 
672 	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
673 	    td->o.barrier_blocks &&
674 	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
675 	     td->io_issues[DDIR_WRITE])
676 		io_u->flags |= IO_U_F_BARRIER;
677 }
678 
679 void put_file_log(struct thread_data *td, struct fio_file *f)
680 {
681 	unsigned int ret = put_file(td, f);
682 
683 	if (ret)
684 		td_verror(td, ret, "file close");
685 }
686 
687 void put_io_u(struct thread_data *td, struct io_u *io_u)
688 {
689 	td_io_u_lock(td);
690 
691 	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
692 		put_file_log(td, io_u->file);
693 	io_u->file = NULL;
694 	io_u->flags &= ~IO_U_F_FREE_DEF;
695 	io_u->flags |= IO_U_F_FREE;
696 
697 	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
698 		td->cur_depth--;
699 	io_u_qpush(&td->io_u_freelist, io_u);
700 	td_io_u_unlock(td);
701 	td_io_u_free_notify(td);
702 }
703 
704 void clear_io_u(struct thread_data *td, struct io_u *io_u)
705 {
706 	io_u->flags &= ~IO_U_F_FLIGHT;
707 	put_io_u(td, io_u);
708 }
709 
710 void requeue_io_u(struct thread_data *td, struct io_u **io_u)
711 {
712 	struct io_u *__io_u = *io_u;
713 	enum fio_ddir ddir = acct_ddir(__io_u);
714 
715 	dprint(FD_IO, "requeue %p\n", __io_u);
716 
717 	td_io_u_lock(td);
718 
719 	__io_u->flags |= IO_U_F_FREE;
720 	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
721 		td->io_issues[ddir]--;
722 
723 	__io_u->flags &= ~IO_U_F_FLIGHT;
724 	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
725 		td->cur_depth--;
726 
727 	io_u_rpush(&td->io_u_requeues, __io_u);
728 	td_io_u_unlock(td);
729 	*io_u = NULL;
730 }
731 
732 static int fill_io_u(struct thread_data *td, struct io_u *io_u)
733 {
734 	unsigned int is_random;
735 
736 	if (td->io_ops->flags & FIO_NOIO)
737 		goto out;
738 
739 	set_rw_ddir(td, io_u);
740 
741 	/*
742 	 * fsync() or fdatasync() or trim etc, we are done
743 	 */
744 	if (!ddir_rw(io_u->ddir))
745 		goto out;
746 
747 	/*
748 	 * See if it's time to switch to a new zone
749 	 */
750 	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
751 		td->zone_bytes = 0;
752 		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
753 		io_u->file->last_pos = io_u->file->file_offset;
754 		td->io_skip_bytes += td->o.zone_skip;
755 	}
756 
757 	/*
758 	 * No log, let the seq/rand engine retrieve the next buflen and
759 	 * position.
760 	 */
761 	if (get_next_offset(td, io_u, &is_random)) {
762 		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
763 		return 1;
764 	}
765 
766 	io_u->buflen = get_next_buflen(td, io_u, is_random);
767 	if (!io_u->buflen) {
768 		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
769 		return 1;
770 	}
771 
772 	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
773 		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
774 		dprint(FD_IO, "  off=%llu/%lu > %llu\n",
775 			(unsigned long long) io_u->offset, io_u->buflen,
776 			(unsigned long long) io_u->file->real_file_size);
777 		return 1;
778 	}
779 
780 	/*
781 	 * mark entry before potentially trimming io_u
782 	 */
783 	if (td_random(td) && file_randommap(td, io_u->file))
784 		mark_random_map(td, io_u);
785 
786 out:
787 	dprint_io_u(io_u, "fill_io_u");
788 	td->zone_bytes += io_u->buflen;
789 	return 0;
790 }
791 
792 static void __io_u_mark_map(unsigned int *map, unsigned int nr)
793 {
794 	int idx = 0;
795 
796 	switch (nr) {
797 	default:
798 		idx = 6;
799 		break;
800 	case 33 ... 64:
801 		idx = 5;
802 		break;
803 	case 17 ... 32:
804 		idx = 4;
805 		break;
806 	case 9 ... 16:
807 		idx = 3;
808 		break;
809 	case 5 ... 8:
810 		idx = 2;
811 		break;
812 	case 1 ... 4:
813 		idx = 1;
814 	case 0:
815 		break;
816 	}
817 
818 	map[idx]++;
819 }
820 
821 void io_u_mark_submit(struct thread_data *td, unsigned int nr)
822 {
823 	__io_u_mark_map(td->ts.io_u_submit, nr);
824 	td->ts.total_submit++;
825 }
826 
827 void io_u_mark_complete(struct thread_data *td, unsigned int nr)
828 {
829 	__io_u_mark_map(td->ts.io_u_complete, nr);
830 	td->ts.total_complete++;
831 }
832 
833 void io_u_mark_depth(struct thread_data *td, unsigned int nr)
834 {
835 	int idx = 0;
836 
837 	switch (td->cur_depth) {
838 	default:
839 		idx = 6;
840 		break;
841 	case 32 ... 63:
842 		idx = 5;
843 		break;
844 	case 16 ... 31:
845 		idx = 4;
846 		break;
847 	case 8 ... 15:
848 		idx = 3;
849 		break;
850 	case 4 ... 7:
851 		idx = 2;
852 		break;
853 	case 2 ... 3:
854 		idx = 1;
855 	case 1:
856 		break;
857 	}
858 
859 	td->ts.io_u_map[idx] += nr;
860 }
861 
862 static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
863 {
864 	int idx = 0;
865 
866 	assert(usec < 1000);
867 
868 	switch (usec) {
869 	case 750 ... 999:
870 		idx = 9;
871 		break;
872 	case 500 ... 749:
873 		idx = 8;
874 		break;
875 	case 250 ... 499:
876 		idx = 7;
877 		break;
878 	case 100 ... 249:
879 		idx = 6;
880 		break;
881 	case 50 ... 99:
882 		idx = 5;
883 		break;
884 	case 20 ... 49:
885 		idx = 4;
886 		break;
887 	case 10 ... 19:
888 		idx = 3;
889 		break;
890 	case 4 ... 9:
891 		idx = 2;
892 		break;
893 	case 2 ... 3:
894 		idx = 1;
895 	case 0 ... 1:
896 		break;
897 	}
898 
899 	assert(idx < FIO_IO_U_LAT_U_NR);
900 	td->ts.io_u_lat_u[idx]++;
901 }
902 
903 static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
904 {
905 	int idx = 0;
906 
907 	switch (msec) {
908 	default:
909 		idx = 11;
910 		break;
911 	case 1000 ... 1999:
912 		idx = 10;
913 		break;
914 	case 750 ... 999:
915 		idx = 9;
916 		break;
917 	case 500 ... 749:
918 		idx = 8;
919 		break;
920 	case 250 ... 499:
921 		idx = 7;
922 		break;
923 	case 100 ... 249:
924 		idx = 6;
925 		break;
926 	case 50 ... 99:
927 		idx = 5;
928 		break;
929 	case 20 ... 49:
930 		idx = 4;
931 		break;
932 	case 10 ... 19:
933 		idx = 3;
934 		break;
935 	case 4 ... 9:
936 		idx = 2;
937 		break;
938 	case 2 ... 3:
939 		idx = 1;
940 	case 0 ... 1:
941 		break;
942 	}
943 
944 	assert(idx < FIO_IO_U_LAT_M_NR);
945 	td->ts.io_u_lat_m[idx]++;
946 }
947 
948 static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
949 {
950 	if (usec < 1000)
951 		io_u_mark_lat_usec(td, usec);
952 	else
953 		io_u_mark_lat_msec(td, usec / 1000);
954 }
955 
956 /*
957  * Get next file to service by choosing one at random
958  */
959 static struct fio_file *get_next_file_rand(struct thread_data *td,
960 					   enum fio_file_flags goodf,
961 					   enum fio_file_flags badf)
962 {
963 	struct fio_file *f;
964 	int fno;
965 
966 	do {
967 		int opened = 0;
968 		unsigned long r;
969 
970 		if (td->o.use_os_rand) {
971 			r = os_random_long(&td->next_file_state);
972 			fno = (unsigned int) ((double) td->o.nr_files
973 				* (r / (OS_RAND_MAX + 1.0)));
974 		} else {
975 			r = __rand(&td->__next_file_state);
976 			fno = (unsigned int) ((double) td->o.nr_files
977 				* (r / (FRAND_MAX + 1.0)));
978 		}
979 
980 		f = td->files[fno];
981 		if (fio_file_done(f))
982 			continue;
983 
984 		if (!fio_file_open(f)) {
985 			int err;
986 
987 			if (td->nr_open_files >= td->o.open_files)
988 				return ERR_PTR(-EBUSY);
989 
990 			err = td_io_open_file(td, f);
991 			if (err)
992 				continue;
993 			opened = 1;
994 		}
995 
996 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
997 			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
998 			return f;
999 		}
1000 		if (opened)
1001 			td_io_close_file(td, f);
1002 	} while (1);
1003 }
1004 
1005 /*
1006  * Get next file to service by doing round robin between all available ones
1007  */
1008 static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
1009 					 int badf)
1010 {
1011 	unsigned int old_next_file = td->next_file;
1012 	struct fio_file *f;
1013 
1014 	do {
1015 		int opened = 0;
1016 
1017 		f = td->files[td->next_file];
1018 
1019 		td->next_file++;
1020 		if (td->next_file >= td->o.nr_files)
1021 			td->next_file = 0;
1022 
1023 		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
1024 		if (fio_file_done(f)) {
1025 			f = NULL;
1026 			continue;
1027 		}
1028 
1029 		if (!fio_file_open(f)) {
1030 			int err;
1031 
1032 			if (td->nr_open_files >= td->o.open_files)
1033 				return ERR_PTR(-EBUSY);
1034 
1035 			err = td_io_open_file(td, f);
1036 			if (err) {
1037 				dprint(FD_FILE, "error %d on open of %s\n",
1038 					err, f->file_name);
1039 				f = NULL;
1040 				continue;
1041 			}
1042 			opened = 1;
1043 		}
1044 
1045 		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
1046 								f->flags);
1047 		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
1048 			break;
1049 
1050 		if (opened)
1051 			td_io_close_file(td, f);
1052 
1053 		f = NULL;
1054 	} while (td->next_file != old_next_file);
1055 
1056 	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
1057 	return f;
1058 }
1059 
1060 static struct fio_file *__get_next_file(struct thread_data *td)
1061 {
1062 	struct fio_file *f;
1063 
1064 	assert(td->o.nr_files <= td->files_index);
1065 
1066 	if (td->nr_done_files >= td->o.nr_files) {
1067 		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
1068 				" nr_files=%d\n", td->nr_open_files,
1069 						  td->nr_done_files,
1070 						  td->o.nr_files);
1071 		return NULL;
1072 	}
1073 
1074 	f = td->file_service_file;
1075 	if (f && fio_file_open(f) && !fio_file_closing(f)) {
1076 		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
1077 			goto out;
1078 		if (td->file_service_left--)
1079 			goto out;
1080 	}
1081 
1082 	if (td->o.file_service_type == FIO_FSERVICE_RR ||
1083 	    td->o.file_service_type == FIO_FSERVICE_SEQ)
1084 		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
1085 	else
1086 		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
1087 
1088 	if (IS_ERR(f))
1089 		return f;
1090 
1091 	td->file_service_file = f;
1092 	td->file_service_left = td->file_service_nr - 1;
1093 out:
1094 	if (f)
1095 		dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
1096 	else
1097 		dprint(FD_FILE, "get_next_file: NULL\n");
1098 	return f;
1099 }
1100 
1101 static struct fio_file *get_next_file(struct thread_data *td)
1102 {
1103 	if (!(td->flags & TD_F_PROFILE_OPS)) {
1104 		struct prof_io_ops *ops = &td->prof_io_ops;
1105 
1106 		if (ops->get_next_file)
1107 			return ops->get_next_file(td);
1108 	}
1109 
1110 	return __get_next_file(td);
1111 }
1112 
1113 static long set_io_u_file(struct thread_data *td, struct io_u *io_u)
1114 {
1115 	struct fio_file *f;
1116 
1117 	do {
1118 		f = get_next_file(td);
1119 		if (IS_ERR_OR_NULL(f))
1120 			return PTR_ERR(f);
1121 
1122 		io_u->file = f;
1123 		get_file(f);
1124 
1125 		if (!fill_io_u(td, io_u))
1126 			break;
1127 
1128 		put_file_log(td, f);
1129 		td_io_close_file(td, f);
1130 		io_u->file = NULL;
1131 		fio_file_set_done(f);
1132 		td->nr_done_files++;
1133 		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
1134 					td->nr_done_files, td->o.nr_files);
1135 	} while (1);
1136 
1137 	return 0;
1138 }
1139 
1140 static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
1141 		      unsigned long tusec, unsigned long max_usec)
1142 {
1143 	if (!td->error)
1144 		log_err("fio: latency of %lu usec exceeds specified max (%lu usec)\n", tusec, max_usec);
1145 	td_verror(td, ETIMEDOUT, "max latency exceeded");
1146 	icd->error = ETIMEDOUT;
1147 }
1148 
1149 static void lat_new_cycle(struct thread_data *td)
1150 {
1151 	fio_gettime(&td->latency_ts, NULL);
1152 	td->latency_ios = ddir_rw_sum(td->io_blocks);
1153 	td->latency_failed = 0;
1154 }
1155 
1156 /*
1157  * We had an IO outside the latency target. Reduce the queue depth. If we
1158  * are at QD=1, then it's time to give up.
1159  */
1160 static int __lat_target_failed(struct thread_data *td)
1161 {
1162 	if (td->latency_qd == 1)
1163 		return 1;
1164 
1165 	td->latency_qd_high = td->latency_qd;
1166 
1167 	if (td->latency_qd == td->latency_qd_low)
1168 		td->latency_qd_low--;
1169 
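	/* bisect down between the last known-good low depth and the failing depth */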
1170 	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
1171 
1172 	dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
1173 
1174 	/*
1175 	 * When we ramp QD down, quiesce existing IO to prevent
1176 	 * a storm of ramp downs due to pending higher depth.
1177 	 */
1178 	io_u_quiesce(td);
1179 	lat_new_cycle(td);
1180 	return 0;
1181 }
1182 
1183 static int lat_target_failed(struct thread_data *td)
1184 {
1185 	if (td->o.latency_percentile.u.f == 100.0)
1186 		return __lat_target_failed(td);
1187 
1188 	td->latency_failed++;
1189 	return 0;
1190 }
1191 
1192 void lat_target_init(struct thread_data *td)
1193 {
1194 	td->latency_end_run = 0;
1195 
1196 	if (td->o.latency_target) {
1197 		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
1198 		fio_gettime(&td->latency_ts, NULL);
1199 		td->latency_qd = 1;
1200 		td->latency_qd_high = td->o.iodepth;
1201 		td->latency_qd_low = 1;
1202 		td->latency_ios = ddir_rw_sum(td->io_blocks);
1203 	} else
1204 		td->latency_qd = td->o.iodepth;
1205 }
1206 
1207 void lat_target_reset(struct thread_data *td)
1208 {
1209 	if (!td->latency_end_run)
1210 		lat_target_init(td);
1211 }
1212 
1213 static void lat_target_success(struct thread_data *td)
1214 {
1215 	const unsigned int qd = td->latency_qd;
1216 	struct thread_options *o = &td->o;
1217 
1218 	td->latency_qd_low = td->latency_qd;
1219 
1220 	/*
1221 	 * If we haven't failed yet, we double up to a failing value instead
1222 	 * of bisecting from highest possible queue depth. If we have set
1223 	 * a limit other than td->o.iodepth, bisect between that.
1224 	 */
1225 	if (td->latency_qd_high != o->iodepth)
1226 		td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;
1227 	else
1228 		td->latency_qd *= 2;
1229 
1230 	if (td->latency_qd > o->iodepth)
1231 		td->latency_qd = o->iodepth;
1232 
1233 	dprint(FD_RATE, "Ramped up: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
1234 
1235 	/*
1236 	 * Same as last one, we are done. Let it run a latency cycle, so
1237 	 * we get only the results from the targeted depth.
1238 	 */
1239 	if (td->latency_qd == qd) {
1240 		if (td->latency_end_run) {
1241 			dprint(FD_RATE, "We are done\n");
1242 			td->done = 1;
1243 		} else {
1244 			dprint(FD_RATE, "Quiesce and final run\n");
1245 			io_u_quiesce(td);
1246 			td->latency_end_run = 1;
1247 			reset_all_stats(td);
1248 			reset_io_stats(td);
1249 		}
1250 	}
1251 
1252 	lat_new_cycle(td);
1253 }
1254 
1255 /*
1256  * Check if we can bump the queue depth
1257  */
1258 void lat_target_check(struct thread_data *td)
1259 {
1260 	uint64_t usec_window;
1261 	uint64_t ios;
1262 	double success_ios;
1263 
1264 	usec_window = utime_since_now(&td->latency_ts);
1265 	if (usec_window < td->o.latency_window)
1266 		return;
1267 
1268 	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
1269 	success_ios = (double) (ios - td->latency_failed) / (double) ios;
1270 	success_ios *= 100.0;
1271 
1272 	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n", success_ios, td->o.latency_percentile.u.f);
1273 
1274 	if (success_ios >= td->o.latency_percentile.u.f)
1275 		lat_target_success(td);
1276 	else
1277 		__lat_target_failed(td);
1278 }
1279 
1280 /*
1281  * If latency target is enabled, we might be ramping up or down and not
1282  * using the full queue depth available.
1283  */
1284 int queue_full(struct thread_data *td)
1285 {
1286 	const int qempty = io_u_qempty(&td->io_u_freelist);
1287 
1288 	if (qempty)
1289 		return 1;
1290 	if (!td->o.latency_target)
1291 		return 0;
1292 
1293 	return td->cur_depth >= td->latency_qd;
1294 }
1295 
1296 struct io_u *__get_io_u(struct thread_data *td)
1297 {
1298 	struct io_u *io_u = NULL;
1299 
1300 	td_io_u_lock(td);
1301 
1302 again:
1303 	if (!io_u_rempty(&td->io_u_requeues))
1304 		io_u = io_u_rpop(&td->io_u_requeues);
1305 	else if (!queue_full(td)) {
1306 		io_u = io_u_qpop(&td->io_u_freelist);
1307 
1308 		io_u->file = NULL;
1309 		io_u->buflen = 0;
1310 		io_u->resid = 0;
1311 		io_u->end_io = NULL;
1312 	}
1313 
1314 	if (io_u) {
1315 		assert(io_u->flags & IO_U_F_FREE);
1316 		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
1317 		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
1318 		io_u->flags &= ~IO_U_F_VER_LIST;
1319 
1320 		io_u->error = 0;
1321 		io_u->acct_ddir = -1;
1322 		td->cur_depth++;
1323 		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
1324 		io_u->ipo = NULL;
1325 	} else if (td->o.verify_async) {
1326 		/*
1327 		 * We ran out, wait for async verify threads to finish and
1328 		 * return one
1329 		 */
1330 		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1331 		goto again;
1332 	}
1333 
1334 	td_io_u_unlock(td);
1335 	return io_u;
1336 }
1337 
1338 static int check_get_trim(struct thread_data *td, struct io_u *io_u)
1339 {
1340 	if (!(td->flags & TD_F_TRIM_BACKLOG))
1341 		return 0;
1342 
1343 	if (td->trim_entries) {
1344 		int get_trim = 0;
1345 
1346 		if (td->trim_batch) {
1347 			td->trim_batch--;
1348 			get_trim = 1;
1349 		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
1350 			 td->last_ddir != DDIR_READ) {
1351 			td->trim_batch = td->o.trim_batch;
1352 			if (!td->trim_batch)
1353 				td->trim_batch = td->o.trim_backlog;
1354 			get_trim = 1;
1355 		}
1356 
1357 		if (get_trim && !get_next_trim(td, io_u))
1358 			return 1;
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1365 {
1366 	if (!(td->flags & TD_F_VER_BACKLOG))
1367 		return 0;
1368 
1369 	if (td->io_hist_len) {
1370 		int get_verify = 0;
1371 
1372 		if (td->verify_batch)
1373 			get_verify = 1;
1374 		else if (!(td->io_hist_len % td->o.verify_backlog) &&
1375 			 td->last_ddir != DDIR_READ) {
1376 			td->verify_batch = td->o.verify_batch;
1377 			if (!td->verify_batch)
1378 				td->verify_batch = td->o.verify_backlog;
1379 			get_verify = 1;
1380 		}
1381 
1382 		if (get_verify && !get_next_verify(td, io_u)) {
1383 			td->verify_batch--;
1384 			return 1;
1385 		}
1386 	}
1387 
1388 	return 0;
1389 }
1390 
1391 /*
1392  * Fill offset and start time into the buffer content, to prevent too
1393  * easily compressible data for simple de-dupe attempts. Do this for every
1394  * 512b block in the range, since that should be the smallest block size
1395  * we can expect from a device.
1396  */
1397 static void small_content_scramble(struct io_u *io_u)
1398 {
1399 	unsigned int i, nr_blocks = io_u->buflen / 512;
1400 	uint64_t boffset;
1401 	unsigned int offset;
1402 	void *p, *end;
1403 
1404 	if (!nr_blocks)
1405 		return;
1406 
1407 	p = io_u->xfer_buf;
1408 	boffset = io_u->offset;
1409 	io_u->buf_filled_len = 0;
1410 
1411 	for (i = 0; i < nr_blocks; i++) {
1412 		/*
1413 		 * Fill the byte offset into a "random" start offset of
1414 		 * the buffer, given by the xor of the usec time
1415 		 * and the actual offset.
1416 		 */
1417 		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
1418 		offset &= ~(sizeof(uint64_t) - 1);
1419 		if (offset >= 512 - sizeof(uint64_t))
1420 			offset -= sizeof(uint64_t);
1421 		memcpy(p + offset, &boffset, sizeof(boffset));
1422 
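		/* also stamp the io start time at the tail of each 512b block */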
1423 		end = p + 512 - sizeof(io_u->start_time);
1424 		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1425 		p += 512;
1426 		boffset += 512;
1427 	}
1428 }
1429 
1430 /*
1431  * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1432  * etc. The returned io_u is fully ready to be prepped and submitted.
1433  */
1434 struct io_u *get_io_u(struct thread_data *td)
1435 {
1436 	struct fio_file *f;
1437 	struct io_u *io_u;
1438 	int do_scramble = 0;
1439 	long ret = 0;
1440 
1441 	io_u = __get_io_u(td);
1442 	if (!io_u) {
1443 		dprint(FD_IO, "__get_io_u failed\n");
1444 		return NULL;
1445 	}
1446 
1447 	if (check_get_verify(td, io_u))
1448 		goto out;
1449 	if (check_get_trim(td, io_u))
1450 		goto out;
1451 
1452 	/*
1453 	 * from a requeue, io_u already setup
1454 	 */
1455 	if (io_u->file)
1456 		goto out;
1457 
1458 	/*
1459 	 * If using an iolog, grab next piece if any available.
1460 	 */
1461 	if (td->flags & TD_F_READ_IOLOG) {
1462 		if (read_iolog_get(td, io_u))
1463 			goto err_put;
1464 	} else if (set_io_u_file(td, io_u)) {
1465 		ret = -EBUSY;
1466 		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1467 		goto err_put;
1468 	}
1469 
1470 	f = io_u->file;
1471 	if (!f) {
1472 		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1473 		goto err_put;
1474 	}
1475 
1476 	assert(fio_file_open(f));
1477 
1478 	if (ddir_rw(io_u->ddir)) {
1479 		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
1480 			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
1481 			goto err_put;
1482 		}
1483 
1484 		f->last_start = io_u->offset;
1485 		f->last_pos = io_u->offset + io_u->buflen;
1486 
1487 		if (io_u->ddir == DDIR_WRITE) {
1488 			if (td->flags & TD_F_REFILL_BUFFERS) {
1489 				io_u_fill_buffer(td, io_u,
1490 					io_u->xfer_buflen, io_u->xfer_buflen);
1491 			} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
1492 				   !(td->flags & TD_F_COMPRESS))
1493 				do_scramble = 1;
1494 			if (td->flags & TD_F_VER_NONE) {
1495 				populate_verify_io_u(td, io_u);
1496 				do_scramble = 0;
1497 			}
1498 		} else if (io_u->ddir == DDIR_READ) {
1499 			/*
1500 			 * Reset the buf_filled parameters so next time if the
1501 			 * buffer is used for writes it is refilled.
1502 			 */
1503 			io_u->buf_filled_len = 0;
1504 		}
1505 	}
1506 
1507 	/*
1508 	 * Set io data pointers.
1509 	 */
1510 	io_u->xfer_buf = io_u->buf;
1511 	io_u->xfer_buflen = io_u->buflen;
1512 
1513 out:
1514 	assert(io_u->file);
1515 	if (!td_io_prep(td, io_u)) {
1516 		if (!td->o.disable_slat)
1517 			fio_gettime(&io_u->start_time, NULL);
1518 		if (do_scramble)
1519 			small_content_scramble(io_u);
1520 		return io_u;
1521 	}
1522 err_put:
1523 	dprint(FD_IO, "get_io_u failed\n");
1524 	put_io_u(td, io_u);
1525 	return ERR_PTR(ret);
1526 }
1527 
1528 void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1529 {
1530 	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
1531 	const char *msg[] = { "read", "write", "sync", "datasync",
1532 				"sync_file_range", "wait", "trim" };
1533 
1534 	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1535 		return;
1536 
1537 	log_err("fio: io_u error");
1538 
1539 	if (io_u->file)
1540 		log_err(" on file %s", io_u->file->file_name);
1541 
1542 	log_err(": %s\n", strerror(io_u->error));
1543 
1544 	log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
1545 					io_u->offset, io_u->xfer_buflen);
1546 
1547 	if (!td->error)
1548 		td_verror(td, io_u->error, "io_u error");
1549 }
1550 
1551 static inline int gtod_reduce(struct thread_data *td)
1552 {
1553 	return td->o.disable_clat && td->o.disable_lat && td->o.disable_slat
1554 		&& td->o.disable_bw;
1555 }
1556 
1557 static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1558 				  struct io_completion_data *icd,
1559 				  const enum fio_ddir idx, unsigned int bytes)
1560 {
1561 	unsigned long lusec = 0;
1562 
1563 	if (!gtod_reduce(td))
1564 		lusec = utime_since(&io_u->issue_time, &icd->time);
1565 
1566 	if (!td->o.disable_lat) {
1567 		unsigned long tusec;
1568 
1569 		tusec = utime_since(&io_u->start_time, &icd->time);
1570 		add_lat_sample(td, idx, tusec, bytes);
1571 
1572 		if (td->flags & TD_F_PROFILE_OPS) {
1573 			struct prof_io_ops *ops = &td->prof_io_ops;
1574 
1575 			if (ops->io_u_lat)
1576 				icd->error = ops->io_u_lat(td, tusec);
1577 		}
1578 
1579 		if (td->o.max_latency && tusec > td->o.max_latency)
1580 			lat_fatal(td, icd, tusec, td->o.max_latency);
1581 		if (td->o.latency_target && tusec > td->o.latency_target) {
1582 			if (lat_target_failed(td))
1583 				lat_fatal(td, icd, tusec, td->o.latency_target);
1584 		}
1585 	}
1586 
1587 	if (!td->o.disable_clat) {
1588 		add_clat_sample(td, idx, lusec, bytes);
1589 		io_u_mark_latency(td, lusec);
1590 	}
1591 
1592 	if (!td->o.disable_bw)
1593 		add_bw_sample(td, idx, bytes, &icd->time);
1594 
1595 	if (!gtod_reduce(td))
1596 		add_iops_sample(td, idx, bytes, &icd->time);
1597 }
1598 
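/* usecs that the io completed so far in this direction should have taken at the configured rate */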
1599 static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1600 {
1601 	uint64_t secs, remainder, bps, bytes;
1602 
1603 	bytes = td->this_io_bytes[ddir];
1604 	bps = td->rate_bps[ddir];
1605 	secs = bytes / bps;
1606 	remainder = bytes % bps;
1607 	return remainder * 1000000 / bps + secs * 1000000;
1608 }
1609 
1610 static void io_completed(struct thread_data *td, struct io_u *io_u,
1611 			 struct io_completion_data *icd)
1612 {
1613 	struct fio_file *f;
1614 
1615 	dprint_io_u(io_u, "io complete");
1616 
1617 	td_io_u_lock(td);
1618 	assert(io_u->flags & IO_U_F_FLIGHT);
1619 	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
1620 
1621 	/*
1622 	 * Mark IO ok to verify
1623 	 */
1624 	if (io_u->ipo) {
1625 		/*
1626 		 * Remove errored entry from the verification list
1627 		 */
1628 		if (io_u->error)
1629 			unlog_io_piece(td, io_u);
1630 		else {
1631 			io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
1632 			write_barrier();
1633 		}
1634 	}
1635 
1636 	td_io_u_unlock(td);
1637 
1638 	if (ddir_sync(io_u->ddir)) {
1639 		td->last_was_sync = 1;
1640 		f = io_u->file;
1641 		if (f) {
1642 			f->first_write = -1ULL;
1643 			f->last_write = -1ULL;
1644 		}
1645 		return;
1646 	}
1647 
1648 	td->last_was_sync = 0;
1649 	td->last_ddir = io_u->ddir;
1650 
1651 	if (!io_u->error && ddir_rw(io_u->ddir)) {
1652 		unsigned int bytes = io_u->buflen - io_u->resid;
1653 		const enum fio_ddir idx = io_u->ddir;
1654 		const enum fio_ddir odx = io_u->ddir ^ 1;
1655 		int ret;
1656 
1657 		td->io_blocks[idx]++;
1658 		td->this_io_blocks[idx]++;
1659 		td->io_bytes[idx] += bytes;
1660 
1661 		if (!(io_u->flags & IO_U_F_VER_LIST))
1662 			td->this_io_bytes[idx] += bytes;
1663 
1664 		if (idx == DDIR_WRITE) {
1665 			f = io_u->file;
1666 			if (f) {
1667 				if (f->first_write == -1ULL ||
1668 				    io_u->offset < f->first_write)
1669 					f->first_write = io_u->offset;
1670 				if (f->last_write == -1ULL ||
1671 				    ((io_u->offset + bytes) > f->last_write))
1672 					f->last_write = io_u->offset + bytes;
1673 			}
1674 		}
1675 
1676 		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1677 					   td->runstate == TD_VERIFYING)) {
1678 			account_io_completion(td, io_u, icd, idx, bytes);
1679 
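			/* pending sleep = time the io done so far should have taken at the target rate, minus elapsed runtime */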
1680 			if (__should_check_rate(td, idx)) {
1681 				td->rate_pending_usleep[idx] =
1682 					(usec_for_io(td, idx) -
1683 					 utime_since_now(&td->start));
1684 			}
1685 			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
1686 				td->rate_pending_usleep[odx] =
1687 					(usec_for_io(td, odx) -
1688 					 utime_since_now(&td->start));
1689 		}
1690 
1691 		icd->bytes_done[idx] += bytes;
1692 
1693 		if (io_u->end_io) {
1694 			ret = io_u->end_io(td, io_u);
1695 			if (ret && !icd->error)
1696 				icd->error = ret;
1697 		}
1698 	} else if (io_u->error) {
1699 		icd->error = io_u->error;
1700 		io_u_log_error(td, io_u);
1701 	}
1702 	if (icd->error) {
1703 		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);
1704 		if (!td_non_fatal_error(td, eb, icd->error))
1705 			return;
1706 		/*
1707 		 * If there is a non_fatal error, then add to the error count
1708 		 * and clear all the errors.
1709 		 */
1710 		update_error_count(td, icd->error);
1711 		td_clear_error(td);
1712 		icd->error = 0;
1713 		io_u->error = 0;
1714 	}
1715 }
1716 
1717 static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1718 		     int nr)
1719 {
1720 	int ddir;
1721 
1722 	if (!gtod_reduce(td))
1723 		fio_gettime(&icd->time, NULL);
1724 
1725 	icd->nr = nr;
1726 
1727 	icd->error = 0;
1728 	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1729 		icd->bytes_done[ddir] = 0;
1730 }
1731 
1732 static void ios_completed(struct thread_data *td,
1733 			  struct io_completion_data *icd)
1734 {
1735 	struct io_u *io_u;
1736 	int i;
1737 
1738 	for (i = 0; i < icd->nr; i++) {
1739 		io_u = td->io_ops->event(td, i);
1740 
1741 		io_completed(td, io_u, icd);
1742 
1743 		if (!(io_u->flags & IO_U_F_FREE_DEF))
1744 			put_io_u(td, io_u);
1745 	}
1746 }
1747 
1748 /*
1749  * Complete a single io_u for the sync engines.
1750  */
1751 int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
1752 		       uint64_t *bytes)
1753 {
1754 	struct io_completion_data icd;
1755 
1756 	init_icd(td, &icd, 1);
1757 	io_completed(td, io_u, &icd);
1758 
1759 	if (!(io_u->flags & IO_U_F_FREE_DEF))
1760 		put_io_u(td, io_u);
1761 
1762 	if (icd.error) {
1763 		td_verror(td, icd.error, "io_u_sync_complete");
1764 		return -1;
1765 	}
1766 
1767 	if (bytes) {
1768 		int ddir;
1769 
1770 		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1771 			bytes[ddir] += icd.bytes_done[ddir];
1772 	}
1773 
1774 	return 0;
1775 }
1776 
1777 /*
1778  * Called to complete min_events number of io for the async engines.
1779  */
1780 int io_u_queued_complete(struct thread_data *td, int min_evts,
1781 			 uint64_t *bytes)
1782 {
1783 	struct io_completion_data icd;
1784 	struct timespec *tvp = NULL;
1785 	int ret;
1786 	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
1787 
1788 	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);
1789 
1790 	if (!min_evts)
1791 		tvp = &ts;
1792 
1793 	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
1794 	if (ret < 0) {
1795 		td_verror(td, -ret, "td_io_getevents");
1796 		return ret;
1797 	} else if (!ret)
1798 		return ret;
1799 
1800 	init_icd(td, &icd, ret);
1801 	ios_completed(td, &icd);
1802 	if (icd.error) {
1803 		td_verror(td, icd.error, "io_u_queued_complete");
1804 		return -1;
1805 	}
1806 
1807 	if (bytes) {
1808 		int ddir;
1809 
1810 		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1811 			bytes[ddir] += icd.bytes_done[ddir];
1812 	}
1813 
1814 	return 0;
1815 }
1816 
1817 /*
1818  * Call when io_u is really queued, to update the submission latency.
1819  */
1820 void io_u_queued(struct thread_data *td, struct io_u *io_u)
1821 {
1822 	if (!td->o.disable_slat) {
1823 		unsigned long slat_time;
1824 
1825 		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
1826 		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
1827 	}
1828 }
1829 
1830 void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
1831 		    unsigned int max_bs)
1832 {
1833 	if (td->o.buffer_pattern_bytes)
1834 		fill_buffer_pattern(td, buf, max_bs);
1835 	else if (!td->o.zero_buffers) {
1836 		unsigned int perc = td->o.compress_percentage;
1837 
1838 		if (perc) {
1839 			unsigned int seg = min_write;
1840 
1841 			seg = min(min_write, td->o.compress_chunk);
1842 			if (!seg)
1843 				seg = min_write;
1844 
1845 			fill_random_buf_percentage(&td->buf_state, buf,
1846 						perc, seg, max_bs);
1847 		} else
1848 			fill_random_buf(&td->buf_state, buf, max_bs);
1849 	} else
1850 		memset(buf, 0, max_bs);
1851 }
1852 
1853 /*
1854  * "randomly" fill the buffer contents
1855  */
1856 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1857 		      unsigned int min_write, unsigned int max_bs)
1858 {
1859 	io_u->buf_filled_len = 0;
1860 	fill_io_buffer(td, io_u->buf, min_write, max_bs);
1861 }
1862