1 #include <stdio.h>
2 #include <stdlib.h>
3 #include <unistd.h>
4 #include <ctype.h>
5 #include <string.h>
6 #include <assert.h>
7 #include <libgen.h>
8 #include <fcntl.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <netinet/in.h>
12
13 #include "fio.h"
14 #include "verify.h"
15 #include "parse.h"
16 #include "lib/fls.h"
17 #include "lib/pattern.h"
18 #include "options.h"
19 #include "optgroup.h"
20
21 char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
22
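/*
 * Option callbacks get a pointer to the thread_options embedded in
 * struct thread_data; map it back to the owning thread_data.
 */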
23 #define cb_data_to_td(data) container_of(data, struct thread_data, o)
24
25 static struct pattern_fmt_desc fmt_desc[] = {
26 {
27 .fmt = "%o",
28 .len = FIELD_SIZE(struct io_u *, offset),
29 .paste = paste_blockoff
30 }
31 };
32
33 /*
34 * Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
35 */
36 static char *get_opt_postfix(const char *str)
37 {
38 char *p = strstr(str, ":");
39
40 if (!p)
41 return NULL;
42
43 p++;
44 strip_blank_front(&p);
45 strip_blank_end(p);
46 return strdup(p);
47 }
48
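/* qsort() comparator: order bssplit entries by ascending percentage */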
49 static int bs_cmp(const void *p1, const void *p2)
50 {
51 const struct bssplit *bsp1 = p1;
52 const struct bssplit *bsp2 = p2;
53
54 return (int) bsp1->perc - (int) bsp2->perc;
55 }
56
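/*
 * Scratch area for parsing "value/percentage" lists; at most 100
 * entries are collected per call.
 */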
57 struct split {
58 unsigned int nr;
59 unsigned int val1[100];
60 unsigned int val2[100];
61 };
62
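/*
 * Parse a colon-separated list of "value[/percentage]" entries (as used
 * by bssplit and zone_split) into the split scratch area. A missing or
 * zero percentage is stored as -1 and filled in later from whatever
 * percentage remains.
 */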
63 static int split_parse_ddir(struct thread_options *o, struct split *split,
64 enum fio_ddir ddir, char *str)
65 {
66 unsigned int i, perc;
67 long long val;
68 char *fname;
69
70 split->nr = 0;
71
72 i = 0;
73 while ((fname = strsep(&str, ":")) != NULL) {
74 char *perc_str;
75
76 if (!strlen(fname))
77 break;
78
79 perc_str = strstr(fname, "/");
80 if (perc_str) {
81 *perc_str = '\0';
82 perc_str++;
83 perc = atoi(perc_str);
84 if (perc > 100)
85 perc = 100;
86 else if (!perc)
87 perc = -1U;
88 } else
89 perc = -1U;
90
91 if (str_to_decimal(fname, &val, 1, o, 0, 0)) {
92 log_err("fio: bssplit conversion failed\n");
93 return 1;
94 }
95
96 split->val1[i] = val;
97 split->val2[i] = perc;
98 i++;
99 if (i == 100)
100 break;
101 }
102
103 split->nr = i;
104 return 0;
105 }
106
107 static int bssplit_ddir(struct thread_options *o, enum fio_ddir ddir, char *str)
108 {
109 unsigned int i, perc, perc_missing;
110 unsigned int max_bs, min_bs;
111 struct split split;
112
113 memset(&split, 0, sizeof(split));
114
115 if (split_parse_ddir(o, &split, ddir, str))
116 return 1;
117 if (!split.nr)
118 return 0;
119
120 max_bs = 0;
121 min_bs = -1;
122 o->bssplit[ddir] = malloc(split.nr * sizeof(struct bssplit));
123 o->bssplit_nr[ddir] = split.nr;
124 for (i = 0; i < split.nr; i++) {
125 if (split.val1[i] > max_bs)
126 max_bs = split.val1[i];
127 if (split.val1[i] < min_bs)
128 min_bs = split.val1[i];
129
130 o->bssplit[ddir][i].bs = split.val1[i];
131 		o->bssplit[ddir][i].perc = split.val2[i];
132 }
133
134 /*
135 * Now check if the percentages add up, and how much is missing
136 */
137 perc = perc_missing = 0;
138 for (i = 0; i < o->bssplit_nr[ddir]; i++) {
139 struct bssplit *bsp = &o->bssplit[ddir][i];
140
141 if (bsp->perc == -1U)
142 perc_missing++;
143 else
144 perc += bsp->perc;
145 }
146
147 if (perc > 100 && perc_missing > 1) {
148 log_err("fio: bssplit percentages add to more than 100%%\n");
149 free(o->bssplit[ddir]);
150 o->bssplit[ddir] = NULL;
151 return 1;
152 }
153
154 /*
155 * If values didn't have a percentage set, divide the remains between
156 * them.
157 */
158 if (perc_missing) {
159 if (perc_missing == 1 && o->bssplit_nr[ddir] == 1)
160 perc = 100;
161 for (i = 0; i < o->bssplit_nr[ddir]; i++) {
162 struct bssplit *bsp = &o->bssplit[ddir][i];
163
164 if (bsp->perc == -1U)
165 bsp->perc = (100 - perc) / perc_missing;
166 }
167 }
168
169 o->min_bs[ddir] = min_bs;
170 o->max_bs[ddir] = max_bs;
171
172 /*
173 * now sort based on percentages, for ease of lookup
174 */
175 qsort(o->bssplit[ddir], o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
176 return 0;
177 }
178
179 typedef int (split_parse_fn)(struct thread_options *, enum fio_ddir, char *);
180
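/*
 * Split a "read,write,trim" option string and feed each part to the
 * supplied parse function. If fewer parts are given, the missing
 * directions reuse the last value provided.
 */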
181 static int str_split_parse(struct thread_data *td, char *str, split_parse_fn *fn)
182 {
183 char *odir, *ddir;
184 int ret = 0;
185
186 odir = strchr(str, ',');
187 if (odir) {
188 ddir = strchr(odir + 1, ',');
189 if (ddir) {
190 ret = fn(&td->o, DDIR_TRIM, ddir + 1);
191 if (!ret)
192 *ddir = '\0';
193 } else {
194 char *op;
195
196 op = strdup(odir + 1);
197 ret = fn(&td->o, DDIR_TRIM, op);
198
199 free(op);
200 }
201 if (!ret)
202 ret = fn(&td->o, DDIR_WRITE, odir + 1);
203 if (!ret) {
204 *odir = '\0';
205 ret = fn(&td->o, DDIR_READ, str);
206 }
207 } else {
208 char *op;
209
210 op = strdup(str);
211 ret = fn(&td->o, DDIR_WRITE, op);
212 free(op);
213
214 if (!ret) {
215 op = strdup(str);
216 ret = fn(&td->o, DDIR_TRIM, op);
217 free(op);
218 }
219 if (!ret)
220 ret = fn(&td->o, DDIR_READ, str);
221 }
222
223 return ret;
224 }
225
226 static int str_bssplit_cb(void *data, const char *input)
227 {
228 struct thread_data *td = cb_data_to_td(data);
229 char *str, *p;
230 int ret = 0;
231
232 p = str = strdup(input);
233
234 strip_blank_front(&str);
235 strip_blank_end(str);
236
237 ret = str_split_parse(td, str, bssplit_ddir);
238
239 if (parse_dryrun()) {
240 int i;
241
242 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
243 free(td->o.bssplit[i]);
244 td->o.bssplit[i] = NULL;
245 td->o.bssplit_nr[i] = 0;
246 }
247 }
248
249 free(p);
250 return ret;
251 }
252
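/*
 * Map a symbolic errno name ("EPERM", "ENOENT", ...) to its numeric
 * value; returns 0 if the name is not recognized.
 */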
253 static int str2error(char *str)
254 {
255 const char *err[] = { "EPERM", "ENOENT", "ESRCH", "EINTR", "EIO",
256 "ENXIO", "E2BIG", "ENOEXEC", "EBADF",
257 "ECHILD", "EAGAIN", "ENOMEM", "EACCES",
258 "EFAULT", "ENOTBLK", "EBUSY", "EEXIST",
259 "EXDEV", "ENODEV", "ENOTDIR", "EISDIR",
260 "EINVAL", "ENFILE", "EMFILE", "ENOTTY",
261 "ETXTBSY","EFBIG", "ENOSPC", "ESPIPE",
262 "EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE" };
263 int i = 0, num = sizeof(err) / sizeof(char *);
264
265 while (i < num) {
266 if (!strcmp(err[i], str))
267 return i + 1;
268 i++;
269 }
270 return 0;
271 }
272
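/*
 * Parse a colon-separated list of errors to ignore for the given error
 * type. Entries may be symbolic names ("EIO") or numeric errno values.
 */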
273 static int ignore_error_type(struct thread_data *td, int etype, char *str)
274 {
275 unsigned int i;
276 int *error;
277 char *fname;
278
279 if (etype >= ERROR_TYPE_CNT) {
280 log_err("Illegal error type\n");
281 return 1;
282 }
283
284 td->o.ignore_error_nr[etype] = 4;
285 	error = malloc(4 * sizeof(int));
286
287 i = 0;
288 while ((fname = strsep(&str, ":")) != NULL) {
289
290 if (!strlen(fname))
291 break;
292
293 /*
294 * grow struct buffer, if needed
295 */
296 if (i == td->o.ignore_error_nr[etype]) {
297 td->o.ignore_error_nr[etype] <<= 1;
298 error = realloc(error, td->o.ignore_error_nr[etype]
299 * sizeof(int));
300 }
301 if (fname[0] == 'E') {
302 error[i] = str2error(fname);
303 } else {
304 error[i] = atoi(fname);
305 if (error[i] < 0)
306 error[i] = -error[i];
307 }
308 if (!error[i]) {
309 			log_err("Unknown error %s, please use a numeric value\n",
310 fname);
311 free(error);
312 return 1;
313 }
314 i++;
315 }
316 if (i) {
317 td->o.continue_on_error |= 1 << etype;
318 td->o.ignore_error_nr[etype] = i;
319 td->o.ignore_error[etype] = error;
320 } else
321 free(error);
322
323 return 0;
324
325 }
326
327 static int str_ignore_error_cb(void *data, const char *input)
328 {
329 struct thread_data *td = cb_data_to_td(data);
330 char *str, *p, *n;
331 int type = 0, ret = 1;
332
333 if (parse_dryrun())
334 return 0;
335
336 p = str = strdup(input);
337
338 strip_blank_front(&str);
339 strip_blank_end(str);
340
341 while (p) {
342 n = strchr(p, ',');
343 if (n)
344 *n++ = '\0';
345 ret = ignore_error_type(td, type, p);
346 if (ret)
347 break;
348 p = n;
349 type++;
350 }
351 free(str);
352 return ret;
353 }
354
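/*
 * Handle the optional ":<nr>" postfix on the rw= option: for random
 * workloads it sets ddir_seq_nr (IOs issued before a new offset is
 * generated), for sequential workloads it sets ddir_seq_add (a delta
 * added to each generated offset).
 */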
355 static int str_rw_cb(void *data, const char *str)
356 {
357 struct thread_data *td = cb_data_to_td(data);
358 struct thread_options *o = &td->o;
359 char *nr;
360
361 if (parse_dryrun())
362 return 0;
363
364 o->ddir_seq_nr = 1;
365 o->ddir_seq_add = 0;
366
367 nr = get_opt_postfix(str);
368 if (!nr)
369 return 0;
370
371 if (td_random(td))
372 o->ddir_seq_nr = atoi(nr);
373 else {
374 long long val;
375
376 if (str_to_decimal(nr, &val, 1, o, 0, 0)) {
377 log_err("fio: rw postfix parsing failed\n");
378 free(nr);
379 return 1;
380 }
381
382 o->ddir_seq_add = val;
383 }
384
385 free(nr);
386 return 0;
387 }
388
389 static int str_mem_cb(void *data, const char *mem)
390 {
391 struct thread_data *td = cb_data_to_td(data);
392
393 if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP ||
394 td->o.mem_type == MEM_MMAPSHARED)
395 td->o.mmapfile = get_opt_postfix(mem);
396
397 return 0;
398 }
399
400 static int fio_clock_source_cb(void *data, const char *str)
401 {
402 struct thread_data *td = cb_data_to_td(data);
403
404 fio_clock_source = td->o.clocksource;
405 fio_clock_source_set = 1;
406 fio_clock_init();
407 return 0;
408 }
409
410 static int str_rwmix_read_cb(void *data, unsigned long long *val)
411 {
412 struct thread_data *td = cb_data_to_td(data);
413
414 td->o.rwmix[DDIR_READ] = *val;
415 td->o.rwmix[DDIR_WRITE] = 100 - *val;
416 return 0;
417 }
418
419 static int str_rwmix_write_cb(void *data, unsigned long long *val)
420 {
421 struct thread_data *td = cb_data_to_td(data);
422
423 td->o.rwmix[DDIR_WRITE] = *val;
424 td->o.rwmix[DDIR_READ] = 100 - *val;
425 return 0;
426 }
427
428 static int str_exitall_cb(void)
429 {
430 exitall_on_terminate = 1;
431 return 0;
432 }
433
434 #ifdef FIO_HAVE_CPU_AFFINITY
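/*
 * Reduce *mask to a single CPU: keep only the cpu_index'th set bit
 * (modulo the number of CPUs in the mask) and clear the rest. Returns
 * the number of CPUs remaining in the mask.
 */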
435 int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu_index)
436 {
437 unsigned int i, index, cpus_in_mask;
438 const long max_cpu = cpus_online();
439
440 cpus_in_mask = fio_cpu_count(mask);
441 cpu_index = cpu_index % cpus_in_mask;
442
443 index = 0;
444 for (i = 0; i < max_cpu; i++) {
445 if (!fio_cpu_isset(mask, i))
446 continue;
447
448 if (cpu_index != index)
449 fio_cpu_clear(mask, i);
450
451 index++;
452 }
453
454 return fio_cpu_count(mask);
455 }
456
457 static int str_cpumask_cb(void *data, unsigned long long *val)
458 {
459 struct thread_data *td = cb_data_to_td(data);
460 unsigned int i;
461 long max_cpu;
462 int ret;
463
464 if (parse_dryrun())
465 return 0;
466
467 ret = fio_cpuset_init(&td->o.cpumask);
468 if (ret < 0) {
469 log_err("fio: cpuset_init failed\n");
470 td_verror(td, ret, "fio_cpuset_init");
471 return 1;
472 }
473
474 max_cpu = cpus_online();
475
476 for (i = 0; i < sizeof(int) * 8; i++) {
477 if ((1 << i) & *val) {
478 if (i >= max_cpu) {
479 log_err("fio: CPU %d too large (max=%ld)\n", i,
480 max_cpu - 1);
481 return 1;
482 }
483 dprint(FD_PARSE, "set cpu allowed %d\n", i);
484 fio_cpu_set(&td->o.cpumask, i);
485 }
486 }
487
488 return 0;
489 }
490
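/*
 * Parse a comma-separated list of CPU numbers and ranges, e.g.
 * "0,2,4-7", into an OS CPU mask.
 */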
491 static int set_cpus_allowed(struct thread_data *td, os_cpu_mask_t *mask,
492 const char *input)
493 {
494 char *cpu, *str, *p;
495 long max_cpu;
496 int ret = 0;
497
498 ret = fio_cpuset_init(mask);
499 if (ret < 0) {
500 log_err("fio: cpuset_init failed\n");
501 td_verror(td, ret, "fio_cpuset_init");
502 return 1;
503 }
504
505 p = str = strdup(input);
506
507 strip_blank_front(&str);
508 strip_blank_end(str);
509
510 max_cpu = cpus_online();
511
512 while ((cpu = strsep(&str, ",")) != NULL) {
513 char *str2, *cpu2;
514 int icpu, icpu2;
515
516 if (!strlen(cpu))
517 break;
518
519 str2 = cpu;
520 icpu2 = -1;
521 while ((cpu2 = strsep(&str2, "-")) != NULL) {
522 if (!strlen(cpu2))
523 break;
524
525 icpu2 = atoi(cpu2);
526 }
527
528 icpu = atoi(cpu);
529 if (icpu2 == -1)
530 icpu2 = icpu;
531 while (icpu <= icpu2) {
532 if (icpu >= FIO_MAX_CPUS) {
533 log_err("fio: your OS only supports up to"
534 " %d CPUs\n", (int) FIO_MAX_CPUS);
535 ret = 1;
536 break;
537 }
538 if (icpu >= max_cpu) {
539 log_err("fio: CPU %d too large (max=%ld)\n",
540 icpu, max_cpu - 1);
541 ret = 1;
542 break;
543 }
544
545 dprint(FD_PARSE, "set cpu allowed %d\n", icpu);
546 fio_cpu_set(mask, icpu);
547 icpu++;
548 }
549 if (ret)
550 break;
551 }
552
553 free(p);
554 return ret;
555 }
556
557 static int str_cpus_allowed_cb(void *data, const char *input)
558 {
559 struct thread_data *td = cb_data_to_td(data);
560
561 if (parse_dryrun())
562 return 0;
563
564 return set_cpus_allowed(td, &td->o.cpumask, input);
565 }
566
567 static int str_verify_cpus_allowed_cb(void *data, const char *input)
568 {
569 struct thread_data *td = cb_data_to_td(data);
570
571 if (parse_dryrun())
572 return 0;
573
574 return set_cpus_allowed(td, &td->o.verify_cpumask, input);
575 }
576
577 #ifdef CONFIG_ZLIB
578 static int str_log_cpus_allowed_cb(void *data, const char *input)
579 {
580 struct thread_data *td = cb_data_to_td(data);
581
582 if (parse_dryrun())
583 return 0;
584
585 return set_cpus_allowed(td, &td->o.log_gz_cpumask, input);
586 }
587 #endif /* CONFIG_ZLIB */
588
589 #endif /* FIO_HAVE_CPU_AFFINITY */
590
591 #ifdef CONFIG_LIBNUMA
592 static int str_numa_cpunodes_cb(void *data, char *input)
593 {
594 struct thread_data *td = cb_data_to_td(data);
595 struct bitmask *verify_bitmask;
596
597 if (parse_dryrun())
598 return 0;
599
600 /* numa_parse_nodestring() parses a character string list
601 * of nodes into a bit mask. The bit mask is allocated by
602 * numa_allocate_nodemask(), so it should be freed by
603 * numa_free_nodemask().
604 */
605 verify_bitmask = numa_parse_nodestring(input);
606 if (verify_bitmask == NULL) {
607 log_err("fio: numa_parse_nodestring failed\n");
608 td_verror(td, 1, "str_numa_cpunodes_cb");
609 return 1;
610 }
611 numa_free_nodemask(verify_bitmask);
612
613 td->o.numa_cpunodes = strdup(input);
614 return 0;
615 }
616
617 static int str_numa_mpol_cb(void *data, char *input)
618 {
619 struct thread_data *td = cb_data_to_td(data);
620 const char * const policy_types[] =
621 { "default", "prefer", "bind", "interleave", "local", NULL };
622 int i;
623 char *nodelist;
624 struct bitmask *verify_bitmask;
625
626 if (parse_dryrun())
627 return 0;
628
629 nodelist = strchr(input, ':');
630 if (nodelist) {
631 /* NUL-terminate mode */
632 *nodelist++ = '\0';
633 }
634
635 for (i = 0; i <= MPOL_LOCAL; i++) {
636 if (!strcmp(input, policy_types[i])) {
637 td->o.numa_mem_mode = i;
638 break;
639 }
640 }
641 if (i > MPOL_LOCAL) {
642 log_err("fio: memory policy should be: default, prefer, bind, interleave, local\n");
643 goto out;
644 }
645
646 switch (td->o.numa_mem_mode) {
647 case MPOL_PREFERRED:
648 /*
649 * Insist on a nodelist of one node only
650 */
651 if (nodelist) {
652 char *rest = nodelist;
653 while (isdigit(*rest))
654 rest++;
655 if (*rest) {
656 log_err("fio: one node only for \'prefer\'\n");
657 goto out;
658 }
659 } else {
660 log_err("fio: one node is needed for \'prefer\'\n");
661 goto out;
662 }
663 break;
664 case MPOL_INTERLEAVE:
665 /*
666 * Default to online nodes with memory if no nodelist
667 */
668 if (!nodelist)
669 nodelist = strdup("all");
670 break;
671 case MPOL_LOCAL:
672 case MPOL_DEFAULT:
673 /*
674 * Don't allow a nodelist
675 */
676 if (nodelist) {
677 log_err("fio: NO nodelist for \'local\'\n");
678 goto out;
679 }
680 break;
681 case MPOL_BIND:
682 /*
683 * Insist on a nodelist
684 */
685 if (!nodelist) {
686 log_err("fio: a nodelist is needed for \'bind\'\n");
687 goto out;
688 }
689 break;
690 }
691
692
693 /* numa_parse_nodestring() parses a character string list
694 * of nodes into a bit mask. The bit mask is allocated by
695 * numa_allocate_nodemask(), so it should be freed by
696 * numa_free_nodemask().
697 */
698 switch (td->o.numa_mem_mode) {
699 case MPOL_PREFERRED:
700 td->o.numa_mem_prefer_node = atoi(nodelist);
701 break;
702 case MPOL_INTERLEAVE:
703 case MPOL_BIND:
704 verify_bitmask = numa_parse_nodestring(nodelist);
705 if (verify_bitmask == NULL) {
706 log_err("fio: numa_parse_nodestring failed\n");
707 td_verror(td, 1, "str_numa_memnodes_cb");
708 return 1;
709 }
710 td->o.numa_memnodes = strdup(nodelist);
711 numa_free_nodemask(verify_bitmask);
712
713 break;
714 case MPOL_LOCAL:
715 case MPOL_DEFAULT:
716 default:
717 break;
718 }
719
720 return 0;
721 out:
722 return 1;
723 }
724 #endif
725
726 static int str_fst_cb(void *data, const char *str)
727 {
728 struct thread_data *td = cb_data_to_td(data);
729 double val;
730 bool done = false;
731 char *nr;
732
733 td->file_service_nr = 1;
734
735 switch (td->o.file_service_type) {
736 case FIO_FSERVICE_RANDOM:
737 case FIO_FSERVICE_RR:
738 case FIO_FSERVICE_SEQ:
739 nr = get_opt_postfix(str);
740 if (nr) {
741 td->file_service_nr = atoi(nr);
742 free(nr);
743 }
744 done = true;
745 break;
746 case FIO_FSERVICE_ZIPF:
747 val = FIO_DEF_ZIPF;
748 break;
749 case FIO_FSERVICE_PARETO:
750 val = FIO_DEF_PARETO;
751 break;
752 case FIO_FSERVICE_GAUSS:
753 val = 0.0;
754 break;
755 default:
756 log_err("fio: bad file service type: %d\n", td->o.file_service_type);
757 return 1;
758 }
759
760 if (done)
761 return 0;
762
763 nr = get_opt_postfix(str);
764 if (nr && !str_to_float(nr, &val, 0)) {
765 log_err("fio: file service type random postfix parsing failed\n");
766 free(nr);
767 return 1;
768 }
769
770 free(nr);
771
772 switch (td->o.file_service_type) {
773 case FIO_FSERVICE_ZIPF:
774 if (val == 1.00) {
775 			log_err("fio: zipf theta must be different from 1.0\n");
776 return 1;
777 }
778 if (parse_dryrun())
779 return 0;
780 td->zipf_theta = val;
781 break;
782 case FIO_FSERVICE_PARETO:
783 if (val <= 0.00 || val >= 1.00) {
784 log_err("fio: pareto input out of range (0 < input < 1.0)\n");
785 return 1;
786 }
787 if (parse_dryrun())
788 return 0;
789 td->pareto_h = val;
790 break;
791 case FIO_FSERVICE_GAUSS:
792 if (val < 0.00 || val >= 100.00) {
793 log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
794 return 1;
795 }
796 if (parse_dryrun())
797 return 0;
798 td->gauss_dev = val;
799 break;
800 }
801
802 return 0;
803 }
804
805 #ifdef CONFIG_SYNC_FILE_RANGE
806 static int str_sfr_cb(void *data, const char *str)
807 {
808 struct thread_data *td = cb_data_to_td(data);
809 char *nr = get_opt_postfix(str);
810
811 td->sync_file_range_nr = 1;
812 if (nr) {
813 td->sync_file_range_nr = atoi(nr);
814 free(nr);
815 }
816
817 return 0;
818 }
819 #endif
820
821 static int zone_cmp(const void *p1, const void *p2)
822 {
823 const struct zone_split *zsp1 = p1;
824 const struct zone_split *zsp2 = p2;
825
826 return (int) zsp2->access_perc - (int) zsp1->access_perc;
827 }
828
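/*
 * Parse a zoned distribution spec for one data direction, e.g.
 * "60/10:30/20:8/30:2/40": 60% of accesses fall in the first 10% of the
 * range, and so on. Access percentages must not add up to more than 100.
 */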
829 static int zone_split_ddir(struct thread_options *o, enum fio_ddir ddir,
830 char *str)
831 {
832 unsigned int i, perc, perc_missing, sperc, sperc_missing;
833 struct split split;
834
835 memset(&split, 0, sizeof(split));
836
837 if (split_parse_ddir(o, &split, ddir, str))
838 return 1;
839 if (!split.nr)
840 return 0;
841
842 o->zone_split[ddir] = malloc(split.nr * sizeof(struct zone_split));
843 o->zone_split_nr[ddir] = split.nr;
844 for (i = 0; i < split.nr; i++) {
845 o->zone_split[ddir][i].access_perc = split.val1[i];
846 o->zone_split[ddir][i].size_perc = split.val2[i];
847 }
848
849 /*
850 * Now check if the percentages add up, and how much is missing
851 */
852 perc = perc_missing = 0;
853 sperc = sperc_missing = 0;
854 for (i = 0; i < o->zone_split_nr[ddir]; i++) {
855 struct zone_split *zsp = &o->zone_split[ddir][i];
856
857 if (zsp->access_perc == (uint8_t) -1U)
858 perc_missing++;
859 else
860 perc += zsp->access_perc;
861
862 if (zsp->size_perc == (uint8_t) -1U)
863 sperc_missing++;
864 else
865 sperc += zsp->size_perc;
866
867 }
868
869 if (perc > 100 || sperc > 100) {
870 log_err("fio: zone_split percentages add to more than 100%%\n");
871 free(o->zone_split[ddir]);
872 o->zone_split[ddir] = NULL;
873 return 1;
874 }
875 if (perc < 100) {
876 		log_err("fio: access percentages don't add up to 100 for zoned "
877 "random distribution (got=%u)\n", perc);
878 free(o->zone_split[ddir]);
879 o->zone_split[ddir] = NULL;
880 return 1;
881 }
882
883 /*
884 * If values didn't have a percentage set, divide the remains between
885 * them.
886 */
887 if (perc_missing) {
888 if (perc_missing == 1 && o->zone_split_nr[ddir] == 1)
889 perc = 100;
890 for (i = 0; i < o->zone_split_nr[ddir]; i++) {
891 struct zone_split *zsp = &o->zone_split[ddir][i];
892
893 if (zsp->access_perc == (uint8_t) -1U)
894 zsp->access_perc = (100 - perc) / perc_missing;
895 }
896 }
897 if (sperc_missing) {
898 if (sperc_missing == 1 && o->zone_split_nr[ddir] == 1)
899 sperc = 100;
900 for (i = 0; i < o->zone_split_nr[ddir]; i++) {
901 struct zone_split *zsp = &o->zone_split[ddir][i];
902
903 if (zsp->size_perc == (uint8_t) -1U)
904 zsp->size_perc = (100 - sperc) / sperc_missing;
905 }
906 }
907
908 /*
909 * now sort based on percentages, for ease of lookup
910 */
911 qsort(o->zone_split[ddir], o->zone_split_nr[ddir], sizeof(struct zone_split), zone_cmp);
912 return 0;
913 }
914
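/*
 * Expand the zone_split table for one data direction into a 100-entry
 * lookup array indexed by access percentage, mapping each bucket to its
 * size percentage range.
 */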
915 static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
916 {
917 unsigned int i, j, sprev, aprev;
918
919 td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
920
921 sprev = aprev = 0;
922 for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
923 struct zone_split *zsp = &td->o.zone_split[ddir][i];
924
925 for (j = aprev; j < aprev + zsp->access_perc; j++) {
926 struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
927
928 zsi->size_perc = sprev + zsp->size_perc;
929 zsi->size_perc_prev = sprev;
930 }
931
932 aprev += zsp->access_perc;
933 sprev += zsp->size_perc;
934 }
935 }
936
937 /*
938 * Generate state table for indexes, so we don't have to do it inline from
939 * the hot IO path
940 */
941 static void td_zone_gen_index(struct thread_data *td)
942 {
943 int i;
944
945 td->zone_state_index = malloc(DDIR_RWDIR_CNT *
946 sizeof(struct zone_split_index *));
947
948 for (i = 0; i < DDIR_RWDIR_CNT; i++)
949 __td_zone_gen_index(td, i);
950 }
951
952 static int parse_zoned_distribution(struct thread_data *td, const char *input)
953 {
954 char *str, *p;
955 int i, ret = 0;
956
957 p = str = strdup(input);
958
959 strip_blank_front(&str);
960 strip_blank_end(str);
961
962 /* We expect it to start like that, bail if not */
963 if (strncmp(str, "zoned:", 6)) {
964 log_err("fio: mismatch in zoned input <%s>\n", str);
965 free(p);
966 return 1;
967 }
968 str += strlen("zoned:");
969
970 ret = str_split_parse(td, str, zone_split_ddir);
971
972 free(p);
973
974 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
975 int j;
976
977 dprint(FD_PARSE, "zone ddir %d (nr=%u): \n", i, td->o.zone_split_nr[i]);
978
979 for (j = 0; j < td->o.zone_split_nr[i]; j++) {
980 struct zone_split *zsp = &td->o.zone_split[i][j];
981
982 dprint(FD_PARSE, "\t%d: %u/%u\n", j, zsp->access_perc,
983 zsp->size_perc);
984 }
985 }
986
987 if (parse_dryrun()) {
988 int i;
989
990 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
991 free(td->o.zone_split[i]);
992 td->o.zone_split[i] = NULL;
993 td->o.zone_split_nr[i] = 0;
994 }
995
996 return ret;
997 }
998
999 if (!ret)
1000 td_zone_gen_index(td);
1001 else {
1002 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1003 td->o.zone_split_nr[i] = 0;
1004 }
1005
1006 return ret;
1007 }
1008
1009 static int str_random_distribution_cb(void *data, const char *str)
1010 {
1011 struct thread_data *td = cb_data_to_td(data);
1012 double val;
1013 char *nr;
1014
1015 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1016 val = FIO_DEF_ZIPF;
1017 else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1018 val = FIO_DEF_PARETO;
1019 else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1020 val = 0.0;
1021 else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
1022 return parse_zoned_distribution(td, str);
1023 else
1024 return 0;
1025
1026 nr = get_opt_postfix(str);
1027 if (nr && !str_to_float(nr, &val, 0)) {
1028 log_err("fio: random postfix parsing failed\n");
1029 free(nr);
1030 return 1;
1031 }
1032
1033 free(nr);
1034
1035 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF) {
1036 if (val == 1.00) {
1037 			log_err("fio: zipf theta must be different from 1.0\n");
1038 return 1;
1039 }
1040 if (parse_dryrun())
1041 return 0;
1042 td->o.zipf_theta.u.f = val;
1043 } else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) {
1044 if (val <= 0.00 || val >= 1.00) {
1045 log_err("fio: pareto input out of range (0 < input < 1.0)\n");
1046 return 1;
1047 }
1048 if (parse_dryrun())
1049 return 0;
1050 td->o.pareto_h.u.f = val;
1051 } else {
1052 if (val < 0.00 || val >= 100.0) {
1053 log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
1054 return 1;
1055 }
1056 if (parse_dryrun())
1057 return 0;
1058 td->o.gauss_dev.u.f = val;
1059 }
1060
1061 return 0;
1062 }
1063
1064 static int str_steadystate_cb(void *data, const char *str)
1065 {
1066 struct thread_data *td = cb_data_to_td(data);
1067 double val;
1068 char *nr;
1069 char *pct;
1070 long long ll;
1071
1072 if (td->o.ss_state != FIO_SS_IOPS && td->o.ss_state != FIO_SS_IOPS_SLOPE &&
1073 td->o.ss_state != FIO_SS_BW && td->o.ss_state != FIO_SS_BW_SLOPE) {
1074 /* should be impossible to get here */
1075 log_err("fio: unknown steady state criterion\n");
1076 return 1;
1077 }
1078
1079 nr = get_opt_postfix(str);
1080 if (!nr) {
1081 log_err("fio: steadystate threshold must be specified in addition to criterion\n");
1082 free(nr);
1083 return 1;
1084 }
1085
1086 /* ENHANCEMENT Allow fio to understand size=10.2% and use here */
1087 pct = strstr(nr, "%");
1088 if (pct) {
1089 *pct = '\0';
1090 strip_blank_end(nr);
1091 if (!str_to_float(nr, &val, 0)) {
1092 log_err("fio: could not parse steadystate threshold percentage\n");
1093 free(nr);
1094 return 1;
1095 }
1096
1097 dprint(FD_PARSE, "set steady state threshold to %f%%\n", val);
1098 free(nr);
1099 if (parse_dryrun())
1100 return 0;
1101
1102 td->o.ss_state |= __FIO_SS_PCT;
1103 td->o.ss_limit.u.f = val;
1104 } else if (td->o.ss_state & __FIO_SS_IOPS) {
1105 if (!str_to_float(nr, &val, 0)) {
1106 log_err("fio: steadystate IOPS threshold postfix parsing failed\n");
1107 free(nr);
1108 return 1;
1109 }
1110
1111 dprint(FD_PARSE, "set steady state IOPS threshold to %f\n", val);
1112 free(nr);
1113 if (parse_dryrun())
1114 return 0;
1115
1116 td->o.ss_limit.u.f = val;
1117 } else { /* bandwidth criterion */
1118 if (str_to_decimal(nr, &ll, 1, td, 0, 0)) {
1119 log_err("fio: steadystate BW threshold postfix parsing failed\n");
1120 free(nr);
1121 return 1;
1122 }
1123
1124 dprint(FD_PARSE, "set steady state BW threshold to %lld\n", ll);
1125 free(nr);
1126 if (parse_dryrun())
1127 return 0;
1128
1129 td->o.ss_limit.u.f = (double) ll;
1130 }
1131
1132 td->ss.state = td->o.ss_state;
1133 return 0;
1134 }
1135
1136 /*
1137 * Return next name in the string. Files are separated with ':'. If the ':'
1138 * is escaped with a '\', then that ':' is part of the filename and does not
1139 * indicate a new file.
1140 */
1141 static char *get_next_name(char **ptr)
1142 {
1143 char *str = *ptr;
1144 char *p, *start;
1145
1146 if (!str || !strlen(str))
1147 return NULL;
1148
1149 start = str;
1150 do {
1151 /*
1152 * No colon, we are done
1153 */
1154 p = strchr(str, ':');
1155 if (!p) {
1156 *ptr = NULL;
1157 break;
1158 }
1159
1160 /*
1161 * We got a colon, but it's the first character. Skip and
1162 * continue
1163 */
1164 if (p == start) {
1165 str = ++start;
1166 continue;
1167 }
1168
1169 if (*(p - 1) != '\\') {
1170 *p = '\0';
1171 *ptr = p + 1;
1172 break;
1173 }
1174
1175 memmove(p - 1, p, strlen(p) + 1);
1176 str = p;
1177 } while (1);
1178
1179 return start;
1180 }
1181
1182
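/* Count the number of ':'-separated names in the input string */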
1183 static int get_max_name_idx(char *input)
1184 {
1185 unsigned int cur_idx;
1186 char *str, *p;
1187
1188 p = str = strdup(input);
1189 for (cur_idx = 0; ; cur_idx++)
1190 if (get_next_name(&str) == NULL)
1191 break;
1192
1193 free(p);
1194 return cur_idx;
1195 }
1196
1197 /*
1198  * Returns the directory at the index; indexes beyond the number of
1199  * entries wrap around via modulo division of the index
1200 */
1201 int set_name_idx(char *target, size_t tlen, char *input, int index,
1202 bool unique_filename)
1203 {
1204 unsigned int cur_idx;
1205 int len;
1206 char *fname, *str, *p;
1207
1208 p = str = strdup(input);
1209
1210 index %= get_max_name_idx(input);
1211 for (cur_idx = 0; cur_idx <= index; cur_idx++)
1212 fname = get_next_name(&str);
1213
1214 if (client_sockaddr_str[0] && unique_filename) {
1215 len = snprintf(target, tlen, "%s/%s.", fname,
1216 client_sockaddr_str);
1217 } else
1218 len = snprintf(target, tlen, "%s/", fname);
1219
1220 target[tlen - 1] = '\0';
1221 free(p);
1222
1223 return len;
1224 }
1225
1226 static int str_filename_cb(void *data, const char *input)
1227 {
1228 struct thread_data *td = cb_data_to_td(data);
1229 char *fname, *str, *p;
1230
1231 p = str = strdup(input);
1232
1233 strip_blank_front(&str);
1234 strip_blank_end(str);
1235
1236 /*
1237 * Ignore what we may already have from nrfiles option.
1238 */
1239 if (!td->files_index)
1240 td->o.nr_files = 0;
1241
1242 while ((fname = get_next_name(&str)) != NULL) {
1243 if (!strlen(fname))
1244 break;
1245 add_file(td, fname, 0, 1);
1246 }
1247
1248 free(p);
1249 return 0;
1250 }
1251
1252 static int str_directory_cb(void *data, const char fio_unused *unused)
1253 {
1254 struct thread_data *td = cb_data_to_td(data);
1255 struct stat sb;
1256 char *dirname, *str, *p;
1257 int ret = 0;
1258
1259 if (parse_dryrun())
1260 return 0;
1261
1262 p = str = strdup(td->o.directory);
1263 while ((dirname = get_next_name(&str)) != NULL) {
1264 if (lstat(dirname, &sb) < 0) {
1265 ret = errno;
1266
1267 log_err("fio: %s is not a directory\n", dirname);
1268 td_verror(td, ret, "lstat");
1269 goto out;
1270 }
1271 if (!S_ISDIR(sb.st_mode)) {
1272 log_err("fio: %s is not a directory\n", dirname);
1273 ret = 1;
1274 goto out;
1275 }
1276 }
1277
1278 out:
1279 free(p);
1280 return ret;
1281 }
1282
1283 static int str_opendir_cb(void *data, const char fio_unused *str)
1284 {
1285 struct thread_data *td = cb_data_to_td(data);
1286
1287 if (parse_dryrun())
1288 return 0;
1289
1290 if (!td->files_index)
1291 td->o.nr_files = 0;
1292
1293 return add_dir_files(td, td->o.opendir);
1294 }
1295
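/*
 * Parse the buffer_pattern= option and disable buffer scrambling and
 * zeroing so the requested pattern is actually written out.
 */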
1296 static int str_buffer_pattern_cb(void *data, const char *input)
1297 {
1298 struct thread_data *td = cb_data_to_td(data);
1299 int ret;
1300
1301 /* FIXME: for now buffer pattern does not support formats */
1302 ret = parse_and_fill_pattern(input, strlen(input), td->o.buffer_pattern,
1303 MAX_PATTERN_SIZE, NULL, 0, NULL, NULL);
1304 if (ret < 0)
1305 return 1;
1306
1307 assert(ret != 0);
1308 td->o.buffer_pattern_bytes = ret;
1309
1310 /*
1311 * If this job is doing any reading or has compression set,
1312 * ensure that we refill buffers for writes or we could be
1313 * invalidating the pattern through reads.
1314 */
1315 if (!td->o.compress_percentage && !td_read(td))
1316 td->o.refill_buffers = 0;
1317 else
1318 td->o.refill_buffers = 1;
1319
1320 td->o.scramble_buffers = 0;
1321 td->o.zero_buffers = 0;
1322
1323 return 0;
1324 }
1325
1326 static int str_buffer_compress_cb(void *data, unsigned long long *il)
1327 {
1328 struct thread_data *td = cb_data_to_td(data);
1329
1330 td->flags |= TD_F_COMPRESS;
1331 td->o.compress_percentage = *il;
1332 return 0;
1333 }
1334
1335 static int str_dedupe_cb(void *data, unsigned long long *il)
1336 {
1337 struct thread_data *td = cb_data_to_td(data);
1338
1339 td->flags |= TD_F_COMPRESS;
1340 td->o.dedupe_percentage = *il;
1341 td->o.refill_buffers = 1;
1342 return 0;
1343 }
1344
1345 static int str_verify_pattern_cb(void *data, const char *input)
1346 {
1347 struct thread_data *td = cb_data_to_td(data);
1348 int ret;
1349
1350 td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
1351 ret = parse_and_fill_pattern(input, strlen(input), td->o.verify_pattern,
1352 MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
1353 td->o.verify_fmt, &td->o.verify_fmt_sz);
1354 if (ret < 0)
1355 return 1;
1356
1357 assert(ret != 0);
1358 td->o.verify_pattern_bytes = ret;
1359 /*
1360 * VERIFY_* could already be set
1361 */
1362 if (!fio_option_is_set(&td->o, verify))
1363 td->o.verify = VERIFY_PATTERN;
1364
1365 return 0;
1366 }
1367
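/*
 * gtod_reduce toggles all per-IO latency/bandwidth accounting at once
 * and relaxes the cached clock (tv_cache_mask) to cut time-keeping
 * overhead.
 */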
1368 static int str_gtod_reduce_cb(void *data, int *il)
1369 {
1370 struct thread_data *td = cb_data_to_td(data);
1371 int val = *il;
1372
1373 td->o.disable_lat = !!val;
1374 td->o.disable_clat = !!val;
1375 td->o.disable_slat = !!val;
1376 td->o.disable_bw = !!val;
1377 td->o.clat_percentiles = !val;
1378 if (val)
1379 td->tv_cache_mask = 63;
1380
1381 return 0;
1382 }
1383
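/*
 * size= may be an absolute value or a percentage; percentages arrive
 * encoded from the parser and are stashed in size_percent for later
 * resolution against the file or device size.
 */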
1384 static int str_size_cb(void *data, unsigned long long *__val)
1385 {
1386 struct thread_data *td = cb_data_to_td(data);
1387 unsigned long long v = *__val;
1388
1389 if (parse_is_percent(v)) {
1390 td->o.size = 0;
1391 td->o.size_percent = -1ULL - v;
1392 } else
1393 td->o.size = v;
1394
1395 return 0;
1396 }
1397
1398 static int str_write_bw_log_cb(void *data, const char *str)
1399 {
1400 struct thread_data *td = cb_data_to_td(data);
1401
1402 if (str)
1403 td->o.bw_log_file = strdup(str);
1404
1405 td->o.write_bw_log = 1;
1406 return 0;
1407 }
1408
1409 static int str_write_lat_log_cb(void *data, const char *str)
1410 {
1411 struct thread_data *td = cb_data_to_td(data);
1412
1413 if (str)
1414 td->o.lat_log_file = strdup(str);
1415
1416 td->o.write_lat_log = 1;
1417 return 0;
1418 }
1419
1420 static int str_write_iops_log_cb(void *data, const char *str)
1421 {
1422 struct thread_data *td = cb_data_to_td(data);
1423
1424 if (str)
1425 td->o.iops_log_file = strdup(str);
1426
1427 td->o.write_iops_log = 1;
1428 return 0;
1429 }
1430
1431 static int str_write_hist_log_cb(void *data, const char *str)
1432 {
1433 struct thread_data *td = cb_data_to_td(data);
1434
1435 if (str)
1436 td->o.hist_log_file = strdup(str);
1437
1438 td->o.write_hist_log = 1;
1439 return 0;
1440 }
1441
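/* Reject jobs that have the write bit set when fio runs in read-only mode */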
1442 static int rw_verify(struct fio_option *o, void *data)
1443 {
1444 struct thread_data *td = cb_data_to_td(data);
1445
1446 if (read_only && td_write(td)) {
1447 log_err("fio: job <%s> has write bit set, but fio is in"
1448 " read-only mode\n", td->o.name);
1449 return 1;
1450 }
1451
1452 return 0;
1453 }
1454
1455 static int gtod_cpu_verify(struct fio_option *o, void *data)
1456 {
1457 #ifndef FIO_HAVE_CPU_AFFINITY
1458 struct thread_data *td = cb_data_to_td(data);
1459
1460 if (td->o.gtod_cpu) {
1461 		log_err("fio: platform must support CPU affinity for "
1462 "gettimeofday() offloading\n");
1463 return 1;
1464 }
1465 #endif
1466
1467 return 0;
1468 }
1469
1470 /*
1471 * Map of job/command line options
1472 */
1473 struct fio_option fio_options[FIO_MAX_OPTS] = {
1474 {
1475 .name = "description",
1476 .lname = "Description of job",
1477 .type = FIO_OPT_STR_STORE,
1478 .off1 = offsetof(struct thread_options, description),
1479 .help = "Text job description",
1480 .category = FIO_OPT_C_GENERAL,
1481 .group = FIO_OPT_G_DESC,
1482 },
1483 {
1484 .name = "name",
1485 .lname = "Job name",
1486 .type = FIO_OPT_STR_STORE,
1487 .off1 = offsetof(struct thread_options, name),
1488 .help = "Name of this job",
1489 .category = FIO_OPT_C_GENERAL,
1490 .group = FIO_OPT_G_DESC,
1491 },
1492 {
1493 .name = "wait_for",
1494 .lname = "Waitee name",
1495 .type = FIO_OPT_STR_STORE,
1496 .off1 = offsetof(struct thread_options, wait_for),
1497 .help = "Name of the job this one wants to wait for before starting",
1498 .category = FIO_OPT_C_GENERAL,
1499 .group = FIO_OPT_G_DESC,
1500 },
1501 {
1502 .name = "filename",
1503 .lname = "Filename(s)",
1504 .type = FIO_OPT_STR_STORE,
1505 .off1 = offsetof(struct thread_options, filename),
1506 .cb = str_filename_cb,
1507 .prio = -1, /* must come after "directory" */
1508 .help = "File(s) to use for the workload",
1509 .category = FIO_OPT_C_FILE,
1510 .group = FIO_OPT_G_FILENAME,
1511 },
1512 {
1513 .name = "directory",
1514 .lname = "Directory",
1515 .type = FIO_OPT_STR_STORE,
1516 .off1 = offsetof(struct thread_options, directory),
1517 .cb = str_directory_cb,
1518 .help = "Directory to store files in",
1519 .category = FIO_OPT_C_FILE,
1520 .group = FIO_OPT_G_FILENAME,
1521 },
1522 {
1523 .name = "filename_format",
1524 .lname = "Filename Format",
1525 .type = FIO_OPT_STR_STORE,
1526 .off1 = offsetof(struct thread_options, filename_format),
1527 .prio = -1, /* must come after "directory" */
1528 .help = "Override default $jobname.$jobnum.$filenum naming",
1529 .def = "$jobname.$jobnum.$filenum",
1530 .category = FIO_OPT_C_FILE,
1531 .group = FIO_OPT_G_FILENAME,
1532 },
1533 {
1534 .name = "unique_filename",
1535 .lname = "Unique Filename",
1536 .type = FIO_OPT_BOOL,
1537 .off1 = offsetof(struct thread_options, unique_filename),
1538 .help = "For network clients, prefix file with source IP",
1539 .def = "1",
1540 .category = FIO_OPT_C_FILE,
1541 .group = FIO_OPT_G_FILENAME,
1542 },
1543 {
1544 .name = "lockfile",
1545 .lname = "Lockfile",
1546 .type = FIO_OPT_STR,
1547 .off1 = offsetof(struct thread_options, file_lock_mode),
1548 .help = "Lock file when doing IO to it",
1549 .prio = 1,
1550 .parent = "filename",
1551 .hide = 0,
1552 .def = "none",
1553 .category = FIO_OPT_C_FILE,
1554 .group = FIO_OPT_G_FILENAME,
1555 .posval = {
1556 { .ival = "none",
1557 .oval = FILE_LOCK_NONE,
1558 .help = "No file locking",
1559 },
1560 { .ival = "exclusive",
1561 .oval = FILE_LOCK_EXCLUSIVE,
1562 .help = "Exclusive file lock",
1563 },
1564 {
1565 .ival = "readwrite",
1566 .oval = FILE_LOCK_READWRITE,
1567 .help = "Read vs write lock",
1568 },
1569 },
1570 },
1571 {
1572 .name = "opendir",
1573 .lname = "Open directory",
1574 .type = FIO_OPT_STR_STORE,
1575 .off1 = offsetof(struct thread_options, opendir),
1576 .cb = str_opendir_cb,
1577 .help = "Recursively add files from this directory and down",
1578 .category = FIO_OPT_C_FILE,
1579 .group = FIO_OPT_G_FILENAME,
1580 },
1581 {
1582 .name = "rw",
1583 .lname = "Read/write",
1584 .alias = "readwrite",
1585 .type = FIO_OPT_STR,
1586 .cb = str_rw_cb,
1587 .off1 = offsetof(struct thread_options, td_ddir),
1588 .help = "IO direction",
1589 .def = "read",
1590 .verify = rw_verify,
1591 .category = FIO_OPT_C_IO,
1592 .group = FIO_OPT_G_IO_BASIC,
1593 .posval = {
1594 { .ival = "read",
1595 .oval = TD_DDIR_READ,
1596 .help = "Sequential read",
1597 },
1598 { .ival = "write",
1599 .oval = TD_DDIR_WRITE,
1600 .help = "Sequential write",
1601 },
1602 { .ival = "trim",
1603 .oval = TD_DDIR_TRIM,
1604 .help = "Sequential trim",
1605 },
1606 { .ival = "randread",
1607 .oval = TD_DDIR_RANDREAD,
1608 .help = "Random read",
1609 },
1610 { .ival = "randwrite",
1611 .oval = TD_DDIR_RANDWRITE,
1612 .help = "Random write",
1613 },
1614 { .ival = "randtrim",
1615 .oval = TD_DDIR_RANDTRIM,
1616 .help = "Random trim",
1617 },
1618 { .ival = "rw",
1619 .oval = TD_DDIR_RW,
1620 .help = "Sequential read and write mix",
1621 },
1622 { .ival = "readwrite",
1623 .oval = TD_DDIR_RW,
1624 .help = "Sequential read and write mix",
1625 },
1626 { .ival = "randrw",
1627 .oval = TD_DDIR_RANDRW,
1628 .help = "Random read and write mix"
1629 },
1630 { .ival = "trimwrite",
1631 .oval = TD_DDIR_TRIMWRITE,
1632 .help = "Trim and write mix, trims preceding writes"
1633 },
1634 },
1635 },
1636 {
1637 .name = "rw_sequencer",
1638 .lname = "RW Sequencer",
1639 .type = FIO_OPT_STR,
1640 .off1 = offsetof(struct thread_options, rw_seq),
1641 .help = "IO offset generator modifier",
1642 .def = "sequential",
1643 .category = FIO_OPT_C_IO,
1644 .group = FIO_OPT_G_IO_BASIC,
1645 .posval = {
1646 { .ival = "sequential",
1647 .oval = RW_SEQ_SEQ,
1648 .help = "Generate sequential offsets",
1649 },
1650 { .ival = "identical",
1651 .oval = RW_SEQ_IDENT,
1652 .help = "Generate identical offsets",
1653 },
1654 },
1655 },
1656
1657 {
1658 .name = "ioengine",
1659 .lname = "IO Engine",
1660 .type = FIO_OPT_STR_STORE,
1661 .off1 = offsetof(struct thread_options, ioengine),
1662 .help = "IO engine to use",
1663 .def = FIO_PREFERRED_ENGINE,
1664 .category = FIO_OPT_C_IO,
1665 .group = FIO_OPT_G_IO_BASIC,
1666 .posval = {
1667 { .ival = "sync",
1668 .help = "Use read/write",
1669 },
1670 { .ival = "psync",
1671 .help = "Use pread/pwrite",
1672 },
1673 { .ival = "vsync",
1674 .help = "Use readv/writev",
1675 },
1676 #ifdef CONFIG_PWRITEV
1677 { .ival = "pvsync",
1678 .help = "Use preadv/pwritev",
1679 },
1680 #endif
1681 #ifdef FIO_HAVE_PWRITEV2
1682 { .ival = "pvsync2",
1683 .help = "Use preadv2/pwritev2",
1684 },
1685 #endif
1686 #ifdef CONFIG_LIBAIO
1687 { .ival = "libaio",
1688 .help = "Linux native asynchronous IO",
1689 },
1690 #endif
1691 #ifdef CONFIG_POSIXAIO
1692 { .ival = "posixaio",
1693 .help = "POSIX asynchronous IO",
1694 },
1695 #endif
1696 #ifdef CONFIG_SOLARISAIO
1697 { .ival = "solarisaio",
1698 .help = "Solaris native asynchronous IO",
1699 },
1700 #endif
1701 #ifdef CONFIG_WINDOWSAIO
1702 { .ival = "windowsaio",
1703 .help = "Windows native asynchronous IO"
1704 },
1705 #endif
1706 #ifdef CONFIG_RBD
1707 { .ival = "rbd",
1708 .help = "Rados Block Device asynchronous IO"
1709 },
1710 #endif
1711 { .ival = "mmap",
1712 .help = "Memory mapped IO"
1713 },
1714 #ifdef CONFIG_LINUX_SPLICE
1715 { .ival = "splice",
1716 .help = "splice/vmsplice based IO",
1717 },
1718 { .ival = "netsplice",
1719 .help = "splice/vmsplice to/from the network",
1720 },
1721 #endif
1722 #ifdef FIO_HAVE_SGIO
1723 { .ival = "sg",
1724 .help = "SCSI generic v3 IO",
1725 },
1726 #endif
1727 { .ival = "null",
1728 .help = "Testing engine (no data transfer)",
1729 },
1730 { .ival = "net",
1731 .help = "Network IO",
1732 },
1733 { .ival = "cpuio",
1734 .help = "CPU cycle burner engine",
1735 },
1736 #ifdef CONFIG_GUASI
1737 { .ival = "guasi",
1738 .help = "GUASI IO engine",
1739 },
1740 #endif
1741 #ifdef FIO_HAVE_BINJECT
1742 { .ival = "binject",
1743 .help = "binject direct inject block engine",
1744 },
1745 #endif
1746 #ifdef CONFIG_RDMA
1747 { .ival = "rdma",
1748 .help = "RDMA IO engine",
1749 },
1750 #endif
1751 #ifdef CONFIG_FUSION_AW
1752 { .ival = "fusion-aw-sync",
1753 .help = "Fusion-io atomic write engine",
1754 },
1755 #endif
1756 #ifdef CONFIG_LINUX_EXT4_MOVE_EXTENT
1757 { .ival = "e4defrag",
1758 .help = "ext4 defrag engine",
1759 },
1760 #endif
1761 #ifdef CONFIG_LINUX_FALLOCATE
1762 { .ival = "falloc",
1763 .help = "fallocate() file based engine",
1764 },
1765 #endif
1766 #ifdef CONFIG_GFAPI
1767 { .ival = "gfapi",
1768 .help = "Glusterfs libgfapi(sync) based engine"
1769 },
1770 { .ival = "gfapi_async",
1771 .help = "Glusterfs libgfapi(async) based engine"
1772 },
1773 #endif
1774 #ifdef CONFIG_LIBHDFS
1775 { .ival = "libhdfs",
1776 .help = "Hadoop Distributed Filesystem (HDFS) engine"
1777 },
1778 #endif
1779 #ifdef CONFIG_PMEMBLK
1780 { .ival = "pmemblk",
1781 .help = "NVML libpmemblk based IO engine",
1782 },
1783
1784 #endif
1785 #ifdef CONFIG_LINUX_DEVDAX
1786 { .ival = "dev-dax",
1787 .help = "DAX Device based IO engine",
1788 },
1789 #endif
1790 { .ival = "external",
1791 .help = "Load external engine (append name)",
1792 },
1793 },
1794 },
1795 {
1796 .name = "iodepth",
1797 .lname = "IO Depth",
1798 .type = FIO_OPT_INT,
1799 .off1 = offsetof(struct thread_options, iodepth),
1800 .help = "Number of IO buffers to keep in flight",
1801 .minval = 1,
1802 .interval = 1,
1803 .def = "1",
1804 .category = FIO_OPT_C_IO,
1805 .group = FIO_OPT_G_IO_BASIC,
1806 },
1807 {
1808 .name = "iodepth_batch",
1809 .lname = "IO Depth batch",
1810 .alias = "iodepth_batch_submit",
1811 .type = FIO_OPT_INT,
1812 .off1 = offsetof(struct thread_options, iodepth_batch),
1813 .help = "Number of IO buffers to submit in one go",
1814 .parent = "iodepth",
1815 .hide = 1,
1816 .interval = 1,
1817 .def = "1",
1818 .category = FIO_OPT_C_IO,
1819 .group = FIO_OPT_G_IO_BASIC,
1820 },
1821 {
1822 .name = "iodepth_batch_complete_min",
1823 .lname = "Min IO depth batch complete",
1824 .alias = "iodepth_batch_complete",
1825 .type = FIO_OPT_INT,
1826 .off1 = offsetof(struct thread_options, iodepth_batch_complete_min),
1827 .help = "Min number of IO buffers to retrieve in one go",
1828 .parent = "iodepth",
1829 .hide = 1,
1830 .minval = 0,
1831 .interval = 1,
1832 .def = "1",
1833 .category = FIO_OPT_C_IO,
1834 .group = FIO_OPT_G_IO_BASIC,
1835 },
1836 {
1837 .name = "iodepth_batch_complete_max",
1838 .lname = "Max IO depth batch complete",
1839 .type = FIO_OPT_INT,
1840 .off1 = offsetof(struct thread_options, iodepth_batch_complete_max),
1841 .help = "Max number of IO buffers to retrieve in one go",
1842 .parent = "iodepth",
1843 .hide = 1,
1844 .minval = 0,
1845 .interval = 1,
1846 .category = FIO_OPT_C_IO,
1847 .group = FIO_OPT_G_IO_BASIC,
1848 },
1849 {
1850 .name = "iodepth_low",
1851 .lname = "IO Depth batch low",
1852 .type = FIO_OPT_INT,
1853 .off1 = offsetof(struct thread_options, iodepth_low),
1854 .help = "Low water mark for queuing depth",
1855 .parent = "iodepth",
1856 .hide = 1,
1857 .interval = 1,
1858 .category = FIO_OPT_C_IO,
1859 .group = FIO_OPT_G_IO_BASIC,
1860 },
1861 {
1862 .name = "io_submit_mode",
1863 .lname = "IO submit mode",
1864 .type = FIO_OPT_STR,
1865 .off1 = offsetof(struct thread_options, io_submit_mode),
1866 .help = "How IO submissions and completions are done",
1867 .def = "inline",
1868 .category = FIO_OPT_C_IO,
1869 .group = FIO_OPT_G_IO_BASIC,
1870 .posval = {
1871 { .ival = "inline",
1872 .oval = IO_MODE_INLINE,
1873 .help = "Submit and complete IO inline",
1874 },
1875 { .ival = "offload",
1876 .oval = IO_MODE_OFFLOAD,
1877 .help = "Offload submit and complete to threads",
1878 },
1879 },
1880 },
1881 {
1882 .name = "size",
1883 .lname = "Size",
1884 .type = FIO_OPT_STR_VAL,
1885 .cb = str_size_cb,
1886 .off1 = offsetof(struct thread_options, size),
1887 .help = "Total size of device or files",
1888 .interval = 1024 * 1024,
1889 .category = FIO_OPT_C_IO,
1890 .group = FIO_OPT_G_INVALID,
1891 },
1892 {
1893 .name = "io_size",
1894 .alias = "io_limit",
1895 .lname = "IO Size",
1896 .type = FIO_OPT_STR_VAL,
1897 .off1 = offsetof(struct thread_options, io_size),
1898 .help = "Total size of I/O to be performed",
1899 .interval = 1024 * 1024,
1900 .category = FIO_OPT_C_IO,
1901 .group = FIO_OPT_G_INVALID,
1902 },
1903 {
1904 .name = "fill_device",
1905 .lname = "Fill device",
1906 .alias = "fill_fs",
1907 .type = FIO_OPT_BOOL,
1908 .off1 = offsetof(struct thread_options, fill_device),
1909 .help = "Write until an ENOSPC error occurs",
1910 .def = "0",
1911 .category = FIO_OPT_C_FILE,
1912 .group = FIO_OPT_G_INVALID,
1913 },
1914 {
1915 .name = "filesize",
1916 .lname = "File size",
1917 .type = FIO_OPT_STR_VAL,
1918 .off1 = offsetof(struct thread_options, file_size_low),
1919 .off2 = offsetof(struct thread_options, file_size_high),
1920 .minval = 1,
1921 .help = "Size of individual files",
1922 .interval = 1024 * 1024,
1923 .category = FIO_OPT_C_FILE,
1924 .group = FIO_OPT_G_INVALID,
1925 },
1926 {
1927 .name = "file_append",
1928 .lname = "File append",
1929 .type = FIO_OPT_BOOL,
1930 .off1 = offsetof(struct thread_options, file_append),
1931 .help = "IO will start at the end of the file(s)",
1932 .def = "0",
1933 .category = FIO_OPT_C_FILE,
1934 .group = FIO_OPT_G_INVALID,
1935 },
1936 {
1937 .name = "offset",
1938 .lname = "IO offset",
1939 .alias = "fileoffset",
1940 .type = FIO_OPT_STR_VAL,
1941 .off1 = offsetof(struct thread_options, start_offset),
1942 .help = "Start IO from this offset",
1943 .def = "0",
1944 .interval = 1024 * 1024,
1945 .category = FIO_OPT_C_IO,
1946 .group = FIO_OPT_G_INVALID,
1947 },
1948 {
1949 .name = "offset_increment",
1950 .lname = "IO offset increment",
1951 .type = FIO_OPT_STR_VAL,
1952 .off1 = offsetof(struct thread_options, offset_increment),
1953 		.help	= "Increment from one offset to the next",
1954 .parent = "offset",
1955 .hide = 1,
1956 .def = "0",
1957 .interval = 1024 * 1024,
1958 .category = FIO_OPT_C_IO,
1959 .group = FIO_OPT_G_INVALID,
1960 },
1961 {
1962 .name = "number_ios",
1963 .lname = "Number of IOs to perform",
1964 .type = FIO_OPT_STR_VAL,
1965 .off1 = offsetof(struct thread_options, number_ios),
1966 .help = "Force job completion after this number of IOs",
1967 .def = "0",
1968 .category = FIO_OPT_C_IO,
1969 .group = FIO_OPT_G_INVALID,
1970 },
1971 {
1972 .name = "bs",
1973 .lname = "Block size",
1974 .alias = "blocksize",
1975 .type = FIO_OPT_INT,
1976 .off1 = offsetof(struct thread_options, bs[DDIR_READ]),
1977 .off2 = offsetof(struct thread_options, bs[DDIR_WRITE]),
1978 .off3 = offsetof(struct thread_options, bs[DDIR_TRIM]),
1979 .minval = 1,
1980 .help = "Block size unit",
1981 .def = "4096",
1982 .parent = "rw",
1983 .hide = 1,
1984 .interval = 512,
1985 .category = FIO_OPT_C_IO,
1986 .group = FIO_OPT_G_INVALID,
1987 },
1988 {
1989 .name = "ba",
1990 .lname = "Block size align",
1991 .alias = "blockalign",
1992 .type = FIO_OPT_INT,
1993 .off1 = offsetof(struct thread_options, ba[DDIR_READ]),
1994 .off2 = offsetof(struct thread_options, ba[DDIR_WRITE]),
1995 .off3 = offsetof(struct thread_options, ba[DDIR_TRIM]),
1996 .minval = 1,
1997 .help = "IO block offset alignment",
1998 .parent = "rw",
1999 .hide = 1,
2000 .interval = 512,
2001 .category = FIO_OPT_C_IO,
2002 .group = FIO_OPT_G_INVALID,
2003 },
2004 {
2005 .name = "bsrange",
2006 .lname = "Block size range",
2007 .alias = "blocksize_range",
2008 .type = FIO_OPT_RANGE,
2009 .off1 = offsetof(struct thread_options, min_bs[DDIR_READ]),
2010 .off2 = offsetof(struct thread_options, max_bs[DDIR_READ]),
2011 .off3 = offsetof(struct thread_options, min_bs[DDIR_WRITE]),
2012 .off4 = offsetof(struct thread_options, max_bs[DDIR_WRITE]),
2013 .off5 = offsetof(struct thread_options, min_bs[DDIR_TRIM]),
2014 .off6 = offsetof(struct thread_options, max_bs[DDIR_TRIM]),
2015 .minval = 1,
2016 .help = "Set block size range (in more detail than bs)",
2017 .parent = "rw",
2018 .hide = 1,
2019 .interval = 4096,
2020 .category = FIO_OPT_C_IO,
2021 .group = FIO_OPT_G_INVALID,
2022 },
2023 {
2024 .name = "bssplit",
2025 .lname = "Block size split",
2026 .type = FIO_OPT_STR,
2027 .cb = str_bssplit_cb,
2028 .off1 = offsetof(struct thread_options, bssplit),
2029 .help = "Set a specific mix of block sizes",
2030 .parent = "rw",
2031 .hide = 1,
2032 .category = FIO_OPT_C_IO,
2033 .group = FIO_OPT_G_INVALID,
2034 },
2035 {
2036 .name = "bs_unaligned",
2037 .lname = "Block size unaligned",
2038 .alias = "blocksize_unaligned",
2039 .type = FIO_OPT_STR_SET,
2040 .off1 = offsetof(struct thread_options, bs_unaligned),
2041 .help = "Don't sector align IO buffer sizes",
2042 .parent = "rw",
2043 .hide = 1,
2044 .category = FIO_OPT_C_IO,
2045 .group = FIO_OPT_G_INVALID,
2046 },
2047 {
2048 .name = "bs_is_seq_rand",
2049 .lname = "Block size division is seq/random (not read/write)",
2050 .type = FIO_OPT_BOOL,
2051 .off1 = offsetof(struct thread_options, bs_is_seq_rand),
2052 .help = "Consider any blocksize setting to be sequential,random",
2053 .def = "0",
2054 .parent = "blocksize",
2055 .category = FIO_OPT_C_IO,
2056 .group = FIO_OPT_G_INVALID,
2057 },
2058 {
2059 .name = "randrepeat",
2060 .lname = "Random repeatable",
2061 .type = FIO_OPT_BOOL,
2062 .off1 = offsetof(struct thread_options, rand_repeatable),
2063 .help = "Use repeatable random IO pattern",
2064 .def = "1",
2065 .parent = "rw",
2066 .hide = 1,
2067 .category = FIO_OPT_C_IO,
2068 .group = FIO_OPT_G_RANDOM,
2069 },
2070 {
2071 .name = "randseed",
2072 .lname = "The random generator seed",
2073 .type = FIO_OPT_STR_VAL,
2074 .off1 = offsetof(struct thread_options, rand_seed),
2075 .help = "Set the random generator seed value",
2076 .def = "0x89",
2077 .parent = "rw",
2078 .category = FIO_OPT_C_IO,
2079 .group = FIO_OPT_G_RANDOM,
2080 },
2081 {
2082 .name = "use_os_rand",
2083 .lname = "Use OS random",
2084 .type = FIO_OPT_DEPRECATED,
2085 .off1 = offsetof(struct thread_options, dep_use_os_rand),
2086 .category = FIO_OPT_C_IO,
2087 .group = FIO_OPT_G_RANDOM,
2088 },
2089 {
2090 .name = "norandommap",
2091 .lname = "No randommap",
2092 .type = FIO_OPT_STR_SET,
2093 .off1 = offsetof(struct thread_options, norandommap),
2094 .help = "Accept potential duplicate random blocks",
2095 .parent = "rw",
2096 .hide = 1,
2097 .hide_on_set = 1,
2098 .category = FIO_OPT_C_IO,
2099 .group = FIO_OPT_G_RANDOM,
2100 },
2101 {
2102 .name = "softrandommap",
2103 .lname = "Soft randommap",
2104 .type = FIO_OPT_BOOL,
2105 .off1 = offsetof(struct thread_options, softrandommap),
2106 .help = "Set norandommap if randommap allocation fails",
2107 .parent = "norandommap",
2108 .hide = 1,
2109 .def = "0",
2110 .category = FIO_OPT_C_IO,
2111 .group = FIO_OPT_G_RANDOM,
2112 },
2113 {
2114 .name = "random_generator",
2115 .lname = "Random Generator",
2116 .type = FIO_OPT_STR,
2117 .off1 = offsetof(struct thread_options, random_generator),
2118 .help = "Type of random number generator to use",
2119 .def = "tausworthe",
2120 .posval = {
2121 { .ival = "tausworthe",
2122 .oval = FIO_RAND_GEN_TAUSWORTHE,
2123 .help = "Strong Tausworthe generator",
2124 },
2125 { .ival = "lfsr",
2126 .oval = FIO_RAND_GEN_LFSR,
2127 .help = "Variable length LFSR",
2128 },
2129 {
2130 .ival = "tausworthe64",
2131 .oval = FIO_RAND_GEN_TAUSWORTHE64,
2132 .help = "64-bit Tausworthe variant",
2133 },
2134 },
2135 .category = FIO_OPT_C_IO,
2136 .group = FIO_OPT_G_RANDOM,
2137 },
2138 {
2139 .name = "random_distribution",
2140 .lname = "Random Distribution",
2141 .type = FIO_OPT_STR,
2142 .off1 = offsetof(struct thread_options, random_distribution),
2143 .cb = str_random_distribution_cb,
2144 .help = "Random offset distribution generator",
2145 .def = "random",
2146 .posval = {
2147 { .ival = "random",
2148 .oval = FIO_RAND_DIST_RANDOM,
2149 .help = "Completely random",
2150 },
2151 { .ival = "zipf",
2152 .oval = FIO_RAND_DIST_ZIPF,
2153 .help = "Zipf distribution",
2154 },
2155 { .ival = "pareto",
2156 .oval = FIO_RAND_DIST_PARETO,
2157 .help = "Pareto distribution",
2158 },
2159 { .ival = "normal",
2160 .oval = FIO_RAND_DIST_GAUSS,
2161 .help = "Normal (Gaussian) distribution",
2162 },
2163 { .ival = "zoned",
2164 .oval = FIO_RAND_DIST_ZONED,
2165 .help = "Zoned random distribution",
2166 },
2167
2168 },
2169 .category = FIO_OPT_C_IO,
2170 .group = FIO_OPT_G_RANDOM,
2171 },
2172 {
2173 .name = "percentage_random",
2174 .lname = "Percentage Random",
2175 .type = FIO_OPT_INT,
2176 .off1 = offsetof(struct thread_options, perc_rand[DDIR_READ]),
2177 .off2 = offsetof(struct thread_options, perc_rand[DDIR_WRITE]),
2178 .off3 = offsetof(struct thread_options, perc_rand[DDIR_TRIM]),
2179 .maxval = 100,
2180 .help = "Percentage of seq/random mix that should be random",
2181 .def = "100,100,100",
2182 .interval = 5,
2183 .inverse = "percentage_sequential",
2184 .category = FIO_OPT_C_IO,
2185 .group = FIO_OPT_G_RANDOM,
2186 },
2187 {
2188 .name = "percentage_sequential",
2189 .lname = "Percentage Sequential",
2190 .type = FIO_OPT_DEPRECATED,
2191 .category = FIO_OPT_C_IO,
2192 .group = FIO_OPT_G_RANDOM,
2193 },
2194 {
2195 .name = "allrandrepeat",
2196 .lname = "All Random Repeat",
2197 .type = FIO_OPT_BOOL,
2198 .off1 = offsetof(struct thread_options, allrand_repeatable),
2199 .help = "Use repeatable random numbers for everything",
2200 .def = "0",
2201 .category = FIO_OPT_C_IO,
2202 .group = FIO_OPT_G_RANDOM,
2203 },
2204 {
2205 .name = "nrfiles",
2206 .lname = "Number of files",
2207 .alias = "nr_files",
2208 .type = FIO_OPT_INT,
2209 .off1 = offsetof(struct thread_options, nr_files),
2210 .help = "Split job workload between this number of files",
2211 .def = "1",
2212 .interval = 1,
2213 .category = FIO_OPT_C_FILE,
2214 .group = FIO_OPT_G_INVALID,
2215 },
2216 {
2217 .name = "openfiles",
2218 .lname = "Number of open files",
2219 .type = FIO_OPT_INT,
2220 .off1 = offsetof(struct thread_options, open_files),
2221 .help = "Number of files to keep open at the same time",
2222 .category = FIO_OPT_C_FILE,
2223 .group = FIO_OPT_G_INVALID,
2224 },
2225 {
2226 .name = "file_service_type",
2227 .lname = "File service type",
2228 .type = FIO_OPT_STR,
2229 .cb = str_fst_cb,
2230 .off1 = offsetof(struct thread_options, file_service_type),
2231 .help = "How to select which file to service next",
2232 .def = "roundrobin",
2233 .category = FIO_OPT_C_FILE,
2234 .group = FIO_OPT_G_INVALID,
2235 .posval = {
2236 { .ival = "random",
2237 .oval = FIO_FSERVICE_RANDOM,
2238 .help = "Choose a file at random (uniform)",
2239 },
2240 { .ival = "zipf",
2241 .oval = FIO_FSERVICE_ZIPF,
2242 .help = "Zipf randomized",
2243 },
2244 { .ival = "pareto",
2245 .oval = FIO_FSERVICE_PARETO,
2246 .help = "Pareto randomized",
2247 },
2248 { .ival = "gauss",
2249 .oval = FIO_FSERVICE_GAUSS,
2250 .help = "Normal (Gaussian) distribution",
2251 },
2252 { .ival = "roundrobin",
2253 .oval = FIO_FSERVICE_RR,
2254 .help = "Round robin select files",
2255 },
2256 { .ival = "sequential",
2257 .oval = FIO_FSERVICE_SEQ,
2258 .help = "Finish one file before moving to the next",
2259 },
2260 },
2261 .parent = "nrfiles",
2262 .hide = 1,
2263 },
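/*
 * Illustrative job-file snippet: spread one job over several files and pick
 * the next file to service round-robin. The file counts are arbitrary
 * example values.
 *
 *	[multi-file]
 *	nrfiles=8
 *	openfiles=4
 *	file_service_type=roundrobin
 */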
2264 #ifdef CONFIG_POSIX_FALLOCATE
2265 {
2266 .name = "fallocate",
2267 .lname = "Fallocate",
2268 .type = FIO_OPT_STR,
2269 .off1 = offsetof(struct thread_options, fallocate_mode),
2270 .help = "Whether pre-allocation is performed when laying out files",
2271 .def = "posix",
2272 .category = FIO_OPT_C_FILE,
2273 .group = FIO_OPT_G_INVALID,
2274 .posval = {
2275 { .ival = "none",
2276 .oval = FIO_FALLOCATE_NONE,
2277 .help = "Do not pre-allocate space",
2278 },
2279 { .ival = "posix",
2280 .oval = FIO_FALLOCATE_POSIX,
2281 .help = "Use posix_fallocate()",
2282 },
2283 #ifdef CONFIG_LINUX_FALLOCATE
2284 { .ival = "keep",
2285 .oval = FIO_FALLOCATE_KEEP_SIZE,
2286 .help = "Use fallocate(..., FALLOC_FL_KEEP_SIZE, ...)",
2287 },
2288 #endif
2289 /* Compatibility with former boolean values */
2290 { .ival = "0",
2291 .oval = FIO_FALLOCATE_NONE,
2292 .help = "Alias for 'none'",
2293 },
2294 { .ival = "1",
2295 .oval = FIO_FALLOCATE_POSIX,
2296 .help = "Alias for 'posix'",
2297 },
2298 },
2299 },
2300 #else /* CONFIG_POSIX_FALLOCATE */
2301 {
2302 .name = "fallocate",
2303 .lname = "Fallocate",
2304 .type = FIO_OPT_UNSUPPORTED,
2305 .help = "Your platform does not support fallocate",
2306 },
2307 #endif /* CONFIG_POSIX_FALLOCATE */
2308 {
2309 .name = "fadvise_hint",
2310 .lname = "Fadvise hint",
2311 .type = FIO_OPT_STR,
2312 .off1 = offsetof(struct thread_options, fadvise_hint),
2313 .posval = {
2314 { .ival = "0",
2315 .oval = F_ADV_NONE,
2316 .help = "Don't issue fadvise",
2317 },
2318 { .ival = "1",
2319 .oval = F_ADV_TYPE,
2320 .help = "Advise using fio IO pattern",
2321 },
2322 { .ival = "random",
2323 .oval = F_ADV_RANDOM,
2324 .help = "Advise using FADV_RANDOM",
2325 },
2326 { .ival = "sequential",
2327 .oval = F_ADV_SEQUENTIAL,
2328 .help = "Advise using FADV_SEQUENTIAL",
2329 },
2330 },
2331 .help = "Use fadvise() to advise the kernel on IO pattern",
2332 .def = "1",
2333 .category = FIO_OPT_C_FILE,
2334 .group = FIO_OPT_G_INVALID,
2335 },
2336 #ifdef FIO_HAVE_STREAMID
2337 {
2338 .name = "fadvise_stream",
2339 .lname = "Fadvise stream",
2340 .type = FIO_OPT_INT,
2341 .off1 = offsetof(struct thread_options, fadvise_stream),
2342 .help = "Use fadvise() to set stream ID",
2343 .category = FIO_OPT_C_FILE,
2344 .group = FIO_OPT_G_INVALID,
2345 },
2346 #else
2347 {
2348 .name = "fadvise_stream",
2349 .lname = "Fadvise stream",
2350 .type = FIO_OPT_UNSUPPORTED,
2351 .help = "Your platform does not support fadvise stream ID",
2352 },
2353 #endif
2354 {
2355 .name = "fsync",
2356 .lname = "Fsync",
2357 .type = FIO_OPT_INT,
2358 .off1 = offsetof(struct thread_options, fsync_blocks),
2359 .help = "Issue fsync for writes every given number of blocks",
2360 .def = "0",
2361 .interval = 1,
2362 .category = FIO_OPT_C_FILE,
2363 .group = FIO_OPT_G_INVALID,
2364 },
2365 {
2366 .name = "fdatasync",
2367 .lname = "Fdatasync",
2368 .type = FIO_OPT_INT,
2369 .off1 = offsetof(struct thread_options, fdatasync_blocks),
2370 .help = "Issue fdatasync for writes every given number of blocks",
2371 .def = "0",
2372 .interval = 1,
2373 .category = FIO_OPT_C_FILE,
2374 .group = FIO_OPT_G_INVALID,
2375 },
2376 {
2377 .name = "write_barrier",
2378 .lname = "Write barrier",
2379 .type = FIO_OPT_INT,
2380 .off1 = offsetof(struct thread_options, barrier_blocks),
2381 .help = "Make every Nth write a barrier write",
2382 .def = "0",
2383 .interval = 1,
2384 .category = FIO_OPT_C_IO,
2385 .group = FIO_OPT_G_INVALID,
2386 },
2387 #ifdef CONFIG_SYNC_FILE_RANGE
2388 {
2389 .name = "sync_file_range",
2390 .lname = "Sync file range",
2391 .posval = {
2392 { .ival = "wait_before",
2393 .oval = SYNC_FILE_RANGE_WAIT_BEFORE,
2394 .help = "SYNC_FILE_RANGE_WAIT_BEFORE",
2395 .orval = 1,
2396 },
2397 { .ival = "write",
2398 .oval = SYNC_FILE_RANGE_WRITE,
2399 .help = "SYNC_FILE_RANGE_WRITE",
2400 .orval = 1,
2401 },
2402 {
2403 .ival = "wait_after",
2404 .oval = SYNC_FILE_RANGE_WAIT_AFTER,
2405 .help = "SYNC_FILE_RANGE_WAIT_AFTER",
2406 .orval = 1,
2407 },
2408 },
2409 .type = FIO_OPT_STR_MULTI,
2410 .cb = str_sfr_cb,
2411 .off1 = offsetof(struct thread_options, sync_file_range),
2412 .help = "Use sync_file_range()",
2413 .category = FIO_OPT_C_FILE,
2414 .group = FIO_OPT_G_INVALID,
2415 },
2416 #else
2417 {
2418 .name = "sync_file_range",
2419 .lname = "Sync file range",
2420 .type = FIO_OPT_UNSUPPORTED,
2421 .help = "Your platform does not support sync_file_range",
2422 },
2423 #endif
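/*
 * Illustrative job-file snippet for the flush-related options above. The
 * block counts are arbitrary; the sync_file_range line combines the flag
 * names from its posval table (value syntax as described in the fio HOWTO).
 *
 *	[periodic-flush]
 *	rw=write
 *	fdatasync=64
 *	; or, where supported:
 *	; sync_file_range=wait_before,write:8
 */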
2424 {
2425 .name = "direct",
2426 .lname = "Direct I/O",
2427 .type = FIO_OPT_BOOL,
2428 .off1 = offsetof(struct thread_options, odirect),
2429 .help = "Use O_DIRECT IO (negates buffered)",
2430 .def = "0",
2431 .inverse = "buffered",
2432 .category = FIO_OPT_C_IO,
2433 .group = FIO_OPT_G_IO_TYPE,
2434 },
2435 {
2436 .name = "atomic",
2437 .lname = "Atomic I/O",
2438 .type = FIO_OPT_BOOL,
2439 .off1 = offsetof(struct thread_options, oatomic),
2440 .help = "Use Atomic IO with O_DIRECT (implies O_DIRECT)",
2441 .def = "0",
2442 .category = FIO_OPT_C_IO,
2443 .group = FIO_OPT_G_IO_TYPE,
2444 },
2445 {
2446 .name = "buffered",
2447 .lname = "Buffered I/O",
2448 .type = FIO_OPT_BOOL,
2449 .off1 = offsetof(struct thread_options, odirect),
2450 .neg = 1,
2451 .help = "Use buffered IO (negates direct)",
2452 .def = "1",
2453 .inverse = "direct",
2454 .category = FIO_OPT_C_IO,
2455 .group = FIO_OPT_G_IO_TYPE,
2456 },
2457 {
2458 .name = "overwrite",
2459 .lname = "Overwrite",
2460 .type = FIO_OPT_BOOL,
2461 .off1 = offsetof(struct thread_options, overwrite),
2462 .help = "When writing, set whether to overwrite current data",
2463 .def = "0",
2464 .category = FIO_OPT_C_FILE,
2465 .group = FIO_OPT_G_INVALID,
2466 },
2467 {
2468 .name = "loops",
2469 .lname = "Loops",
2470 .type = FIO_OPT_INT,
2471 .off1 = offsetof(struct thread_options, loops),
2472 .help = "Number of times to run the job",
2473 .def = "1",
2474 .interval = 1,
2475 .category = FIO_OPT_C_GENERAL,
2476 .group = FIO_OPT_G_RUNTIME,
2477 },
2478 {
2479 .name = "numjobs",
2480 .lname = "Number of jobs",
2481 .type = FIO_OPT_INT,
2482 .off1 = offsetof(struct thread_options, numjobs),
2483 .help = "Duplicate this job this many times",
2484 .def = "1",
2485 .interval = 1,
2486 .category = FIO_OPT_C_GENERAL,
2487 .group = FIO_OPT_G_RUNTIME,
2488 },
2489 {
2490 .name = "startdelay",
2491 .lname = "Start delay",
2492 .type = FIO_OPT_STR_VAL_TIME,
2493 .off1 = offsetof(struct thread_options, start_delay),
2494 .off2 = offsetof(struct thread_options, start_delay_high),
2495 .help = "Only start job when this period has passed",
2496 .def = "0",
2497 .is_seconds = 1,
2498 .is_time = 1,
2499 .category = FIO_OPT_C_GENERAL,
2500 .group = FIO_OPT_G_RUNTIME,
2501 },
2502 {
2503 .name = "runtime",
2504 .lname = "Runtime",
2505 .alias = "timeout",
2506 .type = FIO_OPT_STR_VAL_TIME,
2507 .off1 = offsetof(struct thread_options, timeout),
2508 .help = "Stop workload when this amount of time has passed",
2509 .def = "0",
2510 .is_seconds = 1,
2511 .is_time = 1,
2512 .category = FIO_OPT_C_GENERAL,
2513 .group = FIO_OPT_G_RUNTIME,
2514 },
2515 {
2516 .name = "time_based",
2517 .lname = "Time based",
2518 .type = FIO_OPT_STR_SET,
2519 .off1 = offsetof(struct thread_options, time_based),
2520 .help = "Keep running until runtime/timeout is met",
2521 .category = FIO_OPT_C_GENERAL,
2522 .group = FIO_OPT_G_RUNTIME,
2523 },
2524 {
2525 .name = "verify_only",
2526 .lname = "Verify only",
2527 .type = FIO_OPT_STR_SET,
2528 .off1 = offsetof(struct thread_options, verify_only),
2529 .help = "Verifies previously written data is still valid",
2530 .category = FIO_OPT_C_GENERAL,
2531 .group = FIO_OPT_G_RUNTIME,
2532 },
2533 {
2534 .name = "ramp_time",
2535 .lname = "Ramp time",
2536 .type = FIO_OPT_STR_VAL_TIME,
2537 .off1 = offsetof(struct thread_options, ramp_time),
2538 .help = "Ramp up time before measuring performance",
2539 .is_seconds = 1,
2540 .is_time = 1,
2541 .category = FIO_OPT_C_GENERAL,
2542 .group = FIO_OPT_G_RUNTIME,
2543 },
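/*
 * Illustrative job-file snippet for the runtime options above; durations are
 * arbitrary examples (plain numbers are seconds here, suffixes such as "m"
 * and "s" are also accepted).
 *
 *	[timed-run]
 *	time_based
 *	runtime=5m
 *	ramp_time=10s
 *	startdelay=5
 */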
2544 {
2545 .name = "clocksource",
2546 .lname = "Clock source",
2547 .type = FIO_OPT_STR,
2548 .cb = fio_clock_source_cb,
2549 .off1 = offsetof(struct thread_options, clocksource),
2550 .help = "What type of timing source to use",
2551 .category = FIO_OPT_C_GENERAL,
2552 .group = FIO_OPT_G_CLOCK,
2553 .posval = {
2554 #ifdef CONFIG_GETTIMEOFDAY
2555 { .ival = "gettimeofday",
2556 .oval = CS_GTOD,
2557 .help = "Use gettimeofday(2) for timing",
2558 },
2559 #endif
2560 #ifdef CONFIG_CLOCK_GETTIME
2561 { .ival = "clock_gettime",
2562 .oval = CS_CGETTIME,
2563 .help = "Use clock_gettime(2) for timing",
2564 },
2565 #endif
2566 #ifdef ARCH_HAVE_CPU_CLOCK
2567 { .ival = "cpu",
2568 .oval = CS_CPUCLOCK,
2569 .help = "Use CPU private clock",
2570 },
2571 #endif
2572 },
2573 },
2574 {
2575 .name = "mem",
2576 .alias = "iomem",
2577 .lname = "I/O Memory",
2578 .type = FIO_OPT_STR,
2579 .cb = str_mem_cb,
2580 .off1 = offsetof(struct thread_options, mem_type),
2581 .help = "Backing type for IO buffers",
2582 .def = "malloc",
2583 .category = FIO_OPT_C_IO,
2584 .group = FIO_OPT_G_INVALID,
2585 .posval = {
2586 { .ival = "malloc",
2587 .oval = MEM_MALLOC,
2588 .help = "Use malloc(3) for IO buffers",
2589 },
2590 #ifndef CONFIG_NO_SHM
2591 { .ival = "shm",
2592 .oval = MEM_SHM,
2593 .help = "Use shared memory segments for IO buffers",
2594 },
2595 #ifdef FIO_HAVE_HUGETLB
2596 { .ival = "shmhuge",
2597 .oval = MEM_SHMHUGE,
2598 .help = "Like shm, but use huge pages",
2599 },
2600 #endif
2601 #endif
2602 { .ival = "mmap",
2603 .oval = MEM_MMAP,
2604 .help = "Use mmap(2) (file or anon) for IO buffers",
2605 },
2606 { .ival = "mmapshared",
2607 .oval = MEM_MMAPSHARED,
2608 .help = "Like mmap, but use the shared flag",
2609 },
2610 #ifdef FIO_HAVE_HUGETLB
2611 { .ival = "mmaphuge",
2612 .oval = MEM_MMAPHUGE,
2613 .help = "Like mmap, but use huge pages",
2614 },
2615 #endif
2616 #ifdef CONFIG_CUDA
2617 { .ival = "cudamalloc",
2618 .oval = MEM_CUDA_MALLOC,
2619 .help = "Allocate GPU device memory for GPUDirect RDMA",
2620 },
2621 #endif
2622 },
2623 },
2624 {
2625 .name = "iomem_align",
2626 .alias = "mem_align",
2627 .lname = "I/O memory alignment",
2628 .type = FIO_OPT_INT,
2629 .off1 = offsetof(struct thread_options, mem_align),
2630 .minval = 0,
2631 .help = "IO memory buffer offset alignment",
2632 .def = "0",
2633 .parent = "iomem",
2634 .hide = 1,
2635 .category = FIO_OPT_C_IO,
2636 .group = FIO_OPT_G_INVALID,
2637 },
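/*
 * Illustrative job-file snippet selecting the IO buffer backing type and its
 * alignment; values are arbitrary examples.
 *
 *	[mmap-buffers]
 *	mem=mmap
 *	iomem_align=4096
 */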
2638 {
2639 .name = "verify",
2640 .lname = "Verify",
2641 .type = FIO_OPT_STR,
2642 .off1 = offsetof(struct thread_options, verify),
2643 .help = "Verify data written",
2644 .def = "0",
2645 .category = FIO_OPT_C_IO,
2646 .group = FIO_OPT_G_VERIFY,
2647 .posval = {
2648 { .ival = "0",
2649 .oval = VERIFY_NONE,
2650 .help = "Don't do IO verification",
2651 },
2652 { .ival = "md5",
2653 .oval = VERIFY_MD5,
2654 .help = "Use md5 checksums for verification",
2655 },
2656 { .ival = "crc64",
2657 .oval = VERIFY_CRC64,
2658 .help = "Use crc64 checksums for verification",
2659 },
2660 { .ival = "crc32",
2661 .oval = VERIFY_CRC32,
2662 .help = "Use crc32 checksums for verification",
2663 },
2664 { .ival = "crc32c-intel",
2665 .oval = VERIFY_CRC32C,
2666 .help = "Use crc32c checksums for verification (hw assisted, if available)",
2667 },
2668 { .ival = "crc32c",
2669 .oval = VERIFY_CRC32C,
2670 .help = "Use crc32c checksums for verification (hw assisted, if available)",
2671 },
2672 { .ival = "crc16",
2673 .oval = VERIFY_CRC16,
2674 .help = "Use crc16 checksums for verification",
2675 },
2676 { .ival = "crc7",
2677 .oval = VERIFY_CRC7,
2678 .help = "Use crc7 checksums for verification",
2679 },
2680 { .ival = "sha1",
2681 .oval = VERIFY_SHA1,
2682 .help = "Use sha1 checksums for verification",
2683 },
2684 { .ival = "sha256",
2685 .oval = VERIFY_SHA256,
2686 .help = "Use sha256 checksums for verification",
2687 },
2688 { .ival = "sha512",
2689 .oval = VERIFY_SHA512,
2690 .help = "Use sha512 checksums for verification",
2691 },
2692 { .ival = "sha3-224",
2693 .oval = VERIFY_SHA3_224,
2694 .help = "Use sha3-224 checksums for verification",
2695 },
2696 { .ival = "sha3-256",
2697 .oval = VERIFY_SHA3_256,
2698 .help = "Use sha3-256 checksums for verification",
2699 },
2700 { .ival = "sha3-384",
2701 .oval = VERIFY_SHA3_384,
2702 .help = "Use sha3-384 checksums for verification",
2703 },
2704 { .ival = "sha3-512",
2705 .oval = VERIFY_SHA3_512,
2706 .help = "Use sha3-512 checksums for verification",
2707 },
2708 { .ival = "xxhash",
2709 .oval = VERIFY_XXHASH,
2710 .help = "Use xxhash checksums for verification",
2711 },
2712 /* Meta information is now included in verify_header, so
2713 * 'meta' verification is implied by default. */
2714 { .ival = "meta",
2715 .oval = VERIFY_HDR_ONLY,
2716 .help = "Use IO information for verification. "
2717 "This is now implied by default, so the option is obsolete; "
2718 "don't use it",
2719 },
2720 { .ival = "pattern",
2721 .oval = VERIFY_PATTERN_NO_HDR,
2722 .help = "Verify strict pattern",
2723 },
2724 {
2725 .ival = "null",
2726 .oval = VERIFY_NULL,
2727 .help = "Pretend to verify",
2728 },
2729 },
2730 },
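/*
 * Illustrative job-file snippet: write with crc32c checksums and verify the
 * data afterwards, using the verify_* options defined below. Sizes are
 * arbitrary examples.
 *
 *	[write-and-verify]
 *	rw=randwrite
 *	bs=4k
 *	size=256m
 *	verify=crc32c
 *	verify_fatal=1
 */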
2731 {
2732 .name = "do_verify",
2733 .lname = "Perform verify step",
2734 .type = FIO_OPT_BOOL,
2735 .off1 = offsetof(struct thread_options, do_verify),
2736 .help = "Run verification stage after write",
2737 .def = "1",
2738 .parent = "verify",
2739 .hide = 1,
2740 .category = FIO_OPT_C_IO,
2741 .group = FIO_OPT_G_VERIFY,
2742 },
2743 {
2744 .name = "verifysort",
2745 .lname = "Verify sort",
2746 .type = FIO_OPT_BOOL,
2747 .off1 = offsetof(struct thread_options, verifysort),
2748 .help = "Sort written verify blocks for read back",
2749 .def = "1",
2750 .parent = "verify",
2751 .hide = 1,
2752 .category = FIO_OPT_C_IO,
2753 .group = FIO_OPT_G_VERIFY,
2754 },
2755 {
2756 .name = "verifysort_nr",
2757 .lname = "Verify Sort Nr",
2758 .type = FIO_OPT_INT,
2759 .off1 = offsetof(struct thread_options, verifysort_nr),
2760 .help = "Pre-load and sort verify blocks for a read workload",
2761 .minval = 0,
2762 .maxval = 131072,
2763 .def = "1024",
2764 .parent = "verify",
2765 .category = FIO_OPT_C_IO,
2766 .group = FIO_OPT_G_VERIFY,
2767 },
2768 {
2769 .name = "verify_interval",
2770 .lname = "Verify interval",
2771 .type = FIO_OPT_INT,
2772 .off1 = offsetof(struct thread_options, verify_interval),
2773 .minval = 2 * sizeof(struct verify_header),
2774 .help = "Store verify buffer header every N bytes",
2775 .parent = "verify",
2776 .hide = 1,
2777 .interval = 2 * sizeof(struct verify_header),
2778 .category = FIO_OPT_C_IO,
2779 .group = FIO_OPT_G_VERIFY,
2780 },
2781 {
2782 .name = "verify_offset",
2783 .lname = "Verify offset",
2784 .type = FIO_OPT_INT,
2785 .help = "Offset verify header location by N bytes",
2786 .off1 = offsetof(struct thread_options, verify_offset),
2787 .minval = sizeof(struct verify_header),
2788 .parent = "verify",
2789 .hide = 1,
2790 .category = FIO_OPT_C_IO,
2791 .group = FIO_OPT_G_VERIFY,
2792 },
2793 {
2794 .name = "verify_pattern",
2795 .lname = "Verify pattern",
2796 .type = FIO_OPT_STR,
2797 .cb = str_verify_pattern_cb,
2798 .off1 = offsetof(struct thread_options, verify_pattern),
2799 .help = "Fill pattern for IO buffers",
2800 .parent = "verify",
2801 .hide = 1,
2802 .category = FIO_OPT_C_IO,
2803 .group = FIO_OPT_G_VERIFY,
2804 },
2805 {
2806 .name = "verify_fatal",
2807 .lname = "Verify fatal",
2808 .type = FIO_OPT_BOOL,
2809 .off1 = offsetof(struct thread_options, verify_fatal),
2810 .def = "0",
2811 .help = "Exit on a single verify failure, don't continue",
2812 .parent = "verify",
2813 .hide = 1,
2814 .category = FIO_OPT_C_IO,
2815 .group = FIO_OPT_G_VERIFY,
2816 },
2817 {
2818 .name = "verify_dump",
2819 .lname = "Verify dump",
2820 .type = FIO_OPT_BOOL,
2821 .off1 = offsetof(struct thread_options, verify_dump),
2822 .def = "0",
2823 .help = "Dump contents of good and bad blocks on failure",
2824 .parent = "verify",
2825 .hide = 1,
2826 .category = FIO_OPT_C_IO,
2827 .group = FIO_OPT_G_VERIFY,
2828 },
2829 {
2830 .name = "verify_async",
2831 .lname = "Verify asynchronously",
2832 .type = FIO_OPT_INT,
2833 .off1 = offsetof(struct thread_options, verify_async),
2834 .def = "0",
2835 .help = "Number of async verifier threads to use",
2836 .parent = "verify",
2837 .hide = 1,
2838 .category = FIO_OPT_C_IO,
2839 .group = FIO_OPT_G_VERIFY,
2840 },
2841 {
2842 .name = "verify_backlog",
2843 .lname = "Verify backlog",
2844 .type = FIO_OPT_STR_VAL,
2845 .off1 = offsetof(struct thread_options, verify_backlog),
2846 .help = "Verify after this number of blocks are written",
2847 .parent = "verify",
2848 .hide = 1,
2849 .category = FIO_OPT_C_IO,
2850 .group = FIO_OPT_G_VERIFY,
2851 },
2852 {
2853 .name = "verify_backlog_batch",
2854 .lname = "Verify backlog batch",
2855 .type = FIO_OPT_INT,
2856 .off1 = offsetof(struct thread_options, verify_batch),
2857 .help = "Verify this number of IO blocks",
2858 .parent = "verify",
2859 .hide = 1,
2860 .category = FIO_OPT_C_IO,
2861 .group = FIO_OPT_G_VERIFY,
2862 },
2863 #ifdef FIO_HAVE_CPU_AFFINITY
2864 {
2865 .name = "verify_async_cpus",
2866 .lname = "Async verify CPUs",
2867 .type = FIO_OPT_STR,
2868 .cb = str_verify_cpus_allowed_cb,
2869 .off1 = offsetof(struct thread_options, verify_cpumask),
2870 .help = "Set CPUs allowed for async verify threads",
2871 .parent = "verify_async",
2872 .hide = 1,
2873 .category = FIO_OPT_C_IO,
2874 .group = FIO_OPT_G_VERIFY,
2875 },
2876 #else
2877 {
2878 .name = "verify_async_cpus",
2879 .lname = "Async verify CPUs",
2880 .type = FIO_OPT_UNSUPPORTED,
2881 .help = "Your platform does not support CPU affinities",
2882 },
2883 #endif
2884 {
2885 .name = "experimental_verify",
2886 .lname = "Experimental Verify",
2887 .off1 = offsetof(struct thread_options, experimental_verify),
2888 .type = FIO_OPT_BOOL,
2889 .help = "Enable experimental verification",
2890 .parent = "verify",
2891 .category = FIO_OPT_C_IO,
2892 .group = FIO_OPT_G_VERIFY,
2893 },
2894 {
2895 .name = "verify_state_load",
2896 .lname = "Load verify state",
2897 .off1 = offsetof(struct thread_options, verify_state),
2898 .type = FIO_OPT_BOOL,
2899 .help = "Load verify termination state",
2900 .parent = "verify",
2901 .category = FIO_OPT_C_IO,
2902 .group = FIO_OPT_G_VERIFY,
2903 },
2904 {
2905 .name = "verify_state_save",
2906 .lname = "Save verify state",
2907 .off1 = offsetof(struct thread_options, verify_state_save),
2908 .type = FIO_OPT_BOOL,
2909 .def = "1",
2910 .help = "Save verify state on termination",
2911 .parent = "verify",
2912 .category = FIO_OPT_C_IO,
2913 .group = FIO_OPT_G_VERIFY,
2914 },
2915 #ifdef FIO_HAVE_TRIM
2916 {
2917 .name = "trim_percentage",
2918 .lname = "Trim percentage",
2919 .type = FIO_OPT_INT,
2920 .off1 = offsetof(struct thread_options, trim_percentage),
2921 .minval = 0,
2922 .maxval = 100,
2923 .help = "Percentage of verify blocks to trim (i.e., discard)",
2924 .parent = "verify",
2925 .def = "0",
2926 .interval = 1,
2927 .hide = 1,
2928 .category = FIO_OPT_C_IO,
2929 .group = FIO_OPT_G_TRIM,
2930 },
2931 {
2932 .name = "trim_verify_zero",
2933 .lname = "Verify trim zero",
2934 .type = FIO_OPT_BOOL,
2935 .help = "Verify that trimmed (i.e., discarded) blocks are returned as zeroes",
2936 .off1 = offsetof(struct thread_options, trim_zero),
2937 .parent = "trim_percentage",
2938 .hide = 1,
2939 .def = "1",
2940 .category = FIO_OPT_C_IO,
2941 .group = FIO_OPT_G_TRIM,
2942 },
2943 {
2944 .name = "trim_backlog",
2945 .lname = "Trim backlog",
2946 .type = FIO_OPT_STR_VAL,
2947 .off1 = offsetof(struct thread_options, trim_backlog),
2948 .help = "Trim after this number of blocks are written",
2949 .parent = "trim_percentage",
2950 .hide = 1,
2951 .interval = 1,
2952 .category = FIO_OPT_C_IO,
2953 .group = FIO_OPT_G_TRIM,
2954 },
2955 {
2956 .name = "trim_backlog_batch",
2957 .lname = "Trim backlog batch",
2958 .type = FIO_OPT_INT,
2959 .off1 = offsetof(struct thread_options, trim_batch),
2960 .help = "Trim this number of IO blocks",
2961 .parent = "trim_percentage",
2962 .hide = 1,
2963 .interval = 1,
2964 .category = FIO_OPT_C_IO,
2965 .group = FIO_OPT_G_TRIM,
2966 },
2967 #else
2968 {
2969 .name = "trim_percentage",
2970 .lname = "Trim percentage",
2971 .type = FIO_OPT_UNSUPPORTED,
2972 .help = "Fio does not support TRIM on your platform",
2973 },
2974 {
2975 .name = "trim_verify_zero",
2976 .lname = "Verify trim zero",
2977 .type = FIO_OPT_UNSUPPORTED,
2978 .help = "Fio does not support TRIM on your platform",
2979 },
2980 {
2981 .name = "trim_backlog",
2982 .lname = "Trim backlog",
2983 .type = FIO_OPT_UNSUPPORTED,
2984 .help = "Fio does not support TRIM on your platform",
2985 },
2986 {
2987 .name = "trim_backlog_batch",
2988 .lname = "Trim backlog batch",
2989 .type = FIO_OPT_UNSUPPORTED,
2990 .help = "Fio does not support TRIM on your platform",
2991 },
2992 #endif
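/*
 * Illustrative job-file snippet for the trim options above (on platforms
 * with TRIM support); the percentages are arbitrary examples.
 *
 *	[trim-verify]
 *	rw=randwrite
 *	verify=crc32c
 *	trim_percentage=50
 *	trim_verify_zero=1
 */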
2993 {
2994 .name = "write_iolog",
2995 .lname = "Write I/O log",
2996 .type = FIO_OPT_STR_STORE,
2997 .off1 = offsetof(struct thread_options, write_iolog_file),
2998 .help = "Store IO pattern to file",
2999 .category = FIO_OPT_C_IO,
3000 .group = FIO_OPT_G_IOLOG,
3001 },
3002 {
3003 .name = "read_iolog",
3004 .lname = "Read I/O log",
3005 .type = FIO_OPT_STR_STORE,
3006 .off1 = offsetof(struct thread_options, read_iolog_file),
3007 .help = "Playback IO pattern from file",
3008 .category = FIO_OPT_C_IO,
3009 .group = FIO_OPT_G_IOLOG,
3010 },
3011 {
3012 .name = "replay_no_stall",
3013 .lname = "Don't stall on replay",
3014 .type = FIO_OPT_BOOL,
3015 .off1 = offsetof(struct thread_options, no_stall),
3016 .def = "0",
3017 .parent = "read_iolog",
3018 .hide = 1,
3019 .help = "Playback IO pattern file as fast as possible without stalls",
3020 .category = FIO_OPT_C_IO,
3021 .group = FIO_OPT_G_IOLOG,
3022 },
3023 {
3024 .name = "replay_redirect",
3025 .lname = "Redirect device for replay",
3026 .type = FIO_OPT_STR_STORE,
3027 .off1 = offsetof(struct thread_options, replay_redirect),
3028 .parent = "read_iolog",
3029 .hide = 1,
3030 .help = "Replay all I/O onto this device, regardless of trace device",
3031 .category = FIO_OPT_C_IO,
3032 .group = FIO_OPT_G_IOLOG,
3033 },
3034 {
3035 .name = "replay_scale",
3036 .lname = "Replay offset scale factor",
3037 .type = FIO_OPT_INT,
3038 .off1 = offsetof(struct thread_options, replay_scale),
3039 .parent = "read_iolog",
3040 .def = "1",
3041 .help = "Scale offset down by this factor",
3042 .category = FIO_OPT_C_IO,
3043 .group = FIO_OPT_G_IOLOG,
3044 },
3045 {
3046 .name = "replay_align",
3047 .lname = "Replay alignment",
3048 .type = FIO_OPT_INT,
3049 .off1 = offsetof(struct thread_options, replay_align),
3050 .parent = "read_iolog",
3051 .help = "Align offsets to this blocksize",
3052 .category = FIO_OPT_C_IO,
3053 .group = FIO_OPT_G_IOLOG,
3054 .pow2 = 1,
3055 },
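/*
 * Illustrative job-file snippets for IO log recording and replay. The log
 * file name and the redirect device are hypothetical examples.
 *
 *	[record]
 *	write_iolog=trace.fio
 *
 *	[replay]
 *	read_iolog=trace.fio
 *	replay_no_stall=1
 *	replay_redirect=/dev/sdz
 */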
3056 {
3057 .name = "exec_prerun",
3058 .lname = "Pre-execute runnable",
3059 .type = FIO_OPT_STR_STORE,
3060 .off1 = offsetof(struct thread_options, exec_prerun),
3061 .help = "Execute this file prior to running job",
3062 .category = FIO_OPT_C_GENERAL,
3063 .group = FIO_OPT_G_INVALID,
3064 },
3065 {
3066 .name = "exec_postrun",
3067 .lname = "Post-execute runnable",
3068 .type = FIO_OPT_STR_STORE,
3069 .off1 = offsetof(struct thread_options, exec_postrun),
3070 .help = "Execute this file after running job",
3071 .category = FIO_OPT_C_GENERAL,
3072 .group = FIO_OPT_G_INVALID,
3073 },
3074 #ifdef FIO_HAVE_IOSCHED_SWITCH
3075 {
3076 .name = "ioscheduler",
3077 .lname = "I/O scheduler",
3078 .type = FIO_OPT_STR_STORE,
3079 .off1 = offsetof(struct thread_options, ioscheduler),
3080 .help = "Use this IO scheduler on the backing device",
3081 .category = FIO_OPT_C_FILE,
3082 .group = FIO_OPT_G_INVALID,
3083 },
3084 #else
3085 {
3086 .name = "ioscheduler",
3087 .lname = "I/O scheduler",
3088 .type = FIO_OPT_UNSUPPORTED,
3089 .help = "Your platform does not support IO scheduler switching",
3090 },
3091 #endif
3092 {
3093 .name = "zonesize",
3094 .lname = "Zone size",
3095 .type = FIO_OPT_STR_VAL,
3096 .off1 = offsetof(struct thread_options, zone_size),
3097 .help = "Amount of data to read per zone",
3098 .def = "0",
3099 .interval = 1024 * 1024,
3100 .category = FIO_OPT_C_IO,
3101 .group = FIO_OPT_G_ZONE,
3102 },
3103 {
3104 .name = "zonerange",
3105 .lname = "Zone range",
3106 .type = FIO_OPT_STR_VAL,
3107 .off1 = offsetof(struct thread_options, zone_range),
3108 .help = "Size of an IO zone",
3109 .def = "0",
3110 .interval = 1024 * 1024,
3111 .category = FIO_OPT_C_IO,
3112 .group = FIO_OPT_G_ZONE,
3113 },
3114 {
3115 .name = "zoneskip",
3116 .lname = "Zone skip",
3117 .type = FIO_OPT_STR_VAL,
3118 .off1 = offsetof(struct thread_options, zone_skip),
3119 .help = "Space between IO zones",
3120 .def = "0",
3121 .interval = 1024 * 1024,
3122 .category = FIO_OPT_C_IO,
3123 .group = FIO_OPT_G_ZONE,
3124 },
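/*
 * Illustrative job-file snippet for zoned IO: read zonesize bytes from each
 * zonerange-sized zone, then skip zoneskip bytes before the next zone. The
 * sizes are arbitrary examples.
 *
 *	[zoned-read]
 *	rw=read
 *	zonerange=256m
 *	zonesize=64m
 *	zoneskip=192m
 */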
3125 {
3126 .name = "lockmem",
3127 .lname = "Lock memory",
3128 .type = FIO_OPT_STR_VAL,
3129 .off1 = offsetof(struct thread_options, lockmem),
3130 .help = "Lock down this amount of memory (per worker)",
3131 .def = "0",
3132 .interval = 1024 * 1024,
3133 .category = FIO_OPT_C_GENERAL,
3134 .group = FIO_OPT_G_INVALID,
3135 },
3136 {
3137 .name = "rwmixread",
3138 .lname = "Read/write mix read",
3139 .type = FIO_OPT_INT,
3140 .cb = str_rwmix_read_cb,
3141 .off1 = offsetof(struct thread_options, rwmix[DDIR_READ]),
3142 .maxval = 100,
3143 .help = "Percentage of mixed workload that is reads",
3144 .def = "50",
3145 .interval = 5,
3146 .inverse = "rwmixwrite",
3147 .category = FIO_OPT_C_IO,
3148 .group = FIO_OPT_G_RWMIX,
3149 },
3150 {
3151 .name = "rwmixwrite",
3152 .lname = "Read/write mix write",
3153 .type = FIO_OPT_INT,
3154 .cb = str_rwmix_write_cb,
3155 .off1 = offsetof(struct thread_options, rwmix[DDIR_WRITE]),
3156 .maxval = 100,
3157 .help = "Percentage of mixed workload that is writes",
3158 .def = "50",
3159 .interval = 5,
3160 .inverse = "rwmixread",
3161 .category = FIO_OPT_C_IO,
3162 .group = FIO_OPT_G_RWMIX,
3163 },
3164 {
3165 .name = "rwmixcycle",
3166 .lname = "Read/write mix cycle",
3167 .type = FIO_OPT_DEPRECATED,
3168 .category = FIO_OPT_C_IO,
3169 .group = FIO_OPT_G_RWMIX,
3170 },
3171 {
3172 .name = "nice",
3173 .lname = "Nice",
3174 .type = FIO_OPT_INT,
3175 .off1 = offsetof(struct thread_options, nice),
3176 .help = "Set job CPU nice value",
3177 .minval = -20,
3178 .maxval = 19,
3179 .def = "0",
3180 .interval = 1,
3181 .category = FIO_OPT_C_GENERAL,
3182 .group = FIO_OPT_G_CRED,
3183 },
3184 #ifdef FIO_HAVE_IOPRIO
3185 {
3186 .name = "prio",
3187 .lname = "I/O nice priority",
3188 .type = FIO_OPT_INT,
3189 .off1 = offsetof(struct thread_options, ioprio),
3190 .help = "Set job IO priority value",
3191 .minval = IOPRIO_MIN_PRIO,
3192 .maxval = IOPRIO_MAX_PRIO,
3193 .interval = 1,
3194 .category = FIO_OPT_C_GENERAL,
3195 .group = FIO_OPT_G_CRED,
3196 },
3197 #else
3198 {
3199 .name = "prio",
3200 .lname = "I/O nice priority",
3201 .type = FIO_OPT_UNSUPPORTED,
3202 .help = "Your platform does not support IO priorities",
3203 },
3204 #endif
3205 #ifdef FIO_HAVE_IOPRIO_CLASS
3206 #ifndef FIO_HAVE_IOPRIO
3207 #error "FIO_HAVE_IOPRIO_CLASS requires FIO_HAVE_IOPRIO"
3208 #endif
3209 {
3210 .name = "prioclass",
3211 .lname = "I/O nice priority class",
3212 .type = FIO_OPT_INT,
3213 .off1 = offsetof(struct thread_options, ioprio_class),
3214 .help = "Set job IO priority class",
3215 .minval = IOPRIO_MIN_PRIO_CLASS,
3216 .maxval = IOPRIO_MAX_PRIO_CLASS,
3217 .interval = 1,
3218 .category = FIO_OPT_C_GENERAL,
3219 .group = FIO_OPT_G_CRED,
3220 },
3221 #else
3222 {
3223 .name = "prioclass",
3224 .lname = "I/O nice priority class",
3225 .type = FIO_OPT_UNSUPPORTED,
3226 .help = "Your platform does not support IO priority classes",
3227 },
3228 #endif
3229 {
3230 .name = "thinktime",
3231 .lname = "Thinktime",
3232 .type = FIO_OPT_INT,
3233 .off1 = offsetof(struct thread_options, thinktime),
3234 .help = "Idle time between IO buffers (usec)",
3235 .def = "0",
3236 .is_time = 1,
3237 .category = FIO_OPT_C_IO,
3238 .group = FIO_OPT_G_THINKTIME,
3239 },
3240 {
3241 .name = "thinktime_spin",
3242 .lname = "Thinktime spin",
3243 .type = FIO_OPT_INT,
3244 .off1 = offsetof(struct thread_options, thinktime_spin),
3245 .help = "Start think time by spinning this amount (usec)",
3246 .def = "0",
3247 .is_time = 1,
3248 .parent = "thinktime",
3249 .hide = 1,
3250 .category = FIO_OPT_C_IO,
3251 .group = FIO_OPT_G_THINKTIME,
3252 },
3253 {
3254 .name = "thinktime_blocks",
3255 .lname = "Thinktime blocks",
3256 .type = FIO_OPT_INT,
3257 .off1 = offsetof(struct thread_options, thinktime_blocks),
3258 .help = "Number of IO blocks to issue between 'thinktime' periods",
3259 .def = "1",
3260 .parent = "thinktime",
3261 .hide = 1,
3262 .category = FIO_OPT_C_IO,
3263 .group = FIO_OPT_G_THINKTIME,
3264 },
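/*
 * Illustrative job-file snippet for the thinktime options above: after every
 * 16 blocks, spin for 200 usec and then sleep for the remainder of 1000
 * usec. The numbers are arbitrary examples.
 *
 *	[throttled]
 *	thinktime=1000
 *	thinktime_spin=200
 *	thinktime_blocks=16
 */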
3265 {
3266 .name = "rate",
3267 .lname = "I/O rate",
3268 .type = FIO_OPT_INT,
3269 .off1 = offsetof(struct thread_options, rate[DDIR_READ]),
3270 .off2 = offsetof(struct thread_options, rate[DDIR_WRITE]),
3271 .off3 = offsetof(struct thread_options, rate[DDIR_TRIM]),
3272 .help = "Set bandwidth rate",
3273 .category = FIO_OPT_C_IO,
3274 .group = FIO_OPT_G_RATE,
3275 },
3276 {
3277 .name = "rate_min",
3278 .alias = "ratemin",
3279 .lname = "I/O min rate",
3280 .type = FIO_OPT_INT,
3281 .off1 = offsetof(struct thread_options, ratemin[DDIR_READ]),
3282 .off2 = offsetof(struct thread_options, ratemin[DDIR_WRITE]),
3283 .off3 = offsetof(struct thread_options, ratemin[DDIR_TRIM]),
3284 .help = "Job must meet this rate or it will be shut down",
3285 .parent = "rate",
3286 .hide = 1,
3287 .category = FIO_OPT_C_IO,
3288 .group = FIO_OPT_G_RATE,
3289 },
3290 {
3291 .name = "rate_iops",
3292 .lname = "I/O rate IOPS",
3293 .type = FIO_OPT_INT,
3294 .off1 = offsetof(struct thread_options, rate_iops[DDIR_READ]),
3295 .off2 = offsetof(struct thread_options, rate_iops[DDIR_WRITE]),
3296 .off3 = offsetof(struct thread_options, rate_iops[DDIR_TRIM]),
3297 .help = "Limit IO to this number of IO operations/sec",
3298 .hide = 1,
3299 .category = FIO_OPT_C_IO,
3300 .group = FIO_OPT_G_RATE,
3301 },
3302 {
3303 .name = "rate_iops_min",
3304 .lname = "I/O min rate IOPS",
3305 .type = FIO_OPT_INT,
3306 .off1 = offsetof(struct thread_options, rate_iops_min[DDIR_READ]),
3307 .off2 = offsetof(struct thread_options, rate_iops_min[DDIR_WRITE]),
3308 .off3 = offsetof(struct thread_options, rate_iops_min[DDIR_TRIM]),
3309 .help = "Job must meet this rate or it will be shut down",
3310 .parent = "rate_iops",
3311 .hide = 1,
3312 .category = FIO_OPT_C_IO,
3313 .group = FIO_OPT_G_RATE,
3314 },
3315 {
3316 .name = "rate_process",
3317 .lname = "Rate Process",
3318 .type = FIO_OPT_STR,
3319 .off1 = offsetof(struct thread_options, rate_process),
3320 .help = "Which process (linear or Poisson) controls how rated IO is issued",
3321 .def = "linear",
3322 .category = FIO_OPT_C_IO,
3323 .group = FIO_OPT_G_RATE,
3324 .posval = {
3325 { .ival = "linear",
3326 .oval = RATE_PROCESS_LINEAR,
3327 .help = "Linear rate of IO",
3328 },
3329 {
3330 .ival = "poisson",
3331 .oval = RATE_PROCESS_POISSON,
3332 .help = "Rate follows Poisson process",
3333 },
3334 },
3335 .parent = "rate",
3336 },
3337 {
3338 .name = "rate_cycle",
3339 .alias = "ratecycle",
3340 .lname = "I/O rate cycle",
3341 .type = FIO_OPT_INT,
3342 .off1 = offsetof(struct thread_options, ratecycle),
3343 .help = "Window average for rate limits (msec)",
3344 .def = "1000",
3345 .parent = "rate",
3346 .hide = 1,
3347 .category = FIO_OPT_C_IO,
3348 .group = FIO_OPT_G_RATE,
3349 },
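/*
 * Illustrative job-file snippet for the rate options above. The
 * per-direction comma syntax (read,write[,trim]) and the values are
 * examples only.
 *
 *	[rated]
 *	rw=rw
 *	rate=1m,512k
 *	rate_process=poisson
 */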
3350 {
3351 .name = "max_latency",
3352 .lname = "Max Latency",
3353 .type = FIO_OPT_INT,
3354 .off1 = offsetof(struct thread_options, max_latency),
3355 .help = "Maximum tolerated IO latency (usec)",
3356 .is_time = 1,
3357 .category = FIO_OPT_C_IO,
3358 .group = FIO_OPT_G_LATPROF,
3359 },
3360 {
3361 .name = "latency_target",
3362 .lname = "Latency Target (usec)",
3363 .type = FIO_OPT_STR_VAL_TIME,
3364 .off1 = offsetof(struct thread_options, latency_target),
3365 .help = "Ramp to max queue depth supporting this latency",
3366 .is_time = 1,
3367 .category = FIO_OPT_C_IO,
3368 .group = FIO_OPT_G_LATPROF,
3369 },
3370 {
3371 .name = "latency_window",
3372 .lname = "Latency Window (usec)",
3373 .type = FIO_OPT_STR_VAL_TIME,
3374 .off1 = offsetof(struct thread_options, latency_window),
3375 .help = "Time to sustain latency_target",
3376 .is_time = 1,
3377 .category = FIO_OPT_C_IO,
3378 .group = FIO_OPT_G_LATPROF,
3379 },
3380 {
3381 .name = "latency_percentile",
3382 .lname = "Latency Percentile",
3383 .type = FIO_OPT_FLOAT_LIST,
3384 .off1 = offsetof(struct thread_options, latency_percentile),
3385 .help = "Percentile of IOs must be below latency_target",
3386 .def = "100",
3387 .maxlen = 1,
3388 .minfp = 0.0,
3389 .maxfp = 100.0,
3390 .category = FIO_OPT_C_IO,
3391 .group = FIO_OPT_G_LATPROF,
3392 },
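/*
 * Illustrative job-file snippet for latency-profile driven queue depth: fio
 * searches for the highest depth at which 99.9% of IOs complete within
 * 10 msec over each 5 second window. Values are arbitrary examples.
 *
 *	[lat-target]
 *	latency_target=10ms
 *	latency_window=5s
 *	latency_percentile=99.9
 */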
3393 {
3394 .name = "invalidate",
3395 .lname = "Cache invalidate",
3396 .type = FIO_OPT_BOOL,
3397 .off1 = offsetof(struct thread_options, invalidate_cache),
3398 .help = "Invalidate buffer/page cache prior to running job",
3399 .def = "1",
3400 .category = FIO_OPT_C_IO,
3401 .group = FIO_OPT_G_IO_TYPE,
3402 },
3403 {
3404 .name = "sync",
3405 .lname = "Synchronous I/O",
3406 .type = FIO_OPT_BOOL,
3407 .off1 = offsetof(struct thread_options, sync_io),
3408 .help = "Use O_SYNC for buffered writes",
3409 .def = "0",
3410 .parent = "buffered",
3411 .hide = 1,
3412 .category = FIO_OPT_C_IO,
3413 .group = FIO_OPT_G_IO_TYPE,
3414 },
3415 {
3416 .name = "create_serialize",
3417 .lname = "Create serialize",
3418 .type = FIO_OPT_BOOL,
3419 .off1 = offsetof(struct thread_options, create_serialize),
3420 .help = "Serialize creation of job files",
3421 .def = "1",
3422 .category = FIO_OPT_C_FILE,
3423 .group = FIO_OPT_G_INVALID,
3424 },
3425 {
3426 .name = "create_fsync",
3427 .lname = "Create fsync",
3428 .type = FIO_OPT_BOOL,
3429 .off1 = offsetof(struct thread_options, create_fsync),
3430 .help = "fsync file after creation",
3431 .def = "1",
3432 .category = FIO_OPT_C_FILE,
3433 .group = FIO_OPT_G_INVALID,
3434 },
3435 {
3436 .name = "create_on_open",
3437 .lname = "Create on open",
3438 .type = FIO_OPT_BOOL,
3439 .off1 = offsetof(struct thread_options, create_on_open),
3440 .help = "Create files when they are opened for IO",
3441 .def = "0",
3442 .category = FIO_OPT_C_FILE,
3443 .group = FIO_OPT_G_INVALID,
3444 },
3445 {
3446 .name = "create_only",
3447 .lname = "Create Only",
3448 .type = FIO_OPT_BOOL,
3449 .off1 = offsetof(struct thread_options, create_only),
3450 .help = "Only perform file creation phase",
3451 .category = FIO_OPT_C_FILE,
3452 .def = "0",
3453 },
3454 {
3455 .name = "allow_file_create",
3456 .lname = "Allow file create",
3457 .type = FIO_OPT_BOOL,
3458 .off1 = offsetof(struct thread_options, allow_create),
3459 .help = "Permit fio to create files if they don't exist",
3460 .def = "1",
3461 .category = FIO_OPT_C_FILE,
3462 .group = FIO_OPT_G_FILENAME,
3463 },
3464 {
3465 .name = "allow_mounted_write",
3466 .lname = "Allow mounted write",
3467 .type = FIO_OPT_BOOL,
3468 .off1 = offsetof(struct thread_options, allow_mounted_write),
3469 .help = "Allow writes to a mounted partition",
3470 .def = "0",
3471 .category = FIO_OPT_C_FILE,
3472 .group = FIO_OPT_G_FILENAME,
3473 },
3474 {
3475 .name = "pre_read",
3476 .lname = "Pre-read files",
3477 .type = FIO_OPT_BOOL,
3478 .off1 = offsetof(struct thread_options, pre_read),
3479 .help = "Pre-read files before starting official testing",
3480 .def = "0",
3481 .category = FIO_OPT_C_FILE,
3482 .group = FIO_OPT_G_INVALID,
3483 },
3484 #ifdef FIO_HAVE_CPU_AFFINITY
3485 {
3486 .name = "cpumask",
3487 .lname = "CPU mask",
3488 .type = FIO_OPT_INT,
3489 .cb = str_cpumask_cb,
3490 .off1 = offsetof(struct thread_options, cpumask),
3491 .help = "CPU affinity mask",
3492 .category = FIO_OPT_C_GENERAL,
3493 .group = FIO_OPT_G_CRED,
3494 },
3495 {
3496 .name = "cpus_allowed",
3497 .lname = "CPUs allowed",
3498 .type = FIO_OPT_STR,
3499 .cb = str_cpus_allowed_cb,
3500 .off1 = offsetof(struct thread_options, cpumask),
3501 .help = "Set CPUs allowed",
3502 .category = FIO_OPT_C_GENERAL,
3503 .group = FIO_OPT_G_CRED,
3504 },
3505 {
3506 .name = "cpus_allowed_policy",
3507 .lname = "CPUs allowed distribution policy",
3508 .type = FIO_OPT_STR,
3509 .off1 = offsetof(struct thread_options, cpus_allowed_policy),
3510 .help = "Distribution policy for cpus_allowed",
3511 .parent = "cpus_allowed",
3512 .prio = 1,
3513 .posval = {
3514 { .ival = "shared",
3515 .oval = FIO_CPUS_SHARED,
3516 .help = "Mask shared between threads",
3517 },
3518 { .ival = "split",
3519 .oval = FIO_CPUS_SPLIT,
3520 .help = "Mask split between threads",
3521 },
3522 },
3523 .category = FIO_OPT_C_GENERAL,
3524 .group = FIO_OPT_G_CRED,
3525 },
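/*
 * Illustrative job-file snippet splitting an allowed CPU set between the
 * cloned jobs; the CPU numbers are arbitrary examples.
 *
 *	[pinned]
 *	numjobs=4
 *	cpus_allowed=0-3
 *	cpus_allowed_policy=split
 */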
3526 #else
3527 {
3528 .name = "cpumask",
3529 .lname = "CPU mask",
3530 .type = FIO_OPT_UNSUPPORTED,
3531 .help = "Your platform does not support CPU affinities",
3532 },
3533 {
3534 .name = "cpus_allowed",
3535 .lname = "CPUs allowed",
3536 .type = FIO_OPT_UNSUPPORTED,
3537 .help = "Your platform does not support CPU affinities",
3538 },
3539 {
3540 .name = "cpus_allowed_policy",
3541 .lname = "CPUs allowed distribution policy",
3542 .type = FIO_OPT_UNSUPPORTED,
3543 .help = "Your platform does not support CPU affinities",
3544 },
3545 #endif
3546 #ifdef CONFIG_LIBNUMA
3547 {
3548 .name = "numa_cpu_nodes",
3549 .lname = "NUMA CPU Nodes",
3550 .type = FIO_OPT_STR,
3551 .cb = str_numa_cpunodes_cb,
3552 .off1 = offsetof(struct thread_options, numa_cpunodes),
3553 .help = "NUMA CPU nodes bind",
3554 .category = FIO_OPT_C_GENERAL,
3555 .group = FIO_OPT_G_INVALID,
3556 },
3557 {
3558 .name = "numa_mem_policy",
3559 .lname = "NUMA Memory Policy",
3560 .type = FIO_OPT_STR,
3561 .cb = str_numa_mpol_cb,
3562 .off1 = offsetof(struct thread_options, numa_memnodes),
3563 .help = "NUMA memory policy setup",
3564 .category = FIO_OPT_C_GENERAL,
3565 .group = FIO_OPT_G_INVALID,
3566 },
3567 #else
3568 {
3569 .name = "numa_cpu_nodes",
3570 .lname = "NUMA CPU Nodes",
3571 .type = FIO_OPT_UNSUPPORTED,
3572 .help = "Build fio with libnuma-dev(el) to enable this option",
3573 },
3574 {
3575 .name = "numa_mem_policy",
3576 .lname = "NUMA Memory Policy",
3577 .type = FIO_OPT_UNSUPPORTED,
3578 .help = "Build fio with libnuma-dev(el) to enable this option",
3579 },
3580 #endif
3581 #ifdef CONFIG_CUDA
3582 {
3583 .name = "gpu_dev_id",
3584 .lname = "GPU device ID",
3585 .type = FIO_OPT_INT,
3586 .off1 = offsetof(struct thread_options, gpu_dev_id),
3587 .help = "Set GPU device ID for GPUDirect RDMA",
3588 .def = "0",
3589 .category = FIO_OPT_C_GENERAL,
3590 .group = FIO_OPT_G_INVALID,
3591 },
3592 #endif
3593 {
3594 .name = "end_fsync",
3595 .lname = "End fsync",
3596 .type = FIO_OPT_BOOL,
3597 .off1 = offsetof(struct thread_options, end_fsync),
3598 .help = "Include fsync at the end of job",
3599 .def = "0",
3600 .category = FIO_OPT_C_FILE,
3601 .group = FIO_OPT_G_INVALID,
3602 },
3603 {
3604 .name = "fsync_on_close",
3605 .lname = "Fsync on close",
3606 .type = FIO_OPT_BOOL,
3607 .off1 = offsetof(struct thread_options, fsync_on_close),
3608 .help = "fsync files on close",
3609 .def = "0",
3610 .category = FIO_OPT_C_FILE,
3611 .group = FIO_OPT_G_INVALID,
3612 },
3613 {
3614 .name = "unlink",
3615 .lname = "Unlink file",
3616 .type = FIO_OPT_BOOL,
3617 .off1 = offsetof(struct thread_options, unlink),
3618 .help = "Unlink created files after job has completed",
3619 .def = "0",
3620 .category = FIO_OPT_C_FILE,
3621 .group = FIO_OPT_G_INVALID,
3622 },
3623 {
3624 .name = "unlink_each_loop",
3625 .lname = "Unlink file after each loop of a job",
3626 .type = FIO_OPT_BOOL,
3627 .off1 = offsetof(struct thread_options, unlink_each_loop),
3628 .help = "Unlink created files after each loop in a job has completed",
3629 .def = "0",
3630 .category = FIO_OPT_C_FILE,
3631 .group = FIO_OPT_G_INVALID,
3632 },
3633 {
3634 .name = "exitall",
3635 .lname = "Exit-all on terminate",
3636 .type = FIO_OPT_STR_SET,
3637 .cb = str_exitall_cb,
3638 .help = "Terminate all jobs when one exits",
3639 .category = FIO_OPT_C_GENERAL,
3640 .group = FIO_OPT_G_PROCESS,
3641 },
3642 {
3643 .name = "exitall_on_error",
3644 .lname = "Exit-all on terminate in error",
3645 .type = FIO_OPT_STR_SET,
3646 .off1 = offsetof(struct thread_options, exitall_error),
3647 .help = "Terminate all jobs when one exits in error",
3648 .category = FIO_OPT_C_GENERAL,
3649 .group = FIO_OPT_G_PROCESS,
3650 },
3651 {
3652 .name = "stonewall",
3653 .lname = "Wait for previous",
3654 .alias = "wait_for_previous",
3655 .type = FIO_OPT_STR_SET,
3656 .off1 = offsetof(struct thread_options, stonewall),
3657 .help = "Insert a hard barrier between this job and previous",
3658 .category = FIO_OPT_C_GENERAL,
3659 .group = FIO_OPT_G_PROCESS,
3660 },
3661 {
3662 .name = "new_group",
3663 .lname = "New group",
3664 .type = FIO_OPT_STR_SET,
3665 .off1 = offsetof(struct thread_options, new_group),
3666 .help = "Mark the start of a new group (for reporting)",
3667 .category = FIO_OPT_C_GENERAL,
3668 .group = FIO_OPT_G_PROCESS,
3669 },
3670 {
3671 .name = "thread",
3672 .lname = "Thread",
3673 .type = FIO_OPT_STR_SET,
3674 .off1 = offsetof(struct thread_options, use_thread),
3675 .help = "Use threads instead of processes",
3676 #ifdef CONFIG_NO_SHM
3677 .def = "1",
3678 .no_warn_def = 1,
3679 #endif
3680 .category = FIO_OPT_C_GENERAL,
3681 .group = FIO_OPT_G_PROCESS,
3682 },
3683 {
3684 .name = "per_job_logs",
3685 .lname = "Per Job Logs",
3686 .type = FIO_OPT_BOOL,
3687 .off1 = offsetof(struct thread_options, per_job_logs),
3688 .help = "Whether to include the job number in generated log file names",
3689 .def = "1",
3690 .category = FIO_OPT_C_LOG,
3691 .group = FIO_OPT_G_INVALID,
3692 },
3693 {
3694 .name = "write_bw_log",
3695 .lname = "Write bandwidth log",
3696 .type = FIO_OPT_STR,
3697 .off1 = offsetof(struct thread_options, bw_log_file),
3698 .cb = str_write_bw_log_cb,
3699 .help = "Write log of bandwidth during run",
3700 .category = FIO_OPT_C_LOG,
3701 .group = FIO_OPT_G_INVALID,
3702 },
3703 {
3704 .name = "write_lat_log",
3705 .lname = "Write latency log",
3706 .type = FIO_OPT_STR,
3707 .off1 = offsetof(struct thread_options, lat_log_file),
3708 .cb = str_write_lat_log_cb,
3709 .help = "Write log of latency during run",
3710 .category = FIO_OPT_C_LOG,
3711 .group = FIO_OPT_G_INVALID,
3712 },
3713 {
3714 .name = "write_iops_log",
3715 .lname = "Write IOPS log",
3716 .type = FIO_OPT_STR,
3717 .off1 = offsetof(struct thread_options, iops_log_file),
3718 .cb = str_write_iops_log_cb,
3719 .help = "Write log of IOPS during run",
3720 .category = FIO_OPT_C_LOG,
3721 .group = FIO_OPT_G_INVALID,
3722 },
3723 {
3724 .name = "log_avg_msec",
3725 .lname = "Log averaging (msec)",
3726 .type = FIO_OPT_INT,
3727 .off1 = offsetof(struct thread_options, log_avg_msec),
3728 .help = "Average bw/iops/lat logs over this period of time",
3729 .def = "0",
3730 .category = FIO_OPT_C_LOG,
3731 .group = FIO_OPT_G_INVALID,
3732 },
3733 {
3734 .name = "log_hist_msec",
3735 .lname = "Log histograms (msec)",
3736 .type = FIO_OPT_INT,
3737 .off1 = offsetof(struct thread_options, log_hist_msec),
3738 .help = "Dump completion latency histograms at this interval (msec)",
3739 .def = "0",
3740 .category = FIO_OPT_C_LOG,
3741 .group = FIO_OPT_G_INVALID,
3742 },
3743 {
3744 .name = "log_hist_coarseness",
3745 .lname = "Histogram logs coarseness",
3746 .type = FIO_OPT_INT,
3747 .off1 = offsetof(struct thread_options, log_hist_coarseness),
3748 .help = "Integer in range [0,6]. Higher coarseness outputs"
3749 " fewer histogram bins per sample. The numbers of bins for"
3750 " these are [1216, 608, 304, 152, 76, 38, 19], respectively.",
3751 .def = "0",
3752 .category = FIO_OPT_C_LOG,
3753 .group = FIO_OPT_G_INVALID,
3754 },
3755 {
3756 .name = "write_hist_log",
3757 .lname = "Write latency histogram logs",
3758 .type = FIO_OPT_STR,
3759 .off1 = offsetof(struct thread_options, hist_log_file),
3760 .cb = str_write_hist_log_cb,
3761 .help = "Write log of latency histograms during run",
3762 .category = FIO_OPT_C_LOG,
3763 .group = FIO_OPT_G_INVALID,
3764 },
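/*
 * Illustrative job-file snippet for the logging options above. "mylog" is a
 * hypothetical file name prefix; fio appends the log type (and, with
 * per_job_logs, the job number) to it.
 *
 *	[logged]
 *	write_bw_log=mylog
 *	write_lat_log=mylog
 *	write_iops_log=mylog
 *	log_avg_msec=1000
 */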
3765 {
3766 .name = "log_max_value",
3767 .lname = "Log maximum instead of average",
3768 .type = FIO_OPT_BOOL,
3769 .off1 = offsetof(struct thread_options, log_max),
3770 .help = "Log max sample in a window instead of average",
3771 .def = "0",
3772 .category = FIO_OPT_C_LOG,
3773 .group = FIO_OPT_G_INVALID,
3774 },
3775 {
3776 .name = "log_offset",
3777 .lname = "Log offset of IO",
3778 .type = FIO_OPT_BOOL,
3779 .off1 = offsetof(struct thread_options, log_offset),
3780 .help = "Include offset of IO for each log entry",
3781 .def = "0",
3782 .category = FIO_OPT_C_LOG,
3783 .group = FIO_OPT_G_INVALID,
3784 },
3785 #ifdef CONFIG_ZLIB
3786 {
3787 .name = "log_compression",
3788 .lname = "Log compression",
3789 .type = FIO_OPT_INT,
3790 .off1 = offsetof(struct thread_options, log_gz),
3791 .help = "Log in compressed chunks of this size",
3792 .minval = 1024ULL,
3793 .maxval = 512 * 1024 * 1024ULL,
3794 .category = FIO_OPT_C_LOG,
3795 .group = FIO_OPT_G_INVALID,
3796 },
3797 #ifdef FIO_HAVE_CPU_AFFINITY
3798 {
3799 .name = "log_compression_cpus",
3800 .lname = "Log Compression CPUs",
3801 .type = FIO_OPT_STR,
3802 .cb = str_log_cpus_allowed_cb,
3803 .off1 = offsetof(struct thread_options, log_gz_cpumask),
3804 .parent = "log_compression",
3805 .help = "Limit log compression to these CPUs",
3806 .category = FIO_OPT_C_LOG,
3807 .group = FIO_OPT_G_INVALID,
3808 },
3809 #else
3810 {
3811 .name = "log_compression_cpus",
3812 .lname = "Log Compression CPUs",
3813 .type = FIO_OPT_UNSUPPORTED,
3814 .help = "Your platform does not support CPU affinities",
3815 },
3816 #endif
3817 {
3818 .name = "log_store_compressed",
3819 .lname = "Log store compressed",
3820 .type = FIO_OPT_BOOL,
3821 .off1 = offsetof(struct thread_options, log_gz_store),
3822 .help = "Store logs in a compressed format",
3823 .category = FIO_OPT_C_LOG,
3824 .group = FIO_OPT_G_INVALID,
3825 },
3826 #else
3827 {
3828 .name = "log_compression",
3829 .lname = "Log compression",
3830 .type = FIO_OPT_UNSUPPORTED,
3831 .help = "Install libz-dev(el) to get compression support",
3832 },
3833 {
3834 .name = "log_store_compressed",
3835 .lname = "Log store compressed",
3836 .type = FIO_OPT_UNSUPPORTED,
3837 .help = "Install libz-dev(el) to get compression support",
3838 },
3839 #endif
3840 {
3841 .name = "log_unix_epoch",
3842 .lname = "Log epoch unix",
3843 .type = FIO_OPT_BOOL,
3844 .off1 = offsetof(struct thread_options, log_unix_epoch),
3845 .help = "Use Unix time in log files",
3846 .category = FIO_OPT_C_LOG,
3847 .group = FIO_OPT_G_INVALID,
3848 },
3849 {
3850 .name = "block_error_percentiles",
3851 .lname = "Block error percentiles",
3852 .type = FIO_OPT_BOOL,
3853 .off1 = offsetof(struct thread_options, block_error_hist),
3854 .help = "Record trim block errors and make a histogram",
3855 .def = "0",
3856 .category = FIO_OPT_C_LOG,
3857 .group = FIO_OPT_G_INVALID,
3858 },
3859 {
3860 .name = "bwavgtime",
3861 .lname = "Bandwidth average time",
3862 .type = FIO_OPT_INT,
3863 .off1 = offsetof(struct thread_options, bw_avg_time),
3864 .help = "Time window over which to calculate bandwidth"
3865 " (msec)",
3866 .def = "500",
3867 .parent = "write_bw_log",
3868 .hide = 1,
3869 .interval = 100,
3870 .category = FIO_OPT_C_LOG,
3871 .group = FIO_OPT_G_INVALID,
3872 },
3873 {
3874 .name = "iopsavgtime",
3875 .lname = "IOPS average time",
3876 .type = FIO_OPT_INT,
3877 .off1 = offsetof(struct thread_options, iops_avg_time),
3878 .help = "Time window over which to calculate IOPS (msec)",
3879 .def = "500",
3880 .parent = "write_iops_log",
3881 .hide = 1,
3882 .interval = 100,
3883 .category = FIO_OPT_C_LOG,
3884 .group = FIO_OPT_G_INVALID,
3885 },
3886 {
3887 .name = "group_reporting",
3888 .lname = "Group reporting",
3889 .type = FIO_OPT_STR_SET,
3890 .off1 = offsetof(struct thread_options, group_reporting),
3891 .help = "Do reporting on a per-group basis",
3892 .category = FIO_OPT_C_STAT,
3893 .group = FIO_OPT_G_INVALID,
3894 },
3895 {
3896 .name = "stats",
3897 .lname = "Stats",
3898 .type = FIO_OPT_BOOL,
3899 .off1 = offsetof(struct thread_options, stats),
3900 .help = "Enable collection of stats",
3901 .def = "1",
3902 .category = FIO_OPT_C_STAT,
3903 .group = FIO_OPT_G_INVALID,
3904 },
3905 {
3906 .name = "zero_buffers",
3907 .lname = "Zero I/O buffers",
3908 .type = FIO_OPT_STR_SET,
3909 .off1 = offsetof(struct thread_options, zero_buffers),
3910 .help = "Init IO buffers to all zeroes",
3911 .category = FIO_OPT_C_IO,
3912 .group = FIO_OPT_G_IO_BUF,
3913 },
3914 {
3915 .name = "refill_buffers",
3916 .lname = "Refill I/O buffers",
3917 .type = FIO_OPT_STR_SET,
3918 .off1 = offsetof(struct thread_options, refill_buffers),
3919 .help = "Refill IO buffers on every IO submit",
3920 .category = FIO_OPT_C_IO,
3921 .group = FIO_OPT_G_IO_BUF,
3922 },
3923 {
3924 .name = "scramble_buffers",
3925 .lname = "Scramble I/O buffers",
3926 .type = FIO_OPT_BOOL,
3927 .off1 = offsetof(struct thread_options, scramble_buffers),
3928 .help = "Slightly scramble buffers on every IO submit",
3929 .def = "1",
3930 .category = FIO_OPT_C_IO,
3931 .group = FIO_OPT_G_IO_BUF,
3932 },
3933 {
3934 .name = "buffer_pattern",
3935 .lname = "Buffer pattern",
3936 .type = FIO_OPT_STR,
3937 .cb = str_buffer_pattern_cb,
3938 .off1 = offsetof(struct thread_options, buffer_pattern),
3939 .help = "Fill pattern for IO buffers",
3940 .category = FIO_OPT_C_IO,
3941 .group = FIO_OPT_G_IO_BUF,
3942 },
3943 {
3944 .name = "buffer_compress_percentage",
3945 .lname = "Buffer compression percentage",
3946 .type = FIO_OPT_INT,
3947 .cb = str_buffer_compress_cb,
3948 .off1 = offsetof(struct thread_options, compress_percentage),
3949 .maxval = 100,
3950 .minval = 0,
3951 .help = "How compressible the buffer is (approximately)",
3952 .interval = 5,
3953 .category = FIO_OPT_C_IO,
3954 .group = FIO_OPT_G_IO_BUF,
3955 },
3956 {
3957 .name = "buffer_compress_chunk",
3958 .lname = "Buffer compression chunk size",
3959 .type = FIO_OPT_INT,
3960 .off1 = offsetof(struct thread_options, compress_chunk),
3961 .parent = "buffer_compress_percentage",
3962 .hide = 1,
3963 .help = "Size of compressible region in buffer",
3964 .interval = 256,
3965 .category = FIO_OPT_C_IO,
3966 .group = FIO_OPT_G_IO_BUF,
3967 },
3968 {
3969 .name = "dedupe_percentage",
3970 .lname = "Dedupe percentage",
3971 .type = FIO_OPT_INT,
3972 .cb = str_dedupe_cb,
3973 .off1 = offsetof(struct thread_options, dedupe_percentage),
3974 .maxval = 100,
3975 .minval = 0,
3976 .help = "Percentage of buffers that are dedupable",
3977 .interval = 1,
3978 .category = FIO_OPT_C_IO,
3979 .group = FIO_OPT_G_IO_BUF,
3980 },
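/*
 * Illustrative job-file snippet controlling IO buffer contents: refill
 * buffers on every submit and make them roughly 50% compressible with 30%
 * dedupable blocks. Percentages and chunk size are arbitrary examples.
 *
 *	[buffer-content]
 *	refill_buffers
 *	buffer_compress_percentage=50
 *	buffer_compress_chunk=4k
 *	dedupe_percentage=30
 */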
3981 {
3982 .name = "clat_percentiles",
3983 .lname = "Completion latency percentiles",
3984 .type = FIO_OPT_BOOL,
3985 .off1 = offsetof(struct thread_options, clat_percentiles),
3986 .help = "Enable the reporting of completion latency percentiles",
3987 .def = "1",
3988 .category = FIO_OPT_C_STAT,
3989 .group = FIO_OPT_G_INVALID,
3990 },
3991 {
3992 .name = "percentile_list",
3993 .lname = "Percentile list",
3994 .type = FIO_OPT_FLOAT_LIST,
3995 .off1 = offsetof(struct thread_options, percentile_list),
3996 .off2 = offsetof(struct thread_options, percentile_precision),
3997 .help = "Specify a custom list of percentiles to report for "
3998 "completion latency and block errors",
3999 .def = "1:5:10:20:30:40:50:60:70:80:90:95:99:99.5:99.9:99.95:99.99",
4000 .maxlen = FIO_IO_U_LIST_MAX_LEN,
4001 .minfp = 0.0,
4002 .maxfp = 100.0,
4003 .category = FIO_OPT_C_STAT,
4004 .group = FIO_OPT_G_INVALID,
4005 },
4006
4007 #ifdef FIO_HAVE_DISK_UTIL
4008 {
4009 .name = "disk_util",
4010 .lname = "Disk utilization",
4011 .type = FIO_OPT_BOOL,
4012 .off1 = offsetof(struct thread_options, do_disk_util),
4013 .help = "Log disk utilization statistics",
4014 .def = "1",
4015 .category = FIO_OPT_C_STAT,
4016 .group = FIO_OPT_G_INVALID,
4017 },
4018 #else
4019 {
4020 .name = "disk_util",
4021 .lname = "Disk utilization",
4022 .type = FIO_OPT_UNSUPPORTED,
4023 .help = "Your platform does not support disk utilization",
4024 },
4025 #endif
4026 {
4027 .name = "gtod_reduce",
4028 .lname = "Reduce gettimeofday() calls",
4029 .type = FIO_OPT_BOOL,
4030 .help = "Greatly reduce number of gettimeofday() calls",
4031 .cb = str_gtod_reduce_cb,
4032 .def = "0",
4033 .hide_on_set = 1,
4034 .category = FIO_OPT_C_STAT,
4035 .group = FIO_OPT_G_INVALID,
4036 },
4037 {
4038 .name = "disable_lat",
4039 .lname = "Disable all latency stats",
4040 .type = FIO_OPT_BOOL,
4041 .off1 = offsetof(struct thread_options, disable_lat),
4042 .help = "Disable latency numbers",
4043 .parent = "gtod_reduce",
4044 .hide = 1,
4045 .def = "0",
4046 .category = FIO_OPT_C_STAT,
4047 .group = FIO_OPT_G_INVALID,
4048 },
4049 {
4050 .name = "disable_clat",
4051 .lname = "Disable completion latency stats",
4052 .type = FIO_OPT_BOOL,
4053 .off1 = offsetof(struct thread_options, disable_clat),
4054 .help = "Disable completion latency numbers",
4055 .parent = "gtod_reduce",
4056 .hide = 1,
4057 .def = "0",
4058 .category = FIO_OPT_C_STAT,
4059 .group = FIO_OPT_G_INVALID,
4060 },
4061 {
4062 .name = "disable_slat",
4063 .lname = "Disable submission latency stats",
4064 .type = FIO_OPT_BOOL,
4065 .off1 = offsetof(struct thread_options, disable_slat),
4066 .help = "Disable submission latency numbers",
4067 .parent = "gtod_reduce",
4068 .hide = 1,
4069 .def = "0",
4070 .category = FIO_OPT_C_STAT,
4071 .group = FIO_OPT_G_INVALID,
4072 },
4073 {
4074 .name = "disable_bw_measurement",
4075 .alias = "disable_bw",
4076 .lname = "Disable bandwidth stats",
4077 .type = FIO_OPT_BOOL,
4078 .off1 = offsetof(struct thread_options, disable_bw),
4079 .help = "Disable bandwidth logging",
4080 .parent = "gtod_reduce",
4081 .hide = 1,
4082 .def = "0",
4083 .category = FIO_OPT_C_STAT,
4084 .group = FIO_OPT_G_INVALID,
4085 },
4086 {
4087 .name = "gtod_cpu",
4088 .lname = "Dedicated gettimeofday() CPU",
4089 .type = FIO_OPT_INT,
4090 .off1 = offsetof(struct thread_options, gtod_cpu),
4091 .help = "Set up dedicated gettimeofday() thread on this CPU",
4092 .verify = gtod_cpu_verify,
4093 .category = FIO_OPT_C_GENERAL,
4094 .group = FIO_OPT_G_CLOCK,
4095 },
4096 {
4097 .name = "unified_rw_reporting",
4098 .lname = "Unified RW Reporting",
4099 .type = FIO_OPT_BOOL,
4100 .off1 = offsetof(struct thread_options, unified_rw_rep),
4101 .help = "Unify reporting across data direction",
4102 .def = "0",
4103 .category = FIO_OPT_C_GENERAL,
4104 .group = FIO_OPT_G_INVALID,
4105 },
4106 {
4107 .name = "continue_on_error",
4108 .lname = "Continue on error",
4109 .type = FIO_OPT_STR,
4110 .off1 = offsetof(struct thread_options, continue_on_error),
4111 .help = "Continue on non-fatal errors during IO",
4112 .def = "none",
4113 .category = FIO_OPT_C_GENERAL,
4114 .group = FIO_OPT_G_ERR,
4115 .posval = {
4116 { .ival = "none",
4117 .oval = ERROR_TYPE_NONE,
4118 .help = "Exit when an error is encountered",
4119 },
4120 { .ival = "read",
4121 .oval = ERROR_TYPE_READ,
4122 .help = "Continue on read errors only",
4123 },
4124 { .ival = "write",
4125 .oval = ERROR_TYPE_WRITE,
4126 .help = "Continue on write errors only",
4127 },
4128 { .ival = "io",
4129 .oval = ERROR_TYPE_READ | ERROR_TYPE_WRITE,
4130 .help = "Continue on any IO errors",
4131 },
4132 { .ival = "verify",
4133 .oval = ERROR_TYPE_VERIFY,
4134 .help = "Continue on verify errors only",
4135 },
4136 { .ival = "all",
4137 .oval = ERROR_TYPE_ANY,
4138 .help = "Continue on all io and verify errors",
4139 },
4140 { .ival = "0",
4141 .oval = ERROR_TYPE_NONE,
4142 .help = "Alias for 'none'",
4143 },
4144 { .ival = "1",
4145 .oval = ERROR_TYPE_ANY,
4146 .help = "Alias for 'all'",
4147 },
4148 },
4149 },
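	/*
	 * Illustrative job file usage (not part of the option table):
	 * "continue_on_error=io" keeps a job running across read and
	 * write errors, while verify errors still abort it.
	 */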
4150 {
4151 .name = "ignore_error",
4152 .lname = "Ignore Error",
4153 .type = FIO_OPT_STR,
4154 .cb = str_ignore_error_cb,
4155 .off1 = offsetof(struct thread_options, ignore_error_nr),
4156 .help = "Set a specific list of errors to ignore",
4157 .parent = "rw",
4158 .category = FIO_OPT_C_GENERAL,
4159 .group = FIO_OPT_G_ERR,
4160 },
4161 {
4162 .name = "error_dump",
4163 .lname = "Error Dump",
4164 .type = FIO_OPT_BOOL,
4165 .off1 = offsetof(struct thread_options, error_dump),
4166 .def = "0",
4167 .help = "Dump info on each error",
4168 .category = FIO_OPT_C_GENERAL,
4169 .group = FIO_OPT_G_ERR,
4170 },
4171 {
4172 .name = "profile",
4173 .lname = "Profile",
4174 .type = FIO_OPT_STR_STORE,
4175 .off1 = offsetof(struct thread_options, profile),
4176 .help = "Select a specific builtin performance test",
4177 .category = FIO_OPT_C_PROFILE,
4178 .group = FIO_OPT_G_INVALID,
4179 },
4180 {
4181 .name = "cgroup",
4182 .lname = "Cgroup",
4183 .type = FIO_OPT_STR_STORE,
4184 .off1 = offsetof(struct thread_options, cgroup),
4185 .help = "Add job to cgroup of this name",
4186 .category = FIO_OPT_C_GENERAL,
4187 .group = FIO_OPT_G_CGROUP,
4188 },
4189 {
4190 .name = "cgroup_nodelete",
4191 .lname = "Cgroup no-delete",
4192 .type = FIO_OPT_BOOL,
4193 .off1 = offsetof(struct thread_options, cgroup_nodelete),
4194 .help = "Do not delete cgroups after job completion",
4195 .def = "0",
4196 .parent = "cgroup",
4197 .category = FIO_OPT_C_GENERAL,
4198 .group = FIO_OPT_G_CGROUP,
4199 },
4200 {
4201 .name = "cgroup_weight",
4202 .lname = "Cgroup weight",
4203 .type = FIO_OPT_INT,
4204 .off1 = offsetof(struct thread_options, cgroup_weight),
4205 .help = "Use given weight for cgroup",
4206 .minval = 100,
4207 .maxval = 1000,
4208 .parent = "cgroup",
4209 .category = FIO_OPT_C_GENERAL,
4210 .group = FIO_OPT_G_CGROUP,
4211 },
4212 {
4213 .name = "uid",
4214 .lname = "User ID",
4215 .type = FIO_OPT_INT,
4216 .off1 = offsetof(struct thread_options, uid),
4217 .help = "Run job with this user ID",
4218 .category = FIO_OPT_C_GENERAL,
4219 .group = FIO_OPT_G_CRED,
4220 },
4221 {
4222 .name = "gid",
4223 .lname = "Group ID",
4224 .type = FIO_OPT_INT,
4225 .off1 = offsetof(struct thread_options, gid),
4226 .help = "Run job with this group ID",
4227 .category = FIO_OPT_C_GENERAL,
4228 .group = FIO_OPT_G_CRED,
4229 },
4230 {
4231 .name = "kb_base",
4232 .lname = "KB Base",
4233 .type = FIO_OPT_INT,
4234 .off1 = offsetof(struct thread_options, kb_base),
4235 .prio = 1,
4236 .def = "1024",
4237 .posval = {
4238 { .ival = "1024",
4239 .oval = 1024,
4240 .help = "Inputs invert IEC and SI prefixes (for compatibility); outputs prefer binary",
4241 },
4242 { .ival = "1000",
4243 .oval = 1000,
4244 .help = "Inputs use IEC and SI prefixes; outputs prefer SI",
4245 },
4246 },
4247 .help = "Unit prefix interpretation for quantities of data (IEC and SI)",
4248 .category = FIO_OPT_C_GENERAL,
4249 .group = FIO_OPT_G_INVALID,
4250 },
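	/*
	 * Illustrative job file usage: the default "kb_base=1024" makes
	 * reports prefer binary prefixes (KiB, MiB), while "kb_base=1000"
	 * makes them prefer SI prefixes (kB, MB).
	 */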
4251 {
4252 .name = "unit_base",
4253 .lname = "Unit for quantities of data (Bits or Bytes)",
4254 .type = FIO_OPT_INT,
4255 .off1 = offsetof(struct thread_options, unit_base),
4256 .prio = 1,
4257 .posval = {
4258 { .ival = "0",
4259 .oval = 0,
4260 .help = "Auto-detect",
4261 },
4262 { .ival = "8",
4263 .oval = 8,
4264 .help = "Normal (byte based)",
4265 },
4266 { .ival = "1",
4267 .oval = 1,
4268 .help = "Bit based",
4269 },
4270 },
4271 .help = "Bit multiple of result summary data (8 for byte, 1 for bit)",
4272 .category = FIO_OPT_C_GENERAL,
4273 .group = FIO_OPT_G_INVALID,
4274 },
4275 {
4276 .name = "hugepage-size",
4277 .lname = "Hugepage size",
4278 .type = FIO_OPT_INT,
4279 .off1 = offsetof(struct thread_options, hugepage_size),
4280 .help = "When using hugepages, specify size of each page",
4281 .def = __fio_stringify(FIO_HUGE_PAGE),
4282 .interval = 1024 * 1024,
4283 .category = FIO_OPT_C_GENERAL,
4284 .group = FIO_OPT_G_INVALID,
4285 },
4286 {
4287 .name = "flow_id",
4288 .lname = "I/O flow ID",
4289 .type = FIO_OPT_INT,
4290 .off1 = offsetof(struct thread_options, flow_id),
4291 .help = "The flow index ID to use",
4292 .def = "0",
4293 .category = FIO_OPT_C_IO,
4294 .group = FIO_OPT_G_IO_FLOW,
4295 },
4296 {
4297 .name = "flow",
4298 .lname = "I/O flow weight",
4299 .type = FIO_OPT_INT,
4300 .off1 = offsetof(struct thread_options, flow),
4301 .help = "Weight for flow control of this job",
4302 .parent = "flow_id",
4303 .hide = 1,
4304 .def = "0",
4305 .category = FIO_OPT_C_IO,
4306 .group = FIO_OPT_G_IO_FLOW,
4307 },
4308 {
4309 .name = "flow_watermark",
4310 .lname = "I/O flow watermark",
4311 .type = FIO_OPT_INT,
4312 .off1 = offsetof(struct thread_options, flow_watermark),
4313 .help = "High watermark for flow control. This option"
4314 " should be set to the same value for all threads"
4315 " with non-zero flow.",
4316 .parent = "flow_id",
4317 .hide = 1,
4318 .def = "1024",
4319 .category = FIO_OPT_C_IO,
4320 .group = FIO_OPT_G_IO_FLOW,
4321 },
4322 {
4323 .name = "flow_sleep",
4324 .lname = "I/O flow sleep",
4325 .type = FIO_OPT_INT,
4326 .off1 = offsetof(struct thread_options, flow_sleep),
4327 .help = "How many microseconds to sleep after being held"
4328 " back by the flow control mechanism",
4329 .parent = "flow_id",
4330 .hide = 1,
4331 .def = "0",
4332 .category = FIO_OPT_C_IO,
4333 .group = FIO_OPT_G_IO_FLOW,
4334 },
4335 {
4336 .name = "skip_bad",
4337 .lname = "Skip operations against bad blocks",
4338 .type = FIO_OPT_BOOL,
4339 .off1 = offsetof(struct thread_options, skip_bad),
4340 .help = "Skip operations against known bad blocks.",
4341 .hide = 1,
4342 .def = "0",
4343 .category = FIO_OPT_C_IO,
4344 .group = FIO_OPT_G_MTD,
4345 },
4346 {
4347 .name = "steadystate",
4348 .lname = "Steady state threshold",
4349 .alias = "ss",
4350 .type = FIO_OPT_STR,
4351 .off1 = offsetof(struct thread_options, ss_state),
4352 .cb = str_steadystate_cb,
4353 .help = "Define the criterion and limit to judge when a job has reached steady state",
4354 .def = "iops_slope:0.01%",
4355 .posval = {
4356 { .ival = "iops",
4357 .oval = FIO_SS_IOPS,
4358 .help = "maximum mean deviation of IOPS measurements",
4359 },
4360 { .ival = "iops_slope",
4361 .oval = FIO_SS_IOPS_SLOPE,
4362 .help = "slope calculated from IOPS measurements",
4363 },
4364 { .ival = "bw",
4365 .oval = FIO_SS_BW,
4366 .help = "maximum mean deviation of bandwidth measurements",
4367 },
4368 {
4369 .ival = "bw_slope",
4370 .oval = FIO_SS_BW_SLOPE,
4371 .help = "slope calculated from bandwidth measurements",
4372 },
4373 },
4374 .category = FIO_OPT_C_GENERAL,
4375 .group = FIO_OPT_G_RUNTIME,
4376 },
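	/*
	 * Illustrative job file usage: "steadystate=bw_slope:0.5%" combined
	 * with "steadystate_duration=60" (below) stops the job once the
	 * bandwidth slope over a 60 second window stays below 0.5% of the
	 * mean bandwidth.
	 */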
4377 {
4378 .name = "steadystate_duration",
4379 .lname = "Steady state duration",
4380 .alias = "ss_dur",
4381 .parent = "steadystate",
4382 .type = FIO_OPT_STR_VAL_TIME,
4383 .off1 = offsetof(struct thread_options, ss_dur),
4384 .help = "Stop workload upon attaining steady state for specified duration",
4385 .def = "0",
4386 .is_seconds = 1,
4387 .is_time = 1,
4388 .category = FIO_OPT_C_GENERAL,
4389 .group = FIO_OPT_G_RUNTIME,
4390 },
4391 {
4392 .name = "steadystate_ramp_time",
4393 .lname = "Steady state ramp time",
4394 .alias = "ss_ramp",
4395 .parent = "steadystate",
4396 .type = FIO_OPT_STR_VAL_TIME,
4397 .off1 = offsetof(struct thread_options, ss_ramp_time),
4398 .help = "Delay before data collection begins for steady state detection",
4399 .def = "0",
4400 .is_seconds = 1,
4401 .is_time = 1,
4402 .category = FIO_OPT_C_GENERAL,
4403 .group = FIO_OPT_G_RUNTIME,
4404 },
4405 {
4406 .name = NULL,
4407 },
4408 };
4409
4410 static void add_to_lopt(struct option *lopt, struct fio_option *o,
4411 const char *name, int val)
4412 {
4413 lopt->name = (char *) name;
4414 lopt->val = val;
4415 if (o->type == FIO_OPT_STR_SET)
4416 lopt->has_arg = optional_argument;
4417 else
4418 lopt->has_arg = required_argument;
4419 }
4420
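/*
 * Append getopt_long() entries for every option in 'opts', starting at
 * index i. An option with an alias gets two entries: for example,
 * "disable_bw_measurement" and its alias "disable_bw" above both map
 * to the same fio option and carry the same option_type value.
 */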
4421 static void options_to_lopts(struct fio_option *opts,
4422 struct option *long_options,
4423 int i, int option_type)
4424 {
4425 struct fio_option *o = &opts[0];
4426 while (o->name) {
4427 add_to_lopt(&long_options[i], o, o->name, option_type);
4428 if (o->alias) {
4429 i++;
4430 add_to_lopt(&long_options[i], o, o->alias, option_type);
4431 }
4432
4433 i++;
4434 o++;
4435 assert(i < FIO_NR_OPTIONS);
4436 }
4437 }
4438
4439 void fio_options_set_ioengine_opts(struct option *long_options,
4440 struct thread_data *td)
4441 {
4442 unsigned int i;
4443
4444 i = 0;
4445 while (long_options[i].name) {
4446 if (long_options[i].val == FIO_GETOPT_IOENGINE) {
4447 memset(&long_options[i], 0, sizeof(*long_options));
4448 break;
4449 }
4450 i++;
4451 }
4452
4453 /*
4454 * If the thread has no ioengine-specific options, clearing out the prior ones above is all we needed to do.
4455 */
4456 if (!td || !td->eo)
4457 return;
4458
4459 options_to_lopts(td->io_ops->options, long_options, i,
4460 FIO_GETOPT_IOENGINE);
4461 }
4462
4463 void fio_options_dup_and_init(struct option *long_options)
4464 {
4465 unsigned int i;
4466
4467 options_init(fio_options);
4468
4469 i = 0;
4470 while (long_options[i].name)
4471 i++;
4472
4473 options_to_lopts(fio_options, long_options, i, FIO_GETOPT_JOB);
4474 }
4475
4476 struct fio_keyword {
4477 const char *word;
4478 const char *desc;
4479 char *replace;
4480 };
4481
4482 static struct fio_keyword fio_keywords[] = {
4483 {
4484 .word = "$pagesize",
4485 .desc = "Page size in the system",
4486 },
4487 {
4488 .word = "$mb_memory",
4489 .desc = "Megabytes of memory online",
4490 },
4491 {
4492 .word = "$ncpus",
4493 .desc = "Number of CPUs online in the system",
4494 },
4495 {
4496 .word = NULL,
4497 },
4498 };
4499
4500 void fio_keywords_exit(void)
4501 {
4502 struct fio_keyword *kw;
4503
4504 kw = &fio_keywords[0];
4505 while (kw->word) {
4506 free(kw->replace);
4507 kw->replace = NULL;
4508 kw++;
4509 }
4510 }
4511
4512 void fio_keywords_init(void)
4513 {
4514 unsigned long long mb_memory;
4515 char buf[128];
4516 long l;
4517
4518 sprintf(buf, "%lu", (unsigned long) page_size);
4519 fio_keywords[0].replace = strdup(buf);
4520
4521 mb_memory = os_phys_mem() / (1024 * 1024);
4522 sprintf(buf, "%llu", mb_memory);
4523 fio_keywords[1].replace = strdup(buf);
4524
4525 l = cpus_online();
4526 sprintf(buf, "%ld", l);
4527 fio_keywords[2].replace = strdup(buf);
4528 }
4529
4530 #define BC_APP "bc"
4531
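/*
 * Illustrative example: for "size=4*1024*1024", the part after '=' is
 * piped through bc(1) and the returned copy is "size=4194304".
 * Requires bc to be installed; strings without +, -, * or / are
 * returned unchanged.
 */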
4532 static char *bc_calc(char *str)
4533 {
4534 char buf[128], *tmp;
4535 FILE *f;
4536 int ret;
4537
4538 /*
4539 * No math, just return string
4540 */
4541 if ((!strchr(str, '+') && !strchr(str, '-') && !strchr(str, '*') &&
4542 !strchr(str, '/')) || strchr(str, '\''))
4543 return str;
4544
4545 /*
4546 * Split option from value, we only need to calculate the value
4547 */
4548 tmp = strchr(str, '=');
4549 if (!tmp)
4550 return str;
4551
4552 tmp++;
4553
4554 /*
4555 * Prevent buffer overflows; option strings that long aren't reasonable anyway.
4556 */
4557 if (strlen(str) >= 128 || strlen(tmp) > 100)
4558 return str;
4559
4560 sprintf(buf, "which %s > /dev/null", BC_APP);
4561 if (system(buf)) {
4562 log_err("fio: bc is needed for performing math\n");
4563 return NULL;
4564 }
4565
4566 sprintf(buf, "echo '%s' | %s", tmp, BC_APP);
4567 f = popen(buf, "r");
4568 if (!f)
4569 return NULL;
4570
4571 ret = fread(&buf[tmp - str], 1, 128 - (tmp - str), f);
4572 if (ret <= 0) {
4573 pclose(f);
4574 return NULL;
4575 }
4576
4577 pclose(f);
4578 buf[(tmp - str) + ret - 1] = '\0';
4579 memcpy(buf, str, tmp - str);
4580 free(str);
4581 return strdup(buf);
4582 }
4583
4584 /*
4585 * Return a copy of the input string with substrings of the form ${VARNAME}
4586 * substituted with the value of the environment variable VARNAME. The
4587 * substitution always occurs, even if VARNAME is empty or the corresponding
4588 * environment variable undefined.
4589 */
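/*
 * Illustrative example (DISK is a hypothetical environment variable):
 * with DISK=/dev/sdb, "filename=${DISK}" expands to
 * "filename=/dev/sdb"; with DISK unset, it expands to "filename=".
 */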
4590 static char *option_dup_subs(const char *opt)
4591 {
4592 char out[OPT_LEN_MAX+1];
4593 char in[OPT_LEN_MAX+1];
4594 char *outptr = out;
4595 char *inptr = in;
4596 char *ch1, *ch2, *env;
4597 ssize_t nchr = OPT_LEN_MAX;
4598 size_t envlen;
4599
4600 if (strlen(opt) + 1 > OPT_LEN_MAX) {
4601 log_err("OPT_LEN_MAX (%d) is too small\n", OPT_LEN_MAX);
4602 return NULL;
4603 }
4604
4605 in[OPT_LEN_MAX] = '\0';
4606 strncpy(in, opt, OPT_LEN_MAX);
4607
4608 while (*inptr && nchr > 0) {
4609 if (inptr[0] == '$' && inptr[1] == '{') {
4610 ch2 = strchr(inptr, '}');
4611 if (ch2 && inptr+1 < ch2) {
4612 ch1 = inptr+2;
4613 inptr = ch2+1;
4614 *ch2 = '\0';
4615
4616 env = getenv(ch1);
4617 if (env) {
4618 envlen = strlen(env);
4619 if (envlen <= nchr) {
4620 memcpy(outptr, env, envlen);
4621 outptr += envlen;
4622 nchr -= envlen;
4623 }
4624 }
4625
4626 continue;
4627 }
4628 }
4629
4630 *outptr++ = *inptr++;
4631 --nchr;
4632 }
4633
4634 *outptr = '\0';
4635 return strdup(out);
4636 }
4637
4638 /*
4639 * Look for reserved variable names and replace them with real values
4640 */
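/*
 * Illustrative example (the value depends on the host): on a machine
 * with 8 CPUs online, "numjobs=$ncpus" becomes "numjobs=8". Since no
 * arithmetic operators remain, the subsequent bc_calc() call returns
 * the string unchanged.
 */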
4641 static char *fio_keyword_replace(char *opt)
4642 {
4643 char *s;
4644 int i;
4645 int docalc = 0;
4646
4647 for (i = 0; fio_keywords[i].word != NULL; i++) {
4648 struct fio_keyword *kw = &fio_keywords[i];
4649
4650 while ((s = strstr(opt, kw->word)) != NULL) {
4651 char *new = malloc(strlen(opt) + strlen(kw->replace) + 1);
4652 char *o_org = opt;
4653 int olen = s - opt;
4654 int len;
4655
4656 /*
4657 * Copy part of the string before the keyword and
4658 * sprintf() the replacement after it.
4659 */
4660 memcpy(new, opt, olen);
4661 len = sprintf(new + olen, "%s", kw->replace);
4662
4663 /*
4664 * If there's more in the original string, copy that
4665 * in too
4666 */
4667 opt += strlen(kw->word) + olen;
4668 if (strlen(opt))
4669 memcpy(new + olen + len, opt, strlen(opt) + 1);
4670
4671 /*
4672 * replace opt and free the old opt
4673 */
4674 opt = new;
4675 free(o_org);
4676
4677 docalc = 1;
4678 }
4679 }
4680
4681 /*
4682 * Check for potential math and invoke bc, if possible
4683 */
4684 if (docalc)
4685 opt = bc_calc(opt);
4686
4687 return opt;
4688 }
4689
4690 static char **dup_and_sub_options(char **opts, int num_opts)
4691 {
4692 int i;
4693 char **opts_copy = malloc(num_opts * sizeof(*opts));
4694 for (i = 0; i < num_opts; i++) {
4695 opts_copy[i] = option_dup_subs(opts[i]);
4696 if (!opts_copy[i])
4697 continue;
4698 opts_copy[i] = fio_keyword_replace(opts_copy[i]);
4699 }
4700 return opts_copy;
4701 }
4702
4703 static void show_closest_option(const char *opt)
4704 {
4705 int best_option, best_distance;
4706 int i, distance;
4707 char *name;
4708
4709 if (!strlen(opt))
4710 return;
4711
4712 name = strdup(opt);
4713 i = 0;
4714 while (name[i] != '\0' && name[i] != '=')
4715 i++;
4716 name[i] = '\0';
4717
4718 best_option = -1;
4719 best_distance = INT_MAX;
4720 i = 0;
4721 while (fio_options[i].name) {
4722 distance = string_distance(name, fio_options[i].name);
4723 if (distance < best_distance) {
4724 best_distance = distance;
4725 best_option = i;
4726 }
4727 i++;
4728 }
4729
4730 if (best_option != -1 && string_distance_ok(name, best_distance) &&
4731 fio_options[best_option].type != FIO_OPT_UNSUPPORTED)
4732 log_err("Did you mean %s?\n", fio_options[best_option].name);
4733
4734 free(name);
4735 }
4736
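/*
 * Parse job options in two passes: first against the core option
 * table, then - after loading the ioengine - leftover options are
 * retried against the engine's private option table. Illustrative
 * example: "iodepth=16" resolves in the first pass, while an
 * engine-private option such as libaio's "userspace_reap" only
 * resolves after the engine has been loaded.
 */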
4737 int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
4738 {
4739 int i, ret, unknown;
4740 char **opts_copy;
4741
4742 sort_options(opts, fio_options, num_opts);
4743 opts_copy = dup_and_sub_options(opts, num_opts);
4744
4745 for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
4746 struct fio_option *o;
4747 int newret = parse_option(opts_copy[i], opts[i], fio_options,
4748 &o, &td->o, &td->opt_list);
4749
4750 if (!newret && o)
4751 fio_option_mark_set(&td->o, o);
4752
4753 if (opts_copy[i]) {
4754 if (newret && !o) {
4755 unknown++;
4756 continue;
4757 }
4758 free(opts_copy[i]);
4759 opts_copy[i] = NULL;
4760 }
4761
4762 ret |= newret;
4763 }
4764
4765 if (unknown) {
4766 ret |= ioengine_load(td);
4767 if (td->eo) {
4768 sort_options(opts_copy, td->io_ops->options, num_opts);
4769 opts = opts_copy;
4770 }
4771 for (i = 0; i < num_opts; i++) {
4772 struct fio_option *o = NULL;
4773 int newret = 1;
4774
4775 if (!opts_copy[i])
4776 continue;
4777
4778 if (td->eo)
4779 newret = parse_option(opts_copy[i], opts[i],
4780 td->io_ops->options, &o,
4781 td->eo, &td->opt_list);
4782
4783 ret |= newret;
4784 if (!o) {
4785 log_err("Bad option <%s>\n", opts[i]);
4786 show_closest_option(opts[i]);
4787 }
4788 free(opts_copy[i]);
4789 opts_copy[i] = NULL;
4790 }
4791 }
4792
4793 free(opts_copy);
4794 return ret;
4795 }
4796
4797 int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
4798 {
4799 int ret;
4800
4801 ret = parse_cmd_option(opt, val, fio_options, &td->o, &td->opt_list);
4802 if (!ret) {
4803 struct fio_option *o;
4804
4805 o = find_option(fio_options, opt);
4806 if (o)
4807 fio_option_mark_set(&td->o, o);
4808 }
4809
4810 return ret;
4811 }
4812
4813 int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
4814 char *val)
4815 {
4816 return parse_cmd_option(opt, val, td->io_ops->options, td->eo,
4817 &td->opt_list);
4818 }
4819
4820 void fio_fill_default_options(struct thread_data *td)
4821 {
4822 td->o.magic = OPT_MAGIC;
4823 fill_default_options(&td->o, fio_options);
4824 }
4825
4826 int fio_show_option_help(const char *opt)
4827 {
4828 return show_cmd_help(fio_options, opt);
4829 }
4830
4831 /*
4832 * dupe FIO_OPT_STR_STORE options
4833 */
4834 void fio_options_mem_dupe(struct thread_data *td)
4835 {
4836 options_mem_dupe(fio_options, &td->o);
4837
4838 if (td->eo && td->io_ops) {
4839 void *oldeo = td->eo;
4840
4841 td->eo = malloc(td->io_ops->option_struct_size);
4842 memcpy(td->eo, oldeo, td->io_ops->option_struct_size);
4843 options_mem_dupe(td->io_ops->options, td->eo);
4844 }
4845 }
4846
4847 unsigned int fio_get_kb_base(void *data)
4848 {
4849 struct thread_data *td = cb_data_to_td(data);
4850 struct thread_options *o = &td->o;
4851 unsigned int kb_base = 0;
4852
4853 /*
4854 * This is a hack... For private options, *data is not holding
4855 * a pointer to the thread_options, but to private data. That means
4856 * we can't safely dereference it as thread_options, but since 'magic'
4857 * is the first member, reading it is still valid memory-wise. It also
4858 * means that if a job sets kb_base first and expects private options
4859 * to honor it, it will be disappointed; we return the global default
4860 * in that case.
4861 */
4862 if (o && o->magic == OPT_MAGIC)
4863 kb_base = o->kb_base;
4864 if (!kb_base)
4865 kb_base = 1024;
4866
4867 return kb_base;
4868 }
4869
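/*
 * Append a dynamically created option to the end of fio_options[].
 * Profiles use this (together with invalidate_profile_options() and
 * add_opt_posval() below) to register and later retire their own
 * options at runtime.
 */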
4870 int add_option(struct fio_option *o)
4871 {
4872 struct fio_option *__o;
4873 int opt_index = 0;
4874
4875 __o = fio_options;
4876 while (__o->name) {
4877 opt_index++;
4878 __o++;
4879 }
4880
4881 if (opt_index + 1 == FIO_MAX_OPTS) {
4882 log_err("fio: FIO_MAX_OPTS is too small\n");
4883 return 1;
4884 }
4885
4886 memcpy(&fio_options[opt_index], o, sizeof(*o));
4887 fio_options[opt_index + 1].name = NULL;
4888 return 0;
4889 }
4890
4891 void invalidate_profile_options(const char *prof_name)
4892 {
4893 struct fio_option *o;
4894
4895 o = fio_options;
4896 while (o->name) {
4897 if (o->prof_name && !strcmp(o->prof_name, prof_name)) {
4898 o->type = FIO_OPT_INVALID;
4899 o->prof_name = NULL;
4900 }
4901 o++;
4902 }
4903 }
4904
4905 void add_opt_posval(const char *optname, const char *ival, const char *help)
4906 {
4907 struct fio_option *o;
4908 unsigned int i;
4909
4910 o = find_option(fio_options, optname);
4911 if (!o)
4912 return;
4913
4914 for (i = 0; i < PARSE_MAX_VP; i++) {
4915 if (o->posval[i].ival)
4916 continue;
4917
4918 o->posval[i].ival = ival;
4919 o->posval[i].help = help;
4920 break;
4921 }
4922 }
4923
4924 void del_opt_posval(const char *optname, const char *ival)
4925 {
4926 struct fio_option *o;
4927 unsigned int i;
4928
4929 o = find_option(fio_options, optname);
4930 if (!o)
4931 return;
4932
4933 for (i = 0; i < PARSE_MAX_VP; i++) {
4934 if (!o->posval[i].ival)
4935 continue;
4936 if (strcmp(o->posval[i].ival, ival))
4937 continue;
4938
4939 o->posval[i].ival = NULL;
4940 o->posval[i].help = NULL;
4941 }
4942 }
4943
4944 void fio_options_free(struct thread_data *td)
4945 {
4946 options_free(fio_options, &td->o);
4947 if (td->eo && td->io_ops && td->io_ops->options) {
4948 options_free(td->io_ops->options, td->eo);
4949 free(td->eo);
4950 td->eo = NULL;
4951 }
4952 }
4953
4954 struct fio_option *fio_option_find(const char *name)
4955 {
4956 return find_option(fio_options, name);
4957 }
4958
4959 static struct fio_option *find_next_opt(struct thread_options *o,
4960 struct fio_option *from,
4961 unsigned int off1)
4962 {
4963 struct fio_option *opt;
4964
4965 if (!from)
4966 from = &fio_options[0];
4967 else
4968 from++;
4969
4970 opt = NULL;
4971 do {
4972 if (off1 == from->off1) {
4973 opt = from;
4974 break;
4975 }
4976 from++;
4977 } while (from->name);
4978
4979 return opt;
4980 }
4981
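/*
 * Option "set" state is tracked in o->set_options[], one bit per entry
 * of fio_options[]. Worked example: the option at index 70 lives in
 * word 70 / 64 = 1, bit 70 & 63 = 6 of that array.
 */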
4982 static int opt_is_set(struct thread_options *o, struct fio_option *opt)
4983 {
4984 unsigned int opt_off, index, offset;
4985
4986 opt_off = opt - &fio_options[0];
4987 index = opt_off / (8 * sizeof(uint64_t));
4988 offset = opt_off & ((8 * sizeof(uint64_t)) - 1);
4989 return (o->set_options[index] & ((uint64_t)1 << offset)) != 0;
4990 }
4991
4992 bool __fio_option_is_set(struct thread_options *o, unsigned int off1)
4993 {
4994 struct fio_option *opt, *next;
4995
4996 next = NULL;
4997 while ((opt = find_next_opt(o, next, off1)) != NULL) {
4998 if (opt_is_set(o, opt))
4999 return true;
5000
5001 next = opt;
5002 }
5003
5004 return false;
5005 }
5006
5007 void fio_option_mark_set(struct thread_options *o, struct fio_option *opt)
5008 {
5009 unsigned int opt_off, index, offset;
5010
5011 opt_off = opt - &fio_options[0];
5012 index = opt_off / (8 * sizeof(uint64_t));
5013 offset = opt_off & ((8 * sizeof(uint64_t)) - 1);
5014 o->set_options[index] |= (uint64_t)1 << offset;
5015 }
5016