Lines Matching refs:td

139 static bool __check_min_rate(struct thread_data *td, struct timeval *now,  in __check_min_rate()  argument
152 if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir]) in __check_min_rate()
158 if (mtime_since(&td->start, now) < 2000) in __check_min_rate()
161 iops += td->this_io_blocks[ddir]; in __check_min_rate()
162 bytes += td->this_io_bytes[ddir]; in __check_min_rate()
163 ratemin += td->o.ratemin[ddir]; in __check_min_rate()
164 rate_iops += td->o.rate_iops[ddir]; in __check_min_rate()
165 rate_iops_min += td->o.rate_iops_min[ddir]; in __check_min_rate()
170 if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) { in __check_min_rate()
171 spent = mtime_since(&td->lastrate[ddir], now); in __check_min_rate()
172 if (spent < td->o.ratecycle) in __check_min_rate()
175 if (td->o.rate[ddir] || td->o.ratemin[ddir]) { in __check_min_rate()
179 if (bytes < td->rate_bytes[ddir]) { in __check_min_rate()
181 td->o.name, ratemin, bytes); in __check_min_rate()
185 rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent; in __check_min_rate()
190 bytes < td->rate_bytes[ddir]) { in __check_min_rate()
192 td->o.name, ratemin, rate); in __check_min_rate()
202 td->o.name, rate_iops, iops); in __check_min_rate()
206 rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent; in __check_min_rate()
211 iops < td->rate_blocks[ddir]) { in __check_min_rate()
213 td->o.name, rate_iops_min, rate); in __check_min_rate()
220 td->rate_bytes[ddir] = bytes; in __check_min_rate()
221 td->rate_blocks[ddir] = iops; in __check_min_rate()
222 memcpy(&td->lastrate[ddir], now, sizeof(*now)); in __check_min_rate()
226 static bool check_min_rate(struct thread_data *td, struct timeval *now) in check_min_rate() argument
230 if (td->bytes_done[DDIR_READ]) in check_min_rate()
231 ret |= __check_min_rate(td, now, DDIR_READ); in check_min_rate()
232 if (td->bytes_done[DDIR_WRITE]) in check_min_rate()
233 ret |= __check_min_rate(td, now, DDIR_WRITE); in check_min_rate()
234 if (td->bytes_done[DDIR_TRIM]) in check_min_rate()
235 ret |= __check_min_rate(td, now, DDIR_TRIM); in check_min_rate()
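Taken together, the __check_min_rate() fragments above implement a windowed throughput check: take the bytes moved since the last sample, scale by the elapsed milliseconds to get a per-second rate, and flag the job if it fell under ratemin. A minimal standalone sketch of that arithmetic follows; the function and parameter names are illustrative, not fio's API, and the byte/millisecond units are read off the fragment itself.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: did the achieved rate over the last window fall
     * below the configured minimum?  'bytes' and 'prev_bytes' are cumulative
     * byte counters, 'spent_ms' is the time since the previous sample. */
    static bool below_min_rate(uint64_t bytes, uint64_t prev_bytes,
                               uint64_t spent_ms, uint64_t ratemin)
    {
            uint64_t rate;

            if (!spent_ms)
                    return false;   /* window too short to judge */

            /* same scaling as the fragment: (delta bytes * 1000) / msec */
            rate = ((bytes - prev_bytes) * 1000) / spent_ms;
            return rate < ratemin;
    }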
244 static void cleanup_pending_aio(struct thread_data *td) in cleanup_pending_aio() argument
251 r = io_u_queued_complete(td, 0); in cleanup_pending_aio()
258 if (td->io_ops->cancel) { in cleanup_pending_aio()
262 io_u_qiter(&td->io_u_all, io_u, i) { in cleanup_pending_aio()
264 r = td->io_ops->cancel(td, io_u); in cleanup_pending_aio()
266 put_io_u(td, io_u); in cleanup_pending_aio()
271 if (td->cur_depth) in cleanup_pending_aio()
272 r = io_u_queued_complete(td, td->cur_depth); in cleanup_pending_aio()
279 static bool fio_io_sync(struct thread_data *td, struct fio_file *f) in fio_io_sync() argument
281 struct io_u *io_u = __get_io_u(td); in fio_io_sync()
290 if (td_io_prep(td, io_u)) { in fio_io_sync()
291 put_io_u(td, io_u); in fio_io_sync()
296 ret = td_io_queue(td, io_u); in fio_io_sync()
298 td_verror(td, io_u->error, "td_io_queue"); in fio_io_sync()
299 put_io_u(td, io_u); in fio_io_sync()
302 if (td_io_commit(td)) in fio_io_sync()
304 if (io_u_queued_complete(td, 1) < 0) in fio_io_sync()
308 td_verror(td, io_u->error, "td_io_queue"); in fio_io_sync()
312 if (io_u_sync_complete(td, io_u) < 0) in fio_io_sync()
315 if (td_io_commit(td)) in fio_io_sync()
323 static int fio_file_fsync(struct thread_data *td, struct fio_file *f) in fio_file_fsync() argument
328 return fio_io_sync(td, f); in fio_file_fsync()
330 if (td_io_open_file(td, f)) in fio_file_fsync()
333 ret = fio_io_sync(td, f); in fio_file_fsync()
334 td_io_close_file(td, f); in fio_file_fsync()
338 static inline void __update_tv_cache(struct thread_data *td) in __update_tv_cache() argument
340 fio_gettime(&td->tv_cache, NULL); in __update_tv_cache()
343 static inline void update_tv_cache(struct thread_data *td) in update_tv_cache() argument
345 if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask) in update_tv_cache()
346 __update_tv_cache(td); in update_tv_cache()
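update_tv_cache() refreshes the cached timestamp only on every (tv_cache_mask + 1)-th call, using a counter and a power-of-two-minus-one mask so the hot path usually skips the clock read. A rough illustration of that gating pattern, with an assumed mask of 15 and clock_gettime() standing in for fio_gettime():

    #include <time.h>

    static struct timespec tv_cache;                /* ~ td->tv_cache */
    static unsigned int tv_cache_nr;
    static const unsigned int tv_cache_mask = 15;   /* assumed: refresh every 16th call */

    static void update_time_cache(void)
    {
            /* only the (mask + 1)-th caller pays for the clock read */
            if ((++tv_cache_nr & tv_cache_mask) == tv_cache_mask)
                    clock_gettime(CLOCK_MONOTONIC, &tv_cache);
    }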
349 static inline bool runtime_exceeded(struct thread_data *td, struct timeval *t) in runtime_exceeded() argument
351 if (in_ramp_time(td)) in runtime_exceeded()
353 if (!td->o.timeout) in runtime_exceeded()
355 if (utime_since(&td->epoch, t) >= td->o.timeout) in runtime_exceeded()
366 static inline void update_runtime(struct thread_data *td, in update_runtime() argument
370 if (ddir == DDIR_WRITE && td_write(td) && td->o.verify_only) in update_runtime()
373 td->ts.runtime[ddir] -= (elapsed_us[ddir] + 999) / 1000; in update_runtime()
374 elapsed_us[ddir] += utime_since_now(&td->start); in update_runtime()
375 td->ts.runtime[ddir] += (elapsed_us[ddir] + 999) / 1000; in update_runtime()
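The update_runtime() fragments subtract the previously credited (rounded-up) millisecond value, extend the raw microsecond total, then re-add the rounded-up result, so per-interval rounding never accumulates across calls. A small sketch of that bookkeeping, with now_us standing in for utime_since_now(&td->start):

    #include <stdint.h>

    /* Keep the authoritative figure in microseconds and re-derive the
     * rounded-up millisecond total each time. */
    static void account_runtime(uint64_t *runtime_ms, uint64_t *elapsed_us,
                                uint64_t now_us)
    {
            *runtime_ms -= (*elapsed_us + 999) / 1000;   /* drop the old rounded value */
            *elapsed_us += now_us;                       /* extend the raw total */
            *runtime_ms += (*elapsed_us + 999) / 1000;   /* re-add, rounded up */
    }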
378 static bool break_on_this_error(struct thread_data *td, enum fio_ddir ddir, in break_on_this_error() argument
383 if (ret < 0 || td->error) { in break_on_this_error()
384 int err = td->error; in break_on_this_error()
391 if (!(td->o.continue_on_error & (1 << eb))) in break_on_this_error()
394 if (td_non_fatal_error(td, eb, err)) { in break_on_this_error()
399 update_error_count(td, err); in break_on_this_error()
400 td_clear_error(td); in break_on_this_error()
403 } else if (td->o.fill_device && err == ENOSPC) { in break_on_this_error()
408 td_clear_error(td); in break_on_this_error()
409 fio_mark_td_terminate(td); in break_on_this_error()
416 update_error_count(td, err); in break_on_this_error()
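break_on_this_error() treats continue_on_error as a bitmask of error classes: the job only survives an error whose class bit is set (and whose errno it treats as non-fatal), while ENOSPC under fill_device is converted into a clean termination. The sketch below compresses that decision into one function; the class enum, the parameter names and the exact ordering of the checks are assumptions, not fio's code.

    #include <errno.h>
    #include <stdbool.h>

    /* Illustrative error classes; fio's real error_type bits differ. */
    enum err_class { ERRC_READ = 0, ERRC_WRITE = 1, ERRC_VERIFY = 2 };

    /* Returns true when the job should stop.  *clean_stop is set when the
     * error should end the run without being counted as a failure. */
    static bool should_break(int err, enum err_class eb,
                             unsigned int continue_on_error,
                             bool fill_device, bool *clean_stop)
    {
            *clean_stop = false;

            if (!err)
                    return false;

            if (fill_device && err == ENOSPC) {
                    *clean_stop = true;     /* device full: normal end of run */
                    return false;
            }

            if (!(continue_on_error & (1U << eb)))
                    return true;            /* this class of error is fatal */

            return false;                   /* tolerated; caller counts it */
    }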
424 static void check_update_rusage(struct thread_data *td) in check_update_rusage() argument
426 if (td->update_rusage) { in check_update_rusage()
427 td->update_rusage = 0; in check_update_rusage()
428 update_rusage_stat(td); in check_update_rusage()
429 fio_mutex_up(td->rusage_sem); in check_update_rusage()
433 static int wait_for_completions(struct thread_data *td, struct timeval *time) in wait_for_completions() argument
435 const int full = queue_full(td); in wait_for_completions()
439 if (td->flags & TD_F_REGROW_LOGS) in wait_for_completions()
440 return io_u_quiesce(td); in wait_for_completions()
445 min_evts = min(td->o.iodepth_batch_complete_min, td->cur_depth); in wait_for_completions()
446 if ((full && !min_evts) || !td->o.iodepth_batch_complete_min) in wait_for_completions()
449 if (time && (__should_check_rate(td, DDIR_READ) || in wait_for_completions()
450 __should_check_rate(td, DDIR_WRITE) || in wait_for_completions()
451 __should_check_rate(td, DDIR_TRIM))) in wait_for_completions()
455 ret = io_u_queued_complete(td, min_evts); in wait_for_completions()
458 } while (full && (td->cur_depth > td->o.iodepth_low)); in wait_for_completions()
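wait_for_completions() reaps at least iodepth_batch_complete_min events per pass (forcing one when no minimum is configured) and, when the queue was full, keeps draining until the in-flight depth drops below iodepth_low. A self-contained sketch of that loop shape; reap_completions() is a stub standing in for io_u_queued_complete(), and the explicit cur_depth bookkeeping is an assumption, since fio updates it inside the helper.

    #include <stdbool.h>

    #define MIN(a, b)  ((a) < (b) ? (a) : (b))

    /* Stub standing in for io_u_queued_complete(): pretend every requested
     * event completes immediately. */
    static int reap_completions(unsigned int min_evts)
    {
            return (int) min_evts;
    }

    static int drain_completions(bool full, unsigned int *cur_depth,
                                 unsigned int batch_complete_min,
                                 unsigned int iodepth_low)
    {
            int ret = 0;

            do {
                    unsigned int min_evts = MIN(batch_complete_min, *cur_depth);

                    if ((full && !min_evts) || !batch_complete_min)
                            min_evts = 1;           /* always make some progress */

                    ret = reap_completions(min_evts);
                    if (ret < 0)
                            break;
                    *cur_depth -= MIN((unsigned int) ret, *cur_depth);
            } while (full && *cur_depth > iodepth_low);

            return ret;
    }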
463 int io_queue_event(struct thread_data *td, struct io_u *io_u, int *ret, in io_queue_event() argument
473 clear_io_u(td, io_u); in io_queue_event()
482 trim_io_piece(td, io_u); in io_queue_event()
489 unlog_io_piece(td, io_u); in io_queue_event()
490 td_verror(td, EIO, "full resid"); in io_queue_event()
491 put_io_u(td, io_u); in io_queue_event()
500 td->ts.short_io_u[io_u->ddir]++; in io_queue_event()
506 requeue_io_u(td, &io_u); in io_queue_event()
509 if (comp_time && (__should_check_rate(td, DDIR_READ) || in io_queue_event()
510 __should_check_rate(td, DDIR_WRITE) || in io_queue_event()
511 __should_check_rate(td, DDIR_TRIM))) in io_queue_event()
514 *ret = io_u_sync_complete(td, io_u); in io_queue_event()
519 if (td->flags & TD_F_REGROW_LOGS) in io_queue_event()
520 regrow_logs(td); in io_queue_event()
536 if (td->io_ops->commit == NULL) in io_queue_event()
537 io_u_queued(td, io_u); in io_queue_event()
543 unlog_io_piece(td, io_u); in io_queue_event()
544 requeue_io_u(td, &io_u); in io_queue_event()
545 ret2 = td_io_commit(td); in io_queue_event()
551 td_verror(td, -(*ret), "td_io_queue"); in io_queue_event()
555 if (break_on_this_error(td, ddir, ret)) in io_queue_event()
561 static inline bool io_in_polling(struct thread_data *td) in io_in_polling() argument
563 return !td->o.iodepth_batch_complete_min && in io_in_polling()
564 !td->o.iodepth_batch_complete_max; in io_in_polling()
569 static int unlink_all_files(struct thread_data *td) in unlink_all_files() argument
575 for_each_file(td, f, i) { in unlink_all_files()
578 ret = td_io_unlink_file(td, f); in unlink_all_files()
584 td_verror(td, ret, "unlink_all_files"); in unlink_all_files()
593 static void do_verify(struct thread_data *td, uint64_t verify_bytes) in do_verify() argument
606 for_each_file(td, f, i) { in do_verify()
609 if (fio_io_sync(td, f)) in do_verify()
611 if (file_invalidate_cache(td, f)) in do_verify()
615 check_update_rusage(td); in do_verify()
617 if (td->error) in do_verify()
626 if (!td->o.rand_repeatable) in do_verify()
627 td_fill_verify_state_seed(td); in do_verify()
629 td_set_runstate(td, TD_VERIFYING); in do_verify()
632 while (!td->terminate) { in do_verify()
636 update_tv_cache(td); in do_verify()
637 check_update_rusage(td); in do_verify()
639 if (runtime_exceeded(td, &td->tv_cache)) { in do_verify()
640 __update_tv_cache(td); in do_verify()
641 if (runtime_exceeded(td, &td->tv_cache)) { in do_verify()
642 fio_mark_td_terminate(td); in do_verify()
647 if (flow_threshold_exceeded(td)) in do_verify()
650 if (!td->o.experimental_verify) { in do_verify()
651 io_u = __get_io_u(td); in do_verify()
655 if (get_next_verify(td, io_u)) { in do_verify()
656 put_io_u(td, io_u); in do_verify()
660 if (td_io_prep(td, io_u)) { in do_verify()
661 put_io_u(td, io_u); in do_verify()
665 if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes) in do_verify()
668 while ((io_u = get_io_u(td)) != NULL) { in do_verify()
685 td->io_issues[DDIR_READ]++; in do_verify()
686 put_io_u(td, io_u); in do_verify()
690 io_u_set(td, io_u, IO_U_F_TRIMMED); in do_verify()
696 put_io_u(td, io_u); in do_verify()
705 if (verify_state_should_stop(td, io_u)) { in do_verify()
706 put_io_u(td, io_u); in do_verify()
710 if (td->o.verify_async) in do_verify()
716 if (!td->o.disable_slat) in do_verify()
719 ret = td_io_queue(td, io_u); in do_verify()
721 if (io_queue_event(td, io_u, &ret, ddir, NULL, 1, NULL)) in do_verify()
730 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); in do_verify()
731 if (full || io_in_polling(td)) in do_verify()
732 ret = wait_for_completions(td, NULL); in do_verify()
738 check_update_rusage(td); in do_verify()
740 if (!td->error) { in do_verify()
741 min_events = td->cur_depth; in do_verify()
744 ret = io_u_queued_complete(td, min_events); in do_verify()
746 cleanup_pending_aio(td); in do_verify()
748 td_set_runstate(td, TD_RUNNING); in do_verify()
753 static bool exceeds_number_ios(struct thread_data *td) in exceeds_number_ios() argument
757 if (!td->o.number_ios) in exceeds_number_ios()
760 number_ios = ddir_rw_sum(td->io_blocks); in exceeds_number_ios()
761 number_ios += td->io_u_queued + td->io_u_in_flight; in exceeds_number_ios()
763 return number_ios >= (td->o.number_ios * td->loops); in exceeds_number_ios()
766 static bool io_bytes_exceeded(struct thread_data *td, uint64_t *this_bytes) in io_bytes_exceeded() argument
770 if (td_rw(td)) in io_bytes_exceeded()
772 else if (td_write(td)) in io_bytes_exceeded()
774 else if (td_read(td)) in io_bytes_exceeded()
779 if (td->o.io_size) in io_bytes_exceeded()
780 limit = td->o.io_size; in io_bytes_exceeded()
782 limit = td->o.size; in io_bytes_exceeded()
784 limit *= td->loops; in io_bytes_exceeded()
785 return bytes >= limit || exceeds_number_ios(td); in io_bytes_exceeded()
788 static bool io_issue_bytes_exceeded(struct thread_data *td) in io_issue_bytes_exceeded() argument
790 return io_bytes_exceeded(td, td->io_issue_bytes); in io_issue_bytes_exceeded()
793 static bool io_complete_bytes_exceeded(struct thread_data *td) in io_complete_bytes_exceeded() argument
795 return io_bytes_exceeded(td, td->this_io_bytes); in io_complete_bytes_exceeded()
802 static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir) in usec_for_io() argument
806 assert(!(td->flags & TD_F_CHILD)); in usec_for_io()
807 bytes = td->rate_io_issue_bytes[ddir]; in usec_for_io()
808 bps = td->rate_bps[ddir]; in usec_for_io()
810 if (td->o.rate_process == RATE_PROCESS_POISSON) { in usec_for_io()
812 iops = bps / td->o.bs[ddir]; in usec_for_io()
814 -logf(__rand_0_1(&td->poisson_state[ddir])); in usec_for_io()
820 td->last_usec[ddir] += val; in usec_for_io()
821 return td->last_usec[ddir]; in usec_for_io()
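For the Poisson rate process, usec_for_io() schedules each issue an exponentially distributed interval after the previous one, drawn as -log(U) times the mean gap, so issues form a Poisson process at the requested IOPS. The sketch below assumes the scale factor omitted from the fragment is 1e6/iops microseconds per I/O and uses drand48() in place of __rand_0_1():

    #include <math.h>
    #include <stdint.h>
    #include <stdlib.h>

    static uint64_t next_issue_usec(uint64_t *last_usec, uint64_t bps,
                                    uint64_t block_size)
    {
            double iops = (double) bps / block_size;
            double mean_us = 1000000.0 / iops;      /* assumed scale: usec per I/O */
            double u = drand48();

            if (u <= 0.0)                           /* guard log(0) */
                    u = 1e-12;

            /* exponential inter-arrival time via inverse transform sampling */
            *last_usec += (uint64_t) (mean_us * -log(u));
            return *last_usec;
    }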
837 static void do_io(struct thread_data *td, uint64_t *bytes_done) in do_io() argument
844 bytes_done[i] = td->bytes_done[i]; in do_io()
846 if (in_ramp_time(td)) in do_io()
847 td_set_runstate(td, TD_RAMP); in do_io()
849 td_set_runstate(td, TD_RUNNING); in do_io()
851 lat_target_init(td); in do_io()
853 total_bytes = td->o.size; in do_io()
858 if (td_write(td) && td_random(td) && td->o.norandommap) in do_io()
859 total_bytes = max(total_bytes, (uint64_t) td->o.io_size); in do_io()
865 if (td->o.verify != VERIFY_NONE && in do_io()
866 (td_write(td) && td->o.verify_backlog)) in do_io()
867 total_bytes += td->o.size; in do_io()
871 if (td_trimwrite(td)) in do_io()
872 total_bytes += td->total_io_size; in do_io()
874 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || in do_io()
875 (!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) || in do_io()
876 td->o.time_based) { in do_io()
882 check_update_rusage(td); in do_io()
884 if (td->terminate || td->done) in do_io()
887 update_tv_cache(td); in do_io()
889 if (runtime_exceeded(td, &td->tv_cache)) { in do_io()
890 __update_tv_cache(td); in do_io()
891 if (runtime_exceeded(td, &td->tv_cache)) { in do_io()
892 fio_mark_td_terminate(td); in do_io()
897 if (flow_threshold_exceeded(td)) in do_io()
906 (!td->o.time_based || in do_io()
907 (td->o.time_based && td->o.verify != VERIFY_NONE))) in do_io()
910 io_u = get_io_u(td); in do_io()
919 if (td->o.latency_target) in do_io()
931 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ && in do_io()
932 ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) { in do_io()
934 if (!td->o.verify_pattern_bytes) { in do_io()
935 io_u->rand_seed = __rand(&td->verify_state); in do_io()
937 io_u->rand_seed *= __rand(&td->verify_state); in do_io()
940 if (verify_state_should_stop(td, io_u)) { in do_io()
941 put_io_u(td, io_u); in do_io()
945 if (td->o.verify_async) in do_io()
949 td_set_runstate(td, TD_VERIFYING); in do_io()
950 } else if (in_ramp_time(td)) in do_io()
951 td_set_runstate(td, TD_RAMP); in do_io()
953 td_set_runstate(td, TD_RUNNING); in do_io()
960 if (td_write(td) && io_u->ddir == DDIR_WRITE && in do_io()
961 td->o.do_verify && in do_io()
962 td->o.verify != VERIFY_NONE && in do_io()
963 !td->o.experimental_verify) in do_io()
964 log_io_piece(td, io_u); in do_io()
966 if (td->o.io_submit_mode == IO_MODE_OFFLOAD) { in do_io()
970 if (td->error) in do_io()
973 workqueue_enqueue(&td->io_wq, &io_u->work); in do_io()
977 td->io_issues[ddir]++; in do_io()
978 td->io_issue_bytes[ddir] += blen; in do_io()
979 td->rate_io_issue_bytes[ddir] += blen; in do_io()
982 if (should_check_rate(td)) in do_io()
983 td->rate_next_io_time[ddir] = usec_for_io(td, ddir); in do_io()
986 ret = td_io_queue(td, io_u); in do_io()
988 if (should_check_rate(td)) in do_io()
989 td->rate_next_io_time[ddir] = usec_for_io(td, ddir); in do_io()
991 if (io_queue_event(td, io_u, &ret, ddir, &bytes_issued, 0, &comp_time)) in do_io()
1000 full = queue_full(td) || in do_io()
1001 (ret == FIO_Q_BUSY && td->cur_depth); in do_io()
1002 if (full || io_in_polling(td)) in do_io()
1003 ret = wait_for_completions(td, &comp_time); in do_io()
1007 if (!ddir_rw_sum(td->bytes_done) && in do_io()
1008 !td_ioengine_flagged(td, FIO_NOIO)) in do_io()
1011 if (!in_ramp_time(td) && should_check_rate(td)) { in do_io()
1012 if (check_min_rate(td, &comp_time)) { in do_io()
1013 if (exitall_on_terminate || td->o.exitall_error) in do_io()
1014 fio_terminate_threads(td->groupid); in do_io()
1015 td_verror(td, EIO, "check_min_rate"); in do_io()
1019 if (!in_ramp_time(td) && td->o.latency_target) in do_io()
1020 lat_target_check(td); in do_io()
1022 if (td->o.thinktime) { in do_io()
1025 b = ddir_rw_sum(td->io_blocks); in do_io()
1026 if (!(b % td->o.thinktime_blocks)) { in do_io()
1029 io_u_quiesce(td); in do_io()
1031 if (td->o.thinktime_spin) in do_io()
1032 usec_spin(td->o.thinktime_spin); in do_io()
1034 left = td->o.thinktime - td->o.thinktime_spin; in do_io()
1036 usec_sleep(td, left); in do_io()
1041 check_update_rusage(td); in do_io()
1043 if (td->trim_entries) in do_io()
1044 log_err("fio: %lu trim entries leaked?\n", td->trim_entries); in do_io()
1046 if (td->o.fill_device && td->error == ENOSPC) { in do_io()
1047 td->error = 0; in do_io()
1048 fio_mark_td_terminate(td); in do_io()
1050 if (!td->error) { in do_io()
1053 if (td->o.io_submit_mode == IO_MODE_OFFLOAD) { in do_io()
1054 workqueue_flush(&td->io_wq); in do_io()
1057 i = td->cur_depth; in do_io()
1060 ret = io_u_queued_complete(td, i); in do_io()
1061 if (td->o.fill_device && td->error == ENOSPC) in do_io()
1062 td->error = 0; in do_io()
1065 if (should_fsync(td) && td->o.end_fsync) { in do_io()
1066 td_set_runstate(td, TD_FSYNCING); in do_io()
1068 for_each_file(td, f, i) { in do_io()
1069 if (!fio_file_fsync(td, f)) in do_io()
1077 cleanup_pending_aio(td); in do_io()
1082 if (!ddir_rw_sum(td->this_io_bytes)) in do_io()
1083 td->done = 1; in do_io()
1086 bytes_done[i] = td->bytes_done[i] - bytes_done[i]; in do_io()
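Within the do_io() fragments above, the thinktime block (source lines 1022-1036) pauses the job every thinktime_blocks completed blocks: it quiesces outstanding I/O, optionally busy-spins for thinktime_spin microseconds, then sleeps the remainder of the thinktime budget. A self-contained sketch of that pacing, with a crude busy-wait standing in for usec_spin() and usleep() for usec_sleep(); the quiesce step is omitted.

    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    /* Crude busy-wait standing in for fio's usec_spin(). */
    static void spin_usec(unsigned int usec)
    {
            struct timespec s, n;
            long long elapsed;

            clock_gettime(CLOCK_MONOTONIC, &s);
            do {
                    clock_gettime(CLOCK_MONOTONIC, &n);
                    elapsed = (n.tv_sec - s.tv_sec) * 1000000LL +
                              (n.tv_nsec - s.tv_nsec) / 1000;
            } while (elapsed < (long long) usec);
    }

    /* thinktime_blocks is assumed to be at least 1, as fio defaults it. */
    static void maybe_think(uint64_t blocks_done, unsigned int thinktime,
                            unsigned int thinktime_spin,
                            unsigned int thinktime_blocks)
    {
            if (!thinktime || (blocks_done % thinktime_blocks))
                    return;

            if (thinktime_spin)
                    spin_usec(thinktime_spin);

            if (thinktime > thinktime_spin)
                    usleep(thinktime - thinktime_spin);
    }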
1089 static void free_file_completion_logging(struct thread_data *td) in free_file_completion_logging() argument
1094 for_each_file(td, f, i) { in free_file_completion_logging()
1101 static int init_file_completion_logging(struct thread_data *td, in init_file_completion_logging() argument
1107 if (td->o.verify == VERIFY_NONE || !td->o.verify_state_save) in init_file_completion_logging()
1110 for_each_file(td, f, i) { in init_file_completion_logging()
1119 free_file_completion_logging(td); in init_file_completion_logging()
1124 static void cleanup_io_u(struct thread_data *td) in cleanup_io_u() argument
1128 while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) { in cleanup_io_u()
1130 if (td->io_ops->io_u_free) in cleanup_io_u()
1131 td->io_ops->io_u_free(td, io_u); in cleanup_io_u()
1136 free_io_mem(td); in cleanup_io_u()
1138 io_u_rexit(&td->io_u_requeues); in cleanup_io_u()
1139 io_u_qexit(&td->io_u_freelist); in cleanup_io_u()
1140 io_u_qexit(&td->io_u_all); in cleanup_io_u()
1142 free_file_completion_logging(td); in cleanup_io_u()
1145 static int init_io_u(struct thread_data *td) in init_io_u() argument
1153 max_units = td->o.iodepth; in init_io_u()
1154 max_bs = td_max_bs(td); in init_io_u()
1155 min_write = td->o.min_bs[DDIR_WRITE]; in init_io_u()
1156 td->orig_buffer_size = (unsigned long long) max_bs in init_io_u()
1159 if (td_ioengine_flagged(td, FIO_NOIO) || !(td_read(td) || td_write(td))) in init_io_u()
1163 err += io_u_rinit(&td->io_u_requeues, td->o.iodepth); in init_io_u()
1164 err += io_u_qinit(&td->io_u_freelist, td->o.iodepth); in init_io_u()
1165 err += io_u_qinit(&td->io_u_all, td->o.iodepth); in init_io_u()
1178 if (td->o.odirect || td->o.mem_align || td->o.oatomic || in init_io_u()
1179 td_ioengine_flagged(td, FIO_RAWIO)) in init_io_u()
1180 td->orig_buffer_size += page_mask + td->o.mem_align; in init_io_u()
1182 if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) { in init_io_u()
1185 bs = td->orig_buffer_size + td->o.hugepage_size - 1; in init_io_u()
1186 td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1); in init_io_u()
1189 if (td->orig_buffer_size != (size_t) td->orig_buffer_size) { in init_io_u()
1194 if (data_xfer && allocate_io_mem(td)) in init_io_u()
1197 if (td->o.odirect || td->o.mem_align || td->o.oatomic || in init_io_u()
1198 td_ioengine_flagged(td, FIO_RAWIO)) in init_io_u()
1199 p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align; in init_io_u()
1201 p = td->orig_buffer; in init_io_u()
1208 if (td->terminate) in init_io_u()
1226 if (td_write(td)) in init_io_u()
1227 io_u_fill_buffer(td, io_u, min_write, max_bs); in init_io_u()
1228 if (td_write(td) && td->o.verify_pattern_bytes) { in init_io_u()
1233 fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0); in init_io_u()
1239 io_u_qpush(&td->io_u_freelist, io_u); in init_io_u()
1245 io_u_qpush(&td->io_u_all, io_u); in init_io_u()
1247 if (td->io_ops->io_u_init) { in init_io_u()
1248 int ret = td->io_ops->io_u_init(td, io_u); in init_io_u()
1259 if (init_file_completion_logging(td, max_units)) in init_io_u()
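When hugepages back the buffers, init_io_u() rounds orig_buffer_size up to a multiple of hugepage_size with the classic add-then-mask trick, which only works because the page size is a power of two. A one-liner capturing it:

    #include <stddef.h>

    /* Only valid when 'align' is a power of two, as hugepage_size is. */
    static size_t round_up_pow2(size_t len, size_t align)
    {
            return (len + align - 1) & ~(align - 1);
    }

    /* e.g. round_up_pow2(5 << 20, 2 << 20) == 6 << 20  (5 MiB -> 6 MiB) */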
1269 static int switch_ioscheduler(struct thread_data *td) in switch_ioscheduler() argument
1276 if (td_ioengine_flagged(td, FIO_DISKLESSIO)) in switch_ioscheduler()
1279 assert(td->files && td->files[0]); in switch_ioscheduler()
1280 sprintf(tmp, "%s/queue/scheduler", td->files[0]->du->sysfs_root); in switch_ioscheduler()
1289 td_verror(td, errno, "fopen iosched"); in switch_ioscheduler()
1296 ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f); in switch_ioscheduler()
1298 td_verror(td, errno, "fwrite"); in switch_ioscheduler()
1311 td_verror(td, errno, "fread"); in switch_ioscheduler()
1329 sprintf(tmp2, "[%s]", td->o.ioscheduler); in switch_ioscheduler()
1331 log_err("fio: io scheduler %s not found\n", td->o.ioscheduler); in switch_ioscheduler()
1332 td_verror(td, EINVAL, "iosched_switch"); in switch_ioscheduler()
1344 static bool keep_running(struct thread_data *td) in keep_running() argument
1348 if (td->done) in keep_running()
1350 if (td->o.time_based) in keep_running()
1352 if (td->o.loops) { in keep_running()
1353 td->o.loops--; in keep_running()
1356 if (exceeds_number_ios(td)) in keep_running()
1359 if (td->o.io_size) in keep_running()
1360 limit = td->o.io_size; in keep_running()
1362 limit = td->o.size; in keep_running()
1364 if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) { in keep_running()
1371 diff = limit - ddir_rw_sum(td->io_bytes); in keep_running()
1372 if (diff < td_max_bs(td)) in keep_running()
1375 if (fio_files_done(td) && !td->o.io_size) in keep_running()
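keep_running()'s tail check stops the job early when the remaining byte budget is smaller than the largest block size it could issue, since no further I/O would fit. A tiny sketch of that test; names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors: diff = limit - ddir_rw_sum(td->io_bytes);
     *          if (diff < td_max_bs(td)) stop the job. */
    static bool remaining_fits_one_more(uint64_t limit, uint64_t done,
                                        uint64_t max_bs)
    {
            return done < limit && (limit - done) >= max_bs;
    }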
1405 static uint64_t do_dry_run(struct thread_data *td) in do_dry_run() argument
1407 td_set_runstate(td, TD_RUNNING); in do_dry_run()
1409 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || in do_dry_run()
1410 (!flist_empty(&td->trim_list)) || !io_complete_bytes_exceeded(td)) { in do_dry_run()
1414 if (td->terminate || td->done) in do_dry_run()
1417 io_u = get_io_u(td); in do_dry_run()
1421 io_u_set(td, io_u, IO_U_F_FLIGHT); in do_dry_run()
1425 td->io_issues[acct_ddir(io_u)]++; in do_dry_run()
1427 io_u_mark_depth(td, 1); in do_dry_run()
1428 td->ts.total_io_u[io_u->ddir]++; in do_dry_run()
1431 if (td_write(td) && io_u->ddir == DDIR_WRITE && in do_dry_run()
1432 td->o.do_verify && in do_dry_run()
1433 td->o.verify != VERIFY_NONE && in do_dry_run()
1434 !td->o.experimental_verify) in do_dry_run()
1435 log_io_piece(td, io_u); in do_dry_run()
1437 ret = io_u_sync_complete(td, io_u); in do_dry_run()
1441 return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM]; in do_dry_run()
1445 struct thread_data *td; member
1457 struct thread_data *td = fd->td; in thread_main() local
1458 struct thread_options *o = &td->o; in thread_main()
1470 td->pid = getpid(); in thread_main()
1472 td->pid = gettid(); in thread_main()
1476 dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid); in thread_main()
1479 fio_server_send_start(td); in thread_main()
1481 INIT_FLIST_HEAD(&td->io_log_list); in thread_main()
1482 INIT_FLIST_HEAD(&td->io_hist_list); in thread_main()
1483 INIT_FLIST_HEAD(&td->verify_list); in thread_main()
1484 INIT_FLIST_HEAD(&td->trim_list); in thread_main()
1485 INIT_FLIST_HEAD(&td->next_rand_list); in thread_main()
1486 td->io_hist_tree = RB_ROOT; in thread_main()
1488 ret = mutex_cond_init_pshared(&td->io_u_lock, &td->free_cond); in thread_main()
1490 td_verror(td, ret, "mutex_cond_init_pshared"); in thread_main()
1493 ret = cond_init_pshared(&td->verify_cond); in thread_main()
1495 td_verror(td, ret, "mutex_cond_pshared"); in thread_main()
1499 td_set_runstate(td, TD_INITIALIZED); in thread_main()
1503 fio_mutex_down(td->mutex); in thread_main()
1511 td_verror(td, errno, "setgid"); in thread_main()
1515 td_verror(td, errno, "setuid"); in thread_main()
1524 if (iolog_compress_init(td, sk_out)) in thread_main()
1540 ret = fio_cpus_split(&o->cpumask, td->thread_number - 1); in thread_main()
1544 td_verror(td, EINVAL, "cpus_split"); in thread_main()
1548 ret = fio_setaffinity(td->pid, o->cpumask); in thread_main()
1550 td_verror(td, errno, "cpu_set_affinity"); in thread_main()
1562 td_verror(td, errno, "Does not support NUMA API\n"); in thread_main()
1571 td_verror(td, errno, \ in thread_main()
1607 if (fio_pin_memory(td)) in thread_main()
1614 if (init_iolog(td)) in thread_main()
1617 if (init_io_u(td)) in thread_main()
1620 if (o->verify_async && verify_async_init(td)) in thread_main()
1627 td_verror(td, errno, "ioprio_set"); in thread_main()
1632 if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt)) in thread_main()
1637 td_verror(td, errno, "nice"); in thread_main()
1641 if (o->ioscheduler && switch_ioscheduler(td)) in thread_main()
1644 if (!o->create_serialize && setup_files(td)) in thread_main()
1647 if (td_io_init(td)) in thread_main()
1650 if (init_random_map(td)) in thread_main()
1657 if (pre_read_files(td) < 0) in thread_main()
1661 fio_verify_init(td); in thread_main()
1663 if (rate_submit_init(td, sk_out)) in thread_main()
1666 set_epoch_time(td, o->log_unix_epoch); in thread_main()
1667 fio_getrusage(&td->ru_start); in thread_main()
1668 memcpy(&td->bw_sample_time, &td->epoch, sizeof(td->epoch)); in thread_main()
1669 memcpy(&td->iops_sample_time, &td->epoch, sizeof(td->epoch)); in thread_main()
1670 memcpy(&td->ss.prev_time, &td->epoch, sizeof(td->epoch)); in thread_main()
1674 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time, in thread_main()
1675 sizeof(td->bw_sample_time)); in thread_main()
1676 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time, in thread_main()
1677 sizeof(td->bw_sample_time)); in thread_main()
1678 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time, in thread_main()
1679 sizeof(td->bw_sample_time)); in thread_main()
1685 while (keep_running(td)) { in thread_main()
1688 fio_gettime(&td->start, NULL); in thread_main()
1689 memcpy(&td->tv_cache, &td->start, sizeof(td->start)); in thread_main()
1692 clear_io_state(td, 0); in thread_main()
1694 if (o->unlink_each_loop && unlink_all_files(td)) in thread_main()
1698 prune_io_piece_log(td); in thread_main()
1700 if (td->o.verify_only && td_write(td)) in thread_main()
1701 verify_bytes = do_dry_run(td); in thread_main()
1703 do_io(td, bytes_done); in thread_main()
1706 fio_mark_td_terminate(td); in thread_main()
1719 if (td->runstate >= TD_EXITED) in thread_main()
1733 check_update_rusage(td); in thread_main()
1739 td->error = EDEADLK; in thread_main()
1744 if (td_read(td) && td->io_bytes[DDIR_READ]) in thread_main()
1745 update_runtime(td, elapsed_us, DDIR_READ); in thread_main()
1746 if (td_write(td) && td->io_bytes[DDIR_WRITE]) in thread_main()
1747 update_runtime(td, elapsed_us, DDIR_WRITE); in thread_main()
1748 if (td_trim(td) && td->io_bytes[DDIR_TRIM]) in thread_main()
1749 update_runtime(td, elapsed_us, DDIR_TRIM); in thread_main()
1750 fio_gettime(&td->start, NULL); in thread_main()
1753 if (td->error || td->terminate) in thread_main()
1758 td_ioengine_flagged(td, FIO_UNIDIR)) in thread_main()
1761 clear_io_state(td, 0); in thread_main()
1763 fio_gettime(&td->start, NULL); in thread_main()
1765 do_verify(td, verify_bytes); in thread_main()
1770 check_update_rusage(td); in thread_main()
1773 update_runtime(td, elapsed_us, DDIR_READ); in thread_main()
1774 fio_gettime(&td->start, NULL); in thread_main()
1777 if (td->error || td->terminate) in thread_main()
1786 if ((td->o.size || td->o.io_size) && !ddir_rw_sum(bytes_done) && in thread_main()
1787 !(td_ioengine_flagged(td, FIO_NOIO) || in thread_main()
1788 td_ioengine_flagged(td, FIO_DISKLESSIO))) in thread_main()
1791 td->o.name, td->io_ops->name); in thread_main()
1793 td_set_runstate(td, TD_FINISHING); in thread_main()
1795 update_rusage_stat(td); in thread_main()
1796 td->ts.total_run_time = mtime_since_now(&td->epoch); in thread_main()
1797 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; in thread_main()
1798 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; in thread_main()
1799 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; in thread_main()
1801 if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) && in thread_main()
1802 (td->o.verify != VERIFY_NONE && td_write(td))) in thread_main()
1803 verify_save_state(td->thread_number); in thread_main()
1805 fio_unpin_memory(td); in thread_main()
1807 td_writeout_logs(td, true); in thread_main()
1809 iolog_compress_exit(td); in thread_main()
1810 rate_submit_exit(td); in thread_main()
1815 if (exitall_on_terminate || (o->exitall_error && td->error)) in thread_main()
1816 fio_terminate_threads(td->groupid); in thread_main()
1819 if (td->error) in thread_main()
1820 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error, in thread_main()
1821 td->verror); in thread_main()
1824 verify_async_exit(td); in thread_main()
1826 close_and_free_files(td); in thread_main()
1827 cleanup_io_u(td); in thread_main()
1828 close_ioengine(td); in thread_main()
1829 cgroup_shutdown(td, &cgroup_mnt); in thread_main()
1830 verify_free_state(td); in thread_main()
1832 if (td->zone_state_index) { in thread_main()
1836 free(td->zone_state_index[i]); in thread_main()
1837 free(td->zone_state_index); in thread_main()
1838 td->zone_state_index = NULL; in thread_main()
1844 td_verror(td, ret, "fio_cpuset_exit"); in thread_main()
1851 write_iolog_close(td); in thread_main()
1853 td_set_runstate(td, TD_EXITED); in thread_main()
1859 check_update_rusage(td); in thread_main()
1862 return (void *) (uintptr_t) td->error; in thread_main()
1871 struct thread_data *td; in reap_threads() local
1879 for_each_td(td, i) { in reap_threads()
1886 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio")) in reap_threads()
1891 if (!td->pid) { in reap_threads()
1895 if (td->runstate == TD_REAPED) in reap_threads()
1897 if (td->o.use_thread) { in reap_threads()
1898 if (td->runstate == TD_EXITED) { in reap_threads()
1899 td_set_runstate(td, TD_REAPED); in reap_threads()
1906 if (td->runstate == TD_EXITED) in reap_threads()
1912 ret = waitpid(td->pid, &status, flags); in reap_threads()
1916 (int) td->pid, td->runstate); in reap_threads()
1917 td->sig = ECHILD; in reap_threads()
1918 td_set_runstate(td, TD_REAPED); in reap_threads()
1922 } else if (ret == td->pid) { in reap_threads()
1928 (int) td->pid, sig); in reap_threads()
1929 td->sig = sig; in reap_threads()
1930 td_set_runstate(td, TD_REAPED); in reap_threads()
1934 if (WEXITSTATUS(status) && !td->error) in reap_threads()
1935 td->error = WEXITSTATUS(status); in reap_threads()
1937 td_set_runstate(td, TD_REAPED); in reap_threads()
1946 if (td->terminate && in reap_threads()
1947 td->runstate < TD_FSYNCING && in reap_threads()
1948 time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) { in reap_threads()
1952 td->o.name, td->runstate, in reap_threads()
1953 (unsigned long) time_since_now(&td->terminate_time)); in reap_threads()
1954 td_set_runstate(td, TD_REAPED); in reap_threads()
1965 (*m_rate) -= ddir_rw_sum(td->o.ratemin); in reap_threads()
1966 (*t_rate) -= ddir_rw_sum(td->o.rate); in reap_threads()
1967 if (!td->pid) in reap_threads()
1970 if (td->error) in reap_threads()
1973 done_secs += mtime_since_now(&td->epoch) / 1000; in reap_threads()
1974 profile_td_exit(td); in reap_threads()
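reap_threads() reaps forked jobs with waitpid() and distinguishes a child killed by a signal from one that exited with a nonzero status, folding the latter into td->error. The sketch below shows that POSIX pattern in isolation; WNOHANG and the return-value conventions are assumptions for illustration.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    /* Returns a nonzero code if the child died or exited abnormally. */
    static int reap_one(pid_t pid)
    {
            int status = 0;
            pid_t ret = waitpid(pid, &status, WNOHANG);

            if (ret < 0) {
                    perror("waitpid");
                    return ECHILD;          /* as the fragment records via td->sig */
            }
            if (ret == pid) {
                    if (WIFSIGNALED(status))
                            return WTERMSIG(status);        /* killed by a signal */
                    if (WIFEXITED(status))
                            return WEXITSTATUS(status);     /* propagate exit code */
            }
            return 0;   /* still running (WNOHANG) or nothing to report */
    }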
2031 static int fio_verify_load_state(struct thread_data *td) in fio_verify_load_state() argument
2035 if (!td->o.verify_state) in fio_verify_load_state()
2041 ret = fio_server_get_verify_state(td->o.name, in fio_verify_load_state()
2042 td->thread_number - 1, &data); in fio_verify_load_state()
2044 verify_assign_state(td, data); in fio_verify_load_state()
2046 ret = verify_load_state(td, "local"); in fio_verify_load_state()
2058 static bool check_mount_writes(struct thread_data *td) in check_mount_writes() argument
2063 if (!td_write(td) || td->o.allow_mounted_write) in check_mount_writes()
2070 for_each_file(td, f, i) { in check_mount_writes()
2091 struct thread_data *td; in waitee_running() local
2097 for_each_td(td, i) { in waitee_running()
2098 if (!strcmp(td->o.name, self) || strcmp(td->o.name, waitee)) in waitee_running()
2101 if (td->runstate < TD_EXITED) { in waitee_running()
2103 self, td->o.name, in waitee_running()
2104 runstate_to_name(td->runstate)); in waitee_running()
2118 struct thread_data *td; in run_threads() local
2131 for_each_td(td, i) { in run_threads()
2132 if (check_mount_writes(td)) in run_threads()
2134 if (td->o.use_thread) in run_threads()
2160 for_each_td(td, i) { in run_threads()
2161 print_status_init(td->thread_number - 1); in run_threads()
2163 if (!td->o.create_serialize) in run_threads()
2166 if (fio_verify_load_state(td)) in run_threads()
2174 if (setup_files(td)) { in run_threads()
2177 if (td->error) in run_threads()
2179 (int) td->pid, td->error, td->verror); in run_threads()
2180 td_set_runstate(td, TD_REAPED); in run_threads()
2191 for_each_file(td, f, j) { in run_threads()
2193 td_io_close_file(td, f); in run_threads()
2212 for_each_td(td, i) { in run_threads()
2213 if (td->runstate != TD_NOT_CREATED) in run_threads()
2220 if (td->terminate) { in run_threads()
2225 if (td->o.start_delay) { in run_threads()
2228 if (td->o.start_delay > spent) in run_threads()
2232 if (td->o.stonewall && (nr_started || nr_running)) { in run_threads()
2234 td->o.name); in run_threads()
2238 if (waitee_running(td)) { in run_threads()
2240 td->o.name, td->o.wait_for); in run_threads()
2244 init_disk_util(td); in run_threads()
2246 td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED); in run_threads()
2247 td->update_rusage = 0; in run_threads()
2253 td_set_runstate(td, TD_CREATED); in run_threads()
2254 map[this_jobs++] = td; in run_threads()
2258 fd->td = td; in run_threads()
2261 if (td->o.use_thread) { in run_threads()
2265 ret = pthread_create(&td->thread, NULL, in run_threads()
2274 ret = pthread_detach(td->thread); in run_threads()
2314 td = map[i]; in run_threads()
2315 if (!td) in run_threads()
2317 if (td->runstate == TD_INITIALIZED) { in run_threads()
2320 } else if (td->runstate >= TD_EXITED) { in run_threads()
2333 td = map[i]; in run_threads()
2334 if (!td) in run_threads()
2336 kill(td->pid, SIGTERM); in run_threads()
2344 for_each_td(td, i) { in run_threads()
2345 if (td->runstate != TD_INITIALIZED) in run_threads()
2348 if (in_ramp_time(td)) in run_threads()
2349 td_set_runstate(td, TD_RAMP); in run_threads()
2351 td_set_runstate(td, TD_RUNNING); in run_threads()
2354 m_rate += ddir_rw_sum(td->o.ratemin); in run_threads()
2355 t_rate += ddir_rw_sum(td->o.rate); in run_threads()
2357 fio_mutex_up(td->mutex); in run_threads()
2384 struct thread_data *td; in fio_backend() local
2433 for_each_td(td, i) { in fio_backend()
2434 if (td->ss.dur) { in fio_backend()
2435 if (td->ss.iops_data != NULL) { in fio_backend()
2436 free(td->ss.iops_data); in fio_backend()
2437 free(td->ss.bw_data); in fio_backend()
2440 fio_options_free(td); in fio_backend()
2441 if (td->rusage_sem) { in fio_backend()
2442 fio_mutex_remove(td->rusage_sem); in fio_backend()
2443 td->rusage_sem = NULL; in fio_backend()
2445 fio_mutex_remove(td->mutex); in fio_backend()
2446 td->mutex = NULL; in fio_backend()