Lines Matching refs:td

17 	struct thread_data *td = sw->priv;  in io_workqueue_fn()  local
22 io_u_set(td, io_u, IO_U_F_NO_FILE_PUT); in io_workqueue_fn()
24 td->cur_depth++; in io_workqueue_fn()
27 ret = td_io_queue(td, io_u); in io_workqueue_fn()
30 ret = io_u_queued_complete(td, 1); in io_workqueue_fn()
32 td->cur_depth -= ret; in io_workqueue_fn()
33 io_u_clear(td, io_u, IO_U_F_FLIGHT); in io_workqueue_fn()
38 io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL); in io_workqueue_fn()
41 td->cur_depth--; in io_workqueue_fn()
45 if (td->o.iodepth == 1) in io_workqueue_fn()
50 ret = io_u_queued_complete(td, min_evts); in io_workqueue_fn()
52 td->cur_depth -= ret; in io_workqueue_fn()
54 ret = io_u_queued_complete(td, td->cur_depth); in io_workqueue_fn()
56 td->cur_depth -= ret; in io_workqueue_fn()
64 struct thread_data *td = sw->priv; in io_workqueue_pre_sleep_flush_fn() local
66 if (td->io_u_queued || td->cur_depth || td->io_u_in_flight) in io_workqueue_pre_sleep_flush_fn()
74 struct thread_data *td = sw->priv; in io_workqueue_pre_sleep_fn() local
77 ret = io_u_quiesce(td); in io_workqueue_pre_sleep_fn()
79 td->cur_depth -= ret; in io_workqueue_pre_sleep_fn()
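
Read together, the two pre-sleep hooks matched above are small. A minimal consolidated reading follows; the enclosing braces, the `static` qualifiers, and the return type/values are inferred from context and are not part of the matched lines:

    static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
    {
            struct thread_data *td = sw->priv;

            /* ask for a flush before sleeping if anything is still queued,
             * pending at depth, or in flight (reconstructed return values) */
            if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
                    return true;

            return false;
    }

    static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
    {
            struct thread_data *td = sw->priv;
            int ret;

            /* quiesce outstanding I/O and drop the completed count
             * from the worker's current depth */
            ret = io_u_quiesce(td);
            td->cur_depth -= ret;
    }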
84 struct thread_data *td; in io_workqueue_alloc_fn() local
86 td = calloc(1, sizeof(*td)); in io_workqueue_alloc_fn()
87 sw->priv = td; in io_workqueue_alloc_fn()
99 struct thread_data *parent = sw->wq->td; in io_workqueue_init_worker_fn()
100 struct thread_data *td = sw->priv; in io_workqueue_init_worker_fn() local
102 memcpy(&td->o, &parent->o, sizeof(td->o)); in io_workqueue_init_worker_fn()
103 memcpy(&td->ts, &parent->ts, sizeof(td->ts)); in io_workqueue_init_worker_fn()
104 td->o.uid = td->o.gid = -1U; in io_workqueue_init_worker_fn()
105 dup_files(td, parent); in io_workqueue_init_worker_fn()
106 td->eo = parent->eo; in io_workqueue_init_worker_fn()
107 fio_options_mem_dupe(td); in io_workqueue_init_worker_fn()
109 if (ioengine_load(td)) in io_workqueue_init_worker_fn()
112 td->pid = gettid(); in io_workqueue_init_worker_fn()
114 INIT_FLIST_HEAD(&td->io_log_list); in io_workqueue_init_worker_fn()
115 INIT_FLIST_HEAD(&td->io_hist_list); in io_workqueue_init_worker_fn()
116 INIT_FLIST_HEAD(&td->verify_list); in io_workqueue_init_worker_fn()
117 INIT_FLIST_HEAD(&td->trim_list); in io_workqueue_init_worker_fn()
118 INIT_FLIST_HEAD(&td->next_rand_list); in io_workqueue_init_worker_fn()
119 td->io_hist_tree = RB_ROOT; in io_workqueue_init_worker_fn()
121 td->o.iodepth = 1; in io_workqueue_init_worker_fn()
122 if (td_io_init(td)) in io_workqueue_init_worker_fn()
125 set_epoch_time(td, td->o.log_unix_epoch); in io_workqueue_init_worker_fn()
126 fio_getrusage(&td->ru_start); in io_workqueue_init_worker_fn()
127 clear_io_state(td, 1); in io_workqueue_init_worker_fn()
129 td_set_runstate(td, TD_RUNNING); in io_workqueue_init_worker_fn()
130 td->flags |= TD_F_CHILD; in io_workqueue_init_worker_fn()
131 td->parent = parent; in io_workqueue_init_worker_fn()
135 close_ioengine(td); in io_workqueue_init_worker_fn()
144 struct thread_data *td = sw->priv; in io_workqueue_exit_worker_fn() local
147 sum_thread_stats(&sw->wq->td->ts, &td->ts, *sum_cnt == 1); in io_workqueue_exit_worker_fn()
149 fio_options_free(td); in io_workqueue_exit_worker_fn()
150 close_and_free_files(td); in io_workqueue_exit_worker_fn()
151 if (td->io_ops) in io_workqueue_exit_worker_fn()
152 close_ioengine(td); in io_workqueue_exit_worker_fn()
153 td_set_runstate(td, TD_EXITED); in io_workqueue_exit_worker_fn()
213 struct thread_data *dst = sw->wq->td; in io_workqueue_update_acct_fn()
235 int rate_submit_init(struct thread_data *td, struct sk_out *sk_out) in rate_submit_init() argument
237 if (td->o.io_submit_mode != IO_MODE_OFFLOAD) in rate_submit_init()
240 return workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth, sk_out); in rate_submit_init()
243 void rate_submit_exit(struct thread_data *td) in rate_submit_exit() argument
245 if (td->o.io_submit_mode != IO_MODE_OFFLOAD) in rate_submit_exit()
248 workqueue_exit(&td->io_wq); in rate_submit_exit()
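
For reference, the two public entry points at the end of the listing can be pieced together from the matches. The early returns on the non-offload path are filled in from context and are not part of the matched lines:

    int rate_submit_init(struct thread_data *td, struct sk_out *sk_out)
    {
            /* only offload mode hands submission to a workqueue;
             * otherwise there is nothing to set up (assumed early return) */
            if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                    return 0;

            return workqueue_init(td, &td->io_wq, &rated_wq_ops,
                                  td->o.iodepth, sk_out);
    }

    void rate_submit_exit(struct thread_data *td)
    {
            /* assumed early return when no workqueue was created */
            if (td->o.io_submit_mode != IO_MODE_OFFLOAD)
                    return;

            workqueue_exit(&td->io_wq);
    }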