Lines Matching full:work
28 /* List head pointing to ordered work list */
54 static void normal_work_helper(struct btrfs_work *work);
59 struct btrfs_work *work = container_of(arg, struct btrfs_work, \
61 normal_work_helper(work); \
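The two fragments at 59 and 61 belong to a helper-generating macro. A minimal sketch of what the full definition plausibly looks like, reconstructed from the fragments (the macro shape is an assumption; the normal_work member name is confirmed by line 373 below):

    #define BTRFS_WORK_HELPER(name)                                         \
    void btrfs_##name(struct work_struct *arg)                              \
    {                                                                       \
            /* Recover the btrfs_work that embeds this work_struct. */      \
            struct btrfs_work *work = container_of(arg, struct btrfs_work,  \
                                                   normal_work);            \
            normal_work_helper(work);                                       \
    }

Each generated helper is what later gets passed to btrfs_init_work() as uniq_func and installed via INIT_WORK() (line 373).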
71 btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
73 return work->wq->fs_info; in btrfs_work_owner()
208 * Hook for threshold which will be called before executing the work,
259 struct btrfs_work *work; in run_ordered_work() local
269 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
271 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
276 * we leave the work item on the list as a barrier so in run_ordered_work()
277 * that later work items that are done don't have their in run_ordered_work()
280 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
282 trace_btrfs_ordered_sched(work); in run_ordered_work()
284 work->ordered_func(work); in run_ordered_work()
288 list_del(&work->ordered_list); in run_ordered_work()
291 if (work == self) { in run_ordered_work()
293 * This is the work item that the worker is currently in run_ordered_work()
297 * of work items. I.e., if a work item with the same in run_ordered_work()
298 * address and work function is queued twice, the second in run_ordered_work()
300 * work item may be freed and recycled with the same in run_ordered_work()
301 * work function; the workqueue code assumes that the in run_ordered_work()
302 * original work item cannot depend on the recycled work in run_ordered_work()
305 * Note that the work of one Btrfs filesystem may depend in run_ordered_work()
306 * on the work of another Btrfs filesystem via, e.g., a in run_ordered_work()
308 * work item to be recycled until we are really done, in run_ordered_work()
316 * the lock held though. Save the work as tag for the in run_ordered_work()
320 wtag = work; in run_ordered_work()
321 work->ordered_free(work); in run_ordered_work()
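Taken together, the run_ordered_work() fragments describe the ordered pass: items whose work function has finished (WORK_DONE_BIT) are completed in queue order, and the first unfinished item stops the walk, acting as a barrier for everything behind it. Below is a simplified sketch of that loop, with locking, tracing and the self-recycling special case (lines 291-321) left out; only the bit names and list fields visible above are relied on.

    /* Simplified outline, not the verbatim kernel loop. */
    static void run_ordered_work_outline(struct list_head *ordered_list)
    {
            struct btrfs_work *work;

            while (!list_empty(ordered_list)) {
                    work = list_entry(ordered_list->next, struct btrfs_work,
                                      ordered_list);
                    /* Not done yet: keep it on the list as a barrier. */
                    if (!test_bit(WORK_DONE_BIT, &work->flags))
                            break;
                    /* Make sure ordered_func runs exactly once per item. */
                    if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                            break;
                    work->ordered_func(work);
                    list_del(&work->ordered_list);
                    /* ordered_free may free the item; don't touch it afterwards. */
                    work->ordered_free(work);
            }
    }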
334 static void normal_work_helper(struct btrfs_work *work) in normal_work_helper() argument
341 * We should not touch things inside work in the following cases: in normal_work_helper()
342 * 1) after work->func() if it has no ordered_free in normal_work_helper()
 343 * Since the struct is freed in work->func(). in normal_work_helper()
 344 * 2) after setting WORK_DONE_BIT in normal_work_helper()
 345 * The work may be freed in other threads almost instantly. in normal_work_helper()
348 if (work->ordered_func) in normal_work_helper()
350 wq = work->wq; in normal_work_helper()
351 /* Safe for tracepoints in case work gets freed by the callback */ in normal_work_helper()
352 wtag = work; in normal_work_helper()
354 trace_btrfs_work_sched(work); in normal_work_helper()
356 work->func(work); in normal_work_helper()
358 set_bit(WORK_DONE_BIT, &work->flags); in normal_work_helper()
359 run_ordered_work(wq, work); in normal_work_helper()
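The normal_work_helper() fragments show the worker-side execution order: decide up front whether ordered completion is needed and save the workqueue pointer, because the item may be freed as soon as work->func() returns; then run the function, mark it done, and drive the ordered pass. A hedged outline of that flow (the workqueue type name is written generically here, not taken verbatim from the source):

    /* Outline of the helper flow, simplified from the fragments above. */
    static void normal_work_helper_outline(struct btrfs_work *work)
    {
            /*
             * Save everything needed first: without an ordered stage the
             * item may be freed inside work->func(), and with one it may be
             * freed by another worker right after WORK_DONE_BIT is set.
             */
            struct btrfs_workqueue *wq = work->wq;
            bool need_order = work->ordered_func != NULL;

            work->func(work);
            if (need_order) {
                    /* Publish completion, then finish items in queue order. */
                    set_bit(WORK_DONE_BIT, &work->flags);
                    run_ordered_work(wq, work);
            }
    }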
365 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, in btrfs_init_work() argument
370 work->func = func; in btrfs_init_work()
371 work->ordered_func = ordered_func; in btrfs_init_work()
372 work->ordered_free = ordered_free; in btrfs_init_work()
373 INIT_WORK(&work->normal_work, uniq_func); in btrfs_init_work()
374 INIT_LIST_HEAD(&work->ordered_list); in btrfs_init_work()
375 work->flags = 0; in btrfs_init_work()
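btrfs_init_work() only wires up the three callbacks and the embedded work_struct; the caller allocates the item and later queues it. A hypothetical caller might look like this (all names below are illustrative, and BTRFS_WORK_HELPER(my_helper) is assumed to generate btrfs_my_helper as sketched earlier):

    /* Hypothetical callbacks; names are illustrative only. */
    static void my_func(struct btrfs_work *work)
    {
            /* Heavy part; may finish out of order across workers. */
    }

    static void my_ordered_func(struct btrfs_work *work)
    {
            /* Completion part; always runs in queue order. */
    }

    static void my_ordered_free(struct btrfs_work *work)
    {
            kfree(work);
    }

    static struct btrfs_work *prepare_my_work(void)
    {
            struct btrfs_work *work = kzalloc(sizeof(*work), GFP_NOFS);

            if (!work)
                    return NULL;
            /* btrfs_my_helper is assumed to come from BTRFS_WORK_HELPER(my_helper). */
            btrfs_init_work(work, btrfs_my_helper, my_func,
                            my_ordered_func, my_ordered_free);
            return work;
    }

Passing NULL for ordered_func and ordered_free is the common case for work that needs no ordering; such items skip the ordered list entirely (see line 385 below).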
379 struct btrfs_work *work) in __btrfs_queue_work() argument
383 work->wq = wq; in __btrfs_queue_work()
385 if (work->ordered_func) { in __btrfs_queue_work()
387 list_add_tail(&work->ordered_list, &wq->ordered_list); in __btrfs_queue_work()
390 trace_btrfs_work_queued(work); in __btrfs_queue_work()
391 queue_work(wq->normal_wq, &work->normal_work); in __btrfs_queue_work()
395 struct btrfs_work *work) in btrfs_queue_work() argument
399 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) in btrfs_queue_work()
403 __btrfs_queue_work(dest_wq, work); in btrfs_queue_work()
433 void btrfs_set_work_high_priority(struct btrfs_work *work) in btrfs_set_work_high_priority() argument
435 set_bit(WORK_HIGH_PRIO_BIT, &work->flags); in btrfs_set_work_high_priority()
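Queueing ties the pieces together: high priority is a per-item flag that btrfs_queue_work() checks at queue time (line 399), so it has to be set before queueing, and it only takes effect when the workqueue has a high-priority counterpart (wq->high). A hedged usage sketch continuing the hypothetical example above:

    /* Hypothetical submit path; the workqueue pointer comes from the caller. */
    static void submit_my_work(struct btrfs_workqueue *wq, struct btrfs_work *work,
                               bool urgent)
    {
            if (urgent)
                    btrfs_set_work_high_priority(work);
            btrfs_queue_work(wq, work);
    }

If wq->high does not exist, the flag is simply ignored and the item runs on the normal queue.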