
Lines Matching refs:work

426 struct work_struct *work = addr; in work_fixup_init() local
430 cancel_work_sync(work); in work_fixup_init()
431 debug_object_init(work, &work_debug_descr); in work_fixup_init()
445 struct work_struct *work = addr; in work_fixup_activate() local
455 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { in work_fixup_activate()
456 debug_object_init(work, &work_debug_descr); in work_fixup_activate()
457 debug_object_activate(work, &work_debug_descr); in work_fixup_activate()
477 struct work_struct *work = addr; in work_fixup_free() local
481 cancel_work_sync(work); in work_fixup_free()
482 debug_object_free(work, &work_debug_descr); in work_fixup_free()
497 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
499 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
502 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
504 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
507 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
510 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
512 debug_object_init(work, &work_debug_descr); in __init_work()
516 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
518 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
522 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
524 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
525 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
530 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
531 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
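The on-stack variants above (__init_work(), destroy_work_on_stack(), destroy_delayed_work_on_stack()) exist so debugobjects can track work items that live on a caller's stack. A minimal caller sketch, with illustrative names not taken from this file:

#include <linux/workqueue.h>
#include <linux/printk.h>

static void on_stack_fn(struct work_struct *work)
{
        pr_info("on-stack work ran\n");
}

static void run_on_stack_work(void)
{
        struct work_struct w;

        INIT_WORK_ONSTACK(&w, on_stack_fn);     /* debug_object_init_on_stack() path */
        schedule_work(&w);
        flush_work(&w);                         /* must finish before the frame is reused */
        destroy_work_on_stack(&w);              /* debug_object_free() path */
}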
590 static int get_work_color(struct work_struct *work) in get_work_color() argument
592 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & in get_work_color()
621 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
624 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
625 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
628 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
631 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
635 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
638 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
642 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
652 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
684 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
687 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
690 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
692 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
715 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
717 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
740 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
742 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
751 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
753 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
756 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
759 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
761 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
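The helpers above (set_work_data() through work_is_canceling()) all multiplex work->data: while an item is queued it carries a pool_workqueue pointer plus flag bits, and once off-queue it carries a worker-pool id shifted past those bits, with WORK_OFFQ_CANCELING marking a cancellation in progress. A purely conceptual sketch of that packing, using made-up constants rather than the real WORK_STRUCT_*/WORK_OFFQ_* values:

/* Illustrative constants only; the kernel's actual layout differs. */
#define DEMO_FLAG_BITS  5UL                     /* pending, delayed, linked, ... */
#define DEMO_FLAG_MASK  ((1UL << DEMO_FLAG_BITS) - 1)

static unsigned long demo_pack_offq(unsigned long pool_id, unsigned long flags)
{
        return (pool_id << DEMO_FLAG_BITS) | (flags & DEMO_FLAG_MASK);
}

static unsigned long demo_unpack_pool_id(unsigned long data)
{
        return data >> DEMO_FLAG_BITS;
}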
1009 struct work_struct *work) in find_worker_executing_work() argument
1014 (unsigned long)work) in find_worker_executing_work()
1015 if (worker->current_work == work && in find_worker_executing_work()
1016 worker->current_func == work->func) in find_worker_executing_work()
1039 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1048 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1049 list_move_tail(&work->entry, head); in move_linked_works()
1050 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1121 static void pwq_activate_delayed_work(struct work_struct *work) in pwq_activate_delayed_work() argument
1123 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work()
1125 trace_workqueue_activate_work(work); in pwq_activate_delayed_work()
1126 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1127 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); in pwq_activate_delayed_work()
1133 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed() local
1136 pwq_activate_delayed_work(work); in pwq_activate_first_delayed()
1213 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1223 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1235 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1242 pool = get_work_pool(work); in try_to_grab_pending()
1255 pwq = get_work_pwq(work); in try_to_grab_pending()
1257 debug_work_deactivate(work); in try_to_grab_pending()
1266 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) in try_to_grab_pending()
1267 pwq_activate_delayed_work(work); in try_to_grab_pending()
1269 list_del_init(&work->entry); in try_to_grab_pending()
1270 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1273 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1281 if (work_is_canceling(work)) in try_to_grab_pending()
1300 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1306 set_work_pwq(work, pwq, extra_flags); in insert_work()
1307 list_add_tail(&work->entry, head); in insert_work()
1338 struct work_struct *work) in __queue_work() argument
1374 last_pool = get_work_pool(work); in __queue_work()
1380 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1413 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1415 if (WARN_ON(!list_empty(&work->entry))) { in __queue_work()
1424 trace_workqueue_activate_work(work); in __queue_work()
1432 debug_work_activate(work); in __queue_work()
1433 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1450 struct work_struct *work) in queue_work_on() argument
1457 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1458 __queue_work(cpu, wq, work); in queue_work_on()
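queue_work_on() is the public entry point here; it only calls __queue_work() after winning WORK_STRUCT_PENDING_BIT. A hypothetical caller (my_fn, my_work, kick_my_work are illustrative names):

#include <linux/workqueue.h>
#include <linux/printk.h>

static void my_fn(struct work_struct *work)
{
        pr_info("my_fn executed\n");
}

static DECLARE_WORK(my_work, my_fn);

static void kick_my_work(void)
{
        /* false means the item was already pending and was not requeued */
        if (!queue_work_on(0, system_wq, &my_work))
                pr_info("my_work already pending\n");
}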
1472 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1480 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1486 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1495 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1525 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1532 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
1567 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
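queue_delayed_work_on() and mod_delayed_work_on() back the delayed-work API; the former only arms the timer if PENDING was clear, while the latter re-arms regardless. A hedged usage sketch (my_dwork, my_dfn, arm_my_dwork are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct delayed_work my_dwork;

static void my_dfn(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);

        pr_info("delayed work %p ran\n", dw);
}

static void arm_my_dwork(void)
{
        INIT_DELAYED_WORK(&my_dwork, my_dfn);
        queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
        /* change the timeout whether or not the item is already pending */
        mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(500));
}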
1834 static void send_mayday(struct work_struct *work) in send_mayday() argument
1836 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
1860 struct work_struct *work; in pool_mayday_timeout() local
1872 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1873 send_mayday(work); in pool_mayday_timeout()
1985 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
1989 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2004 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2016 collision = find_worker_executing_work(pool, work); in process_one_work()
2018 move_linked_works(work, &collision->scheduled, NULL); in process_one_work()
2023 debug_work_deactivate(work); in process_one_work()
2024 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2025 worker->current_work = work; in process_one_work()
2026 worker->current_func = work->func; in process_one_work()
2028 work_color = get_work_color(work); in process_one_work()
2030 list_del_init(&work->entry); in process_one_work()
2057 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2063 trace_workqueue_execute_start(work); in process_one_work()
2064 worker->current_func(work); in process_one_work()
2069 trace_workqueue_execute_end(work); in process_one_work()
2122 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works() local
2124 process_one_work(worker, work); in process_scheduled_works()
2190 struct work_struct *work = in worker_thread() local
2194 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { in worker_thread()
2196 process_one_work(worker, work); in worker_thread()
2200 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2276 struct work_struct *work, *n; in rescuer_thread() local
2293 list_for_each_entry_safe(work, n, &pool->worklist, entry) in rescuer_thread()
2294 if (get_work_pwq(work) == pwq) in rescuer_thread()
2295 move_linked_works(work, scheduled, &n); in rescuer_thread()
2360 struct work_struct work; member
2365 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2367 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2408 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
2409 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
2428 debug_work_activate(&barr->work); in insert_wq_barrier()
2429 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2712 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) in start_flush_work() argument
2721 pool = get_work_pool(work); in start_flush_work()
2729 pwq = get_work_pwq(work); in start_flush_work()
2734 worker = find_worker_executing_work(pool, work); in start_flush_work()
2740 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
2772 bool flush_work(struct work_struct *work) in flush_work() argument
2776 lock_map_acquire(&work->lockdep_map); in flush_work()
2777 lock_map_release(&work->lockdep_map); in flush_work()
2779 if (start_flush_work(work, &barr)) { in flush_work()
2781 destroy_work_on_stack(&barr.work); in flush_work()
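flush_work() only waits for the last queued instance to finish; it does not dequeue a pending item or prevent requeueing. Continuing the hypothetical my_work sketch:

static void wait_for_my_work(void)
{
        /* returns false if my_work was idle and there was nothing to wait for */
        if (flush_work(&my_work))
                pr_info("waited for my_work to finish\n");
}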
2791 struct work_struct *work; member
2798 if (cwait->work != key) in cwt_wakefn()
2803 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
2810 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
2832 cwait.work = work; in __cancel_work_timer()
2836 if (work_is_canceling(work)) in __cancel_work_timer()
2843 mark_work_canceling(work); in __cancel_work_timer()
2846 flush_work(work); in __cancel_work_timer()
2847 clear_work_data(work); in __cancel_work_timer()
2856 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
2879 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
2881 return __cancel_work_timer(work, false); in cancel_work_sync()
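cancel_work_sync() goes through __cancel_work_timer(): grab PENDING, mark the item canceling so late queuers back off, flush any running instance, then clear the data word. Typical teardown, continuing the hypothetical sketch:

static void teardown_my_work(void)
{
        /* on return, my_work is neither pending nor running on any CPU;
         * the caller must ensure nothing requeues it afterwards */
        cancel_work_sync(&my_work);
}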
2901 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
2903 return flush_work(&dwork->work); in flush_delayed_work()
2929 ret = try_to_grab_pending(&dwork->work, true, &flags); in cancel_delayed_work()
2935 set_work_pool_and_clear_pending(&dwork->work, in cancel_delayed_work()
2936 get_work_pool_id(&dwork->work)); in cancel_delayed_work()
2953 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
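For delayed work, cancel_delayed_work() only steals a pending item and does not wait for a running handler, cancel_delayed_work_sync() also waits, and flush_delayed_work() pulls a pending timer forward and waits for the handler. A teardown-ordering sketch with the hypothetical my_dwork:

static void teardown_my_dwork(void)
{
        /* run a still-pending instance now rather than waiting out its timer */
        flush_delayed_work(&my_dwork);
        /* then make sure nothing is pending or running before freeing state */
        cancel_delayed_work_sync(&my_dwork);
}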
2980 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
2982 INIT_WORK(work, func); in schedule_on_each_cpu()
2983 schedule_work_on(cpu, work); in schedule_on_each_cpu()
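schedule_on_each_cpu() allocates a per-cpu set of work items, queues one on every online CPU via schedule_work_on(), and flushes them all before returning. Illustrative caller (count_on_cpu, run_everywhere are made-up names):

#include <linux/smp.h>
#include <linux/printk.h>

static void count_on_cpu(struct work_struct *work)
{
        pr_info("ran on CPU %d\n", smp_processor_id());
}

static int run_everywhere(void)
{
        /* returns -ENOMEM if the per-cpu work items cannot be allocated */
        return schedule_on_each_cpu(count_on_cpu);
}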
3009 fn(&ew->work); in execute_in_process_context()
3013 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3014 schedule_work(&ew->work); in execute_in_process_context()
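execute_in_process_context() calls fn(&ew->work) immediately when the caller is already in process context and only falls back to schedule_work() otherwise, so ew must stay valid until the handler runs. Illustrative use (my_obj, release_fn are hypothetical):

#include <linux/slab.h>

struct my_obj {
        struct execute_work ew;
        /* ... payload ... */
};

static void release_fn(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, ew.work);

        kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
        /* frees obj inline unless called from interrupt context */
        execute_in_process_context(release_fn, &obj->ew);
}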
3306 static void pwq_unbound_release_workfn(struct work_struct *work) in pwq_unbound_release_workfn() argument
3308 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn()
4166 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4172 if (work_pending(work)) in work_busy()
4176 pool = get_work_pool(work); in work_busy()
4179 if (find_worker_executing_work(pool, work)) in work_busy()
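work_busy() returns a racy snapshot made up of WORK_BUSY_PENDING and WORK_BUSY_RUNNING bits, useful only as a hint. Sketch with the hypothetical my_work:

static void report_my_work(void)
{
        unsigned int busy = work_busy(&my_work);

        if (busy & WORK_BUSY_PENDING)
                pr_info("my_work is queued but has not started\n");
        if (busy & WORK_BUSY_RUNNING)
                pr_info("my_work is executing right now\n");
}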
4274 static void pr_cont_work(bool comma, struct work_struct *work) in pr_cont_work() argument
4276 if (work->func == wq_barrier_func) { in pr_cont_work()
4279 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
4284 pr_cont("%s %pf", comma ? "," : "", work->func); in pr_cont_work()
4291 struct work_struct *work; in show_pwq() local
4321 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4322 pr_cont_work(false, work); in show_pwq()
4328 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4329 if (get_work_pwq(work) == pwq) { in show_pwq()
4338 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4339 if (get_work_pwq(work) != pwq) in show_pwq()
4342 pr_cont_work(comma, work); in show_pwq()
4343 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4352 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4353 pr_cont_work(comma, work); in show_pwq()
4354 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4442 static void wq_unbind_fn(struct work_struct *work) in wq_unbind_fn() argument
4686 struct work_struct work; member
4692 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
4694 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
4714 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); in work_on_cpu()
4715 schedule_work_on(cpu, &wfc.work); in work_on_cpu()
4716 flush_work(&wfc.work); in work_on_cpu()
4717 destroy_work_on_stack(&wfc.work); in work_on_cpu()
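work_on_cpu() wraps the work_for_cpu machinery above: it initialises an on-stack item, queues it on the requested CPU, flushes it, and hands back the handler's long return value. Hypothetical caller sketch (read_local_state, query_cpu are illustrative):

#include <linux/smp.h>

static long read_local_state(void *arg)
{
        /* runs in a kworker bound to the requested CPU */
        return (long)smp_processor_id();
}

static long query_cpu(int cpu)
{
        return work_on_cpu(cpu, read_local_state, NULL);
}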