
Lines Matching refs:work
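(The matches below are from the Linux kernel's kernel/workqueue.c; the leading numbers are line numbers within that file.)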

444 struct work_struct *work = addr; in work_is_static_object() local
446 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
455 struct work_struct *work = addr; in work_fixup_init() local
459 cancel_work_sync(work); in work_fixup_init()
460 debug_object_init(work, &work_debug_descr); in work_fixup_init()
473 struct work_struct *work = addr; in work_fixup_free() local
477 cancel_work_sync(work); in work_fixup_free()
478 debug_object_free(work, &work_debug_descr); in work_fixup_free()
493 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
495 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
498 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
500 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
503 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
506 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
508 debug_object_init(work, &work_debug_descr); in __init_work()
512 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
514 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
518 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
520 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
521 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
526 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
527 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
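The helpers above implement CONFIG_DEBUG_OBJECTS_WORK tracking. The caller-visible contract is only that stack-allocated items must pair INIT_WORK_ONSTACK() with destroy_work_on_stack(). A minimal sketch of that pairing; the completion-based wait and the onstack_* names are illustrative assumptions, not taken from this file:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct onstack_ctx {
	struct work_struct work;
	struct completion done;
};

static void onstack_fn(struct work_struct *work)
{
	struct onstack_ctx *ctx = container_of(work, struct onstack_ctx, work);

	/* ... do the deferred job ... */
	complete(&ctx->done);
}

static void run_onstack_work(void)
{
	struct onstack_ctx ctx;

	/*
	 * The _ONSTACK initializer routes through __init_work(work, 1)
	 * above so debugobjects knows the item lives on the stack.
	 */
	INIT_WORK_ONSTACK(&ctx.work, onstack_fn);
	init_completion(&ctx.done);

	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);

	/* Pairs with INIT_WORK_ONSTACK(); calls debug_object_free() above. */
	destroy_work_on_stack(&ctx.work);
}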
586 static int get_work_color(struct work_struct *work) in get_work_color() argument
588 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & in get_work_color()
617 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
620 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
621 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
624 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
627 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
631 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
634 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
638 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
648 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
680 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
683 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
686 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
688 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
711 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
713 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
736 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
738 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
747 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
749 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
752 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
755 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
757 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
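set_work_data() and the get_work_*()/work_is_canceling() readers above multiplex the single atomic word work->data: while an item is queued it carries a pool_workqueue pointer plus flag bits, and while it is off-queue it carries the last pool ID shifted by WORK_OFFQ_POOL_SHIFT, optionally tagged WORK_OFFQ_CANCELING. Outside this file only the flag accessors are visible; a hedged caller-side sketch (my_work is an invented name):

#include <linux/workqueue.h>

static struct work_struct my_work;	/* assumed INIT_WORK()ed elsewhere */

static void poke(void)
{
	/*
	 * work_pending() tests WORK_STRUCT_PENDING_BIT in work->data,
	 * the same bit queue_work_on() claims with test_and_set_bit().
	 * queue_work() already performs that test atomically, so this
	 * explicit check is an optimization, not a correctness guarantee.
	 */
	if (!work_pending(&my_work))
		schedule_work(&my_work);
}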
1029 struct work_struct *work) in find_worker_executing_work() argument
1034 (unsigned long)work) in find_worker_executing_work()
1035 if (worker->current_work == work && in find_worker_executing_work()
1036 worker->current_func == work->func) in find_worker_executing_work()
1059 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1068 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1069 list_move_tail(&work->entry, head); in move_linked_works()
1070 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1141 static void pwq_activate_delayed_work(struct work_struct *work) in pwq_activate_delayed_work() argument
1143 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work()
1145 trace_workqueue_activate_work(work); in pwq_activate_delayed_work()
1148 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1149 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); in pwq_activate_delayed_work()
1155 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed() local
1158 pwq_activate_delayed_work(work); in pwq_activate_first_delayed()
1235 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1245 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1257 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1265 pool = get_work_pool(work); in try_to_grab_pending()
1278 pwq = get_work_pwq(work); in try_to_grab_pending()
1280 debug_work_deactivate(work); in try_to_grab_pending()
1289 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) in try_to_grab_pending()
1290 pwq_activate_delayed_work(work); in try_to_grab_pending()
1292 list_del_init(&work->entry); in try_to_grab_pending()
1293 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1296 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1306 if (work_is_canceling(work)) in try_to_grab_pending()
1325 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1331 set_work_pwq(work, pwq, extra_flags); in insert_work()
1332 list_add_tail(&work->entry, head); in insert_work()
1396 struct work_struct *work) in __queue_work() argument
1412 debug_work_activate(work); in __queue_work()
1434 last_pool = get_work_pool(work); in __queue_work()
1440 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1473 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1475 if (WARN_ON(!list_empty(&work->entry))) in __queue_work()
1482 trace_workqueue_activate_work(work); in __queue_work()
1492 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1511 struct work_struct *work) in queue_work_on() argument
1518 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1519 __queue_work(cpu, wq, work); in queue_work_on()
1582 struct work_struct *work) in queue_work_node() argument
1600 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_node()
1603 __queue_work(cpu, wq, work); in queue_work_node()
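__queue_work() is reached through thin wrappers such as queue_work_on() and queue_work_node(), which do nothing but claim WORK_STRUCT_PENDING_BIT first. Typical driver-side usage; the queue and work names are invented for illustration:

#include <linux/workqueue.h>
#include <linux/printk.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* Runs later, in process context, on a kworker. */
}

static int my_init(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);

	/*
	 * queue_work() lets the workqueue pick a CPU; queue_work_on()
	 * would pin it. Both return false if PENDING was already set.
	 */
	if (!queue_work(my_wq, &my_work))
		pr_debug("my_work was already pending\n");
	return 0;
}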
1617 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1625 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1630 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1639 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1667 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1674 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
1709 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
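__queue_delayed_work() arms dwork->timer with delayed_work_timer_fn(), so the real queueing happens at timer expiry; mod_delayed_work_on() first steals a pending item via try_to_grab_pending() and then requeues it with the new delay. Caller-side sketch with invented names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work my_dwork;

static void my_dwork_fn(struct work_struct *work)
{
	/* to_delayed_work() recovers the container, as in
	 * try_to_grab_pending() above. */
	struct delayed_work *dwork = to_delayed_work(work);

	(void)dwork;	/* ... use the containing structure ... */
}

static void arm_timer(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_dwork_fn);

	/* Run once, roughly 100ms from now, on the system workqueue. */
	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));

	/* Re-arm: adjusts the pending timer rather than queueing twice. */
	mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(500));
}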
1728 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
1744 struct work_struct *work = &rwork->work; in queue_rcu_work() local
1746 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_rcu_work()
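queue_rcu_work() sets PENDING immediately but defers the actual queueing to rcu_work_rcufn(), which runs only after an RCU grace period. A sketch with invented names:

#include <linux/workqueue.h>

static struct rcu_work my_rwork;

static void my_rwork_fn(struct work_struct *work)
{
	/* By the time this runs, a full RCU grace period has elapsed,
	 * so readers that saw the old state are gone. */
}

static void kick(void)
{
	INIT_RCU_WORK(&my_rwork, my_rwork_fn);
	queue_rcu_work(system_wq, &my_rwork);
}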
2012 static void send_mayday(struct work_struct *work) in send_mayday() argument
2014 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
2038 struct work_struct *work; in pool_mayday_timeout() local
2050 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
2051 send_mayday(work); in pool_mayday_timeout()
2163 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2167 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2182 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2194 collision = find_worker_executing_work(pool, work); in process_one_work()
2196 move_linked_works(work, &collision->scheduled, NULL); in process_one_work()
2201 debug_work_deactivate(work); in process_one_work()
2202 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2203 worker->current_work = work; in process_one_work()
2204 worker->current_func = work->func; in process_one_work()
2206 work_color = get_work_color(work); in process_one_work()
2214 list_del_init(&work->entry); in process_one_work()
2241 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2269 trace_workqueue_execute_start(work); in process_one_work()
2270 worker->current_func(work); in process_one_work()
2275 trace_workqueue_execute_end(work); in process_one_work()
2330 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works() local
2332 process_one_work(worker, work); in process_scheduled_works()
2408 struct work_struct *work = in worker_thread() local
2414 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { in worker_thread()
2416 process_one_work(worker, work); in worker_thread()
2420 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
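Note that process_one_work() clears PENDING (set_work_pool_and_clear_pending() above) before invoking the handler, so a handler may legally requeue its own work item, and at most one further run is recorded while it executes. A self-requeueing sketch with invented names:

#include <linux/workqueue.h>
#include <linux/atomic.h>

static struct work_struct poll_work;
static atomic_t keep_polling = ATOMIC_INIT(1);

static void poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */

	/*
	 * PENDING was already cleared before this function was entered,
	 * so this schedules exactly one follow-up run.
	 */
	if (atomic_read(&keep_polling))
		schedule_work(work);
}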
2496 struct work_struct *work, *n; in rescuer_thread() local
2513 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2514 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2517 move_linked_works(work, scheduled, &n); in rescuer_thread()
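pool_mayday_timeout() sends mayday requests for starving pwqs, and rescuer_thread() then pulls the matching items onto its own scheduled list. A rescuer exists only for workqueues created with WQ_MEM_RECLAIM, e.g. (queue name invented):

#include <linux/workqueue.h>

static struct workqueue_struct *reclaim_wq;

static int setup(void)
{
	/*
	 * WQ_MEM_RECLAIM attaches a rescuer thread so this queue keeps
	 * making forward progress even when no new kworkers can be
	 * forked (the mayday path above).
	 */
	reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
	return reclaim_wq ? 0 : -ENOMEM;
}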
2616 struct work_struct work; member
2621 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2623 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2664 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
2665 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
2686 debug_work_activate(&barr->work); in insert_wq_barrier()
2687 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2975 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, in start_flush_work() argument
2985 pool = get_work_pool(work); in start_flush_work()
2993 pwq = get_work_pwq(work); in start_flush_work()
2998 worker = find_worker_executing_work(pool, work); in start_flush_work()
3004 check_flush_dependency(pwq->wq, work); in start_flush_work()
3006 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3031 static bool __flush_work(struct work_struct *work, bool from_cancel) in __flush_work() argument
3038 if (WARN_ON(!work->func)) in __flush_work()
3042 lock_map_acquire(&work->lockdep_map); in __flush_work()
3043 lock_map_release(&work->lockdep_map); in __flush_work()
3046 if (start_flush_work(work, &barr, from_cancel)) { in __flush_work()
3048 destroy_work_on_stack(&barr.work); in __flush_work()
3066 bool flush_work(struct work_struct *work) in flush_work() argument
3068 return __flush_work(work, false); in flush_work()
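__flush_work() uses start_flush_work()/insert_wq_barrier() above to queue a wq_barrier directly behind the target and then waits on the barrier's completion. The caller side (invented names):

#include <linux/workqueue.h>
#include <linux/printk.h>

static struct work_struct my_work;	/* assumed queued elsewhere */

static void quiesce(void)
{
	/*
	 * Returns true if my_work was pending or running and had to be
	 * waited on, false if it was already idle. May sleep.
	 */
	if (flush_work(&my_work))
		pr_debug("my_work was in flight and has now completed\n");
}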
3074 struct work_struct *work; member
3081 if (cwait->work != key) in cwt_wakefn()
3086 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
3093 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
3115 cwait.work = work; in __cancel_work_timer()
3119 if (work_is_canceling(work)) in __cancel_work_timer()
3126 mark_work_canceling(work); in __cancel_work_timer()
3134 __flush_work(work, true); in __cancel_work_timer()
3136 clear_work_data(work); in __cancel_work_timer()
3145 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
3168 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
3170 return __cancel_work_timer(work, false); in cancel_work_sync()
3190 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
3192 return flush_work(&dwork->work); in flush_delayed_work()
3206 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { in flush_rcu_work()
3208 flush_work(&rwork->work); in flush_rcu_work()
3211 return flush_work(&rwork->work); in flush_rcu_work()
3216 static bool __cancel_work(struct work_struct *work, bool is_dwork) in __cancel_work() argument
3222 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work()
3228 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); in __cancel_work()
3251 return __cancel_work(&dwork->work, true); in cancel_delayed_work()
3266 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
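__cancel_work_timer() loops on try_to_grab_pending(), marks the item canceling, and flushes any concurrent execution, while the non-sync __cancel_work() only unqueues. Typical teardown (invented names):

#include <linux/workqueue.h>

static struct work_struct my_work;
static struct delayed_work my_dwork;

static void teardown(void)
{
	/*
	 * The _sync variants guarantee that on return the handler is not
	 * running and cannot rearm itself; they may sleep.
	 */
	cancel_work_sync(&my_work);
	cancel_delayed_work_sync(&my_dwork);

	/*
	 * cancel_delayed_work() (no _sync) merely unqueues and may return
	 * while the handler is still executing on another CPU.
	 */
}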
3293 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
3295 INIT_WORK(work, func); in schedule_on_each_cpu()
3296 schedule_work_on(cpu, work); in schedule_on_each_cpu()
3322 fn(&ew->work); in execute_in_process_context()
3326 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3327 schedule_work(&ew->work); in execute_in_process_context()
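schedule_on_each_cpu() allocates one work item per CPU, queues them all, and flushes each before returning; execute_in_process_context() runs the function inline when already in process context and defers it otherwise. A sketch of the former:

#include <linux/workqueue.h>

static void per_cpu_fn(struct work_struct *work)
{
	/* Executes once on every online CPU, in process context. */
}

static int run_everywhere(void)
{
	/* Returns 0 on success or -ENOMEM if the per-CPU work items
	 * could not be allocated. Sleeps until every CPU is done. */
	return schedule_on_each_cpu(per_cpu_fn);
}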
3655 static void pwq_unbound_release_workfn(struct work_struct *work) in pwq_unbound_release_workfn() argument
3657 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn()
4535 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4541 if (work_pending(work)) in work_busy()
4545 pool = get_work_pool(work); in work_busy()
4548 if (find_worker_executing_work(pool, work)) in work_busy()
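work_busy() combines the PENDING bit with a busy-hash lookup into an OR of WORK_BUSY_PENDING and WORK_BUSY_RUNNING; the answer is a racy snapshot, suitable for diagnostics only. A sketch:

#include <linux/workqueue.h>
#include <linux/printk.h>

static void report(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	/* Snapshot only: the state can change as soon as it is read. */
	pr_info("work %ps:%s%s\n", work->func,
		(busy & WORK_BUSY_PENDING) ? " pending" : "",
		(busy & WORK_BUSY_RUNNING) ? " running" : "");
}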
4638 static void pr_cont_work(bool comma, struct work_struct *work) in pr_cont_work() argument
4640 if (work->func == wq_barrier_func) { in pr_cont_work()
4643 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
4648 pr_cont("%s %ps", comma ? "," : "", work->func); in pr_cont_work()
4655 struct work_struct *work; in show_pwq() local
4685 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4686 pr_cont_work(false, work); in show_pwq()
4692 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4693 if (get_work_pwq(work) == pwq) { in show_pwq()
4702 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4703 if (get_work_pwq(work) != pwq) in show_pwq()
4706 pr_cont_work(comma, work); in show_pwq()
4707 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4716 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4717 pr_cont_work(comma, work); in show_pwq()
4718 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
5067 struct work_struct work; member
5073 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
5075 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
5095 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); in work_on_cpu()
5096 schedule_work_on(cpu, &wfc.work); in work_on_cpu()
5097 flush_work(&wfc.work); in work_on_cpu()
5098 destroy_work_on_stack(&wfc.work); in work_on_cpu()
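work_on_cpu() is the synchronous on-stack pattern condensed: INIT_WORK_ONSTACK(), schedule_work_on(), flush_work(), destroy_work_on_stack(), with the function's long return value carried back through struct work_for_cpu. Usage (invented function):

#include <linux/workqueue.h>

static long remote_fn(void *arg)
{
	/* Runs on the requested CPU, in a kworker; may sleep. */
	return 42;
}

static long run_on_cpu0(void)
{
	/* Blocks until remote_fn() has finished on CPU 0. */
	return work_on_cpu(0, remote_fn, NULL);
}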