Lines Matching refs:work

466 struct work_struct *work = addr; in work_is_static_object() local
468 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
477 struct work_struct *work = addr; in work_fixup_init() local
481 cancel_work_sync(work); in work_fixup_init()
482 debug_object_init(work, &work_debug_descr); in work_fixup_init()
495 struct work_struct *work = addr; in work_fixup_free() local
499 cancel_work_sync(work); in work_fixup_free()
500 debug_object_free(work, &work_debug_descr); in work_fixup_free()
515 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
517 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
520 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
522 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
525 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
528 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
530 debug_object_init(work, &work_debug_descr); in __init_work()
534 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
536 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
540 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
542 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
543 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
548 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
549 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
639 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
642 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
643 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
646 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
649 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
653 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
656 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
660 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
670 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
702 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
705 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
713 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
715 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
738 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
740 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
762 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
764 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
772 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
774 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
777 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
780 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
782 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
1059 struct work_struct *work) in find_worker_executing_work() argument
1064 (unsigned long)work) in find_worker_executing_work()
1065 if (worker->current_work == work && in find_worker_executing_work()
1066 worker->current_func == work->func) in find_worker_executing_work()
1089 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1098 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1099 list_move_tail(&work->entry, head); in move_linked_works()
1100 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1171 static void pwq_activate_inactive_work(struct work_struct *work) in pwq_activate_inactive_work() argument
1173 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work()
1175 trace_workqueue_activate_work(work); in pwq_activate_inactive_work()
1178 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1179 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); in pwq_activate_inactive_work()
1185 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive() local
1188 pwq_activate_inactive_work(work); in pwq_activate_first_inactive()
1268 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1278 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1290 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1298 pool = get_work_pool(work); in try_to_grab_pending()
1311 pwq = get_work_pwq(work); in try_to_grab_pending()
1313 debug_work_deactivate(work); in try_to_grab_pending()
1326 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) in try_to_grab_pending()
1327 pwq_activate_inactive_work(work); in try_to_grab_pending()
1329 list_del_init(&work->entry); in try_to_grab_pending()
1330 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1333 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1343 if (work_is_canceling(work)) in try_to_grab_pending()
1362 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1368 kasan_record_aux_stack_noalloc(work); in insert_work()
1371 set_work_pwq(work, pwq, extra_flags); in insert_work()
1372 list_add_tail(&work->entry, head); in insert_work()
1429 struct work_struct *work) in __queue_work() argument
1468 last_pool = get_work_pool(work); in __queue_work()
1474 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1507 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1509 if (WARN_ON(!list_empty(&work->entry))) in __queue_work()
1516 trace_workqueue_activate_work(work); in __queue_work()
1526 debug_work_activate(work); in __queue_work()
1527 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1547 struct work_struct *work) in queue_work_on() argument
1554 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1555 __queue_work(cpu, wq, work); in queue_work_on()
1618 struct work_struct *work) in queue_work_node() argument
1636 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_node()
1639 __queue_work(cpu, wq, work); in queue_work_node()
1653 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1661 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1666 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1675 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1703 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1710 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
1745 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
1764 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
1780 struct work_struct *work = &rwork->work; in queue_rcu_work() local
1782 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_rcu_work()
2040 static void send_mayday(struct work_struct *work) in send_mayday() argument
2042 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
2066 struct work_struct *work; in pool_mayday_timeout() local
2078 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
2079 send_mayday(work); in pool_mayday_timeout()
2191 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2195 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2210 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2222 collision = find_worker_executing_work(pool, work); in process_one_work()
2224 move_linked_works(work, &collision->scheduled, NULL); in process_one_work()
2229 debug_work_deactivate(work); in process_one_work()
2230 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2231 worker->current_work = work; in process_one_work()
2232 worker->current_func = work->func; in process_one_work()
2234 work_data = *work_data_bits(work); in process_one_work()
2243 list_del_init(&work->entry); in process_one_work()
2270 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2298 trace_workqueue_execute_start(work); in process_one_work()
2299 worker->current_func(work); in process_one_work()
2304 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
2360 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works() local
2362 process_one_work(worker, work); in process_scheduled_works()
2438 struct work_struct *work = in worker_thread() local
2444 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { in worker_thread()
2446 process_one_work(worker, work); in worker_thread()
2450 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2526 struct work_struct *work, *n; in rescuer_thread() local
2543 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2544 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2547 move_linked_works(work, scheduled, &n); in rescuer_thread()
2646 struct work_struct work; member
2651 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2653 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2695 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
2696 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
2725 debug_work_activate(&barr->work); in insert_wq_barrier()
2726 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
3013 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, in start_flush_work() argument
3023 pool = get_work_pool(work); in start_flush_work()
3031 pwq = get_work_pwq(work); in start_flush_work()
3036 worker = find_worker_executing_work(pool, work); in start_flush_work()
3042 check_flush_dependency(pwq->wq, work); in start_flush_work()
3044 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3069 static bool __flush_work(struct work_struct *work, bool from_cancel) in __flush_work() argument
3076 if (WARN_ON(!work->func)) in __flush_work()
3079 lock_map_acquire(&work->lockdep_map); in __flush_work()
3080 lock_map_release(&work->lockdep_map); in __flush_work()
3082 if (start_flush_work(work, &barr, from_cancel)) { in __flush_work()
3084 destroy_work_on_stack(&barr.work); in __flush_work()
3102 bool flush_work(struct work_struct *work) in flush_work() argument
3104 return __flush_work(work, false); in flush_work()
3110 struct work_struct *work; member
3117 if (cwait->work != key) in cwt_wakefn()
3122 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
3129 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
3151 cwait.work = work; in __cancel_work_timer()
3155 if (work_is_canceling(work)) in __cancel_work_timer()
3162 mark_work_canceling(work); in __cancel_work_timer()
3170 __flush_work(work, true); in __cancel_work_timer()
3172 clear_work_data(work); in __cancel_work_timer()
3181 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
3204 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
3206 return __cancel_work_timer(work, false); in cancel_work_sync()
3226 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
3228 return flush_work(&dwork->work); in flush_delayed_work()
3242 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { in flush_rcu_work()
3244 flush_work(&rwork->work); in flush_rcu_work()
3247 return flush_work(&rwork->work); in flush_rcu_work()
3252 static bool __cancel_work(struct work_struct *work, bool is_dwork) in __cancel_work() argument
3258 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work()
3264 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); in __cancel_work()
3272 bool cancel_work(struct work_struct *work) in cancel_work() argument
3274 return __cancel_work(work, false); in cancel_work()
3296 return __cancel_work(&dwork->work, true); in cancel_delayed_work()
3311 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
3338 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
3340 INIT_WORK(work, func); in schedule_on_each_cpu()
3341 schedule_work_on(cpu, work); in schedule_on_each_cpu()
3367 fn(&ew->work); in execute_in_process_context()
3371 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3372 schedule_work(&ew->work); in execute_in_process_context()
3712 static void pwq_unbound_release_workfn(struct work_struct *work) in pwq_unbound_release_workfn() argument
3714 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn()
4621 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4627 if (work_pending(work)) in work_busy()
4631 pool = get_work_pool(work); in work_busy()
4634 if (find_worker_executing_work(pool, work)) in work_busy()
4724 static void pr_cont_work(bool comma, struct work_struct *work) in pr_cont_work() argument
4726 if (work->func == wq_barrier_func) { in pr_cont_work()
4729 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
4734 pr_cont("%s %ps", comma ? "," : "", work->func); in pr_cont_work()
4741 struct work_struct *work; in show_pwq() local
4771 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4772 pr_cont_work(false, work); in show_pwq()
4778 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4779 if (get_work_pwq(work) == pwq) { in show_pwq()
4788 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4789 if (get_work_pwq(work) != pwq) in show_pwq()
4792 pr_cont_work(comma, work); in show_pwq()
4793 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4802 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
4803 pr_cont_work(comma, work); in show_pwq()
4804 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
5182 struct work_struct work; member
5188 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
5190 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
5210 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); in work_on_cpu()
5211 schedule_work_on(cpu, &wfc.work); in work_on_cpu()
5212 flush_work(&wfc.work); in work_on_cpu()
5213 destroy_work_on_stack(&wfc.work); in work_on_cpu()
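The listing above is the internal plumbing (PENDING bit handling, pwq/pool lookup, barriers, cancellation). As a minimal sketch of how a caller typically drives that plumbing through the exported entry points shown in the listing (queue_work_on() at 1547, flush_work() at 3102, cancel_work_sync() at 3204), assuming a purely hypothetical module whose names (example_wq, example_work_fn) do not appear in the source:

/*
 * Hypothetical usage sketch only; the names example_wq, example_work and
 * example_work_fn are illustrative and not part of workqueue.c.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical unbound queue */
static struct work_struct example_work;

/* Work handler: runs in process context on a kworker thread. */
static void example_work_fn(struct work_struct *work)
{
	pr_info("example work executed\n");
}

static int __init example_init(void)
{
	/* WQ_UNBOUND: not tied to the queueing CPU; 0 = default max_active. */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	/* Sets WORK_STRUCT_PENDING_BIT, then hands off to __queue_work(). */
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* Wait for a pending/running instance to finish before teardown. */
	cancel_work_sync(&example_work);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

In this sketch queue_work() is the path that reaches queue_work_on()/__queue_work() in the listing, and cancel_work_sync() exercises __cancel_work_timer()/try_to_grab_pending(); delayed and RCU work follow the analogous queue_delayed_work()/queue_rcu_work() paths.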