
Lines Matching refs:work (kernel/workqueue.c)

Each entry below gives the source line number, the matching line, and the enclosing function; the trailing "argument", "local", and "member" tags mark how the identifier is used at that point.

110 static inline void set_wq_data(struct work_struct *work,  in set_wq_data()  argument
115 BUG_ON(!work_pending(work)); in set_wq_data()
118 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); in set_wq_data()
119 atomic_long_set(&work->data, new); in set_wq_data()
123 struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) in get_wq_data() argument
125 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); in get_wq_data()
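The two helpers above pack a cpu_workqueue_struct pointer and the WORK_STRUCT_* flag bits into the single atomic word work->data, relying on the pointer's alignment to keep the low bits free for flags. A minimal, compilable userspace sketch of that packing technique (the mask names and values are illustrative, not the kernel's exact constants):

    #include <assert.h>
    #include <stdio.h>

    #define FLAG_MASK 0x3UL           /* low bits are free: pointers are aligned */
    #define PTR_MASK  (~FLAG_MASK)

    int main(void)
    {
            static int target __attribute__((aligned(4)));
            /* set_wq_data() analogue: pointer and a PENDING-style flag in one word */
            unsigned long word = (unsigned long)&target | 0x1UL;

            /* get_wq_data() analogue: mask the flag bits back off */
            assert((int *)(word & PTR_MASK) == &target);
            printf("flags=%#lx ptr=%p\n", word & FLAG_MASK, (void *)(word & PTR_MASK));
            return 0;
    }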
129 struct work_struct *work, struct list_head *head) in insert_work() argument
131 set_wq_data(work, cwq); in insert_work()
137 list_add_tail(&work->entry, head); in insert_work()
142 struct work_struct *work) in __queue_work() argument
147 insert_work(cwq, work, &cwq->worklist); in __queue_work()
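Ordering matters in insert_work(): the owner back-pointer is published (line 131) before the item becomes findable on the list (line 137). In the kernels this listing matches, an smp_wmb() sits between the two calls, pairing with the smp_rmb() recheck in try_to_grab_pending() further down. Reconstructed shape, simplified, not the complete function:

    set_wq_data(work, cwq);             /* line 131: publish the owner first */
    smp_wmb();                          /* pairs with readers' smp_rmb()     */
    list_add_tail(&work->entry, head);  /* line 137: now make it findable    */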
161 int queue_work(struct workqueue_struct *wq, struct work_struct *work) in queue_work() argument
165 ret = queue_work_on(get_cpu(), wq, work); in queue_work()
184 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) in queue_work_on() argument
188 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { in queue_work_on()
189 BUG_ON(!list_empty(&work->entry)); in queue_work_on()
190 __queue_work(wq_per_cpu(wq, cpu), work); in queue_work_on()
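queue_work()/queue_work_on() only enqueue when they win the test_and_set_bit() on WORK_STRUCT_PENDING (line 188), so queueing an already-pending item is a harmless no-op. A hedged driver-style usage sketch; the struct and function names are illustrative:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct my_dev {
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    static void my_handler(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);
            /* runs in process context on one of dev->wq's worker threads */
            (void)dev;
    }

    static int my_start(struct my_dev *dev)
    {
            dev->wq = create_workqueue("mydrv");
            if (!dev->wq)
                    return -ENOMEM;
            INIT_WORK(&dev->work, my_handler);
            /* nonzero if queued now, 0 if the item was already pending */
            queue_work(dev->wq, &dev->work);
            return 0;
    }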
200 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); in delayed_work_timer_fn()
203 __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work); in delayed_work_timer_fn()
218 return queue_work(wq, &dwork->work); in queue_delayed_work()
238 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
240 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { in queue_delayed_work_on()
242 BUG_ON(!list_empty(&work->entry)); in queue_delayed_work_on()
247 set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id())); in queue_delayed_work_on()
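For the delayed variants, the PENDING bit is taken immediately and the owner recorded (line 247) before the timer is armed; when the timer fires, delayed_work_timer_fn() re-queues the item on the then-current CPU (line 203). A usage sketch of a self-re-arming poller, with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void poll_fn(struct work_struct *work)
    {
            struct delayed_work *dwork =
                    container_of(work, struct delayed_work, work);
            /* ... do the periodic work, then re-arm for ~1 second later */
            schedule_delayed_work(dwork, HZ);
    }

    static DECLARE_DELAYED_WORK(poll_work, poll_fn);

    static void start_polling(void)
    {
            schedule_delayed_work(&poll_work, HZ);
    }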
273 struct work_struct *work = list_entry(cwq->worklist.next, in run_workqueue() local
275 work_func_t f = work->func; in run_workqueue()
285 struct lockdep_map lockdep_map = work->lockdep_map; in run_workqueue()
288 cwq->current_work = work; in run_workqueue()
292 BUG_ON(get_wq_data(work) != cwq); in run_workqueue()
293 work_clear_pending(work); in run_workqueue()
296 f(work); in run_workqueue()
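The worker loop dequeues one item at a time and, crucially, clears PENDING before calling the handler (line 293), so the handler may legally re-queue or even free its own work_struct. A simplified reconstruction of the loop's shape; locking and the lockdep bookkeeping from line 285 are omitted:

    while (!list_empty(&cwq->worklist)) {
            struct work_struct *work = list_entry(cwq->worklist.next,
                                                  struct work_struct, entry);
            work_func_t f = work->func;            /* line 275 */

            cwq->current_work = work;              /* line 288 */
            list_del_init(&work->entry);
            work_clear_pending(work);              /* line 293: re-queueing now legal */
            f(work);                               /* line 296: *work may be gone after */
            cwq->current_work = NULL;
    }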
348 struct work_struct work; member
352 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
354 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
361 INIT_WORK(&barr->work, wq_barrier_func); in insert_wq_barrier()
362 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); in insert_wq_barrier()
366 insert_work(cwq, &barr->work, head); in insert_wq_barrier()
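Flushing works by inserting a dummy "barrier" work item, marked pending by hand (line 362), behind whatever must be waited on; once the barrier's callback runs, everything queued ahead of it has finished. Reconstructed sketch, including the completion that the real structure also carries:

    struct wq_barrier {
            struct work_struct work;               /* line 348 */
            struct completion  done;
    };

    static void wq_barrier_func(struct work_struct *work)
    {
            struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
            complete(&barr->done);                 /* wake the flusher */
    }

    static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                  struct wq_barrier *barr,
                                  struct list_head *head)
    {
            INIT_WORK(&barr->work, wq_barrier_func);                     /* line 361 */
            __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); /* line 362 */
            init_completion(&barr->done);
            insert_work(cwq, &barr->work, head);                         /* line 366 */
    }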
434 int flush_work(struct work_struct *work) in flush_work() argument
441 cwq = get_wq_data(work); in flush_work()
450 if (!list_empty(&work->entry)) { in flush_work()
456 if (unlikely(cwq != get_wq_data(work))) in flush_work()
458 prev = &work->entry; in flush_work()
460 if (cwq->current_work != work) in flush_work()
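flush_work() (line 434) waits for one specific item rather than draining a whole queue: if the item is still on a worklist it plants a barrier right behind it (line 458), and if it is currently running (line 460) it plants the barrier at the head of the list instead. In this era it returns nonzero only when it actually had to wait. A short usage sketch with illustrative names:

    /* make sure dev->work has finished before touching shared state */
    if (flush_work(&dev->work))
            pr_debug("mydrv: work was pending or running, now complete\n");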
479 static int try_to_grab_pending(struct work_struct *work) in try_to_grab_pending() argument
484 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) in try_to_grab_pending()
492 cwq = get_wq_data(work); in try_to_grab_pending()
497 if (!list_empty(&work->entry)) { in try_to_grab_pending()
504 if (cwq == get_wq_data(work)) { in try_to_grab_pending()
505 list_del_init(&work->entry); in try_to_grab_pending()
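try_to_grab_pending() is tri-state: 0 means the item was idle (the test_and_set_bit at line 484 succeeded, so nothing is queued), 1 means the item was stolen off its list (line 505), and a negative value means a queueing was in flight and the caller must retry. The cancel path below (lines 562-566) loops on exactly that; a simplified sketch of its use:

    int ret;
    do {
            ret = try_to_grab_pending(work);  /* 0 idle, 1 stolen, <0 retry */
            wait_on_work(work);               /* wait out a running callback */
    } while (ret < 0);
    work_clear_pending(work);                 /* the item is now fully ours */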
515 struct work_struct *work) in wait_on_cpu_work() argument
521 if (unlikely(cwq->current_work == work)) { in wait_on_cpu_work()
531 static void wait_on_work(struct work_struct *work) in wait_on_work() argument
540 lock_map_acquire(&work->lockdep_map); in wait_on_work()
541 lock_map_release(&work->lockdep_map); in wait_on_work()
543 cwq = get_wq_data(work); in wait_on_work()
551 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); in wait_on_work()
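wait_on_work() briefly acquires and releases work->lockdep_map (lines 540-541) purely to teach lockdep that this wait behaves like taking a lock the handler holds while running. That turns the classic self-deadlock below into a lockdep report instead of a silent hang; a sketch assuming my_dev from the earlier sketch also carries a mutex named lock:

    static void my_locked_handler(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);

            mutex_lock(&dev->lock);    /* the handler takes dev->lock ... */
            /* ... */
            mutex_unlock(&dev->lock);
    }

    /* elsewhere, a waiter: */
    mutex_lock(&dev->lock);
    cancel_work_sync(&dev->work);      /* ... so waiting here can deadlock */
    mutex_unlock(&dev->lock);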
554 static int __cancel_work_timer(struct work_struct *work, in __cancel_work_timer() argument
562 ret = try_to_grab_pending(work); in __cancel_work_timer()
563 wait_on_work(work); in __cancel_work_timer()
566 work_clear_pending(work); in __cancel_work_timer()
591 int cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
593 return __cancel_work_timer(work, NULL); in cancel_work_sync()
608 return __cancel_work_timer(&dwork->work, &dwork->timer); in cancel_delayed_work_sync()
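A typical teardown order built from these primitives: deactivate the timer-backed work first, then the plain work, then the queue. Both cancel_*_sync() calls guarantee the callback is not running on any CPU when they return. Sketch, assuming my_dev also carries a struct delayed_work dwork member:

    static void my_stop(struct my_dev *dev)
    {
            cancel_delayed_work_sync(&dev->dwork);  /* line 608: timer and work */
            cancel_work_sync(&dev->work);           /* line 591: waits if running */
            destroy_workqueue(dev->wq);             /* now safe to tear down */
    }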
620 int schedule_work(struct work_struct *work) in schedule_work() argument
622 return queue_work(keventd_wq, work); in schedule_work()
633 int schedule_work_on(int cpu, struct work_struct *work) in schedule_work_on() argument
635 return queue_work_on(cpu, keventd_wq, work); in schedule_work_on()
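schedule_work()/schedule_work_on() (lines 620-635) are the same queueing calls aimed at the shared keventd_wq (the "events/N" threads), so no private queue is needed. The classic use is deferring from interrupt context; an illustrative sketch:

    #include <linux/interrupt.h>

    static void my_event_fn(struct work_struct *work)
    {
            /* process context: may sleep, take mutexes, etc. */
    }

    static DECLARE_WORK(my_event, my_event_fn);

    static irqreturn_t my_irq(int irq, void *cookie)
    {
            schedule_work(&my_event);   /* handler runs later, out of IRQ context */
            return IRQ_HANDLED;
    }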
690 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
692 INIT_WORK(work, func); in schedule_on_each_cpu()
693 schedule_work_on(cpu, work); in schedule_on_each_cpu()
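schedule_on_each_cpu() (lines 690-693) allocates one work_struct per CPU, queues each with schedule_work_on(), and flushes so that every instance has run before it returns; it must be called from process context. Usage sketch:

    static void per_cpu_poke(struct work_struct *work)
    {
            pr_info("ran on CPU %d\n", smp_processor_id());
    }

    /* from process context; returns 0, or -ENOMEM in this era if the
     * per-CPU allocation fails */
    int ret = schedule_on_each_cpu(per_cpu_poke);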
723 fn(&ew->work); in execute_in_process_context()
727 INIT_WORK(&ew->work, fn); in execute_in_process_context()
728 schedule_work(&ew->work); in execute_in_process_context()
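execute_in_process_context() (lines 723-728) calls fn directly when the caller can already sleep, and otherwise defers it through schedule_work(), with @ew supplying the work_struct storage for the deferred case. A usage sketch; the struct name is illustrative:

    #include <linux/slab.h>

    struct my_obj {
            struct execute_work ew;
            /* ... payload ... */
    };

    static void my_release(struct work_struct *work)
    {
            struct my_obj *obj = container_of(work, struct my_obj, ew.work);
            kfree(obj);
    }

    /* callable from any context: */
    execute_in_process_context(my_release, &obj->ew);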
977 struct work_struct work; member
985 struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); in do_work_for_cpu()
1003 INIT_WORK(&wfc.work, do_work_for_cpu); in work_on_cpu()
1006 queue_work_on(cpu, work_on_cpu_wq, &wfc.work); in work_on_cpu()
1007 flush_work(&wfc.work); in work_on_cpu()
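work_on_cpu() (lines 977-1007) wraps a function and its argument in a work item, queues it on the target CPU with queue_work_on() (line 1006), and collects the return value after flush_work() (line 1007). In the kernels this listing matches, the prototype is roughly long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg). Sketch:

    static long probe_on_cpu(void *arg)
    {
            /* runs pinned to the requested CPU's work_on_cpu_wq thread */
            return 0;
    }

    long ret = work_on_cpu(1, probe_on_cpu, NULL);  /* fn's return value */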