Searched refs:wq (Results 1 – 6 of 6) sorted by relevance

/kernel/workqueue.c
    206  struct workqueue_struct *wq;  /* I: the owning workqueue */  [member]
    380  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
    395  #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \  [argument]
    397          !lockdep_is_held(&wq->mutex) && \
    450  #define for_each_pwq(pwq, wq) \  [argument]
    451          list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
    452                  lockdep_is_held(&(wq->mutex)))
    585  static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,  [argument, in unbound_pwq_by_node()]
    588          assert_rcu_or_wq_mutex_or_pool_mutex(wq);  [in unbound_pwq_by_node()]
    597          return wq->dfl_pwq;  [in unbound_pwq_by_node()]
    [all …]
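
These hits are internal to kernel/workqueue.c: a workqueue's pwqs list may only be walked under RCU or with wq->mutex (or a pool mutex) held, which is what assert_rcu_or_wq_mutex_or_pool_mutex() and the lockdep_is_held() condition passed to for_each_pwq() enforce. A minimal sketch of that locking rule, assuming it sits inside workqueue.c itself (for_each_pwq() and struct pool_workqueue are file-local); example_walk_pwqs() is a hypothetical helper:

    /* Hypothetical helper, only meaningful inside kernel/workqueue.c. */
    static void example_walk_pwqs(struct workqueue_struct *wq)
    {
            struct pool_workqueue *pwq;

            /*
             * Either rcu_read_lock() or wq->mutex satisfies the lockdep
             * condition in for_each_pwq(); take the mutex so pwqs cannot
             * be added or removed while we walk the list.
             */
            mutex_lock(&wq->mutex);
            for_each_pwq(pwq, wq) {
                    /* inspect the pool_workqueue here */
            }
            mutex_unlock(&wq->mutex);
    }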

/kernel/watch_queue.c
    467  struct watch_queue *wq = rcu_access_pointer(w->queue);  [local, in add_one_watch()]
    468  if (wqueue == wq && watch->id == w->id)  [in add_one_watch()]
    530  int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,  [argument, in remove_watch_from_object()]
    544  (watch->id == id && rcu_access_pointer(watch->queue) == wq))  [in remove_watch_from_object()]
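
Both watch_queue.c hits fetch the RCU-managed queue pointer with rcu_access_pointer() because the value is only compared against a caller-supplied watch_queue, never dereferenced, so no rcu_read_lock() is required. A small sketch of that pattern, using hypothetical example_* types:

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    struct example_queue;

    struct example_watch {
            struct example_queue __rcu *queue;
            u64 id;
    };

    /*
     * Compare an RCU-managed pointer by value only. rcu_access_pointer()
     * is the accessor to use when the pointer is not dereferenced, so this
     * check may run outside an RCU read-side critical section.
     */
    static bool example_watch_matches(struct example_watch *w,
                                      struct example_queue *wq, u64 id)
    {
            return w->id == id && rcu_access_pointer(w->queue) == wq;
    }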

/kernel/sched/wait.c
    349  int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)  [argument, in do_wait_intr()]
    352          __add_wait_queue_entry_tail(wq, wait);  [in do_wait_intr()]
    358          spin_unlock(&wq->lock);  [in do_wait_intr()]
    360          spin_lock(&wq->lock);  [in do_wait_intr()]
    366  int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)  [argument, in do_wait_intr_irq()]
    369          __add_wait_queue_entry_tail(wq, wait);  [in do_wait_intr_irq()]
    375          spin_unlock_irq(&wq->lock);  [in do_wait_intr_irq()]
    377          spin_lock_irq(&wq->lock);  [in do_wait_intr_irq()]
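
do_wait_intr() and do_wait_intr_irq() are the helpers behind the wait_event_interruptible_locked*() variants: they queue the entry on the head, check for a pending signal, and drop wq->lock around the sleep. For comparison, the usual open-coded sleep on a wait_queue_head_t looks like the sketch below; example_waitq, example_condition and example_wait() are hypothetical names:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
    static bool example_condition;

    static int example_wait(void)
    {
            DEFINE_WAIT(wait);
            int ret = 0;

            for (;;) {
                    /* mark ourselves sleeping and queue before re-checking */
                    prepare_to_wait(&example_waitq, &wait, TASK_INTERRUPTIBLE);
                    if (READ_ONCE(example_condition))
                            break;
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            finish_wait(&example_waitq, &wait);
            return ret;
    }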

/kernel/locking/test-ww_mutex.c
    17   struct workqueue_struct *wq;  [variable]
    305  queue_work(wq, &cycles[n].work);  [in __test_cycle()]
    307  flush_workqueue(wq);  [in __test_cycle()]
    571  queue_work(wq, &stress->work);  [in stress()]
    575  flush_workqueue(wq);  [in stress()]
    590  wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);  [in test_ww_mutex_init()]
    591  if (!wq)  [in test_ww_mutex_init()]
    631  destroy_workqueue(wq);  [in test_ww_mutex_exit()]
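
test-ww_mutex.c shows the full lifecycle of a dedicated workqueue: alloc_workqueue() at init, queue_work() plus flush_workqueue() to run and drain test items, destroy_workqueue() at exit. A minimal sketch of the same lifecycle as a standalone module, with hypothetical example_* names:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static void example_work_fn(struct work_struct *work)
    {
            /* runs in process context on an unbound worker thread */
    }
    static DECLARE_WORK(example_work, example_work_fn);

    static int __init example_init(void)
    {
            example_wq = alloc_workqueue("example-wq", WQ_UNBOUND, 0);
            if (!example_wq)
                    return -ENOMEM;

            queue_work(example_wq, &example_work);
            flush_workqueue(example_wq);    /* wait for the item to finish */
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");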

/kernel/rcu/refscale.c
    98   wait_queue_head_t wq;  [member]
    533  wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||  [in ref_scale_reader()]
    681  wake_up(&reader_tasks[r].wq);  [in main_func()]
    852  init_waitqueue_head(&reader_tasks[i].wq);  [in ref_scale_init()]
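
refscale.c gives each reader task its own wait_queue_head_t and uses it as a start gate: the reader sleeps in wait_event() on a condition that includes an smp_load_acquire() of the start flag, and the controller publishes the flag before wake_up(). A reduced sketch of that pattern with hypothetical example_* names:

    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct example_task {
            wait_queue_head_t wq;
            int start;
    };

    static void example_task_init(struct example_task *t)
    {
            t->start = 0;
            init_waitqueue_head(&t->wq);
    }

    /* reader side: sleep until the controller releases us */
    static void example_wait_for_start(struct example_task *t)
    {
            wait_event(t->wq, smp_load_acquire(&t->start));
    }

    /* controller side: publish the flag, then wake the sleeper */
    static void example_kick(struct example_task *t)
    {
            smp_store_release(&t->start, 1);
            wake_up(&t->wq);
    }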

/kernel/events/uprobes.c
    99    wait_queue_head_t wq;  /* if all slots are busy */  [member]
    1505  init_waitqueue_head(&area->wq);  [in __create_xol_area()]
    1597  wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));  [in xol_take_insn_slot()]
    1661  if (waitqueue_active(&area->wq))  [in xol_free_insn_slot()]
    1662  wake_up(&area->wq);  [in xol_free_insn_slot()]
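
uprobes.c embeds a wait_queue_head_t in the XOL area so a task needing an instruction slot can sleep in wait_event() until slot_count drops, and the freeing path only calls wake_up() when waitqueue_active() reports a sleeper. A reduced sketch of a counted-slot variant of that pattern, with hypothetical example_* names:

    #include <linux/wait.h>
    #include <linux/atomic.h>

    struct example_area {
            atomic_t                slots_in_use;
            wait_queue_head_t       wq;             /* if all slots are busy */
    };

    static void example_area_init(struct example_area *area)
    {
            atomic_set(&area->slots_in_use, 0);
            init_waitqueue_head(&area->wq);
    }

    static void example_take_slot(struct example_area *area, int max_slots)
    {
            /* sleeps until the counter can be raised without passing max_slots */
            wait_event(area->wq,
                       atomic_add_unless(&area->slots_in_use, 1, max_slots));
    }

    static void example_free_slot(struct example_area *area)
    {
            atomic_dec(&area->slots_in_use);
            /*
             * Order the decrement before the waiter check; without a
             * barrier, waitqueue_active() can miss a sleeper (see its
             * kerneldoc). An unconditional wake_up() is the simpler
             * alternative.
             */
            smp_mb__after_atomic();
            if (waitqueue_active(&area->wq))
                    wake_up(&area->wq);
    }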