/fs/autofs/ |
D | waitq.c |
    17  struct autofs_wait_queue *wq, *nwq;  in autofs_catatonic_mode() local
    28  wq = sbi->queues;  in autofs_catatonic_mode()
    30  while (wq) {  in autofs_catatonic_mode()
    31  nwq = wq->next;  in autofs_catatonic_mode()
    32  wq->status = -ENOENT; /* Magic is gone - report failure */  in autofs_catatonic_mode()
    33  kfree(wq->name.name - wq->offset);  in autofs_catatonic_mode()
    34  wq->name.name = NULL;  in autofs_catatonic_mode()
    35  wake_up_interruptible(&wq->queue);  in autofs_catatonic_mode()
    36  if (!--wq->wait_ctr)  in autofs_catatonic_mode()
    37  kfree(wq);  in autofs_catatonic_mode()
    [all …]
|
/fs/btrfs/ |
D | async-thread.c |
    50  struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)  in btrfs_workqueue_owner() argument
    52  return wq->fs_info;  in btrfs_workqueue_owner()
    57  return work->wq->fs_info;  in btrfs_work_owner()
    60  bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)  in btrfs_workqueue_normal_congested() argument
    68  if (wq->thresh == NO_THRESHOLD)  in btrfs_workqueue_normal_congested()
    71  return atomic_read(&wq->pending) > wq->thresh * 2;  in btrfs_workqueue_normal_congested()
    121  static inline void thresh_queue_hook(struct btrfs_workqueue *wq)  in thresh_queue_hook() argument
    123  if (wq->thresh == NO_THRESHOLD)  in thresh_queue_hook()
    125  atomic_inc(&wq->pending);  in thresh_queue_hook()
    133  static inline void thresh_exec_hook(struct btrfs_workqueue *wq)  in thresh_exec_hook() argument
    [all …]
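The async-thread.c hits show btrfs throttling its private workqueue with an atomic pending counter: bumped in thresh_queue_hook(), drained in thresh_exec_hook(), and compared against a threshold to report congestion. A minimal sketch of that counter idea; demo_wq, DEMO_NO_THRESHOLD and the helper names are invented for illustration, not btrfs API.

    #include <linux/atomic.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define DEMO_NO_THRESHOLD  (-1)

    /* Hypothetical throttled-workqueue wrapper. */
    struct demo_wq {
        struct workqueue_struct *normal;   /* backing kernel workqueue */
        atomic_t pending;                  /* queued but not yet executed */
        int thresh;                        /* DEMO_NO_THRESHOLD disables it */
    };

    /* Called when a work item is queued. */
    static void demo_thresh_queue_hook(struct demo_wq *wq)
    {
        if (wq->thresh == DEMO_NO_THRESHOLD)
            return;
        atomic_inc(&wq->pending);
    }

    /* Called from the work function once an item starts executing. */
    static void demo_thresh_exec_hook(struct demo_wq *wq)
    {
        if (wq->thresh == DEMO_NO_THRESHOLD)
            return;
        atomic_dec(&wq->pending);
    }

    /* Report congestion once the backlog is well past the threshold,
     * mirroring the "> thresh * 2" test in the listing. */
    static bool demo_congested(const struct demo_wq *wq)
    {
        if (wq->thresh == DEMO_NO_THRESHOLD)
            return false;
        return atomic_read(&wq->pending) > wq->thresh * 2;
    }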
|
D | async-thread.h |
    25  struct btrfs_workqueue *wq;  member
    36  void btrfs_queue_work(struct btrfs_workqueue *wq,
    38  void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
    39  void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
    41  struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq);
    42  bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
    43  void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
|
D | misc.h |
    13  static inline void cond_wake_up(struct wait_queue_head *wq)  in cond_wake_up() argument
    19  if (wq_has_sleeper(wq))  in cond_wake_up()
    20  wake_up(wq);  in cond_wake_up()
    23  static inline void cond_wake_up_nomb(struct wait_queue_head *wq)  in cond_wake_up_nomb() argument
    31  if (waitqueue_active(wq))  in cond_wake_up_nomb()
    32  wake_up(wq);  in cond_wake_up_nomb()
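cond_wake_up() and cond_wake_up_nomb() differ only in the emptiness check: wq_has_sleeper() issues a full memory barrier before testing the queue, so the waker's store to the condition cannot be reordered past the test, while waitqueue_active() is the barrier-free variant for callers that already have that ordering. A sketch of a waker/waiter pair using the barriered form; the demo_dev structure is hypothetical.

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Hypothetical device with one flag and one wait queue. */
    struct demo_dev {
        struct wait_queue_head waiters;
        bool ready;
    };

    /* Waker: publish the condition first, then wake only if the queue can
     * have sleepers.  wq_has_sleeper() contains a full memory barrier, so
     * the store to ->ready cannot be reordered after the emptiness test. */
    static void demo_complete(struct demo_dev *dev)
    {
        WRITE_ONCE(dev->ready, true);
        if (wq_has_sleeper(&dev->waiters))
            wake_up(&dev->waiters);
    }

    /* Waiter: wait_event() re-checks the condition after queueing itself,
     * which closes the race with the waker above. */
    static void demo_wait_ready(struct demo_dev *dev)
    {
        wait_event(dev->waiters, READ_ONCE(dev->ready));
    }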
|
D | extent-io-tree.c |
    129  ASSERT(!waitqueue_active(&state->wq));  in extent_io_tree_release()
    153  init_waitqueue_head(&state->wq);  in alloc_extent_state()
    520  wake_up(&state->wq);  in clear_state_bit()
    668  wake_up(&state->wq);  in __clear_extent_bit()
    707  prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);  in wait_on_state()
    711  finish_wait(&state->wq, &wait);  in wait_on_state()
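wait_on_state() in the listing open-codes a sleep with prepare_to_wait()/finish_wait(), dropping the tree lock around schedule() and being woken by the wake_up() calls when the bit clears. A generic version of that loop, assuming a hypothetical spinlock-protected object; demo_state and DEMO_LOCKED are invented names.

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    #define DEMO_LOCKED 0    /* hypothetical bit in ->flags */

    /* Hypothetical object guarded by a spinlock, with its own waitqueue. */
    struct demo_state {
        spinlock_t lock;
        unsigned long flags;
        struct wait_queue_head wq;
    };

    /* Sleep until DEMO_LOCKED is clear.  Called with ->lock held; the lock
     * is dropped and retaken around schedule(), like wait_on_state(). */
    static void demo_wait_unlocked(struct demo_state *state)
    {
        DEFINE_WAIT(wait);

        while (test_bit(DEMO_LOCKED, &state->flags)) {
            /* Queue ourselves before dropping the lock so a concurrent
             * demo_unlock() cannot wake the queue while we are off it. */
            prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
            spin_unlock(&state->lock);
            schedule();
            spin_lock(&state->lock);
            finish_wait(&state->wq, &wait);
        }
    }

    /* Clearing side: flip the bit under the lock, then wake sleepers. */
    static void demo_unlock(struct demo_state *state)
    {
        spin_lock(&state->lock);
        clear_bit(DEMO_LOCKED, &state->flags);
        spin_unlock(&state->lock);
        wake_up(&state->wq);
    }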
|
D | ordered-data.c |
    303  struct btrfs_workqueue *wq;  in btrfs_mark_ordered_io_finished() local
    310  wq = fs_info->endio_freespace_worker;  in btrfs_mark_ordered_io_finished()
    312  wq = fs_info->endio_write_workers;  in btrfs_mark_ordered_io_finished()
    413  btrfs_queue_work(wq, &entry->work);  in btrfs_mark_ordered_io_finished()
|
/fs/xfs/ |
D | xfs_pwork.c |
    74  pctl->wq = alloc_workqueue("%s-%d",  in xfs_pwork_init()
    77  if (!pctl->wq)  in xfs_pwork_init()
    97  queue_work(pctl->wq, &pwork->work);  in xfs_pwork_queue()
    105  destroy_workqueue(pctl->wq);  in xfs_pwork_destroy()
    106  pctl->wq = NULL;  in xfs_pwork_destroy()
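xfs_pwork.c shows the usual workqueue lifecycle: alloc_workqueue() at setup, queue_work() per item, destroy_workqueue() at teardown. A self-contained sketch of the same lifecycle with a hypothetical job type; the demo_* names are illustrative, not xfs API.

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* Hypothetical work item handed to a private workqueue. */
    struct demo_job {
        struct work_struct work;
        int payload;
    };

    static struct workqueue_struct *demo_wq;

    static void demo_job_fn(struct work_struct *work)
    {
        struct demo_job *job = container_of(work, struct demo_job, work);

        pr_info("demo job ran, payload=%d\n", job->payload);
        kfree(job);
    }

    static int demo_init(void)
    {
        /* WQ_UNBOUND: do not pin work to the submitting CPU;
         * max_active 0: accept the default concurrency limit. */
        demo_wq = alloc_workqueue("demo-wq", WQ_UNBOUND, 0);
        if (!demo_wq)
            return -ENOMEM;
        return 0;
    }

    static int demo_submit(int payload)
    {
        struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

        if (!job)
            return -ENOMEM;
        job->payload = payload;
        INIT_WORK(&job->work, demo_job_fn);
        queue_work(demo_wq, &job->work);
        return 0;
    }

    static void demo_exit(void)
    {
        /* destroy_workqueue() drains pending work before tearing down,
         * matching the destroy-then-NULL sequence in xfs_pwork_destroy(). */
        destroy_workqueue(demo_wq);
        demo_wq = NULL;
    }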
|
D | xfs_log_priv.h |
    608  struct wait_queue_head *wq,  in xlog_wait() argument
    614  add_wait_queue_exclusive(wq, &wait);  in xlog_wait()
    618  remove_wait_queue(wq, &wait);  in xlog_wait()
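xlog_wait() is an open-coded exclusive sleep: queue the task as an exclusive waiter, mark it uninterruptible, drop the caller's lock, and schedule. A hedged reconstruction of that shape; the spinlock parameter reflects how the listing reads, not a verified xfs prototype.

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Sleep on @wq until woken, dropping @lock while asleep. */
    static void demo_wait_exclusive(struct wait_queue_head *wq, spinlock_t *lock)
    {
        DECLARE_WAITQUEUE(wait, current);

        /* Exclusive waiters are woken one at a time by wake_up(). */
        add_wait_queue_exclusive(wq, &wait);
        __set_current_state(TASK_UNINTERRUPTIBLE);
        spin_unlock(lock);
        schedule();
        remove_wait_queue(wq, &wait);
    }

Note the ordering: the task is queued and marked sleeping before the lock is dropped, so a wake-up issued after the unlock cannot be missed; callers are expected to test their condition under the lock before calling this and to re-check it afterwards.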
|
D | xfs_pwork.h | 18 struct workqueue_struct *wq; member
|
/fs/ |
D | userfaultfd.c |
    93  wait_queue_entry_t wq;  member
    126  static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,  in userfaultfd_wake_function() argument
    134  uwq = container_of(wq, struct userfaultfd_wait_queue, wq);  in userfaultfd_wake_function()
    147  ret = wake_up_state(wq->private, mode);  in userfaultfd_wake_function()
    160  list_del_init(&wq->entry);  in userfaultfd_wake_function()
    499  init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);  in handle_userfault()
    500  uwq.wq.private = current;  in handle_userfault()
    513  __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);  in handle_userfault()
    548  if (!list_empty_careful(&uwq.wq.entry)) {  in handle_userfault()
    554  list_del(&uwq.wq.entry);  in handle_userfault()
    [all …]
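userfaultfd.c embeds a wait_queue_entry in its own per-fault structure, installs a custom callback with init_waitqueue_func_entry(), and lets the callback recover the container via container_of() to decide whom to wake. A sketch of that technique with hypothetical names; demo_waiter, the tag-matching logic, and the locking around __add_wait_queue() are assumptions for illustration, not the userfaultfd protocol.

    #include <linux/sched/signal.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Hypothetical waiter wrapper; only the shape (an embedded
     * wait_queue_entry plus container_of() in the callback) mirrors
     * userfaultfd, none of the fields do. */
    struct demo_waiter {
        unsigned long tag;              /* what this waiter is waiting for */
        struct wait_queue_entry wq;     /* embedded wait queue entry */
        bool done;
    };

    /* Custom wake callback: wake only waiters whose tag matches @key. */
    static int demo_wake_function(struct wait_queue_entry *wq, unsigned int mode,
                                  int flags, void *key)
    {
        struct demo_waiter *w = container_of(wq, struct demo_waiter, wq);

        if (w->tag != (unsigned long)key)
            return 0;                   /* not ours: keep scanning the queue */
        WRITE_ONCE(w->done, true);
        return wake_up_state(wq->private, mode);
    }

    /* Waiter: register the entry under the head's lock, then sleep.  Real
     * users also re-check their own condition before sleeping; omitted. */
    static void demo_wait_for_tag(struct wait_queue_head *wqh, unsigned long tag)
    {
        struct demo_waiter w = { .tag = tag };

        init_waitqueue_func_entry(&w.wq, demo_wake_function);
        w.wq.private = current;

        spin_lock_irq(&wqh->lock);
        __add_wait_queue(wqh, &w.wq);
        set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_irq(&wqh->lock);

        while (!READ_ONCE(w.done) && !signal_pending(current)) {
            schedule();
            set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        /* The wake callback does not dequeue the entry, so do it here. */
        spin_lock_irq(&wqh->lock);
        if (!list_empty(&w.wq.entry))
            list_del_init(&w.wq.entry);
        spin_unlock_irq(&wqh->lock);
    }

    /* Waker: pass the tag as the wake key; non-matching entries ignore it. */
    static void demo_wake_tag(struct wait_queue_head *wqh, unsigned long tag)
    {
        __wake_up(wqh, TASK_NORMAL, 0, (void *)tag);
    }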
|
D | eventpoll.c |
    190  wait_queue_head_t wq;  member
    677  if (waitqueue_active(&ep->wq))  in ep_done_scan()
    678  wake_up(&ep->wq);  in ep_done_scan()
    992  init_waitqueue_head(&ep->wq);  in ep_alloc()
    1227  if (waitqueue_active(&ep->wq)) {  in ep_poll_callback()
    1244  wake_up(&ep->wq);  in ep_poll_callback()
    1600  if (waitqueue_active(&ep->wq))  in ep_insert()
    1601  wake_up(&ep->wq);  in ep_insert()
    1676  if (waitqueue_active(&ep->wq))  in ep_modify()
    1677  wake_up(&ep->wq);  in ep_modify()
    [all …]
|
D | dax.c |
    199  wait_queue_head_t *wq;  in dax_wake_entry() local
    201  wq = dax_entry_waitqueue(xas, entry, &key);  in dax_wake_entry()
    209  if (waitqueue_active(wq))  in dax_wake_entry()
    210  __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);  in dax_wake_entry()
    227  wait_queue_head_t *wq;  in get_unlocked_entry() local
    241  wq = dax_entry_waitqueue(xas, entry, &ewait.key);  in get_unlocked_entry()
    242  prepare_to_wait_exclusive(wq, &ewait.wait,  in get_unlocked_entry()
    247  finish_wait(wq, &ewait.wait);  in get_unlocked_entry()
    260  wait_queue_head_t *wq;  in wait_entry_unlocked() local
    265  wq = dax_entry_waitqueue(xas, entry, &ewait.key);  in wait_entry_unlocked()
    [all …]
|
D | inode.c |
    2237  wait_queue_head_t *wq;  in __wait_on_freeing_inode() local
    2239  wq = bit_waitqueue(&inode->i_state, __I_NEW);  in __wait_on_freeing_inode()
    2240  prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);  in __wait_on_freeing_inode()
    2244  finish_wait(wq, &wait.wq_entry);  in __wait_on_freeing_inode()
    2392  wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);  in __inode_dio_wait() local
    2396  prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);  in __inode_dio_wait()
    2400  finish_wait(wq, &q.wq_entry);  in __inode_dio_wait()
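Both __wait_on_freeing_inode() and __inode_dio_wait() avoid a per-object wait_queue_head by using the shared hashed bit waitqueues: bit_waitqueue() maps an address/bit pair to a queue head, and the waker calls wake_up_bit(). A sketch against a hypothetical flags word; demo_obj and DEMO_BUSY are invented.

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    #define DEMO_BUSY 0    /* hypothetical bit in ->flags */

    struct demo_obj {
        unsigned long flags;
    };

    /* Sleep until DEMO_BUSY clears, using the shared hashed bit waitqueues
     * instead of embedding a wait_queue_head in every object. */
    static void demo_wait_not_busy(struct demo_obj *obj)
    {
        DEFINE_WAIT_BIT(wait, &obj->flags, DEMO_BUSY);
        struct wait_queue_head *wq = bit_waitqueue(&obj->flags, DEMO_BUSY);

        while (test_bit(DEMO_BUSY, &obj->flags)) {
            prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
            if (test_bit(DEMO_BUSY, &obj->flags))
                schedule();
        }
        finish_wait(wq, &wait.wq_entry);
    }

    /* Clearing side: release the bit, order the store against the waiter's
     * check, then wake bit waiters (clear_and_wake_up_bit() bundles this). */
    static void demo_clear_busy(struct demo_obj *obj)
    {
        clear_bit_unlock(DEMO_BUSY, &obj->flags);
        smp_mb__after_atomic();
        wake_up_bit(&obj->flags, DEMO_BUSY);
    }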
|
D | direct-io.c |
    570  struct workqueue_struct *wq = alloc_workqueue("dio/%s",  in sb_init_dio_done_wq() local
    573  if (!wq)  in sb_init_dio_done_wq()
    578  old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);  in sb_init_dio_done_wq()
    581  destroy_workqueue(wq);  in sb_init_dio_done_wq()
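sb_init_dio_done_wq() creates its workqueue lazily and publishes it with cmpxchg(), so concurrent first callers race harmlessly: whoever loses destroys the duplicate. The same publish-once idiom against a hypothetical global pointer:

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    /* Hypothetical lazily-created singleton workqueue. */
    static struct workqueue_struct *demo_done_wq;

    /* Create demo_done_wq on first use.  Concurrent callers may both
     * allocate, but cmpxchg() lets only one pointer win; the loser
     * destroys its duplicate, as sb_init_dio_done_wq() does. */
    static int demo_init_done_wq(void)
    {
        struct workqueue_struct *wq, *old;

        if (READ_ONCE(demo_done_wq))
            return 0;                       /* already set up */

        wq = alloc_workqueue("demo-done", WQ_MEM_RECLAIM, 0);
        if (!wq)
            return -ENOMEM;

        old = cmpxchg(&demo_done_wq, NULL, wq);
        if (old)
            destroy_workqueue(wq);          /* lost the race */
        return 0;
    }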
|
/fs/jfs/ |
D | jfs_lock.h |
    22  #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \  argument
    26  add_wait_queue(&wq, &__wait); \
    36  remove_wait_queue(&wq, &__wait); \
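__SLEEP_COND() wraps the classic open-coded wait: add the task to the queue, loop setting the task state and testing the condition, and run caller-supplied unlock/lock commands around the sleep. Most of the macro body is elided in the listing, so the following is a generic sketch of the pattern, not a reconstruction of the jfs macro.

    #include <linux/sched.h>
    #include <linux/wait.h>

    #define demo_sleep_cond(wq, cond, lock_cmd, unlock_cmd)          \
    do {                                                             \
        DECLARE_WAITQUEUE(__wait, current);                          \
                                                                     \
        add_wait_queue(&(wq), &__wait);                              \
        for (;;) {                                                   \
            set_current_state(TASK_UNINTERRUPTIBLE);                 \
            if (cond)       /* re-checked after every wakeup */      \
                break;                                               \
            unlock_cmd;     /* drop the lock before sleeping */      \
            schedule();                                              \
            lock_cmd;       /* retake it before re-checking */       \
        }                                                            \
        __set_current_state(TASK_RUNNING);                           \
        remove_wait_queue(&(wq), &__wait);                           \
    } while (0)

A spinlock-protected caller would use it roughly as demo_sleep_cond(obj->waitq, obj->ready, spin_lock(&obj->lock), spin_unlock(&obj->lock)); setting the task state before testing cond is what prevents a wakeup from being lost between the test and schedule().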
|
/fs/nfs/blocklayout/ |
D | rpc_pipefs.c |
    61  DECLARE_WAITQUEUE(wq, current);  in bl_resolve_deviceid()
    86  add_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
    89  remove_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
    95  remove_wait_queue(&nn->bl_wq, &wq);  in bl_resolve_deviceid()
|
/fs/nfs/ |
D | callback.c |
    110  DEFINE_WAIT(wq);  in nfs41_callback_svc()
    119  prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);  in nfs41_callback_svc()
    126  finish_wait(&serv->sv_cb_waitq, &wq);  in nfs41_callback_svc()
    135  finish_wait(&serv->sv_cb_waitq, &wq);  in nfs41_callback_svc()
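nfs41_callback_svc() is a service thread that sleeps interruptibly on a request queue, calling prepare_to_wait() before checking for work and finish_wait() on both the dispatch path and the idle path. A sketch of that loop shape with a hypothetical request list; the kthread_should_stop() exit test and the plain spinlock are assumptions about the surrounding thread, not taken from the NFS code.

    #include <linux/kthread.h>
    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Hypothetical request queue serviced by one kernel thread. */
    struct demo_svc {
        spinlock_t lock;
        struct list_head requests;
        struct wait_queue_head waitq;
    };

    /* Producer: queue a request and kick the service thread. */
    static void demo_svc_queue(struct demo_svc *svc, struct list_head *req)
    {
        spin_lock(&svc->lock);
        list_add_tail(req, &svc->requests);
        spin_unlock(&svc->lock);
        wake_up(&svc->waitq);
    }

    /* Service thread: sleep interruptibly until a request or stop arrives. */
    static int demo_svc_thread(void *data)
    {
        struct demo_svc *svc = data;
        DEFINE_WAIT(wait);

        while (!kthread_should_stop()) {
            prepare_to_wait(&svc->waitq, &wait, TASK_INTERRUPTIBLE);

            spin_lock(&svc->lock);
            if (!list_empty(&svc->requests)) {
                struct list_head *req = svc->requests.next;

                list_del_init(req);
                spin_unlock(&svc->lock);
                finish_wait(&svc->waitq, &wait);
                /* ... process @req here ... */
                continue;
            }
            spin_unlock(&svc->lock);

            schedule();
            finish_wait(&svc->waitq, &wait);
        }
        return 0;
    }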
|
D | inode.c |
    2308  struct workqueue_struct *wq;  in nfsiod_start() local
    2310  wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);  in nfsiod_start()
    2311  if (wq == NULL)  in nfsiod_start()
    2313  nfsiod_workqueue = wq;  in nfsiod_start()
    2322  struct workqueue_struct *wq;  in nfsiod_stop() local
    2324  wq = nfsiod_workqueue;  in nfsiod_stop()
    2325  if (wq == NULL)  in nfsiod_stop()
    2328  destroy_workqueue(wq);  in nfsiod_stop()
|
D | unlink.c |
    127  alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq);  in nfs_call_unlink()
    187  init_waitqueue_head(&data->wq);  in nfs_async_unlink()
|
/fs/jffs2/ |
D | os-linux.h |
    41  #define sleep_on_spinunlock(wq, s) \  argument
    44  add_wait_queue((wq), &__wait); \
    48  remove_wait_queue((wq), &__wait); \
|
/fs/ocfs2/dlm/ |
D | dlmmaster.c |
    263  init_waitqueue_head(&mle->wq);  in dlm_init_mle()
    539  init_waitqueue_head(&res->wq);  in dlm_init_lockres()
    653  wake_up(&res->wq);  in dlm_lockres_drop_inflight_ref()
    988  wake_up(&res->wq);  in dlm_get_lock_resource()
    1104  (void)wait_event_timeout(mle->wq,  in dlm_wait_for_lock_mastery()
    1745  wake_up(&res->wq);  in dlm_do_assert_master()
    1924  wake_up(&mle->wq);  in dlm_assert_master_handler()
    1945  wake_up(&res->wq);  in dlm_assert_master_handler()
    2043  wake_up(&res->wq);  in dlm_assert_master_post_handler()
    2396  wake_up(&res->wq);  in dlm_deref_lockres_done_handler()
    [all …]
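dlm_wait_for_lock_mastery() pairs the many wake_up() calls in this file with wait_event_timeout(), which sleeps until its condition is true or the timeout lapses. A small sketch; demo_op and the 5-second timeout are invented.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    /* Hypothetical in-flight operation with a response flag. */
    struct demo_op {
        struct wait_queue_head wq;
        bool responded;
    };

    /* wait_event_timeout() returns the remaining jiffies (at least 1) if
     * the condition became true, or 0 if the timeout expired first. */
    static int demo_wait_for_response(struct demo_op *op)
    {
        long ret;

        ret = wait_event_timeout(op->wq, READ_ONCE(op->responded),
                                 msecs_to_jiffies(5000));
        return ret ? 0 : -ETIMEDOUT;
    }

    /* Completion side: publish the result, then wake the waiter. */
    static void demo_respond(struct demo_op *op)
    {
        WRITE_ONCE(op->responded, true);
        wake_up(&op->wq);
    }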
|
D | dlmconvert.c |
    77  wake_up(&res->wq);  in dlmconvert_master()
    341  wake_up(&res->wq);  in dlmconvert_remote()
    528  wake_up(&res->wq);  in dlm_convert_lock_handler()
|
D | dlmthread.c |
    48  add_wait_queue(&res->wq, &wait);  in __dlm_wait_on_lockres_flags()
    57  remove_wait_queue(&res->wq, &wait);  in __dlm_wait_on_lockres_flags()
    271  wake_up(&res->wq);  in dlm_purge_lockres()
|
/fs/ext4/ |
D | page-io.c |
    226  struct workqueue_struct *wq;  in ext4_add_complete_io() local
    233  wq = sbi->rsv_conversion_wq;  in ext4_add_complete_io()
    235  queue_work(wq, &ei->i_rsv_conversion_work);  in ext4_add_complete_io()
|
/fs/afs/ |
D | dir_silly.c |
    242  DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);  in afs_silly_iput()
    248  alias = d_alloc_parallel(dentry->d_parent, &dentry->d_name, &wq);  in afs_silly_iput()
|