
Searched refs:work (Results 1 – 25 of 117) sorted by relevance


/fs/btrfs/
async-thread.c
67 static void normal_work_helper(struct btrfs_work *work);
72 struct btrfs_work *work = container_of(arg, struct btrfs_work, \
74 normal_work_helper(work); \
84 btrfs_work_owner(struct btrfs_work *work) in btrfs_work_owner() argument
86 return work->wq->fs_info; in btrfs_work_owner()
271 struct btrfs_work *work; in run_ordered_work() local
281 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
283 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
292 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
294 trace_btrfs_ordered_sched(work); in run_ordered_work()
[all …]
async-thread.h
76 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
81 struct btrfs_work *work);
84 void btrfs_set_work_high_priority(struct btrfs_work *work);
85 struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
raid56.c
89 struct btrfs_work work; member
182 static void rmw_work(struct btrfs_work *work);
183 static void read_rebuild_work(struct btrfs_work *work);
1485 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_rmw_stripe()
1489 &rbio->work); in async_rmw_stripe()
1494 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_read_rebuild()
1498 &rbio->work); in async_read_rebuild()
1657 struct btrfs_work work; member
1721 static void unplug_work(struct btrfs_work *work) in unplug_work() argument
1724 plug = container_of(work, struct btrfs_plug_cb, work); in unplug_work()
[all …]
scrub.c
108 struct btrfs_work work; member
128 struct btrfs_work work; member
150 struct btrfs_work work; member
213 struct btrfs_work work; member
231 struct btrfs_work work; member
278 static void scrub_bio_end_io_worker(struct btrfs_work *work);
295 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
302 static void copy_nocow_pages_worker(struct btrfs_work *work);
483 btrfs_init_work(&sbio->work, btrfs_scrub_helper, in scrub_setup_ctx()
790 static void scrub_fixup_nodatasum(struct btrfs_work *work) in scrub_fixup_nodatasum() argument
[all …]
reada.c
93 struct btrfs_work work; member
734 static void reada_start_machine_worker(struct btrfs_work *work) in reada_start_machine_worker() argument
740 rmw = container_of(work, struct reada_machine_work, work); in reada_start_machine_worker()
802 btrfs_init_work(&rmw->work, btrfs_readahead_helper, in reada_start_machine()
806 btrfs_queue_work(fs_info->readahead_workers, &rmw->work); in reada_start_machine()
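
The btrfs hits above all share one shape: a struct btrfs_work is embedded in a larger object (raid bio, scrub bio, readahead state), initialised with btrfs_init_work() and pushed onto a purpose-specific workqueue with btrfs_queue_work(), and the handler recovers its owner with container_of(). A minimal sketch of that shape, written against the generic workqueue API rather than the btrfs_work wrappers (whose signatures differ between kernel versions) and using hypothetical demo_* names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    struct demo_stripe {
        int stripe_nr;
        struct work_struct work;        /* embedded work item */
    };

    static void demo_stripe_worker(struct work_struct *work)
    {
        /* Recover the containing object from the embedded work item. */
        struct demo_stripe *stripe = container_of(work, struct demo_stripe, work);

        pr_info("processing stripe %d\n", stripe->stripe_nr);
        kfree(stripe);
    }

    static void demo_queue_stripe(struct workqueue_struct *wq, int nr)
    {
        struct demo_stripe *stripe = kzalloc(sizeof(*stripe), GFP_NOFS);

        if (!stripe)
            return;
        stripe->stripe_nr = nr;
        INIT_WORK(&stripe->work, demo_stripe_worker);
        queue_work(wq, &stripe->work);
    }
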
/fs/
fs-writeback.c
177 struct wb_writeback_work *work) in finish_writeback_work() argument
179 struct wb_completion *done = work->done; in finish_writeback_work()
181 if (work->auto_free) in finish_writeback_work()
182 kfree(work); in finish_writeback_work()
188 struct wb_writeback_work *work) in wb_queue_work() argument
190 trace_writeback_queue(wb, work); in wb_queue_work()
192 if (work->done) in wb_queue_work()
193 atomic_inc(&work->done->cnt); in wb_queue_work()
198 list_add_tail(&work->list, &wb->work_list); in wb_queue_work()
201 finish_writeback_work(wb, work); in wb_queue_work()
[all …]
sync.c
122 static void do_sync_work(struct work_struct *work) in do_sync_work() argument
137 kfree(work); in do_sync_work()
142 struct work_struct *work; in emergency_sync() local
144 work = kmalloc(sizeof(*work), GFP_ATOMIC); in emergency_sync()
145 if (work) { in emergency_sync()
146 INIT_WORK(work, do_sync_work); in emergency_sync()
147 schedule_work(work); in emergency_sync()
super.c
139 static void destroy_super_work(struct work_struct *work) in destroy_super_work() argument
141 struct super_block *s = container_of(work, struct super_block, in destroy_super_work()
861 static void do_emergency_remount(struct work_struct *work) in do_emergency_remount() argument
888 kfree(work); in do_emergency_remount()
894 struct work_struct *work; in emergency_remount() local
896 work = kmalloc(sizeof(*work), GFP_ATOMIC); in emergency_remount()
897 if (work) { in emergency_remount()
898 INIT_WORK(work, do_emergency_remount); in emergency_remount()
899 schedule_work(work); in emergency_remount()
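
The sync.c and super.c hits show the simplest use of the workqueue API: emergency_sync() and emergency_remount() allocate a bare work_struct with GFP_ATOMIC, hand it to the system workqueue with schedule_work(), and let the handler free it when it is done. A sketch of that fire-and-forget pattern (handler body elided, demo_* names hypothetical):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    static void demo_emergency_worker(struct work_struct *work)
    {
        /* ... perform the actual emergency action here ... */
        kfree(work);                    /* the handler owns and frees the item */
    }

    static void demo_emergency_trigger(void)
    {
        struct work_struct *work;

        /* GFP_ATOMIC because this may be called from atomic context. */
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
            INIT_WORK(work, demo_emergency_worker);
            schedule_work(work);
        }
    }
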
/fs/afs/
cmservice.c
145 static void SRXAFSCB_CallBack(struct work_struct *work) in SRXAFSCB_CallBack() argument
147 struct afs_call *call = container_of(work, struct afs_call, work); in SRXAFSCB_CallBack()
279 INIT_WORK(&call->work, SRXAFSCB_CallBack); in afs_deliver_cb_callback()
280 queue_work(afs_wq, &call->work); in afs_deliver_cb_callback()
287 static void SRXAFSCB_InitCallBackState(struct work_struct *work) in SRXAFSCB_InitCallBackState() argument
289 struct afs_call *call = container_of(work, struct afs_call, work); in SRXAFSCB_InitCallBackState()
325 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); in afs_deliver_cb_init_call_back_state()
326 queue_work(afs_wq, &call->work); in afs_deliver_cb_init_call_back_state()
399 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); in afs_deliver_cb_init_call_back_state3()
400 queue_work(afs_wq, &call->work); in afs_deliver_cb_init_call_back_state3()
[all …]
callback.c
63 void afs_broken_callback_work(struct work_struct *work) in afs_broken_callback_work() argument
66 container_of(work, struct afs_vnode, cb_broken_work); in afs_broken_callback_work()
334 void afs_dispatch_give_up_callbacks(struct work_struct *work) in afs_dispatch_give_up_callbacks() argument
337 container_of(work, struct afs_server, cb_break_work.work); in afs_dispatch_give_up_callbacks()
361 static void afs_callback_updater(struct work_struct *work)
369 server = container_of(work, struct afs_server, updater);
rxrpc.c
574 static void afs_delete_async_call(struct work_struct *work) in afs_delete_async_call() argument
576 struct afs_call *call = container_of(work, struct afs_call, async_work); in afs_delete_async_call()
588 static void afs_process_async_call(struct work_struct *work) in afs_process_async_call() argument
590 struct afs_call *call = container_of(work, struct afs_call, async_work); in afs_process_async_call()
627 static void afs_charge_preallocation(struct work_struct *work) in afs_charge_preallocation() argument
/fs/xfs/
xfs_mru_cache.c
113 struct delayed_work work; /* Workqueue data for reaping. */ member
217 queue_delayed_work(xfs_mru_reap_wq, &mru->work, in _xfs_mru_cache_list_insert()
277 struct work_struct *work) in _xfs_mru_cache_reap() argument
280 container_of(work, struct xfs_mru_cache, work.work); in _xfs_mru_cache_reap()
298 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); in _xfs_mru_cache_reap()
368 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); in xfs_mru_cache_create()
400 cancel_delayed_work_sync(&mru->work); in xfs_mru_cache_flush()
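
The xfs_mru_cache hits use a delayed_work embedded in the cache as a reap timer: the handler re-arms itself with queue_delayed_work(), and the flush path stops it with cancel_delayed_work_sync(). Note the work.work member in the container_of() call, because a delayed_work embeds a plain work_struct. A sketch with illustrative names (the reaping itself is elided):

    #include <linux/workqueue.h>

    struct demo_cache {
        unsigned long reap_interval;    /* in jiffies */
        struct delayed_work work;       /* reap timer */
    };

    static struct workqueue_struct *demo_reap_wq;   /* created elsewhere with alloc_workqueue() */

    static void demo_cache_reap(struct work_struct *work)
    {
        /* delayed_work embeds a work_struct, hence the work.work member. */
        struct demo_cache *cache = container_of(work, struct demo_cache, work.work);

        /* ... reap expired entries, then re-arm while any remain ... */
        queue_delayed_work(demo_reap_wq, &cache->work, cache->reap_interval);
    }

    static void demo_cache_init(struct demo_cache *cache, unsigned long interval)
    {
        cache->reap_interval = interval;
        INIT_DELAYED_WORK(&cache->work, demo_cache_reap);
        queue_delayed_work(demo_reap_wq, &cache->work, cache->reap_interval);
    }

    static void demo_cache_flush(struct demo_cache *cache)
    {
        /* Wait for a running reap to finish and stop the timer. */
        cancel_delayed_work_sync(&cache->work);
        /* ... reap everything immediately ... */
    }
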
/fs/crypto/
bio.c
55 static void completion_pages(struct work_struct *work) in completion_pages() argument
58 container_of(work, struct fscrypt_ctx, r.work); in completion_pages()
68 INIT_WORK(&ctx->r.work, completion_pages); in fscrypt_enqueue_decrypt_bio()
70 fscrypt_enqueue_decrypt_work(&ctx->r.work); in fscrypt_enqueue_decrypt_bio()
/fs/ocfs2/cluster/
tcp.c
137 static void o2net_sc_connect_completed(struct work_struct *work);
138 static void o2net_rx_until_empty(struct work_struct *work);
139 static void o2net_shutdown_sc(struct work_struct *work);
141 static void o2net_sc_send_keep_req(struct work_struct *work);
475 struct work_struct *work) in o2net_sc_queue_work() argument
478 if (!queue_work(o2net_wq, work)) in o2net_sc_queue_work()
482 struct delayed_work *work, in o2net_sc_queue_delayed_work() argument
486 if (!queue_delayed_work(o2net_wq, work, delay)) in o2net_sc_queue_delayed_work()
490 struct delayed_work *work) in o2net_sc_cancel_delayed_work() argument
492 if (cancel_delayed_work(work)) in o2net_sc_cancel_delayed_work()
[all …]
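
The o2net wrappers in tcp.c queue onto a dedicated o2net_wq and check the return value of queue_work()/queue_delayed_work(), which is false when the item was already pending; the elided lines pair that check with get/put helpers on the socket container, so each queued work item pins one reference. A sketch of that idiom, with hypothetical demo_conn helpers standing in for those get/put calls:

    #include <linux/workqueue.h>
    #include <linux/kref.h>

    struct demo_conn {
        struct kref kref;
        struct work_struct rx_work;
    };

    static struct workqueue_struct *demo_wq;        /* created elsewhere with alloc_workqueue() */

    static void demo_conn_release(struct kref *kref)
    {
        /* ... tear down and free the connection ... */
    }

    static void demo_conn_get(struct demo_conn *c)
    {
        kref_get(&c->kref);
    }

    static void demo_conn_put(struct demo_conn *c)
    {
        kref_put(&c->kref, demo_conn_release);
    }

    static void demo_conn_queue_work(struct demo_conn *c, struct work_struct *work)
    {
        demo_conn_get(c);               /* the queued work holds a reference */
        if (!queue_work(demo_wq, work))
            demo_conn_put(c);           /* already queued: drop the extra reference */
    }
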
/fs/ncpfs/
ncp_fs_sb.h
149 extern void ncp_tcp_rcv_proc(struct work_struct *work);
150 extern void ncp_tcp_tx_proc(struct work_struct *work);
151 extern void ncpdgram_rcv_proc(struct work_struct *work);
152 extern void ncpdgram_timeout_proc(struct work_struct *work);
sock.c
382 void ncpdgram_rcv_proc(struct work_struct *work) in ncpdgram_rcv_proc() argument
385 container_of(work, struct ncp_server, rcv.tq); in ncpdgram_rcv_proc()
501 void ncpdgram_timeout_proc(struct work_struct *work) in ncpdgram_timeout_proc() argument
504 container_of(work, struct ncp_server, timeout_tq); in ncpdgram_timeout_proc()
680 void ncp_tcp_rcv_proc(struct work_struct *work) in ncp_tcp_rcv_proc() argument
683 container_of(work, struct ncp_server, rcv.tq); in ncp_tcp_rcv_proc()
690 void ncp_tcp_tx_proc(struct work_struct *work) in ncp_tcp_tx_proc() argument
693 container_of(work, struct ncp_server, tx.tq); in ncp_tcp_tx_proc()
/fs/fscache/
object.c
49 .work = f \
55 #define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
70 .work = NULL, \
194 ASSERT(state->work != NULL); in fscache_object_sm_dispatcher()
204 if (!state->work) { in fscache_object_sm_dispatcher()
229 new_state = state->work(object, event); in fscache_object_sm_dispatcher()
246 if (state->work) { in fscache_object_sm_dispatcher()
271 static void fscache_object_work_func(struct work_struct *work) in fscache_object_work_func() argument
274 container_of(work, struct fscache_object, work); in fscache_object_work_func()
310 INIT_WORK(&object->work, fscache_object_work_func); in fscache_object_init()
[all …]
operation.c
40 INIT_WORK(&op->work, fscache_op_work_func); in fscache_operation_init()
76 if (!queue_work(fscache_op_wq, &op->work)) in fscache_enqueue_operation()
543 void fscache_operation_gc(struct work_struct *work) in fscache_operation_gc() argument
548 container_of(work, struct fscache_cache, op_gc); in fscache_operation_gc()
595 void fscache_op_work_func(struct work_struct *work) in fscache_op_work_func() argument
598 container_of(work, struct fscache_operation, work); in fscache_op_work_func()
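
The fscache object.c hits combine both senses of "work" seen in this listing: every state in the object state machine carries a ->work handler (NULL for wait states), and a work_struct embedded in the object runs the dispatcher that walks those states. A compressed sketch of that arrangement, with illustrative types rather than the fscache definitions:

    #include <linux/workqueue.h>

    struct demo_object;

    struct demo_state {
        const char *name;
        const struct demo_state *(*work)(struct demo_object *obj);
    };

    struct demo_object {
        const struct demo_state *state;
        struct work_struct work;
    };

    static const struct demo_state *demo_state_init(struct demo_object *obj);

    static const struct demo_state DEMO_WAIT = { "WAIT", NULL };
    static const struct demo_state DEMO_INIT = { "INIT", demo_state_init };

    static const struct demo_state *demo_state_init(struct demo_object *obj)
    {
        /* ... perform the initialisation step ... */
        return &DEMO_WAIT;              /* hand over to a wait state */
    }

    static void demo_object_work_func(struct work_struct *work)
    {
        struct demo_object *obj = container_of(work, struct demo_object, work);

        /* Run executable states back to back; stop at a wait state (NULL handler). */
        while (obj->state->work)
            obj->state = obj->state->work(obj);
    }

    static void demo_object_init(struct demo_object *obj)
    {
        obj->state = &DEMO_INIT;
        INIT_WORK(&obj->work, demo_object_work_func);
    }
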
/fs/nfs/
nfs4renewd.c
55 nfs4_renew_state(struct work_struct *work) in nfs4_renew_state() argument
59 container_of(work, struct nfs_client, cl_renewd.work); in nfs4_renew_state()
direct.c
99 struct work_struct work; member
109 static void nfs_direct_write_schedule_work(struct work_struct *work);
317 INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); in nfs_direct_req_alloc()
752 static void nfs_direct_write_schedule_work(struct work_struct *work) in nfs_direct_write_schedule_work() argument
754 struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work); in nfs_direct_write_schedule_work()
773 schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */ in nfs_direct_write_complete()
/fs/overlayfs/
super.c
798 struct dentry *work; in ovl_workdir_create() local
808 work = lookup_one_len(OVL_WORKDIR_NAME, dentry, in ovl_workdir_create()
811 if (!IS_ERR(work)) { in ovl_workdir_create()
820 if (work->d_inode) { in ovl_workdir_create()
826 ovl_workdir_cleanup(dir, mnt, work, 0); in ovl_workdir_create()
827 dput(work); in ovl_workdir_create()
831 err = ovl_create_real(dir, work, &stat, NULL, NULL, true); in ovl_workdir_create()
848 err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT); in ovl_workdir_create()
852 err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS); in ovl_workdir_create()
857 inode_lock(work->d_inode); in ovl_workdir_create()
[all …]
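
In the overlayfs hits, "work" is not a work item at all but the dentry of the private workdir: ovl_workdir_create() looks it up under the upper directory, cleans out a stale one, creates it afresh and strips the default POSIX ACLs. A heavily simplified sketch of just the lookup-or-create step, using the VFS calls of this kernel era (the name and mode are illustrative; error handling, cleanup of a stale directory and the ACL removal are omitted):

    #include <linux/fs.h>
    #include <linux/namei.h>
    #include <linux/dcache.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static struct dentry *demo_workdir_create(struct inode *dir, struct dentry *parent)
    {
        struct dentry *work;
        int err;

        inode_lock_nested(dir, I_MUTEX_PARENT);
        work = lookup_one_len("workdir", parent, strlen("workdir"));
        if (IS_ERR(work))
            goto out_unlock;
        if (!work->d_inode) {
            /* Not there yet: create it (pre-5.12 vfs_mkdir() signature). */
            err = vfs_mkdir(dir, work, 0755);
            if (err) {
                dput(work);
                work = ERR_PTR(err);
            }
        }
    out_unlock:
        inode_unlock(dir);
        return work;
    }
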
/fs/cifs/
smb2misc.c
449 cifs_ses_oplock_break(struct work_struct *work) in cifs_ses_oplock_break() argument
451 struct smb2_lease_break_work *lw = container_of(work, in cifs_ses_oplock_break()
660 smb2_cancelled_close_fid(struct work_struct *work) in smb2_cancelled_close_fid() argument
662 struct close_cancelled_open *cancelled = container_of(work, in smb2_cancelled_close_fid()
663 struct close_cancelled_open, work); in smb2_cancelled_close_fid()
697 INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); in smb2_handle_cancelled_mid()
698 queue_work(cifsiod_wq, &cancelled->work); in smb2_handle_cancelled_mid()
smb2proto.h
59 extern void smb2_echo_request(struct work_struct *work);
103 extern void smb2_reconnect_server(struct work_struct *work);
167 void smb2_cancelled_close_fid(struct work_struct *work);
/fs/jffs2/
LICENCE
22 files and link them with other works to produce a work based on these
23 files, these files do not by themselves cause the resulting work to be
28 This exception does not invalidate any other reasons why a work based on
/fs/fat/
dir.c
360 unsigned char c, work[MSDOS_NAME]; in fat_parse_short() local
371 memcpy(work, de->name, sizeof(work)); in fat_parse_short()
373 if (work[0] == 0x05) in fat_parse_short()
374 work[0] = 0xE5; in fat_parse_short()
378 c = work[i]; in fat_parse_short()
381 chl = fat_shortname2uni(nls_disk, &work[i], 8 - i, in fat_parse_short()
398 ptname[i] = work[i]; in fat_parse_short()
414 c = work[k]; in fat_parse_short()
417 chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k, in fat_parse_short()
438 ptname[i] = work[k]; in fat_parse_short()
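
In dir.c, finally, "work" is just an 11-byte scratch copy of the on-disk 8.3 name: fat_parse_short() copies de->name aside and maps a leading 0x05 back to 0xE5, because a literal 0xE5 in the first byte of a directory entry would mark it as deleted. That detail fits in a few lines of plain C (the demo_* names are illustrative):

    #include <linux/string.h>

    #define DEMO_MSDOS_NAME 11          /* 8 name bytes + 3 extension bytes, no dot */

    static void demo_copy_short_name(const unsigned char *de_name, unsigned char *work)
    {
        /* Work on a local copy of the raw directory-entry name. */
        memcpy(work, de_name, DEMO_MSDOS_NAME);
        /* 0x05 on disk is the escape for a genuine leading 0xE5 byte. */
        if (work[0] == 0x05)
            work[0] = 0xe5;
    }
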
