/fs/ |
D | fs-writeback.c |
     111  struct wb_writeback_work *work)  in bdi_queue_work() argument
     113  trace_writeback_queue(bdi, work);  in bdi_queue_work()
     116  list_add_tail(&work->list, &bdi->work_list);  in bdi_queue_work()
     118  trace_writeback_nothread(bdi, work);  in bdi_queue_work()
     127  struct wb_writeback_work *work;  in __bdi_start_writeback() local
     133  work = kzalloc(sizeof(*work), GFP_ATOMIC);  in __bdi_start_writeback()
     134  if (!work) {  in __bdi_start_writeback()
     142  work->sync_mode = WB_SYNC_NONE;  in __bdi_start_writeback()
     143  work->nr_pages = nr_pages;  in __bdi_start_writeback()
     144  work->range_cyclic = range_cyclic;  in __bdi_start_writeback()
     [all …]
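Worth pulling out of the fs-writeback.c matches above: writeback does not use the generic workqueue here. __bdi_start_writeback() allocates a wb_writeback_work descriptor with GFP_ATOMIC (failure is tolerated for WB_SYNC_NONE writeback and the request is simply dropped), fills in the parameters, and bdi_queue_work() links it onto the per-device work_list for the flusher thread to consume. A minimal sketch of that private work-list shape, with hypothetical my_* names standing in for the bdi types:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_flusher {
            spinlock_t lock;
            struct list_head work_list;
            struct task_struct *task;       /* flusher thread, created elsewhere */
    };

    struct my_work {
            struct list_head list;
            long nr_pages;
            bool range_cyclic;
    };

    static void my_queue_work(struct my_flusher *f, struct my_work *work)
    {
            spin_lock_bh(&f->lock);
            list_add_tail(&work->list, &f->work_list);
            spin_unlock_bh(&f->lock);
            wake_up_process(f->task);       /* kick the flusher */
    }

    static void my_start_writeback(struct my_flusher *f, long nr_pages)
    {
            struct my_work *work;

            /* GFP_ATOMIC: callers may not be allowed to sleep; best-effort
             * writeback just drops the request when allocation fails */
            work = kzalloc(sizeof(*work), GFP_ATOMIC);
            if (!work)
                    return;

            work->nr_pages = nr_pages;
            work->range_cyclic = true;
            my_queue_work(f, work);
    }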
|
D | sync.c |
     108  static void do_sync_work(struct work_struct *work)  in do_sync_work() argument
     117  kfree(work);  in do_sync_work()
     122  struct work_struct *work;  in emergency_sync() local
     124  work = kmalloc(sizeof(*work), GFP_ATOMIC);  in emergency_sync()
     125  if (work) {  in emergency_sync()
     126  INIT_WORK(work, do_sync_work);  in emergency_sync()
     127  schedule_work(work);  in emergency_sync()
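The sync.c hits are the classic fire-and-forget work item: emergency_sync() allocates a bare work_struct with GFP_ATOMIC (it can be invoked from atomic context), schedules it, and the handler's last act is to kfree() its own work item; if the allocation fails, the sync is silently dropped. super.c below repeats the identical shape for emergency_remount(). A minimal sketch, with hypothetical demo_* names standing in for the real ones:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static void do_demo_work(struct work_struct *work)
    {
            /* ... the actual one-shot task runs here ... */

            kfree(work);    /* handler owns the item: free it last and
                             * never touch it again */
    }

    static void demo_trigger(void)
    {
            struct work_struct *work;

            work = kmalloc(sizeof(*work), GFP_ATOMIC); /* atomic context OK */
            if (work) {
                    INIT_WORK(work, do_demo_work);
                    schedule_work(work);  /* runs later on the system workqueue */
            }
            /* on allocation failure: silently skip, as emergency_sync() does */
    }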
|
D | super.c |
     785  static void do_emergency_remount(struct work_struct *work)  in do_emergency_remount() argument
     812  kfree(work);  in do_emergency_remount()
     818  struct work_struct *work;  in emergency_remount() local
     820  work = kmalloc(sizeof(*work), GFP_ATOMIC);  in emergency_remount()
     821  if (work) {  in emergency_remount()
     822  INIT_WORK(work, do_emergency_remount);  in emergency_remount()
     823  schedule_work(work);  in emergency_remount()
|
/fs/btrfs/ |
D | async-thread.c |
      85  struct btrfs_work work;  member
      89  static void start_new_worker_func(struct btrfs_work *work)  in start_new_worker_func() argument
      92  start = container_of(work, struct worker_start, work);  in start_new_worker_func()
     152  start->work.func = start_new_worker_func;  in check_pending_worker_creates()
     166  btrfs_queue_worker(workers->atomic_worker_start, &start->work);  in check_pending_worker_creates()
     175  struct btrfs_work *work)  in run_ordered_completions() argument
     180  set_bit(WORK_DONE_BIT, &work->flags);  in run_ordered_completions()
     186  work = list_entry(workers->prio_order_list.next,  in run_ordered_completions()
     189  work = list_entry(workers->order_list.next,  in run_ordered_completions()
     194  if (!test_bit(WORK_DONE_BIT, &work->flags))  in run_ordered_completions()
     [all …]
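async-thread.c is btrfs's own worker-pool API, but the data-flow idiom is the generic one: embed the work struct (here btrfs_work) as a member of a larger context struct, and have the handler recover its container with container_of(). A sketch of the same idiom against the plain workqueue API; demo_* names are hypothetical:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_start {
            struct work_struct work;  /* embedded, like btrfs_work in worker_start */
            int worker_id;            /* per-item payload */
    };

    static void demo_start_func(struct work_struct *work)
    {
            /* recover the enclosing struct from the embedded member */
            struct demo_start *start =
                    container_of(work, struct demo_start, work);

            pr_info("starting worker %d\n", start->worker_id);
            kfree(start);
    }

    static int demo_create_worker(int id)
    {
            struct demo_start *start = kzalloc(sizeof(*start), GFP_NOFS);

            if (!start)
                    return -ENOMEM;
            start->worker_id = id;
            INIT_WORK(&start->work, demo_start_func);
            schedule_work(&start->work);
            return 0;
    }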
|
D | async-thread.h |
      47  void (*func)(struct btrfs_work *work);
      48  void (*ordered_func)(struct btrfs_work *work);
      49  void (*ordered_free)(struct btrfs_work *work);
     112  void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
     117  void btrfs_requeue_work(struct btrfs_work *work);
     118  void btrfs_set_work_high_prio(struct btrfs_work *work);
|
D | disk-io.c |
      49  static void end_workqueue_fn(struct btrfs_work *work);
      78  struct btrfs_work work;  member
     100  struct btrfs_work work;  member
     673  end_io_wq->work.func = end_workqueue_fn;  in end_workqueue_bio()
     674  end_io_wq->work.flags = 0;  in end_workqueue_bio()
     679  &end_io_wq->work);  in end_workqueue_bio()
     682  &end_io_wq->work);  in end_workqueue_bio()
     685  &end_io_wq->work);  in end_workqueue_bio()
     689  &end_io_wq->work);  in end_workqueue_bio()
     692  &end_io_wq->work);  in end_workqueue_bio()
     [all …]
|
D | reada.c |
      93  struct btrfs_work work;  member
     717  static void reada_start_machine_worker(struct btrfs_work *work)  in reada_start_machine_worker() argument
     722  rmw = container_of(work, struct reada_machine_work, work);  in reada_start_machine_worker()
     772  rmw->work.func = reada_start_machine_worker;  in reada_start_machine()
     775  btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);  in reada_start_machine()
|
D | scrub.c |
      76  struct btrfs_work work;  member
     120  struct btrfs_work work;  member
     170  static void scrub_bio_end_io_worker(struct btrfs_work *work);
     243  sbio->work.func = scrub_bio_end_io_worker;  in scrub_setup_dev()
     525  static void scrub_fixup_nodatasum(struct btrfs_work *work)  in scrub_fixup_nodatasum() argument
     535  fixup = container_of(work, struct scrub_fixup_nodatasum, work);  in scrub_fixup_nodatasum()
     778  fixup_nodatasum->work.func = scrub_fixup_nodatasum;  in scrub_handle_errored_block()
     780  &fixup_nodatasum->work);  in scrub_handle_errored_block()
    1602  btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);  in scrub_bio_end_io()
    1605  static void scrub_bio_end_io_worker(struct btrfs_work *work)  in scrub_bio_end_io_worker() argument
     [all …]
|
/fs/afs/ |
D | cmservice.c |
     142  static void SRXAFSCB_CallBack(struct work_struct *work)  in SRXAFSCB_CallBack() argument
     144  struct afs_call *call = container_of(work, struct afs_call, work);  in SRXAFSCB_CallBack()
     291  INIT_WORK(&call->work, SRXAFSCB_CallBack);  in afs_deliver_cb_callback()
     292  queue_work(afs_wq, &call->work);  in afs_deliver_cb_callback()
     299  static void SRXAFSCB_InitCallBackState(struct work_struct *work)  in SRXAFSCB_InitCallBackState() argument
     301  struct afs_call *call = container_of(work, struct afs_call, work);  in SRXAFSCB_InitCallBackState()
     338  INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);  in afs_deliver_cb_init_call_back_state()
     339  queue_work(afs_wq, &call->work);  in afs_deliver_cb_init_call_back_state()
     369  INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);  in afs_deliver_cb_init_call_back_state3()
     370  queue_work(afs_wq, &call->work);  in afs_deliver_cb_init_call_back_state3()
     [all …]
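The AFS cache-manager service uses a dedicated queue, afs_wq, rather than the system workqueue: each incoming call gets INIT_WORK plus queue_work, and the handler climbs back to the afs_call with container_of. A sketch of the dedicated-queue variant under hypothetical demo_* names:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;        /* stand-in for afs_wq */

    struct demo_call {
            struct work_struct work;
            /* ... per-call state ... */
    };

    static void demo_handle_callback(struct work_struct *work)
    {
            struct demo_call *call =
                    container_of(work, struct demo_call, work);
            /* ... service the call outside the receive path ... */
    }

    static void demo_deliver(struct demo_call *call)
    {
            INIT_WORK(&call->work, demo_handle_callback);
            queue_work(demo_wq, &call->work);  /* our queue, not the system one */
    }

    static int __init demo_init(void)
    {
            demo_wq = alloc_workqueue("demo", 0, 0);
            return demo_wq ? 0 : -ENOMEM;
    }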
|
D | callback.c |
      63  void afs_broken_callback_work(struct work_struct *work)  in afs_broken_callback_work() argument
      66  container_of(work, struct afs_vnode, cb_broken_work);  in afs_broken_callback_work()
     334  void afs_dispatch_give_up_callbacks(struct work_struct *work)  in afs_dispatch_give_up_callbacks() argument
     337  container_of(work, struct afs_server, cb_break_work.work);  in afs_dispatch_give_up_callbacks()
     363  static void afs_callback_updater(struct work_struct *work)
     371  server = container_of(work, struct afs_server, updater);
|
D | rxrpc.c |
     606  static void afs_delete_async_call(struct work_struct *work)  in afs_delete_async_call() argument
     609  container_of(work, struct afs_call, async_work);  in afs_delete_async_call()
     623  static void afs_process_async_call(struct work_struct *work)  in afs_process_async_call() argument
     626  container_of(work, struct afs_call, async_work);  in afs_process_async_call()
     669  static void afs_collect_incoming_call(struct work_struct *work)  in afs_collect_incoming_call() argument
|
D | mntpt.c |
      27  static void afs_mntpt_expiry_timed_out(struct work_struct *work);
     262  static void afs_mntpt_expiry_timed_out(struct work_struct *work)  in afs_mntpt_expiry_timed_out() argument
|
/fs/xfs/ |
D | xfs_mru_cache.c |
     211  queue_delayed_work(xfs_mru_reap_wq, &mru->work,  in _xfs_mru_cache_list_insert()
     278  struct work_struct *work)  in _xfs_mru_cache_reap() argument
     280  xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work);  in _xfs_mru_cache_reap()
     298  queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);  in _xfs_mru_cache_reap()
     379  INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap);  in xfs_mru_cache_create()
     411  cancel_delayed_work_sync(&mru->work);  in xfs_mru_cache_flush()
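xfs_mru_cache.c is a complete delayed-work life cycle: INIT_DELAYED_WORK at create time, queue_delayed_work to arm the reaper, a handler that digs out its container via container_of(work, ..., work.work) (the double .work because delayed_work wraps a work_struct plus a timer) and re-arms itself, and cancel_delayed_work_sync at flush time to wait out any run in progress. A compact sketch with hypothetical demo_* names:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_reap_wq;  /* like xfs_mru_reap_wq */

    struct demo_cache {
            struct delayed_work work;       /* a work_struct plus a timer */
            unsigned long interval;         /* reap period, in jiffies */
    };

    static void demo_reap(struct work_struct *work)
    {
            /* delayed_work embeds the work_struct as .work, hence work.work */
            struct demo_cache *cache =
                    container_of(work, struct demo_cache, work.work);

            /* ... expire old entries ... */

            /* re-arm while there is anything left to reap */
            queue_delayed_work(demo_reap_wq, &cache->work, cache->interval);
    }

    static void demo_cache_init(struct demo_cache *cache)
    {
            cache->interval = msecs_to_jiffies(1000);
            INIT_DELAYED_WORK(&cache->work, demo_reap);
            queue_delayed_work(demo_reap_wq, &cache->work, cache->interval);
    }

    static void demo_cache_flush(struct demo_cache *cache)
    {
            /* kills the pending timer and waits for a running reap */
            cancel_delayed_work_sync(&cache->work);
    }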
|
D | xfs_sync.c |
     457  struct work_struct *work)  in xfs_sync_worker() argument
     459  struct xfs_mount *mp = container_of(to_delayed_work(work),  in xfs_sync_worker()
     516  struct work_struct *work)  in xfs_reclaim_worker() argument
     518  struct xfs_mount *mp = container_of(to_delayed_work(work),  in xfs_reclaim_worker()
     549  struct work_struct *work)  in xfs_flush_worker() argument
     551  struct xfs_mount *mp = container_of(work,  in xfs_flush_worker()
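The xfs_sync.c handlers reach the same container by a tidier route: to_delayed_work() wraps the first container_of hop from work_struct to delayed_work, so only the second hop is spelled out. A sketch mirroring xfs_sync_worker(), with demo_mount standing in for xfs_mount:

    #include <linux/workqueue.h>

    struct demo_mount {
            struct delayed_work sync_work;
            /* ... per-mount state ... */
    };

    static void demo_sync_worker(struct work_struct *work)
    {
            /* to_delayed_work() hides container_of(work, struct delayed_work, work) */
            struct demo_mount *mp = container_of(to_delayed_work(work),
                                                 struct demo_mount, sync_work);

            /* ... periodic sync against *mp ... */
    }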
|
/fs/ncpfs/ |
D | ncp_fs_sb.h |
     151  extern void ncp_tcp_rcv_proc(struct work_struct *work);
     152  extern void ncp_tcp_tx_proc(struct work_struct *work);
     153  extern void ncpdgram_rcv_proc(struct work_struct *work);
     154  extern void ncpdgram_timeout_proc(struct work_struct *work);
|
D | sock.c |
     381  void ncpdgram_rcv_proc(struct work_struct *work)  in ncpdgram_rcv_proc() argument
     384  container_of(work, struct ncp_server, rcv.tq);  in ncpdgram_rcv_proc()
     500  void ncpdgram_timeout_proc(struct work_struct *work)  in ncpdgram_timeout_proc() argument
     503  container_of(work, struct ncp_server, timeout_tq);  in ncpdgram_timeout_proc()
     679  void ncp_tcp_rcv_proc(struct work_struct *work)  in ncp_tcp_rcv_proc() argument
     682  container_of(work, struct ncp_server, rcv.tq);  in ncp_tcp_rcv_proc()
     689  void ncp_tcp_tx_proc(struct work_struct *work)  in ncp_tcp_tx_proc() argument
     692  container_of(work, struct ncp_server, tx.tq);  in ncp_tcp_tx_proc()
|
/fs/ocfs2/cluster/ |
D | tcp.c |
     137  static void o2net_sc_connect_completed(struct work_struct *work);
     138  static void o2net_rx_until_empty(struct work_struct *work);
     139  static void o2net_shutdown_sc(struct work_struct *work);
     141  static void o2net_sc_send_keep_req(struct work_struct *work);
     478  struct work_struct *work)  in o2net_sc_queue_work() argument
     481  if (!queue_work(o2net_wq, work))  in o2net_sc_queue_work()
     485  struct delayed_work *work,  in o2net_sc_queue_delayed_work() argument
     489  if (!queue_delayed_work(o2net_wq, work, delay))  in o2net_sc_queue_delayed_work()
     493  struct delayed_work *work)  in o2net_sc_cancel_delayed_work() argument
     495  if (cancel_delayed_work(work))  in o2net_sc_cancel_delayed_work()
     [all …]
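The o2net_sc_* wrappers in tcp.c pair every queued work item with a reference on its socket container: the caller takes a ref before queueing, and when queue_work()/queue_delayed_work() returns zero (the item was already pending) the surplus ref is dropped; symmetrically, a successful cancel_delayed_work() drops the ref the pending item was holding. A sketch of that accounting with kref and hypothetical demo_* names:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;        /* stand-in for o2net_wq */

    struct demo_sc {
            struct kref kref;
            struct work_struct rx_work;
            struct delayed_work keepalive_work;
    };

    static void demo_sc_release(struct kref *kref)
    {
            kfree(container_of(kref, struct demo_sc, kref));
    }

    /* a pending work item owns one reference on its container */
    static void demo_sc_queue_work(struct demo_sc *sc, struct work_struct *work)
    {
            kref_get(&sc->kref);
            if (!queue_work(demo_wq, work))
                    kref_put(&sc->kref, demo_sc_release);   /* already queued */
    }

    static void demo_sc_cancel_delayed_work(struct demo_sc *sc,
                                            struct delayed_work *dwork)
    {
            if (cancel_delayed_work(dwork))
                    kref_put(&sc->kref, demo_sc_release);   /* drop its ref */
    }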
|
/fs/fat/ |
D | dir.c |
     351  unsigned char work[MSDOS_NAME];  in fat_search_long() local
     383  memcpy(work, de->name, sizeof(de->name));  in fat_search_long()
     385  if (work[0] == 0x05)  in fat_search_long()
     386  work[0] = 0xE5;  in fat_search_long()
     388  if (!work[i])  in fat_search_long()
     390  chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,  in fat_search_long()
     394  if (work[i] != ' ')  in fat_search_long()
     404  if (!work[i])  in fat_search_long()
     406  chl = fat_shortname2uni(nls_disk, &work[i],  in fat_search_long()
     411  if (work[i] != ' ')  in fat_search_long()
     [all …]
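Note that `work` in fat/dir.c is not a work item at all: it is a scratch copy of the on-disk 8.3 name. fat_search_long() copies de->name into it, undoes the 0x05 escape (0xE5 in the first byte marks a deleted entry on disk, so a live name that genuinely starts with 0xE5 is stored as 0x05), then converts base and extension to Unicode while stopping at NUL or space padding. A sketch of just the copy-and-restore step; demo_* names are hypothetical:

    #include <linux/string.h>

    #define DEMO_MSDOS_NAME 11      /* 8 base chars + 3 extension, space padded */

    static void demo_copy_shortname(unsigned char *work,
                                    const unsigned char *de_name)
    {
            memcpy(work, de_name, DEMO_MSDOS_NAME);

            /* 0xE5 in byte 0 flags a deleted entry, so a real leading
             * 0xE5 is stored escaped as 0x05 and must be put back */
            if (work[0] == 0x05)
                    work[0] = 0xE5;
    }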
|
/fs/cifs/ |
D | AUTHORS |
      10  Jeremy Allison of the Samba team has done invaluable work in adding the server
      16  Newbigin and others for their work on the Linux smbfs module. Thanks to
      18  Workgroup for their work specifying this highly complex protocol and finally
      30  Mark Hamzy (for some of the early cifs IPv6 work)
      41  Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
|
/fs/nfs/ |
D | nfs4renewd.c |
      55  nfs4_renew_state(struct work_struct *work)  in nfs4_renew_state() argument
      59  container_of(work, struct nfs_client, cl_renewd.work);  in nfs4_renew_state()
|
/fs/fscache/ |
D | operation.c |
      46  if (!queue_work(fscache_op_wq, &op->work))  in fscache_enqueue_operation()
     387  void fscache_operation_gc(struct work_struct *work)  in fscache_operation_gc() argument
     392  container_of(work, struct fscache_cache, op_gc);  in fscache_operation_gc()
     447  void fscache_op_work_func(struct work_struct *work)  in fscache_op_work_func() argument
     450  container_of(work, struct fscache_operation, work);  in fscache_op_work_func()
|
D | object.c |
     332  void fscache_object_work_func(struct work_struct *work)  in fscache_object_work_func() argument
     335  container_of(work, struct fscache_object, work);  in fscache_object_work_func()
     761  if (queue_work(fscache_object_wq, &object->work)) {  in fscache_enqueue_object()
|
/fs/ext4/ |
D | page-io.c |
     124  static void ext4_end_io_work(struct work_struct *work)  in ext4_end_io_work() argument
     126  ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);  in ext4_end_io_work()
     149  queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);  in ext4_end_io_work()
     174  INIT_WORK(&io->work, ext4_end_io_work);  in ext4_init_io_end()
     265  queue_work(wq, &io_end->work);  in ext4_end_bio()
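ext4's page-io.c shows why the handoff matters: ext4_end_bio() runs in bio completion context, where sleeping is forbidden, so it only queues the io_end work that ext4_init_io_end() set up earlier; the unwritten-extent conversion that may block then runs from ext4_end_io_work() on the per-superblock dio_unwritten_wq. A sketch of that defer-from-atomic shape, demo_* names hypothetical:

    #include <linux/workqueue.h>

    struct demo_io_end {
            struct work_struct work;        /* set up once with INIT_WORK */
            /* ... inode and range of the completed I/O ... */
    };

    static void demo_end_io_work(struct work_struct *work)
    {
            struct demo_io_end *io =
                    container_of(work, struct demo_io_end, work);

            /* process context: conversion work here is allowed to sleep */
    }

    /* bio end_io path: atomic context, so defer instead of doing the work */
    static void demo_end_bio(struct workqueue_struct *wq, struct demo_io_end *io)
    {
            queue_work(wq, &io->work);
    }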
|
/fs/jffs2/ |
D | LICENCE |
      22  files and link them with other works to produce a work based on these
      23  files, these files do not by themselves cause the resulting work to be
      28  This exception does not invalidate any other reasons why a work based on
|
/fs/sysfs/ |
D | file.c |
     709  struct work_struct work;  member
     715  static void sysfs_schedule_callback_work(struct work_struct *work)  in sysfs_schedule_callback_work() argument
     717  struct sysfs_schedule_callback_struct *ss = container_of(work,  in sysfs_schedule_callback_work()
     718  struct sysfs_schedule_callback_struct, work);  in sysfs_schedule_callback_work()
     785  INIT_WORK(&ss->work, sysfs_schedule_callback_work);  in sysfs_schedule_callback()
     790  queue_work(sysfs_workqueue, &ss->work);  in sysfs_schedule_callback()
|