/fs/btrfs/ |
D | compression.c |
    104  struct compressed_bio *cb,  in check_compressed_csum() argument
    112  u32 *cb_sum = &cb->sums;  in check_compressed_csum()
    117  for (i = 0; i < cb->nr_pages; i++) {  in check_compressed_csum()
    118  page = cb->compressed_pages[i];  in check_compressed_csum()
    130  cb->mirror_num);  in check_compressed_csum()
    154  struct compressed_bio *cb = bio->bi_private;  in end_compressed_bio_read() local
    161  cb->errors = 1;  in end_compressed_bio_read()
    166  if (!atomic_dec_and_test(&cb->pending_bios))  in end_compressed_bio_read()
    169  inode = cb->inode;  in end_compressed_bio_read()
    170  ret = check_compressed_csum(inode, cb,  in end_compressed_bio_read()
    [all …]
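The check_compressed_csum() hits above loop over each compressed page of a compressed_bio and compare a computed checksum against the array stored at cb->sums. Below is a minimal userspace sketch of that per-chunk verification loop; the chunk size, the toy checksum and every name are illustrative stand-ins, not btrfs's actual algorithm.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK_SIZE 4096         /* stands in for one compressed page */

/* Toy checksum; a stand-in for whatever algorithm the filesystem uses. */
static uint32_t toy_csum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        while (len--)
                sum = sum * 31 + *data++;
        return sum;
}

/* Return 0 if every chunk matches its stored checksum, -1 on the first miss. */
int check_chunk_csums(const uint8_t *buf, size_t nr_chunks,
                      const uint32_t *expected)
{
        for (size_t i = 0; i < nr_chunks; i++) {
                uint32_t csum = toy_csum(buf + i * CHUNK_SIZE, CHUNK_SIZE);

                if (csum != expected[i]) {
                        fprintf(stderr, "csum mismatch in chunk %zu\n", i);
                        return -1;
                }
        }
        return 0;
}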
|
D | raid56.c |
    1654  struct blk_plug_cb cb;  member
    1728  static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)  in btrfs_raid_unplug() argument
    1731  plug = container_of(cb, struct btrfs_plug_cb, cb);  in btrfs_raid_unplug()
    1751  struct blk_plug_cb *cb;  in raid56_parity_write() local
    1777  cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,  in raid56_parity_write()
    1779  if (cb) {  in raid56_parity_write()
    1780  plug = container_of(cb, struct btrfs_plug_cb, cb);  in raid56_parity_write()
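The raid56.c hits show the usual kernel embedding trick: a private struct btrfs_plug_cb embeds the generic struct blk_plug_cb handed out by blk_check_plugged(), and the unplug handler recovers the outer object with container_of(). Here is a small userspace sketch of that embedding pattern, assuming nothing beyond offsetof(); all type and function names below are invented for illustration.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic callback handle, analogous to struct blk_plug_cb. */
struct plug_cb {
        void (*unplug)(struct plug_cb *cb);
};

/* Private wrapper, analogous to a btrfs_plug_cb embedding the handle. */
struct my_plug {
        int pending;
        struct plug_cb cb;      /* embedded generic handle */
};

static void my_unplug(struct plug_cb *cb)
{
        /* Recover the outer object from the embedded member. */
        struct my_plug *plug = container_of(cb, struct my_plug, cb);

        printf("unplugging, %d requests pending\n", plug->pending);
}

int main(void)
{
        struct my_plug plug = { .pending = 3, .cb = { .unplug = my_unplug } };

        /* Generic code only ever sees the embedded struct plug_cb. */
        plug.cb.unplug(&plug.cb);
        return 0;
}

The generic layer stores and passes only the embedded member, so the private wrapper can carry extra state without the generic code knowing anything about it.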
|
D | send.c |
    4226  iterate_inode_ref_t cb;  in process_all_refs() local
    4235  cb = __record_new_ref;  in process_all_refs()
    4238  cb = __record_deleted_ref;  in process_all_refs()
    4272  ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);  in process_all_refs()
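In the send.c hits, process_all_refs() picks one of two record functions into a local function pointer and passes it, together with a context pointer (sctx), to iterate_inode_ref(). The sketch below reproduces that iterate-with-callback shape in plain C; the item type, the iterator and both record functions are made up here.

#include <stdio.h>

/* Callback type: invoked once per item, with a caller-supplied context. */
typedef int (*ref_cb_t)(int ref, void *ctx);

static int record_new_ref(int ref, void *ctx)
{
        int *count = ctx;

        (*count)++;
        printf("new ref %d\n", ref);
        return 0;
}

static int record_deleted_ref(int ref, void *ctx)
{
        (void)ctx;
        printf("deleted ref %d\n", ref);
        return 0;
}

/* Iterator: walks the items and calls cb for each, stopping on error. */
static int iterate_refs(const int *refs, int n, ref_cb_t cb, void *ctx)
{
        for (int i = 0; i < n; i++) {
                int ret = cb(refs[i], ctx);

                if (ret)
                        return ret;
        }
        return 0;
}

int main(void)
{
        int refs[] = { 1, 2, 3 };
        int count = 0;
        ref_cb_t cb = record_new_ref;   /* chosen the way process_all_refs() picks its cb */

        iterate_refs(refs, 3, cb, &count);
        cb = record_deleted_ref;
        iterate_refs(refs, 3, cb, NULL);
        return 0;
}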
|
D | ctree.h |
    2789  btrfs_changed_cb_t cb, void *ctx);
|
/fs/nfsd/ |
D | nfs4callback.c |
    352  const struct nfsd4_callback *cb,  in encode_cb_sequence4args() argument
    355  struct nfsd4_session *session = cb->cb_clp->cl_cb_session;  in encode_cb_sequence4args()
    396  struct nfsd4_callback *cb)  in decode_cb_sequence4resok() argument
    398  struct nfsd4_session *session = cb->cb_clp->cl_cb_session;  in decode_cb_sequence4resok()
    438  cb->cb_seq_status = status;  in decode_cb_sequence4resok()
    447  struct nfsd4_callback *cb)  in decode_cb_sequence4res() argument
    451  if (cb->cb_clp->cl_minorversion == 0)  in decode_cb_sequence4res()
    454  status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);  in decode_cb_sequence4res()
    455  if (unlikely(status || cb->cb_seq_status))  in decode_cb_sequence4res()
    458  return decode_cb_sequence4resok(xdr, cb);  in decode_cb_sequence4res()
    [all …]
|
D | state.h |
    138  #define cb_to_delegation(cb) \  argument
    139  container_of(cb, struct nfs4_delegation, dl_recall)
    622  extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
    624  extern void nfsd4_run_cb(struct nfsd4_callback *cb);
|
D | nfs4layouts.c |
    645  nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)  in nfsd4_cb_layout_prepare() argument
    648  container_of(cb, struct nfs4_layout_stateid, ls_recall);  in nfsd4_cb_layout_prepare()
    656  nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)  in nfsd4_cb_layout_done() argument
    659  container_of(cb, struct nfs4_layout_stateid, ls_recall);  in nfsd4_cb_layout_done()
    710  nfsd4_cb_layout_release(struct nfsd4_callback *cb)  in nfsd4_cb_layout_release() argument
    713  container_of(cb, struct nfs4_layout_stateid, ls_recall);  in nfsd4_cb_layout_release()
|
D | nfs4idmap.c |
    140  idtoname_match(struct cache_head *ca, struct cache_head *cb)  in idtoname_match() argument
    143  struct ent *b = container_of(cb, struct ent, h);  in idtoname_match()
    311  nametoid_match(struct cache_head *ca, struct cache_head *cb)  in nametoid_match() argument
    314  struct ent *b = container_of(cb, struct ent, h);  in nametoid_match()
|
D | nfs4state.c |
    296  nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)  in nfsd4_cb_notify_lock_done() argument
    313  nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)  in nfsd4_cb_notify_lock_release() argument
    315  struct nfsd4_blocked_lock *nbl = container_of(cb,  in nfsd4_cb_notify_lock_release()
    3812  static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)  in nfsd4_cb_recall_prepare() argument
    3814  struct nfs4_delegation *dp = cb_to_delegation(cb);  in nfsd4_cb_recall_prepare()
    3835  static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,  in nfsd4_cb_recall_done() argument
    3838  struct nfs4_delegation *dp = cb_to_delegation(cb);  in nfsd4_cb_recall_done()
    3862  static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)  in nfsd4_cb_recall_release() argument
    3864  struct nfs4_delegation *dp = cb_to_delegation(cb);  in nfsd4_cb_recall_release()
|
/fs/ntfs/ |
D | compress.c |
    176  u8 *cb = cb_start; /* Current position in cb. */  in ntfs_decompress() local
    177  u8 *cb_sb_start = cb; /* Beginning of the current sb in the cb. */  in ntfs_decompress()
    204  cb - cb_start);  in ntfs_decompress()
    211  if (cb == cb_end || !le16_to_cpup((le16*)cb) ||  in ntfs_decompress()
    256  if (cb + 6 > cb_end)  in ntfs_decompress()
    260  cb_sb_start = cb;  in ntfs_decompress()
    261  cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)  in ntfs_decompress()
    270  cb = cb_sb_end;  in ntfs_decompress()
    283  if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {  in ntfs_decompress()
    288  cb += 2;  in ntfs_decompress()
    [all …]
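The ntfs_decompress() hits walk a compression block (cb) sub-block by sub-block: each sub-block starts with a little-endian 16-bit header whose low bits give its length and whose top bit marks it as compressed, and the cursor then jumps to cb_sb_end for the next round. Below is a hedged userspace sketch of that header walk; the mask, flag and length bias are illustrative values patterned after the hits, not the authoritative on-disk constants.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative values patterned after the hits; not the on-disk constants. */
#define SB_SIZE_MASK      0x0fff
#define SB_IS_COMPRESSED  0x8000

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

/* Walk the sub-blocks of one compression block in [cb_start, cb_end). */
void walk_sub_blocks(const uint8_t *cb_start, const uint8_t *cb_end)
{
        const uint8_t *cb = cb_start;

        while (cb < cb_end) {
                uint16_t header = get_le16(cb);

                if (!header)            /* a zero header ends the block */
                        break;

                /*
                 * Length decode is simplified for illustration; the real
                 * format stores a biased length in the low bits.
                 */
                const uint8_t *sb_end = cb + (header & SB_SIZE_MASK) + 3;

                if (sb_end > cb_end) {
                        fprintf(stderr, "corrupt sub-block header\n");
                        return;
                }
                printf("%s sub-block, %td bytes\n",
                       (header & SB_IS_COMPRESSED) ? "compressed" : "stored",
                       sb_end - cb);

                cb = sb_end;            /* jump to the next sub-block */
        }
}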
|
/fs/dlm/ |
D | ast.c |
    115  struct dlm_callback *cb, int *resid)  in dlm_rem_lkb_callback() argument
    128  memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));  in dlm_rem_lkb_callback()
    145  if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {  in dlm_rem_lkb_callback()
    146  if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {  in dlm_rem_lkb_callback()
    147  cb->flags |= DLM_CB_SKIP;  in dlm_rem_lkb_callback()
    152  (unsigned long long)cb->seq,  in dlm_rem_lkb_callback()
    153  cb->mode,  in dlm_rem_lkb_callback()
    161  if (cb->flags & DLM_CB_CAST) {  in dlm_rem_lkb_callback()
    162  memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));  in dlm_rem_lkb_callback()
    166  if (cb->flags & DLM_CB_BAST) {  in dlm_rem_lkb_callback()
    [all …]
|
D | lowcomms.c |
    75  static void cbuf_add(struct cbuf *cb, int n)  in cbuf_add() argument
    77  cb->len += n;  in cbuf_add()
    80  static int cbuf_data(struct cbuf *cb)  in cbuf_data() argument
    82  return ((cb->base + cb->len) & cb->mask);  in cbuf_data()
    85  static void cbuf_init(struct cbuf *cb, int size)  in cbuf_init() argument
    87  cb->base = cb->len = 0;  in cbuf_init()
    88  cb->mask = size-1;  in cbuf_init()
    91  static void cbuf_eat(struct cbuf *cb, int n)  in cbuf_eat() argument
    93  cb->len -= n;  in cbuf_eat()
    94  cb->base += n;  in cbuf_eat()
    [all …]
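The lowcomms.c hits are a small circular-buffer bookkeeping helper: the window size is a power of two so offsets wrap with a mask, cbuf_add() accounts for produced bytes, cbuf_data() yields the current write offset and cbuf_eat() consumes from the front. Here is a userspace sketch of the same arithmetic, with the struct layout inferred from the hits and a demo main() added; treat it as illustrative, not as the kernel's code.

#include <assert.h>
#include <stdio.h>

/* Circular-buffer bookkeeping over a power-of-two sized window. */
struct cbuf {
        unsigned int base;      /* offset of the first unconsumed byte */
        unsigned int len;       /* number of unconsumed bytes */
        unsigned int mask;      /* size - 1, so offsets wrap with & */
};

static void cbuf_init(struct cbuf *cb, unsigned int size)
{
        assert((size & (size - 1)) == 0);       /* size must be a power of two */
        cb->base = cb->len = 0;
        cb->mask = size - 1;
}

/* Account for n newly produced bytes. */
static void cbuf_add(struct cbuf *cb, unsigned int n)
{
        cb->len += n;
}

/* Offset at which the next byte should be written. */
static unsigned int cbuf_data(const struct cbuf *cb)
{
        return (cb->base + cb->len) & cb->mask;
}

/* Consume n bytes from the front, keeping base wrapped into the window. */
static void cbuf_eat(struct cbuf *cb, unsigned int n)
{
        cb->len -= n;
        cb->base = (cb->base + n) & cb->mask;
}

int main(void)
{
        struct cbuf cb;

        cbuf_init(&cb, 64);
        cbuf_add(&cb, 10);                      /* produced 10 bytes */
        printf("write offset: %u\n", cbuf_data(&cb));
        cbuf_eat(&cb, 4);                       /* consumed 4 bytes */
        printf("base=%u len=%u\n", cb.base, cb.len);
        return 0;
}

Keeping the size a power of two lets cbuf_data() wrap with a single AND instead of a modulo.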
|
D | user.c |
    782  struct dlm_callback cb;  in device_read() local
    844  rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);  in device_read()
    859  if (cb.flags & DLM_CB_SKIP) {  in device_read()
    866  if (cb.flags & DLM_CB_CAST) {  in device_read()
    867  new_mode = cb.mode;  in device_read()
    869  if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&  in device_read()
    873  lkb->lkb_lksb->sb_status = cb.sb_status;  in device_read()
    874  lkb->lkb_lksb->sb_flags = cb.sb_flags;  in device_read()
    879  cb.flags, cb.mode, copy_lvb, buf, count);  in device_read()
|
D | ast.h |
    20  struct dlm_callback *cb, int *resid);
|
/fs/afs/ |
D | cmservice.c |
    168  struct afs_callback *cb;  in afs_deliver_cb_callback() local
    213  cb = call->request;  in afs_deliver_cb_callback()
    215  for (loop = call->count; loop > 0; loop--, cb++) {  in afs_deliver_cb_callback()
    216  cb->fid.vid = ntohl(*bp++);  in afs_deliver_cb_callback()
    217  cb->fid.vnode = ntohl(*bp++);  in afs_deliver_cb_callback()
    218  cb->fid.unique = ntohl(*bp++);  in afs_deliver_cb_callback()
    219  cb->type = AFSCM_CB_UNTYPED;  in afs_deliver_cb_callback()
    247  cb = call->request;  in afs_deliver_cb_callback()
    249  for (loop = call->count2; loop > 0; loop--, cb++) {  in afs_deliver_cb_callback()
    250  cb->version = ntohl(*bp++);  in afs_deliver_cb_callback()
    [all …]
|
D | callback.c |
    225  struct afs_callback *cb;  in afs_do_give_up_callback() local
    229  cb = &server->cb_break[server->cb_break_head];  in afs_do_give_up_callback()
    230  cb->fid = vnode->fid;  in afs_do_give_up_callback()
    231  cb->version = vnode->cb_version;  in afs_do_give_up_callback()
    232  cb->expiry = vnode->cb_expiry;  in afs_do_give_up_callback()
    233  cb->type = vnode->cb_type;  in afs_do_give_up_callback()
|
D | inode.c |
    201  struct afs_callback *cb)  in afs_iget() argument
    242  if (!cb) {  in afs_iget()
    250  vnode->cb_version = cb->version;  in afs_iget()
    251  vnode->cb_expiry = cb->expiry;  in afs_iget()
    252  vnode->cb_type = cb->type;  in afs_iget()
|
D | fsclient.c |
    147  struct afs_callback *cb)  in xdr_decode_AFSCallBack_raw() argument
    151  cb->version = ntohl(*bp++);  in xdr_decode_AFSCallBack_raw()
    152  cb->expiry = ntohl(*bp++);  in xdr_decode_AFSCallBack_raw()
    153  cb->type = ntohl(*bp++);  in xdr_decode_AFSCallBack_raw()
    572  struct afs_callback *cb =  in afs_fs_give_up_callbacks() local
    575  *bp++ = htonl(cb->fid.vid);  in afs_fs_give_up_callbacks()
    576  *bp++ = htonl(cb->fid.vnode);  in afs_fs_give_up_callbacks()
    577  *bp++ = htonl(cb->fid.unique);  in afs_fs_give_up_callbacks()
    578  *tp++ = htonl(cb->version);  in afs_fs_give_up_callbacks()
    579  *tp++ = htonl(cb->expiry);  in afs_fs_give_up_callbacks()
    [all …]
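fsclient.c decodes and encodes AFS callback records as consecutive 32-bit big-endian words, walking a 32-bit cursor with ntohl()/htonl(). The sketch below reproduces that cursor walk in userspace; the record fields (version, expiry, type) come from the hits, everything else is invented for illustration.

#include <arpa/inet.h>          /* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>

struct callback_rec {
        uint32_t version;
        uint32_t expiry;
        uint32_t type;
};

/* Decode three consecutive big-endian words; returns the advanced cursor. */
static const uint32_t *decode_callback(const uint32_t *bp,
                                       struct callback_rec *cb)
{
        cb->version = ntohl(*bp++);
        cb->expiry  = ntohl(*bp++);
        cb->type    = ntohl(*bp++);
        return bp;
}

/* Encode the record back into network byte order. */
static uint32_t *encode_callback(uint32_t *bp, const struct callback_rec *cb)
{
        *bp++ = htonl(cb->version);
        *bp++ = htonl(cb->expiry);
        *bp++ = htonl(cb->type);
        return bp;
}

int main(void)
{
        uint32_t wire[3];
        struct callback_rec in = { .version = 1, .expiry = 3600, .type = 2 };
        struct callback_rec out;

        encode_callback(wire, &in);
        decode_callback(wire, &out);
        printf("version=%u expiry=%u type=%u\n",
               out.version, out.expiry, out.type);
        return 0;
}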
|
D | dir.c |
    734  struct afs_callback cb;  in afs_mkdir() local
    755  mode, &fid, &status, &cb, &server);  in afs_mkdir()
    759  inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);  in afs_mkdir()
    911  struct afs_callback cb;  in afs_create() local
    932  mode, &fid, &status, &cb, &server);  in afs_create()
    936  inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);  in afs_create()
|
/fs/ocfs2/dlm/ |
D | dlmapi.h |
    213  void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
    217  struct dlm_eviction_cb *cb);
    218  void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);
|
D | dlmdomain.c |
    2298  struct dlm_eviction_cb *cb;  in dlm_fire_domain_eviction_callbacks() local
    2301  list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {  in dlm_fire_domain_eviction_callbacks()
    2302  cb->ec_func(node_num, cb->ec_data);  in dlm_fire_domain_eviction_callbacks()
    2307  void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,  in dlm_setup_eviction_cb() argument
    2311  INIT_LIST_HEAD(&cb->ec_item);  in dlm_setup_eviction_cb()
    2312  cb->ec_func = f;  in dlm_setup_eviction_cb()
    2313  cb->ec_data = data;  in dlm_setup_eviction_cb()
    2318  struct dlm_eviction_cb *cb)  in dlm_register_eviction_cb() argument
    2321  list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);  in dlm_register_eviction_cb()
    2326  void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)  in dlm_unregister_eviction_cb() argument
    [all …]
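dlmdomain.c shows the life cycle of an eviction callback: setup fills in the function and data pointers and initialises the list link, registration adds it to the domain's list, and firing walks the list invoking ec_func(node_num, ec_data) for each entry. Below is a userspace sketch of that register-and-fire pattern using a plain singly-linked list instead of the kernel's list_head, and with no locking; all names are stand-ins.

#include <stdio.h>
#include <stddef.h>

typedef void (*evict_fn)(int node_num, void *data);

/* One registered callback: function, opaque data and a list link. */
struct eviction_cb {
        evict_fn fn;
        void *data;
        struct eviction_cb *next;
};

/* Setup: fill in the callback before it is registered anywhere. */
static void setup_eviction_cb(struct eviction_cb *cb, evict_fn fn, void *data)
{
        cb->fn = fn;
        cb->data = data;
        cb->next = NULL;
}

/* Register: link onto the domain's list (prepends; order is irrelevant here). */
static void register_eviction_cb(struct eviction_cb **head,
                                 struct eviction_cb *cb)
{
        cb->next = *head;
        *head = cb;
}

/* Fire: walk the list and invoke every callback with its own data. */
static void fire_eviction_cbs(struct eviction_cb *head, int node_num)
{
        for (struct eviction_cb *cb = head; cb; cb = cb->next)
                cb->fn(node_num, cb->data);
}

static void report_eviction(int node_num, void *data)
{
        printf("%s: node %d evicted\n", (const char *)data, node_num);
}

int main(void)
{
        struct eviction_cb *head = NULL;
        struct eviction_cb cb;

        setup_eviction_cb(&cb, report_eviction, "eviction sketch");
        register_eviction_cb(&head, &cb);
        fire_eviction_cbs(head, 7);
        return 0;
}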
|
/fs/xfs/ |
D | xfs_log.c |
    564  xfs_log_callback_t *cb)  in xfs_log_notify() argument
    573  cb->cb_next = NULL;  in xfs_log_notify()
    574  *(iclog->ic_callback_tail) = cb;  in xfs_log_notify()
    575  iclog->ic_callback_tail = &(cb->cb_next);  in xfs_log_notify()
    2627  xfs_log_callback_t *cb, *cb_next;  in xlog_state_do_callback() local
    2755  cb = iclog->ic_callback;  in xlog_state_do_callback()
    2756  while (cb) {  in xlog_state_do_callback()
    2762  for (; cb; cb = cb_next) {  in xlog_state_do_callback()
    2763  cb_next = cb->cb_next;  in xlog_state_do_callback()
    2764  cb->cb_func(cb->cb_arg, aborted);  in xlog_state_do_callback()
    [all …]
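xfs_log.c appends log callbacks with the tail-pointer idiom: ic_callback_tail always points at the last cb_next slot, so xfs_log_notify() appends in O(1), and xlog_state_do_callback() later walks the chain firing cb_func(cb_arg, aborted), saving cb_next before each call. Here is a userspace sketch of that append-and-fire pattern; the types are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

/* One deferred callback in a singly-linked list. */
struct log_callback {
        void (*cb_func)(void *arg, bool aborted);
        void *cb_arg;
        struct log_callback *cb_next;
};

/* List head plus a tail pointer for O(1) append. */
struct log_iclog {
        struct log_callback *callback;
        struct log_callback **callback_tail;
};

static void iclog_init(struct log_iclog *ic)
{
        ic->callback = NULL;
        ic->callback_tail = &ic->callback;      /* tail points at the head slot */
}

/* Append: write through the tail pointer, then advance it. */
static void log_notify(struct log_iclog *ic, struct log_callback *cb)
{
        cb->cb_next = NULL;
        *ic->callback_tail = cb;
        ic->callback_tail = &cb->cb_next;
}

/* Fire and clear: walk the chain, saving cb_next before each call. */
static void log_do_callbacks(struct log_iclog *ic, bool aborted)
{
        struct log_callback *cb, *cb_next;

        for (cb = ic->callback; cb; cb = cb_next) {
                cb_next = cb->cb_next;
                cb->cb_func(cb->cb_arg, aborted);
        }
        iclog_init(ic);                         /* list is consumed */
}

static void say_done(void *arg, bool aborted)
{
        printf("%s (aborted=%d)\n", (const char *)arg, aborted);
}

int main(void)
{
        struct log_iclog ic;
        struct log_callback a = { .cb_func = say_done, .cb_arg = "first" };
        struct log_callback b = { .cb_func = say_done, .cb_arg = "second" };

        iclog_init(&ic);
        log_notify(&ic, &a);
        log_notify(&ic, &b);
        log_do_callbacks(&ic, false);
        return 0;
}

Saving cb_next before invoking each callback matters because a callback may free or reuse its own entry.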
|
D | xfs_buf_item.c |
    990  void (*cb)(xfs_buf_t *, xfs_log_item_t *),  in xfs_buf_attach_iodone()
    997  lip->li_cb = cb;  in xfs_buf_attach_iodone()
|
/fs/nfs/ |
D | dns_resolve.c |
    153  struct cache_head *cb)  in nfs_dns_match() argument
    159  b = container_of(cb, struct nfs_dns_ent, h);  in nfs_dns_match()
|
/fs/ceph/ |
D | mds_client.c |
    1065  int (*cb)(struct inode *, struct ceph_cap *,  in iterate_session_caps()
    1096  ret = cb(inode, cap, arg);  in iterate_session_caps()
|