/fs/nfsd/ |
D | nfscache.c |
    49   struct svc_cacherep *rp;  in nfsd_reply_cache_init() local
    55   rp = kmalloc(sizeof(*rp), GFP_KERNEL);  in nfsd_reply_cache_init()
    56   if (!rp)  in nfsd_reply_cache_init()
    58   list_add(&rp->c_lru, &lru_head);  in nfsd_reply_cache_init()
    59   rp->c_state = RC_UNUSED;  in nfsd_reply_cache_init()
    60   rp->c_type = RC_NOCACHE;  in nfsd_reply_cache_init()
    61   INIT_HLIST_NODE(&rp->c_hash);  in nfsd_reply_cache_init()
    79   struct svc_cacherep *rp;  in nfsd_reply_cache_shutdown() local
    82   rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);  in nfsd_reply_cache_shutdown()
    83   if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)  in nfsd_reply_cache_shutdown()
    [all …]
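The nfscache.c hits above show the reply cache being pre-populated onto an LRU list at init and torn down entry by entry at shutdown. Below is a minimal userspace sketch of that allocate-onto-a-list / walk-and-free pattern; the entry struct, CACHESIZE and state names are illustrative stand-ins, not the kernel's svc_cacherep.

    /*
     * Sketch only: pre-allocate cache entries onto an LRU list, then walk
     * the list and free everything on shutdown.
     */
    #include <stdio.h>
    #include <stdlib.h>

    enum entry_state { RC_UNUSED, RC_INPROG, RC_DONE };

    struct cache_entry {
        enum entry_state c_state;
        struct cache_entry *c_next;    /* simple singly linked LRU list */
    };

    #define CACHESIZE 8

    static struct cache_entry *lru_head;

    static int reply_cache_init(void)
    {
        for (int i = 0; i < CACHESIZE; i++) {
            struct cache_entry *rp = malloc(sizeof(*rp));
            if (!rp)
                return -1;             /* caller unwinds via shutdown */
            rp->c_state = RC_UNUSED;
            rp->c_next = lru_head;     /* push onto the LRU list */
            lru_head = rp;
        }
        return 0;
    }

    static void reply_cache_shutdown(void)
    {
        while (lru_head) {
            struct cache_entry *rp = lru_head;

            lru_head = rp->c_next;
            free(rp);                  /* RC_REPLBUFF entries would also free their reply buffer here */
        }
    }

    int main(void)
    {
        if (reply_cache_init() == 0)
            puts("cache initialised");
        reply_cache_shutdown();
        return 0;
    }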
|
D | nfs4state.c |
    1012  struct nfs4_replay *rp;  in alloc_init_open_stateowner() local
    1033  rp = &sop->so_replay;  in alloc_init_open_stateowner()
    1034  rp->rp_status = nfserr_serverfault;  in alloc_init_open_stateowner()
    1035  rp->rp_buflen = 0;  in alloc_init_open_stateowner()
    1036  rp->rp_buf = rp->rp_ibuf;  in alloc_init_open_stateowner()
    2575  struct nfs4_replay *rp;  in alloc_init_lock_stateowner() local
    2598  rp = &sop->so_replay;  in alloc_init_lock_stateowner()
    2599  rp->rp_status = nfserr_serverfault;  in alloc_init_lock_stateowner()
    2600  rp->rp_buflen = 0;  in alloc_init_lock_stateowner()
    2601  rp->rp_buf = rp->rp_ibuf;  in alloc_init_lock_stateowner()
|
D | nfs4proc.c |
    186  struct nfs4_replay *rp = &open->op_stateowner->so_replay;  in nfsd4_open() local
    188  cstate->current_fh.fh_handle.fh_size = rp->rp_openfh_len;  in nfsd4_open()
    189  memcpy(&cstate->current_fh.fh_handle.fh_base, rp->rp_openfh,  in nfsd4_open()
    190  rp->rp_openfh_len);  in nfsd4_open()
|
D | nfs4xdr.c |
    2658  struct nfs4_replay *rp = op->replay;  in nfsd4_encode_replay() local
    2660  BUG_ON(!rp);  in nfsd4_encode_replay()
    2664  *p++ = rp->rp_status;  /* already xdr'ed */  in nfsd4_encode_replay()
    2667  RESERVE_SPACE(rp->rp_buflen);  in nfsd4_encode_replay()
    2668  WRITEMEM(rp->rp_buf, rp->rp_buflen);  in nfsd4_encode_replay()
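Taken together, the nfs4state.c, nfs4proc.c and nfs4xdr.c hits trace the NFSv4 replay cache: each stateowner keeps the status and encoded body of its last reply, and a retransmitted request is answered by copying that saved data back out instead of re-executing the operation. A small sketch of the idea, with made-up struct and function names:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define REPLAY_BUFSZ 128

    struct replay {
        uint32_t rp_status;              /* saved status, already in wire order */
        size_t   rp_buflen;              /* bytes of saved reply body */
        char    *rp_buf;                 /* normally points at the inline buffer */
        char     rp_ibuf[REPLAY_BUFSZ];  /* small inline buffer, like rp_ibuf in the kernel */
    };

    static void replay_init(struct replay *rp)
    {
        rp->rp_status = 0;
        rp->rp_buflen = 0;
        rp->rp_buf = rp->rp_ibuf;        /* default to the inline buffer */
    }

    /* Copy the cached reply into the outgoing buffer; returns bytes written. */
    static size_t encode_replay(char *out, size_t outlen, const struct replay *rp)
    {
        size_t need = sizeof(rp->rp_status) + rp->rp_buflen;

        if (outlen < need)
            return 0;                    /* caller must reserve space first */
        memcpy(out, &rp->rp_status, sizeof(rp->rp_status));
        memcpy(out + sizeof(rp->rp_status), rp->rp_buf, rp->rp_buflen);
        return need;
    }

    int main(void)
    {
        struct replay r;
        char out[REPLAY_BUFSZ + sizeof(uint32_t)];

        replay_init(&r);
        r.rp_buflen = 4;
        memcpy(r.rp_buf, "data", 4);
        printf("replayed %zu bytes\n", encode_replay(out, sizeof(out), &r));
        return 0;
    }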
|
/fs/ntfs/ |
D | logfile.c |
    51   RESTART_PAGE_HEADER *rp, s64 pos)  in ntfs_check_restart_page_header() argument
    62   logfile_system_page_size = le32_to_cpu(rp->system_page_size);  in ntfs_check_restart_page_header()
    63   logfile_log_page_size = le32_to_cpu(rp->log_page_size);  in ntfs_check_restart_page_header()
    82   if (sle16_to_cpu(rp->major_ver) != 1 ||  in ntfs_check_restart_page_header()
    83   sle16_to_cpu(rp->minor_ver) != 1) {  in ntfs_check_restart_page_header()
    86   "1.1 only.)", (int)sle16_to_cpu(rp->major_ver),  in ntfs_check_restart_page_header()
    87   (int)sle16_to_cpu(rp->minor_ver));  in ntfs_check_restart_page_header()
    94   if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) {  in ntfs_check_restart_page_header()
    100  if (usa_count != le16_to_cpu(rp->usa_count)) {  in ntfs_check_restart_page_header()
    106  usa_ofs = le16_to_cpu(rp->usa_ofs);  in ntfs_check_restart_page_header()
    [all …]
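ntfs_check_restart_page_header() is a series of sanity checks on an on-disk $LogFile restart page: page-size fields, version numbers and update-sequence-array bookkeeping all have to match what the driver supports before the page is trusted. A stripped-down sketch of that validate-then-reject style, using a simplified header layout and assuming the fields have already been converted to host byte order (the real code uses le32_to_cpu/sle16_to_cpu):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct restart_page_header {
        uint32_t system_page_size;   /* simplified: host byte order here */
        uint32_t log_page_size;
        int16_t  major_ver;
        int16_t  minor_ver;
    };

    static bool is_pow2(uint32_t v)
    {
        return v && !(v & (v - 1));
    }

    static bool check_restart_page_header(const struct restart_page_header *rp)
    {
        /* Page sizes must be sane powers of two. */
        if (!is_pow2(rp->system_page_size) || !is_pow2(rp->log_page_size))
            return false;
        /* Only version 1.1 restart pages are handled, as in the excerpt. */
        if (rp->major_ver != 1 || rp->minor_ver != 1)
            return false;
        return true;
    }

    int main(void)
    {
        struct restart_page_header h = { 4096, 4096, 1, 1 };

        printf("restart page %s\n",
               check_restart_page_header(&h) ? "ok" : "rejected");
        return 0;
    }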
|
D | logfile.h |
    300  RESTART_PAGE_HEADER **rp);
    303  const RESTART_PAGE_HEADER *rp);
|
D | super.c |
    1187  RESTART_PAGE_HEADER **rp)  in load_and_check_logfile() argument
    1199  if (!ntfs_check_logfile(tmp_ino, rp)) {  in load_and_check_logfile()
    1751  RESTART_PAGE_HEADER *rp;  in load_system_files() local
    1926  rp = NULL;  in load_system_files()
    1927  if (!load_and_check_logfile(vol, &rp) ||  in load_system_files()
    1928  !ntfs_is_logfile_clean(vol->logfile_ino, rp)) {  in load_system_files()
    1944  BUG_ON(!rp);  in load_system_files()
    1945  ntfs_free(rp);  in load_system_files()
    1957  ntfs_free(rp);  in load_system_files()
|
D | layout.h |
    1106  } __attribute__ ((__packed__)) rp;  member
|
/fs/cifs/ |
D | cifs_unicode.h |
    281  register const struct UniCaseRange *rp;  in UniToupper() local
    287  rp = CifsUniUpperRange;  /* Use range tables */  in UniToupper()
    288  while (rp->start) {  in UniToupper()
    289  if (uc < rp->start)  /* Before start of range */  in UniToupper()
    291  if (uc <= rp->end)  /* In range */  in UniToupper()
    292  return uc + rp->table[uc - rp->start];  in UniToupper()
    293  rp++;  /* Try next range */  in UniToupper()
    323  register struct UniCaseRange *rp;  in UniTolower() local
    329  rp = UniLowerRange;  /* Use range tables */  in UniTolower()
    330  while (rp->start) {  in UniTolower()
    [all …]
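Both this header and jfs_unicode.h below implement case conversion the same way: walk an array of {start, end, offset-table} ranges and, once the character falls inside a range, add its per-character signed offset. A self-contained userspace sketch of the technique, using a single demo range for ASCII 'a'..'z' (the real tables cover many Unicode ranges):

    #include <stdio.h>
    #include <wchar.h>

    struct case_range {
        wchar_t start;             /* first character of the range; 0 terminates the array */
        wchar_t end;               /* last character of the range */
        const signed char *table;  /* per-character offset to the uppercase form */
    };

    /* 'a' + (-32) == 'A'; GCC/Clang range initializer, as kernel code often uses. */
    static const signed char ascii_lower_to_upper[26] = { [0 ... 25] = -32 };

    static const struct case_range upper_ranges[] = {
        { L'a', L'z', ascii_lower_to_upper },
        { 0, 0, NULL },
    };

    static wchar_t to_upper(wchar_t uc)
    {
        const struct case_range *rp;

        for (rp = upper_ranges; rp->start; rp++) {
            if (uc < rp->start)    /* before this range: no mapping exists */
                break;
            if (uc <= rp->end)     /* inside the range: apply the offset */
                return uc + rp->table[uc - rp->start];
        }
        return uc;                 /* unmapped characters come back unchanged */
    }

    int main(void)
    {
        printf("%lc -> %lc\n", L'q', to_upper(L'q'));
        return 0;
    }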
|
/fs/jfs/ |
D | jfs_unicode.h |
    122  UNICASERANGE *rp;  in UniToupper() local
    127  rp = UniUpperRange;  /* Use range tables */  in UniToupper()
    128  while (rp->start) {  in UniToupper()
    129  if (uc < rp->start)  /* Before start of range */  in UniToupper()
    131  if (uc <= rp->end)  /* In range */  in UniToupper()
    132  return uc + rp->table[uc - rp->start];  in UniToupper()
    133  rp++;  /* Try next range */  in UniToupper()
|
D | jfs_dtree.c |
    181   static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
    937   dtpage_t *rp;  /* new right page split from sp */  in dtSplitUp() local
    1096  if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {  in dtSplitUp()
    1176  switch (rp->header.flag & BT_TYPE) {  in dtSplitUp()
    1188  rp, 0, &key,  in dtSplitUp()
    1201  dtGetKey(rp, 0, &key, sbi->mntflag);  in dtSplitUp()
    1212  dtGetKey(rp, 0, &key, sbi->mntflag);  in dtSplitUp()
    1249  dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);  in dtSplitUp()
    1337  dtpage_t *rp;  /* new right page allocated */  in dtSplitPage() local
    1386  rp = (dtpage_t *) rmp->data;  in dtSplitPage()
    [all …]
|
D | jfs_xtree.c |
    1225  xtpage_t *rp;  /* new right page allocated */  in xtSplitPage() local
    1272  rp = (xtpage_t *) rmp->data;  in xtSplitPage()
    1273  rp->header.self = *pxd;  in xtSplitPage()
    1274  rp->header.flag = sp->header.flag & BT_TYPE;  in xtSplitPage()
    1275  rp->header.maxentry = sp->header.maxentry;  /* little-endian */  in xtSplitPage()
    1276  rp->header.nextindex = cpu_to_le16(XTENTRYSTART);  in xtSplitPage()
    1298  rp->header.next = cpu_to_le64(nextbn);  in xtSplitPage()
    1299  rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));  in xtSplitPage()
    1324  xad = &rp->xad[XTENTRYSTART];  in xtSplitPage()
    1328  rp->header.nextindex = cpu_to_le16(XTENTRYSTART + 1);  in xtSplitPage()
    [all …]
|
/fs/xfs/ |
D | xfs_ialloc_btree.h |
    75  #define XFS_INOBT_IS_FREE(rp,i) \  argument
    76  (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
    77  #define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))  argument
    78  #define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))  argument
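These macros track free inodes in an inode-btree record with one "free" bit per inode of the chunk. A toy version of the same bitmask idiom; the names below are illustrative stand-ins for the xfs ones:

    #include <stdint.h>
    #include <stdio.h>

    struct inobt_rec {
        uint64_t ir_free;              /* bit i set => inode i of the chunk is free */
    };

    #define INOBT_MASK(i)         ((uint64_t)1 << (i))
    #define INOBT_IS_FREE(rp, i)  (((rp)->ir_free & INOBT_MASK(i)) != 0)
    #define INOBT_SET_FREE(rp, i) ((rp)->ir_free |= INOBT_MASK(i))
    #define INOBT_CLR_FREE(rp, i) ((rp)->ir_free &= ~INOBT_MASK(i))

    int main(void)
    {
        struct inobt_rec rec = { .ir_free = 0 };

        INOBT_SET_FREE(&rec, 3);                      /* mark inode 3 free */
        printf("inode 3 free? %d\n", INOBT_IS_FREE(&rec, 3));
        INOBT_CLR_FREE(&rec, 3);                      /* allocate it again */
        printf("inode 3 free? %d\n", INOBT_IS_FREE(&rec, 3));
        return 0;
    }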
|
D | xfs_btree.c |
    1773  union xfs_btree_rec *rp;  in xfs_btree_update() local
    1788  rp = xfs_btree_rec_addr(cur, ptr, block);  in xfs_btree_update()
    1791  xfs_btree_copy_recs(cur, rp, rec, 1);  in xfs_btree_update()
    2815  union xfs_btree_rec *rp;  in xfs_btree_insrec() local
    2817  rp = xfs_btree_rec_addr(cur, ptr, block);  in xfs_btree_insrec()
    2819  xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);  in xfs_btree_insrec()
    2822  xfs_btree_copy_recs(cur, rp, recp, 1);  in xfs_btree_insrec()
    2827  ASSERT(cur->bc_ops->recs_inorder(cur, rp,  in xfs_btree_insrec()
|
/fs/ |
D | binfmt_flat.c |
    422  unsigned long *reloc = 0, *rp;  in load_flat_file() local
    710  for (rp = (unsigned long *)datapos; *rp != 0xffffffff; rp++) {  in load_flat_file()
    712  if (*rp) {  in load_flat_file()
    713  addr = calc_reloc(*rp, libinfo, id, 0);  in load_flat_file()
    718  *rp = addr;  in load_flat_file()
    746  rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);  in load_flat_file()
    747  if (rp == (unsigned long *)RELOC_FAILED) {  in load_flat_file()
    753  addr = flat_get_addr_from_rp(rp, relval, flags,  in load_flat_file()
    769  flat_put_addr_at_rp(rp, addr, relval);  in load_flat_file()
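The first loop in load_flat_file() walks a relocation table at the start of the data segment, terminated by 0xffffffff, and rewrites every non-zero slot from a link-time offset to a run-time address. A simplified userspace model of that loop; calc_reloc() here is a toy base-plus-offset stand-in for the kernel helper of the same name:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t load_base;             /* where the segment landed at run time */

    static uint32_t calc_reloc(uint32_t offset)
    {
        return load_base + offset;         /* the real helper also validates the offset */
    }

    static void fixup_got(uint32_t *rp)
    {
        for (; *rp != 0xffffffff; rp++) {  /* table is terminated by all-ones */
            if (*rp)                       /* zero entries are left untouched */
                *rp = calc_reloc(*rp);
        }
    }

    int main(void)
    {
        uint32_t got[] = { 0x10, 0x24, 0, 0x30, 0xffffffff };

        load_base = 0x8000;
        fixup_got(got);
        for (int i = 0; got[i] != 0xffffffff; i++)
            printf("got[%d] = 0x%x\n", i, got[i]);
        return 0;
    }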
|
/fs/dlm/ |
D | debug_fs.c |
    470  struct dlm_rsb *r, *rp;  in table_seq_next() local
    481  rp = ri->rsb;  in table_seq_next()
    482  next = rp->res_hashchain.next;  in table_seq_next()
    489  dlm_put_rsb(rp);  in table_seq_next()
    494  dlm_put_rsb(rp);  in table_seq_next()
|
/fs/ncpfs/ |
D | sock.c |
    168  static inline int get_conn_number(struct ncp_reply_header *rp)  in get_conn_number() argument
    170  return rp->conn_low | (rp->conn_high << 8);  in get_conn_number()
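get_conn_number() just reassembles a 16-bit connection number that the NCP reply header carries as separate low and high bytes. A self-contained sketch; the struct here keeps only the two fields relevant to the excerpt:

    #include <stdint.h>
    #include <stdio.h>

    struct ncp_reply_header {
        uint8_t conn_low;              /* low 8 bits of the connection number */
        uint8_t conn_high;             /* high 8 bits of the connection number */
    };

    static inline int get_conn_number(const struct ncp_reply_header *rp)
    {
        return rp->conn_low | (rp->conn_high << 8);
    }

    int main(void)
    {
        struct ncp_reply_header rh = { .conn_low = 0x34, .conn_high = 0x12 };

        printf("conn = 0x%04x\n", get_conn_number(&rh));   /* prints 0x1234 */
        return 0;
    }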
|