/fs/fuse/
file.c
    441   if (idx_from >= curr_index + wpa->ia.ap.num_pages)  in fuse_find_writeback()
    653   struct fuse_args *args = &ia->ap.args;  in fuse_read_args_fill()
    669   static void fuse_release_user_pages(struct fuse_args_pages *ap,  in fuse_release_user_pages()  argument
    674   for (i = 0; i < ap->num_pages; i++) {  in fuse_release_user_pages()
    676   set_page_dirty_lock(ap->pages[i]);  in fuse_release_user_pages()
    677   put_page(ap->pages[i]);  in fuse_release_user_pages()
    755   ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,  in fuse_io_alloc()
    756   &ia->ap.descs);  in fuse_io_alloc()
    757   if (!ia->ap.pages) {  in fuse_io_alloc()
    767   kfree(ia->ap.pages);  in fuse_io_free()
    [all …]
cuse.c
    303   struct fuse_args_pages ap;  member
    321   struct cuse_init_args *ia = container_of(args, typeof(*ia), ap.args);  in cuse_process_init_reply()
    322   struct fuse_args_pages *ap = &ia->ap;  in cuse_process_init_reply()  local
    325   struct page *page = ap->pages[0];  in cuse_process_init_reply()
    342   rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size,  in cuse_process_init_reply()
    431   struct fuse_args_pages *ap;  in cuse_send_init()  local
    444   ap = &ia->ap;  in cuse_send_init()
    448   ap->args.opcode = CUSE_INIT;  in cuse_send_init()
    449   ap->args.in_numargs = 1;  in cuse_send_init()
    450   ap->args.in_args[0].size = sizeof(ia->in);  in cuse_send_init()
    [all …]
virtio_fs.c
    555   struct fuse_args_pages *ap;  in virtio_fs_request_complete()  local
    568   ap = container_of(args, typeof(*ap), args);  in virtio_fs_request_complete()
    569   for (i = 0; i < ap->num_pages; i++) {  in virtio_fs_request_complete()
    570   thislen = ap->descs[i].length;  in virtio_fs_request_complete()
    572   WARN_ON(ap->descs[i].offset);  in virtio_fs_request_complete()
    573   page = ap->pages[i];  in virtio_fs_request_complete()
    1049  struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);  in sg_count_fuse_req()  local
    1057  total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,  in sg_count_fuse_req()
    1071  total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,  in sg_count_fuse_req()
    1107  struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);  in sg_init_fuse_args()  local
    [all …]
dev.c
    1001  struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);  in fuse_copy_pages()  local
    1004  for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {  in fuse_copy_pages()
    1006  unsigned int offset = ap->descs[i].offset;  in fuse_copy_pages()
    1007  unsigned int count = min(nbytes, ap->descs[i].length);  in fuse_copy_pages()
    1009  err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);  in fuse_copy_pages()
    1667  struct fuse_args_pages ap;  member
    1675  container_of(args, typeof(*ra), ap.args);  in fuse_retrieve_end()
    1677  release_pages(ra->ap.pages, ra->ap.num_pages);  in fuse_retrieve_end()
    1695  struct fuse_args_pages *ap;  in fuse_retrieve()  local
    1710  args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));  in fuse_retrieve()
    [all …]
readdir.c
    340   struct fuse_args_pages *ap = &ia.ap;  in fuse_readdir_uncached()  local
    350   ap->args.out_pages = true;  in fuse_readdir_uncached()
    351   ap->num_pages = 1;  in fuse_readdir_uncached()
    352   ap->pages = &page;  in fuse_readdir_uncached()
    353   ap->descs = &desc;  in fuse_readdir_uncached()
    363   res = fuse_simple_request(fm, &ap->args);  in fuse_readdir_uncached()
dir.c
    1661  struct fuse_args_pages ap = {  in fuse_readlink_page()  local
    1669  ap.args.opcode = FUSE_READLINK;  in fuse_readlink_page()
    1670  ap.args.nodeid = get_node_id(inode);  in fuse_readlink_page()
    1671  ap.args.out_pages = true;  in fuse_readlink_page()
    1672  ap.args.out_argvar = true;  in fuse_readlink_page()
    1673  ap.args.page_zeroing = true;  in fuse_readlink_page()
    1674  ap.args.out_numargs = 1;  in fuse_readlink_page()
    1675  ap.args.out_args[0].size = desc.length;  in fuse_readlink_page()
    1676  res = fuse_simple_request(fm, &ap.args);  in fuse_readlink_page()
fuse_i.h
    1028  struct fuse_args_pages ap;  member
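
Taken together, the fuse fragments above show one recurring shape: struct fuse_args_pages (the `ap` member or variable) bundles a fuse_args request with an array of pages and per-page descriptors, and the caller fills in the args fields before handing &ap.args to fuse_simple_request(). The following is a minimal sketch of that shape, reconstructed from the fuse_readlink_page() and fuse_readdir_uncached() lines rather than copied from the kernel; the wrapper function, its name, and the fuse_page_desc initialisation are illustrative assumptions.

/*
 * Sketch only (assumes the fuse-internal definitions from fs/fuse/fuse_i.h):
 * issue a request whose reply payload is written straight into one page.
 */
static int example_read_into_page(struct fuse_mount *fm, struct inode *inode,
                                  struct page *page)
{
        struct fuse_page_desc desc = { .length = PAGE_SIZE };  /* assumed length */
        struct fuse_args_pages ap = {
                .num_pages = 1,         /* one page ...                    */
                .pages = &page,         /* ... at this address ...         */
                .descs = &desc,         /* ... covering desc.length bytes  */
        };
        ssize_t res;

        ap.args.opcode = FUSE_READLINK;         /* opcode as in fuse_readlink_page() */
        ap.args.nodeid = get_node_id(inode);
        ap.args.out_pages = true;               /* reply data lands in ap.pages[]    */
        ap.args.out_argvar = true;              /* server may return fewer bytes     */
        ap.args.page_zeroing = true;            /* zero the tail of a short reply    */
        ap.args.out_numargs = 1;
        ap.args.out_args[0].size = desc.length;

        res = fuse_simple_request(fm, &ap.args);        /* bytes received or -errno */
        return res < 0 ? res : 0;
}
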
/fs/xfs/
xfs_bmap_util.c
    74    struct xfs_bmalloca *ap)  /* bmap alloc argument struct */  in xfs_bmap_rtalloc()  argument
    84    mp = ap->ip->i_mount;  in xfs_bmap_rtalloc()
    85    align = xfs_get_extsz_hint(ap->ip);  in xfs_bmap_rtalloc()
    87    error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,  in xfs_bmap_rtalloc()
    88    align, 1, ap->eof, 0,  in xfs_bmap_rtalloc()
    89    ap->conv, &ap->offset, &ap->length);  in xfs_bmap_rtalloc()
    92    ASSERT(ap->length);  in xfs_bmap_rtalloc()
    93    ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);  in xfs_bmap_rtalloc()
    99    div_u64_rem(ap->offset, align, &mod);  in xfs_bmap_rtalloc()
    100   if (mod || ap->length % align)  in xfs_bmap_rtalloc()
    [all …]
xfs_bmap_util.h
    20    int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
    27    xfs_bmap_rtalloc(struct xfs_bmalloca *ap)  in xfs_bmap_rtalloc()  argument
    50    void xfs_bmap_adjacent(struct xfs_bmalloca *ap);
xfs_filestream.c
    322   struct xfs_bmalloca *ap,  in xfs_filestream_new_ag()  argument
    325   struct xfs_inode *ip = ap->ip, *pip;  in xfs_filestream_new_ag()
    327   xfs_extlen_t minlen = ap->length;  in xfs_filestream_new_ag()
    346   if (ap->datatype & XFS_ALLOC_USERDATA)  in xfs_filestream_new_ag()
    348   if (ap->tp->t_flags & XFS_TRANS_LOWMODE)  in xfs_filestream_new_ag()
xfs_filestream.h
    17    int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp);
xfs_buf.c
    2121  struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);  in xfs_buf_cmp()  local
    2125  diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;  in xfs_buf_cmp()
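
xfs_buf_cmp() (and xbitmap_range_cmp() in /fs/xfs/scrub/bitmap.c further down) is the usual list_sort() comparator shape: recover the containing object from its embedded list_head with container_of(), compare a sort key, and return -1/0/1. A hedged sketch of that shape; only lines 2121 and 2125 come from the listing, the rest of the body and the exact comparator signature are assumptions.

/* Comparator for list_sort(): order xfs_buf objects by the block number of
 * their first map.  container_of() turns the embedded b_list node back into
 * the xfs_buf that contains it. */
static int example_buf_cmp(void *priv, const struct list_head *a,
                           const struct list_head *b)
{
        struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
        struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
        s64 diff;

        diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
        if (diff < 0)
                return -1;
        if (diff > 0)
                return 1;
        return 0;
}
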
/fs/xfs/libxfs/
xfs_bmap.c
    3129  struct xfs_bmalloca *ap)  /* bmap alloc argument struct */  in xfs_bmap_adjacent()  argument
    3144  mp = ap->ip->i_mount;  in xfs_bmap_adjacent()
    3145  nullfb = ap->tp->t_firstblock == NULLFSBLOCK;  in xfs_bmap_adjacent()
    3146  rt = XFS_IS_REALTIME_INODE(ap->ip) &&  in xfs_bmap_adjacent()
    3147  (ap->datatype & XFS_ALLOC_USERDATA);  in xfs_bmap_adjacent()
    3149  ap->tp->t_firstblock);  in xfs_bmap_adjacent()
    3154  if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&  in xfs_bmap_adjacent()
    3155  !isnullstartblock(ap->prev.br_startblock) &&  in xfs_bmap_adjacent()
    3156  ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,  in xfs_bmap_adjacent()
    3157  ap->prev.br_startblock)) {  in xfs_bmap_adjacent()
    [all …]
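
The xfs_bmap_adjacent() lines show the core of the placement heuristic: when allocating at EOF and the previous extent is a real on-disk extent (not a hole or delalloc placeholder), propose the block immediately following it so the file stays contiguous. A condensed sketch of just that check; ISVALID() is a macro local to xfs_bmap.c, and returning the candidate block (rather than storing it into the allocation argument, as the real function presumably does past the truncation) is an assumption.

/* Sketch: compute an allocation hint adjacent to the previous extent. */
static xfs_fsblock_t example_adjacent_hint(struct xfs_bmalloca *ap)
{
        if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
            !isnullstartblock(ap->prev.br_startblock) &&
            ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
                    ap->prev.br_startblock))
                /* block right after the previous extent keeps the file contiguous */
                return ap->prev.br_startblock + ap->prev.br_blockcount;

        return NULLFSBLOCK;     /* no adjacency hint available */
}
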
/fs/gfs2/
quota.h
    27    struct gfs2_alloc_parms *ap);
    41    struct gfs2_alloc_parms *ap)  in gfs2_quota_lock_check()  argument
    46    ap->allowed = UINT_MAX;  /* Assume we are permitted a whole lot */  in gfs2_quota_lock_check()
    55    ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);  in gfs2_quota_lock_check()
file.c
    451   struct gfs2_alloc_parms ap = { .aflags = 0, };  in gfs2_page_mkwrite()  local
    507   ap.target = data_blocks + ind_blocks;  in gfs2_page_mkwrite()
    508   ret = gfs2_quota_lock_check(ip, &ap);  in gfs2_page_mkwrite()
    511   ret = gfs2_inplace_reserve(ip, &ap);  in gfs2_page_mkwrite()
    1061  struct gfs2_alloc_parms ap = { .aflags = 0, };  in __gfs2_fallocate()  local
    1086  ap.min_target = data_blocks + ind_blocks;  in __gfs2_fallocate()
    1108  ap.target = data_blocks + ind_blocks;  in __gfs2_fallocate()
    1110  error = gfs2_quota_lock_check(ip, &ap);  in __gfs2_fallocate()
    1116  if (ap.allowed)  in __gfs2_fallocate()
    1117  max_blks = ap.allowed;  in __gfs2_fallocate()
    [all …]
inode.c
    395   struct gfs2_alloc_parms ap = { .target = *dblocks, .aflags = flags, };  in alloc_dinode()  local
    398   error = gfs2_quota_lock_check(ip, &ap);  in alloc_dinode()
    402   error = gfs2_inplace_reserve(ip, &ap);  in alloc_dinode()
    537   struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };  in link_dinode()  local
    541   error = gfs2_quota_lock_check(dip, &ap);  in link_dinode()
    545   error = gfs2_inplace_reserve(dip, &ap);  in link_dinode()
    989   struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };  in gfs2_link()  local
    990   error = gfs2_quota_lock_check(dip, &ap);  in gfs2_link()
    994   error = gfs2_inplace_reserve(dip, &ap);  in gfs2_link()
    1540  struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };  in gfs2_rename()  local
    [all …]
quota.c
    895   struct gfs2_alloc_parms ap = { .aflags = 0, };  in do_sync()  local
    950   ap.target = reserved;  in do_sync()
    951   error = gfs2_inplace_reserve(ip, &ap);  in do_sync()
    1218  struct gfs2_alloc_parms *ap)  in gfs2_quota_check()  argument
    1226  ap->allowed = UINT_MAX;  /* Assume we are permitted a whole lot */  in gfs2_quota_check()
    1244  if (limit > 0 && (limit - value) < ap->allowed)  in gfs2_quota_check()
    1245  ap->allowed = limit - value;  in gfs2_quota_check()
    1247  if (limit && limit < (value + (s64)ap->target)) {  in gfs2_quota_check()
    1250  if (!ap->min_target || ap->min_target > ap->allowed) {  in gfs2_quota_check()
    1746  struct gfs2_alloc_parms ap = { .aflags = 0, };  in gfs2_set_dqblk()  local
    [all …]
rgrp.c
    1536  const struct gfs2_alloc_parms *ap)  in rg_mblk_search()  argument
    1549  extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);  in rg_mblk_search()
    2007  int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)  in gfs2_inplace_reserve()  argument
    2019  if (gfs2_assert_warn(sdp, ap->target))  in gfs2_inplace_reserve()
    2030  if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))  in gfs2_inplace_reserve()
    2071  (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))  in gfs2_inplace_reserve()
    2079  rg_mblk_search(rs->rs_rbm.rgd, ip, ap);  in gfs2_inplace_reserve()
    2087  if (free_blocks >= ap->target ||  in gfs2_inplace_reserve()
    2088  (loops == 2 && ap->min_target &&  in gfs2_inplace_reserve()
    2089  free_blocks >= ap->min_target)) {  in gfs2_inplace_reserve()
    [all …]
rgrp.h
    41    struct gfs2_alloc_parms *ap);
bmap.c
    1075  struct gfs2_alloc_parms ap = {};  in gfs2_iomap_begin_write()  local
    1081  ap.target = data_blocks + ind_blocks;  in gfs2_iomap_begin_write()
    1082  ret = gfs2_quota_lock_check(ip, &ap);  in gfs2_iomap_begin_write()
    1086  ret = gfs2_inplace_reserve(ip, &ap);  in gfs2_iomap_begin_write()
    2112  struct gfs2_alloc_parms ap = { .target = 1, };  in do_grow()  local
    2118  error = gfs2_quota_lock_check(ip, &ap);  in do_grow()
    2122  error = gfs2_inplace_reserve(ip, &ap);  in do_grow()
xattr.c
    728   struct gfs2_alloc_parms ap = { .target = blks };  in ea_alloc_skeleton()  local
    735   error = gfs2_quota_lock_check(ip, &ap);  in ea_alloc_skeleton()
    739   error = gfs2_inplace_reserve(ip, &ap);  in ea_alloc_skeleton()
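
Nearly every gfs2 caller listed above repeats the same allocation prologue: describe the request in a struct gfs2_alloc_parms (.target is the number of blocks wanted, .min_target an acceptable minimum), take and check quotas with gfs2_quota_lock_check(), then reserve resource-group space with gfs2_inplace_reserve(). A minimal sketch of that sequence; the cleanup calls (gfs2_inplace_release(), gfs2_quota_unlock()) are assumptions about the matching unwind path and do not appear in the fragments.

/* Sketch of the gfs2 allocation prologue seen in bmap.c, inode.c, xattr.c etc. */
static int example_reserve_blocks(struct gfs2_inode *ip, unsigned int blocks)
{
        struct gfs2_alloc_parms ap = { .target = blocks, .aflags = 0, };
        int error;

        error = gfs2_quota_lock_check(ip, &ap); /* may lower ap.allowed */
        if (error)
                return error;

        error = gfs2_inplace_reserve(ip, &ap);  /* pick a resource group */
        if (error)
                goto out_quota;

        /* ... start the transaction and allocate up to ap.allowed blocks ... */

        gfs2_inplace_release(ip);               /* assumed unwind */
out_quota:
        gfs2_quota_unlock(ip);                  /* assumed unwind */
        return error;
}
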
/fs/xfs/scrub/
bitmap.c
    69    struct xbitmap_range *ap;  in xbitmap_range_cmp()  local
    72    ap = container_of(a, struct xbitmap_range, list);  in xbitmap_range_cmp()
    75    if (ap->start > bp->start)  in xbitmap_range_cmp()
    77    if (ap->start < bp->start)  in xbitmap_range_cmp()
/fs/ext4/
ioctl.c
    39    unsigned char *ap, *bp;  in memswap()  local
    41    ap = (unsigned char *)a;  in memswap()
    44    swap(*ap, *bp);  in memswap()
    45    ap++;  in memswap()
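
The last fragment is ext4's small memswap() helper: walk two buffers and exchange their contents byte by byte, using the kernel's swap() macro for each pair. A self-contained userspace sketch of the same idea; the kernel version's exact prototype beyond what the lines show (a void *a, void *b, size_t length signature) is assumed.

#include <stddef.h>

/* Byte-wise exchange of two equally sized, non-overlapping buffers,
 * mirroring the loop in fs/ext4/ioctl.c (the kernel uses its swap()
 * macro where this open-codes the three-step exchange). */
static void memswap(void *a, void *b, size_t len)
{
        unsigned char *ap = a;
        unsigned char *bp = b;

        while (len-- > 0) {
                unsigned char tmp = *ap;
                *ap++ = *bp;
                *bp++ = tmp;
        }
}
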