/fs/erofs/ |
D | data.c |
      26  struct erofs_map_blocks *map, in erofs_map_blocks_flatmode() argument
      31  u64 offset = map->m_la; in erofs_map_blocks_flatmode()
      35  trace_erofs_map_blocks_flatmode_enter(inode, map, flags); in erofs_map_blocks_flatmode()
      41  map->m_flags = EROFS_MAP_MAPPED; in erofs_map_blocks_flatmode()
      44  map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; in erofs_map_blocks_flatmode()
      45  map->m_plen = blknr_to_addr(lastblk) - offset; in erofs_map_blocks_flatmode()
      50  map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + in erofs_map_blocks_flatmode()
      51  vi->xattr_isize + erofs_blkoff(map->m_la); in erofs_map_blocks_flatmode()
      52  map->m_plen = inode->i_size - offset; in erofs_map_blocks_flatmode()
      55  if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { in erofs_map_blocks_flatmode()
      [all …]
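The flat-mode mapping shown above is plain offset arithmetic: file data occupies consecutive blocks starting at raw_blkaddr, and a tail-packed final block lives inside the inode's metadata region. The following is a minimal user-space sketch of that arithmetic, using stand-in types and an assumed block size rather than the kernel's struct erofs_map_blocks:

```c
#include <stdint.h>

#define BLK_SIZE 4096u                /* assumed block size, not the kernel constant */

struct map_blocks {                   /* simplified stand-in, not the kernel struct */
	uint64_t m_la;                /* logical address in the file (input) */
	uint64_t m_pa;                /* physical address on disk (output) */
	uint64_t m_plen;              /* length of this mapping (output) */
};

/*
 * Flat layout: blocks [0, lastblk) sit contiguously at raw_blkaddr; data past
 * lastblk is tail-packed right after the on-disk inode and its xattrs.
 */
static void map_flatmode(struct map_blocks *map, uint64_t raw_blkaddr,
			 uint64_t lastblk, uint64_t inode_loc,
			 uint64_t inode_meta_size, uint64_t i_size)
{
	uint64_t offset = map->m_la;

	if (offset < lastblk * BLK_SIZE) {
		map->m_pa = raw_blkaddr * BLK_SIZE + offset;
		map->m_plen = lastblk * BLK_SIZE - offset;
	} else {
		/* tail block: data follows the inode metadata in the same block */
		map->m_pa = inode_loc + inode_meta_size + (offset % BLK_SIZE);
		map->m_plen = i_size - offset;
	}
}
```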
|
D | zmap.c |
      111 struct erofs_map_blocks *map; member
      126 struct erofs_map_blocks *const map = m->map; in z_erofs_reload_indexes() local
      127 struct page *mpage = map->mpage; in z_erofs_reload_indexes()
      145 map->mpage = NULL; in z_erofs_reload_indexes()
      150 map->mpage = mpage; in z_erofs_reload_indexes()
      424 struct erofs_map_blocks *const map = m->map; in z_erofs_extent_lookback() local
      456 map->m_la = (lcn << lclusterbits) | m->clusterofs; in z_erofs_extent_lookback()
      472 struct erofs_map_blocks *const map = m->map; in z_erofs_get_extent_compressedlen() local
      487 map->m_plen = 1 << lclusterbits; in z_erofs_get_extent_compressedlen()
      533 map->m_plen = m->compressedlcs << lclusterbits; in z_erofs_get_extent_compressedlen()
      [all …]
|
D | zdata.c |
      335 struct erofs_map_blocks map; member
      536 struct erofs_map_blocks *map) in z_erofs_lookup_collection() argument
      549 if (cl->pageofs != (map->m_la & ~PAGE_MASK)) { in z_erofs_lookup_collection()
      556 if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { in z_erofs_lookup_collection()
      561 unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; in z_erofs_lookup_collection()
      563 if (map->m_flags & EROFS_MAP_FULL_MAPPED) in z_erofs_lookup_collection()
      584 struct erofs_map_blocks *map) in z_erofs_register_collection() argument
      591 if (!(map->m_flags & EROFS_MAP_ENCODED)) { in z_erofs_register_collection()
      597 pcl = z_erofs_alloc_pcluster(map->m_plen >> PAGE_SHIFT); in z_erofs_register_collection()
      602 pcl->obj.index = map->m_pa >> PAGE_SHIFT; in z_erofs_register_collection()
      [all …]
|
/fs/ocfs2/ |
D | heartbeat.c |
      27  static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
      29  static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
      34  static void ocfs2_node_map_init(struct ocfs2_node_map *map) in ocfs2_node_map_init() argument
      36  map->num_nodes = OCFS2_NODE_MAP_MAX_NODES; in ocfs2_node_map_init()
      37  memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) * in ocfs2_node_map_init()
      68  static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, in __ocfs2_node_map_set_bit() argument
      71  set_bit(bit, map->map); in __ocfs2_node_map_set_bit()
      75  struct ocfs2_node_map *map, in ocfs2_node_map_set_bit() argument
      80  BUG_ON(bit >= map->num_nodes); in ocfs2_node_map_set_bit()
      82  __ocfs2_node_map_set_bit(map, bit); in ocfs2_node_map_set_bit()
      [all …]
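The node map above is simply a fixed-size bitmap of node numbers, with the locked wrappers bounds-checking before the raw bit helpers run. A rough user-space equivalent, with its own types, an assumed MAX_NODES, and a pthread mutex standing in for the kernel spinlock:

```c
#include <limits.h>
#include <pthread.h>
#include <string.h>

#define MAX_NODES 256                 /* assumed stand-in for OCFS2_NODE_MAP_MAX_NODES */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define LONGS_FOR(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct node_map {                     /* simplified stand-in for struct ocfs2_node_map */
	int num_nodes;
	unsigned long map[LONGS_FOR(MAX_NODES)];
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_map_init(struct node_map *map)
{
	map->num_nodes = MAX_NODES;
	memset(map->map, 0, sizeof(map->map));
}

static void node_map_set_bit(struct node_map *map, int bit)
{
	pthread_mutex_lock(&map_lock);  /* the heartbeat code takes a spinlock here */
	map->map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
	pthread_mutex_unlock(&map_lock);
}
```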
|
/fs/reiserfs/ |
D | objectid.c |
      17  static void check_objectid_map(struct super_block *s, __le32 * map) in check_objectid_map() argument
      19  if (le32_to_cpu(map[0]) != 1) in check_objectid_map()
      21  (long unsigned int)le32_to_cpu(map[0])); in check_objectid_map()
      27  static void check_objectid_map(struct super_block *s, __le32 * map) in check_objectid_map() argument
      53  __le32 *map = objectid_map(s, rs); in reiserfs_get_unused_objectid() local
      58  check_objectid_map(s, map); in reiserfs_get_unused_objectid()
      62  unused_objectid = le32_to_cpu(map[1]); in reiserfs_get_unused_objectid()
      76  map[1] = cpu_to_le32(unused_objectid + 1); in reiserfs_get_unused_objectid()
      86  if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) { in reiserfs_get_unused_objectid()
      87  memmove(map + 1, map + 3, in reiserfs_get_unused_objectid()
      [all …]
|
/fs/ntfs3/ |
D | bitfunc.c |
      36  const u8 *map = (u8 *)lmap + (bit >> 3); in are_bits_clear() local
      40  return !nbits || !(*map & fill_mask[pos + nbits] & in are_bits_clear()
      43  if (*map++ & zero_mask[pos]) in are_bits_clear()
      48  pos = ((size_t)map) & (sizeof(size_t) - 1); in are_bits_clear()
      52  for (nbits -= pos * 8; pos; pos--, map++) { in are_bits_clear()
      53  if (*map) in are_bits_clear()
      59  for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) { in are_bits_clear()
      60  if (*((size_t *)map)) in are_bits_clear()
      64  for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) { in are_bits_clear()
      65  if (*map) in are_bits_clear()
      [all …]
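are_bits_clear() verifies that a run of bits is all zero, taking the unaligned head and tail a byte at a time and the aligned middle a machine word at a time. Below is a simplified, byte-granular sketch of the same idea; it does not reproduce the kernel's fill_mask/zero_mask lookup tables, only the scan structure:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true if bits [bit, bit + nbits) of the bitmap are all zero. */
static bool bits_clear(const uint8_t *bitmap, size_t bit, size_t nbits)
{
	const uint8_t *map = bitmap + (bit >> 3);
	size_t pos = bit & 7;

	/* unaligned head: test individual bits in the first byte */
	for (; pos < 8 && nbits; pos++, nbits--)
		if (*map & (1u << pos))
			return false;
	if (pos == 8)
		map++;

	/* aligned middle: a whole byte at a time (the kernel goes word-wide) */
	for (; nbits >= 8; nbits -= 8, map++)
		if (*map)
			return false;

	/* tail: remaining bits of the last byte */
	for (pos = 0; pos < nbits; pos++)
		if (*map & (1u << pos))
			return false;
	return true;
}
```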
|
/fs/udf/ |
D | partition.c |
      33  struct udf_part_map *map; in udf_get_pblock() local
      39  map = &sbi->s_partmaps[partition]; in udf_get_pblock()
      40  if (map->s_partition_func) in udf_get_pblock()
      41  return map->s_partition_func(sb, block, partition, offset); in udf_get_pblock()
      43  return map->s_partition_root + block + offset; in udf_get_pblock()
      54  struct udf_part_map *map; in udf_get_pblock_virt15() local
      58  map = &sbi->s_partmaps[partition]; in udf_get_pblock_virt15()
      59  vdata = &map->s_type_specific.s_virtual; in udf_get_pblock_virt15()
      118 struct udf_part_map *map; in udf_get_pblock_spar15() local
      122 map = &sbi->s_partmaps[partition]; in udf_get_pblock_spar15()
      [all …]
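udf_get_pblock() is a dispatcher: each partition map entry either carries a type-specific translation callback (virtual, sparable, metadata partitions) or falls back to a plain root-plus-offset sum. A minimal model of that dispatch, using a hypothetical part_map type rather than struct udf_part_map:

```c
#include <stdint.h>

struct part_map;                      /* simplified stand-in for struct udf_part_map */
typedef uint32_t (*pblock_fn)(const struct part_map *pm, uint32_t block,
			      uint32_t offset);

struct part_map {
	pblock_fn translate;          /* set for virtual/sparable/metadata maps */
	uint32_t  partition_root;     /* first physical block of the partition */
};

static uint32_t get_pblock(const struct part_map *maps, uint16_t npart,
			   uint16_t partition, uint32_t block, uint32_t offset)
{
	const struct part_map *map;

	if (partition >= npart)
		return UINT32_MAX;    /* out-of-range partition: treat as an error */

	map = &maps[partition];
	if (map->translate)
		return map->translate(map, block, offset);

	return map->partition_root + block + offset;
}
```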
|
D | super.c |
      274 static void udf_free_partition(struct udf_part_map *map) in udf_free_partition() argument
      279 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) in udf_free_partition()
      280 iput(map->s_uspace.s_table); in udf_free_partition()
      281 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) in udf_free_partition()
      282 udf_sb_free_bitmap(map->s_uspace.s_bitmap); in udf_free_partition()
      283 if (map->s_partition_type == UDF_SPARABLE_MAP15) in udf_free_partition()
      285 brelse(map->s_type_specific.s_sparing.s_spar_map[i]); in udf_free_partition()
      286 else if (map->s_partition_type == UDF_METADATA_MAP25) { in udf_free_partition()
      287 mdata = &map->s_type_specific.s_metadata; in udf_free_partition()
      942 struct udf_part_map *map; in udf_load_metadata_files() local
      [all …]
|
/fs/xfs/libxfs/ |
D | xfs_attr_remote.c |
      384 struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE]; in xfs_attr_rmtval_get() local
      405 blkcnt, map, &nmap, in xfs_attr_rmtval_get()
      415 ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && in xfs_attr_rmtval_get()
      416 (map[i].br_startblock != HOLESTARTBLOCK)); in xfs_attr_rmtval_get()
      417 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); in xfs_attr_rmtval_get()
      418 dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); in xfs_attr_rmtval_get()
      432 lblkno += map[i].br_blockcount; in xfs_attr_rmtval_get()
      433 blkcnt -= map[i].br_blockcount; in xfs_attr_rmtval_get()
      477 struct xfs_bmbt_irec map; in xfs_attr_rmtval_set_value() local
      504 blkcnt, &map, &nmap, in xfs_attr_rmtval_set_value()
      [all …]
|
D | xfs_bit.c |
      20  xfs_bitmap_empty(uint *map, uint size) in xfs_bitmap_empty() argument
      25  if (map[i] != 0) in xfs_bitmap_empty()
      37  xfs_contig_bits(uint *map, uint size, uint start_bit) in xfs_contig_bits() argument
      39  uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); in xfs_contig_bits()
      76  int xfs_next_bit(uint *map, uint size, uint start_bit) in xfs_next_bit() argument
      78  uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); in xfs_next_bit()
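xfs_contig_bits() and xfs_next_bit() scan an array of 32-bit words as one long bitmap. The sketch below models the "find the first set bit at or after start_bit" behaviour; it keeps only the word-skipping idea, not the exact kernel implementation:

```c
#include <stdint.h>

#define BITS_PER_WORD 32u

/* Return the index of the first set bit >= start_bit, or -1 if none below size. */
static int next_bit(const uint32_t *map, uint32_t size, uint32_t start_bit)
{
	uint32_t bit;

	for (bit = start_bit; bit < size; bit++) {
		uint32_t word = map[bit / BITS_PER_WORD];

		if (!word) {                       /* skip an all-zero word at once */
			bit |= BITS_PER_WORD - 1;  /* jump to the word's last bit */
			continue;
		}
		if (word & (1u << (bit % BITS_PER_WORD)))
			return (int)bit;
	}
	return -1;
}
```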
|
/fs/f2fs/ |
D | data.c |
      1427 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, in f2fs_map_blocks() argument
      1430 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
      1433 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; in f2fs_map_blocks()
      1446 map->m_bdev = inode->i_sb->s_bdev; in f2fs_map_blocks()
      1447 map->m_multidev_dio = in f2fs_map_blocks()
      1450 map->m_len = 0; in f2fs_map_blocks()
      1451 map->m_flags = 0; in f2fs_map_blocks()
      1454 pgofs = (pgoff_t)map->m_lblk; in f2fs_map_blocks()
      1459 map->m_may_create) in f2fs_map_blocks()
      1462 map->m_pblk = ei.blk + pgofs - ei.fofs; in f2fs_map_blocks()
      [all …]
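The start of f2fs_map_blocks() shown above resets the output fields and, when the requested offset hits a cached extent, answers directly from the extent info: the physical block is the extent's start block plus the distance from the extent's file offset. A small sketch of that cache-hit arithmetic, with stand-in types instead of the f2fs structures:

```c
#include <stdbool.h>
#include <stdint.h>

struct extent_info {                  /* simplified stand-in for the f2fs extent info */
	uint64_t fofs;                /* first file block covered by the extent */
	uint64_t blk;                 /* first physical block of the extent */
	uint32_t len;                 /* number of blocks in the extent */
};

struct map_blocks {                   /* simplified stand-in for struct f2fs_map_blocks */
	uint64_t m_lblk;              /* requested logical block (input) */
	uint32_t m_len;               /* requested length in, mapped length out */
	uint64_t m_pblk;              /* resulting physical block */
	unsigned int m_flags;
};

#define MAP_MAPPED 0x1

/* Returns true if the request could be answered from the cached extent. */
static bool map_from_extent_cache(struct map_blocks *map,
				  const struct extent_info *ei)
{
	uint64_t pgofs = map->m_lblk;
	uint32_t maxblocks = map->m_len;

	if (pgofs < ei->fofs || pgofs >= ei->fofs + ei->len)
		return false;         /* not covered: fall back to a node lookup */

	map->m_pblk = ei->blk + pgofs - ei->fofs;
	map->m_len = ei->fofs + ei->len - pgofs;
	if (map->m_len > maxblocks)
		map->m_len = maxblocks;
	map->m_flags = MAP_MAPPED;
	return true;
}
```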
|
/fs/ext4/ |
D | readpage.c |
      281 struct ext4_map_blocks map; in ext4_mpage_readpages() local
      284 map.m_pblk = 0; in ext4_mpage_readpages()
      285 map.m_lblk = 0; in ext4_mpage_readpages()
      286 map.m_len = 0; in ext4_mpage_readpages()
      287 map.m_flags = 0; in ext4_mpage_readpages()
      313 if ((map.m_flags & EXT4_MAP_MAPPED) && in ext4_mpage_readpages()
      314 block_in_file > map.m_lblk && in ext4_mpage_readpages()
      315 block_in_file < (map.m_lblk + map.m_len)) { in ext4_mpage_readpages()
      316 unsigned map_offset = block_in_file - map.m_lblk; in ext4_mpage_readpages()
      317 unsigned last = map.m_len - map_offset; in ext4_mpage_readpages()
      [all …]
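ext4_mpage_readpages() keeps one mapping across pages and only re-maps when the next block falls outside the span it already holds. The reuse test and the "how many blocks are still covered" arithmetic reduce to something like this sketch (stand-in struct, not the ext4 one):

```c
#include <stdint.h>

#define MAP_MAPPED 0x1

struct block_map {                    /* simplified stand-in for struct ext4_map_blocks */
	uint64_t m_lblk;              /* first logical block of the cached mapping */
	uint64_t m_pblk;              /* first physical block */
	unsigned int m_len;           /* number of blocks mapped */
	unsigned int m_flags;
};

/*
 * If block_in_file still lies inside the cached mapping, return how many
 * blocks remain mapped from it and report the matching physical block.
 * A return of 0 means the caller must issue a fresh mapping call.
 */
static unsigned int reuse_cached_map(const struct block_map *map,
				     uint64_t block_in_file, uint64_t *pblk)
{
	unsigned int map_offset, last;

	if (!(map->m_flags & MAP_MAPPED) ||
	    block_in_file <= map->m_lblk ||
	    block_in_file >= map->m_lblk + map->m_len)
		return 0;

	map_offset = block_in_file - map->m_lblk;
	last = map->m_len - map_offset;
	*pblk = map->m_pblk + map_offset;
	return last;
}
```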
|
D | inode.c |
      411 struct ext4_map_blocks *map) in __check_block_validity() argument
      417 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { in __check_block_validity()
      418 ext4_error_inode(inode, func, line, map->m_pblk, in __check_block_validity()
      420 "(length %d)", (unsigned long) map->m_lblk, in __check_block_validity()
      421 map->m_pblk, map->m_len); in __check_block_validity()
      442 #define check_block_validity(inode, map) \ argument
      443 __check_block_validity((inode), __func__, __LINE__, (map))
      449 struct ext4_map_blocks *map, in ext4_map_blocks_es_recheck() argument
      454 map->m_flags = 0; in ext4_map_blocks_es_recheck()
      464 retval = ext4_ext_map_blocks(handle, inode, map, 0); in ext4_map_blocks_es_recheck()
      [all …]
|
D | extents.c |
      3327 struct ext4_map_blocks *map, in ext4_split_extent() argument
      3338 int allocated = map->m_len; in ext4_split_extent()
      3346 if (map->m_lblk + map->m_len < ee_block + ee_len) { in ext4_split_extent()
      3355 map->m_lblk + map->m_len, split_flag1, flags1); in ext4_split_extent()
      3359 allocated = ee_len - (map->m_lblk - ee_block); in ext4_split_extent()
      3365 path = ext4_find_extent(inode, map->m_lblk, ppath, flags); in ext4_split_extent()
      3372 (unsigned long) map->m_lblk); in ext4_split_extent()
      3378 if (map->m_lblk >= ee_block) { in ext4_split_extent()
      3386 map->m_lblk, split_flag1, flags); in ext4_split_extent()
      3418 struct ext4_map_blocks *map, in ext4_ext_convert_to_initialized() argument
      [all …]
|
D | fast_commit.c |
      843 struct ext4_map_blocks map; in ext4_fc_write_inode_data() local
      864 map.m_lblk = cur_lblk_off; in ext4_fc_write_inode_data()
      865 map.m_len = new_blk_size - cur_lblk_off + 1; in ext4_fc_write_inode_data()
      866 ret = ext4_map_blocks(NULL, inode, &map, 0); in ext4_fc_write_inode_data()
      870 if (map.m_len == 0) { in ext4_fc_write_inode_data()
      877 lrange.fc_lblk = cpu_to_le32(map.m_lblk); in ext4_fc_write_inode_data()
      878 lrange.fc_len = cpu_to_le32(map.m_len); in ext4_fc_write_inode_data()
      883 unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ? in ext4_fc_write_inode_data()
      887 map.m_len = min(max, map.m_len); in ext4_fc_write_inode_data()
      891 ex->ee_block = cpu_to_le32(map.m_lblk); in ext4_fc_write_inode_data()
      [all …]
|
/fs/omfs/ |
D | bitmap.c |
      48  static int set_run(struct super_block *sb, int map, in set_run() argument
      57  bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run()
      64  map++; in set_run()
      69  clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run()
      74  set_bit(bit, sbi->s_imap[map]); in set_run()
      77  clear_bit(bit, sbi->s_imap[map]); in set_run()
      96  unsigned int map, bit; in omfs_allocate_block() local
      102 map = tmp; in omfs_allocate_block()
      105 if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map])) in omfs_allocate_block()
      109 bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in omfs_allocate_block()
      [all …]
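In the OMFS bitmap code, `map` indexes which cached bitmap block holds the bit and `bit` is the position inside it; allocation is a test-and-set on the in-memory copy followed by rewriting the matching on-disk block. A stripped-down model of the index math and the in-memory step, with an assumed bits-per-chunk value and stand-in types:

```c
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_CHUNK 4096           /* assumed bits per cached bitmap block */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct bitmap_cache {                 /* simplified stand-in for the in-memory bitmap */
	unsigned long *chunk[16];     /* one word array per on-disk bitmap block */
	unsigned int nr_chunks;
};

static bool test_and_set(unsigned long *words, unsigned int bit)
{
	unsigned long mask = 1UL << (bit % BITS_PER_LONG);
	unsigned long *w = &words[bit / BITS_PER_LONG];
	bool was_set = *w & mask;

	*w |= mask;
	return was_set;
}

/* Try to allocate one specific block: 0 on success, -1 if busy or out of range. */
static int allocate_block(struct bitmap_cache *cache, uint64_t block)
{
	unsigned int map = block / BITS_PER_CHUNK;   /* which bitmap chunk */
	unsigned int bit = block % BITS_PER_CHUNK;   /* bit inside that chunk */

	if (map >= cache->nr_chunks || test_and_set(cache->chunk[map], bit))
		return -1;
	/* the real code then reads the matching bitmap block with sb_bread()
	 * and sets the same bit in the on-disk buffer before writing it out */
	return 0;
}
```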
|
/fs/adfs/ |
D | map.c |
      73  unsigned char *map = dm->dm_bh->b_data; in lookup_zone() local
      78  frag = GET_FRAG_ID(map, 8, idmask & 0x7fff); in lookup_zone()
      82  frag = GET_FRAG_ID(map, start, idmask); in lookup_zone()
      84  fragend = find_next_bit_le(map, endbit, start + idlen); in lookup_zone()
      121 unsigned char *map = dm->dm_bh->b_data; in scan_free_map() local
      129 frag = GET_FRAG_ID(map, start, idmask); in scan_free_map()
      141 frag = GET_FRAG_ID(map, start, idmask); in scan_free_map()
      143 fragend = find_next_bit_le(map, endbit, start + idlen); in scan_free_map()
      260 static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map) in adfs_calczonecheck() argument
      267 v0 += map[i] + (v3 >> 8); in adfs_calczonecheck()
      [all …]
|
/fs/xfs/ |
D | xfs_dir2_readdir.c |
      251 struct xfs_bmbt_irec map; in xfs_dir2_leaf_readbuf() local
      272 if (!xfs_iext_lookup_extent(dp, ifp, map_off, &icur, &map)) in xfs_dir2_leaf_readbuf()
      274 if (map.br_startoff >= last_da) in xfs_dir2_leaf_readbuf()
      276 xfs_trim_extent(&map, map_off, last_da - map_off); in xfs_dir2_leaf_readbuf()
      279 new_off = xfs_dir2_da_to_byte(geo, map.br_startoff); in xfs_dir2_leaf_readbuf()
      282 error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, 0, &bp); in xfs_dir2_leaf_readbuf()
      295 *ra_blk = map.br_startoff; in xfs_dir2_leaf_readbuf()
      296 next_ra = map.br_startoff + geo->fsbcount; in xfs_dir2_leaf_readbuf()
      299 if (map.br_blockcount < geo->fsbcount && in xfs_dir2_leaf_readbuf()
      300 !xfs_iext_next_extent(ifp, &icur, &map)) in xfs_dir2_leaf_readbuf()
      [all …]
|
D | xfs_buf.h |
      113 #define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ argument
      114 struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
      202 int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
      204 int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
      208 struct xfs_buf_map *map, int nmaps,
      218 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); in xfs_buf_get()
      220 return xfs_buf_get_map(target, &map, 1, 0, bpp); in xfs_buf_get()
      232 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); in xfs_buf_read()
      234 return xfs_buf_read_map(target, &map, 1, flags, bpp, ops, in xfs_buf_read()
      245 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); in xfs_buf_readahead()
      [all …]
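xfs_buf.h wraps the vectored *_map buffer interfaces for the common single-extent case: DEFINE_SINGLE_BUF_MAP builds a one-element map on the stack and the inline helpers pass it with nmaps == 1. The pattern, reduced to a stand-alone sketch with hypothetical names (buf_map, buf_read_map) instead of the XFS ones:

```c
#include <stdint.h>

struct buf_map {                      /* simplified stand-in for struct xfs_buf_map */
	uint64_t bm_bn;               /* starting block number */
	int      bm_len;              /* length in basic blocks */
};

/* vectored interface: a buffer may be described by several extents */
int buf_read_map(void *target, struct buf_map *map, int nmaps, void **bpp);

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }

/* convenience wrapper for the common one-extent buffer */
static inline int buf_read(void *target, uint64_t blkno, int numblks, void **bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return buf_read_map(target, &map, 1, bpp);
}
```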
|
/fs/btrfs/ |
D | volumes.c |
      1845 n = rb_last(&em_tree->map.rb_root); in find_next_chunk()
      3086 struct map_lookup *map, u64 chunk_offset) in remove_chunk_item() argument
      3097 for (i = 0; i < map->num_stripes; i++) { in remove_chunk_item()
      3100 ret = btrfs_update_device(trans, map->stripes[i].dev); in remove_chunk_item()
      3112 struct map_lookup *map; in btrfs_remove_chunk() local
      3127 map = em->map_lookup; in btrfs_remove_chunk()
      3140 for (i = 0; i < map->num_stripes; i++) { in btrfs_remove_chunk()
      3141 struct btrfs_device *device = map->stripes[i].dev; in btrfs_remove_chunk()
      3143 map->stripes[i].physical, in btrfs_remove_chunk()
      3186 check_system_chunk(trans, map->type); in btrfs_remove_chunk()
      [all …]
|
D | raid56.h |
      10  static inline int nr_parity_stripes(const struct map_lookup *map) in nr_parity_stripes() argument
      12  if (map->type & BTRFS_BLOCK_GROUP_RAID5) in nr_parity_stripes()
      14  else if (map->type & BTRFS_BLOCK_GROUP_RAID6) in nr_parity_stripes()
      20  static inline int nr_data_stripes(const struct map_lookup *map) in nr_data_stripes() argument
      22  return map->num_stripes - nr_parity_stripes(map); in nr_data_stripes()
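raid56.h derives the geometry from the chunk type bits: RAID5 reserves one parity stripe per row, RAID6 two, and the data stripes are whatever remains of num_stripes. The same computation as a self-contained sketch; the flag values and struct are illustrative stand-ins, not the on-disk btrfs constants:

```c
/* illustrative flag values, not the real BTRFS_BLOCK_GROUP_* constants */
#define BLOCK_GROUP_RAID5 (1ULL << 0)
#define BLOCK_GROUP_RAID6 (1ULL << 1)

struct chunk_map {                    /* simplified stand-in for struct map_lookup */
	unsigned long long type;      /* block-group profile flags */
	int num_stripes;              /* total stripes, data + parity */
};

static inline int nr_parity_stripes(const struct chunk_map *map)
{
	if (map->type & BLOCK_GROUP_RAID5)
		return 1;             /* one parity stripe per full stripe */
	if (map->type & BLOCK_GROUP_RAID6)
		return 2;             /* P and Q stripes */
	return 0;
}

static inline int nr_data_stripes(const struct chunk_map *map)
{
	return map->num_stripes - nr_parity_stripes(map);
}
```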
|
D | scrub.c |
      2539 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, in scrub_extent() argument
      2549 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in scrub_extent()
      2550 blocksize = map->stripe_len; in scrub_extent()
      2558 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) in scrub_extent()
      2559 blocksize = map->stripe_len; in scrub_extent()
      2738 struct map_lookup *map, u64 *offset, in get_raid56_logic_offset() argument
      2747 const int data_stripes = nr_data_stripes(map); in get_raid56_logic_offset()
      2749 last_offset = (physical - map->stripes[num].physical) * data_stripes; in get_raid56_logic_offset()
      2755 *offset = last_offset + i * map->stripe_len; in get_raid56_logic_offset()
      2757 stripe_nr = div64_u64(*offset, map->stripe_len); in get_raid56_logic_offset()
      [all …]
|
/fs/nfs/blocklayout/ |
D | dev.c |
      167 struct pnfs_block_dev_map *map) in bl_map_simple() argument
      169 map->start = dev->start; in bl_map_simple()
      170 map->len = dev->len; in bl_map_simple()
      171 map->disk_offset = dev->disk_offset; in bl_map_simple()
      172 map->bdev = dev->bdev; in bl_map_simple()
      177 struct pnfs_block_dev_map *map) in bl_map_concat() argument
      188 child->map(child, offset - child->start, map); in bl_map_concat()
      197 struct pnfs_block_dev_map *map) in bl_map_stripe() argument
      221 child->map(child, disk_offset, map); in bl_map_stripe()
      223 map->start += offset; in bl_map_stripe()
      [all …]
|
D | blocklayout.c |
      135 static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map) in offset_in_map() argument
      137 return offset >= map->start && offset < map->start + map->len; in offset_in_map()
      142 struct page *page, struct pnfs_block_dev_map *map, in do_add_page_to_bio() argument
      159 if (!offset_in_map(disk_addr, map)) { in do_add_page_to_bio()
      160 if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map)) in do_add_page_to_bio()
      164 disk_addr += map->disk_offset; in do_add_page_to_bio()
      165 disk_addr -= map->start; in do_add_page_to_bio()
      169 if (end >= map->start + map->len) in do_add_page_to_bio()
      170 *len = map->start + map->len - disk_addr; in do_add_page_to_bio()
      174 bio = bl_alloc_init_bio(npg, map->bdev, in do_add_page_to_bio()
      [all …]
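do_add_page_to_bio() only re-runs the device-tree map() callback when the disk address has left the cached window; once inside, the address is rebased from the logical extent space onto the block device (add disk_offset, subtract start) and the I/O length is clipped to the window. A sketch of that check-and-rebase step, using a stand-in dev_map type:

```c
#include <stdbool.h>
#include <stdint.h>

struct dev_map {                      /* simplified stand-in for pnfs_block_dev_map */
	uint64_t start;               /* first logical byte covered by this window */
	uint64_t len;                 /* length of the window */
	uint64_t disk_offset;         /* where the window begins on the block device */
};

static bool offset_in_map(uint64_t offset, const struct dev_map *map)
{
	return offset >= map->start && offset < map->start + map->len;
}

/*
 * Translate [disk_addr, disk_addr + *len) into device-relative bytes,
 * clipping *len to the current mapping window. The caller must already
 * have checked offset_in_map() (re-mapping if it failed).
 */
static uint64_t rebase_into_device(const struct dev_map *map,
				   uint64_t disk_addr, uint64_t *len)
{
	uint64_t end = disk_addr + *len;

	if (end > map->start + map->len)
		*len = map->start + map->len - disk_addr;

	return disk_addr + map->disk_offset - map->start;
}
```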
|
/fs/nfsd/ |
D | nfs4layouts.c |
      60  struct nfsd4_deviceid_map *map, *old; in nfsd4_alloc_devid_map() local
      63  map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL); in nfsd4_alloc_devid_map()
      64  if (!map) in nfsd4_alloc_devid_map()
      67  map->fsid_type = fh->fh_fsid_type; in nfsd4_alloc_devid_map()
      68  memcpy(&map->fsid, fh->fh_fsid, fsid_len); in nfsd4_alloc_devid_map()
      87  map->idx = nfsd_devid_seq++; in nfsd4_alloc_devid_map()
      88  list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]); in nfsd4_alloc_devid_map()
      89  fhp->fh_export->ex_devid_map = map; in nfsd4_alloc_devid_map()
      90  map = NULL; in nfsd4_alloc_devid_map()
      94  kfree(map); in nfsd4_alloc_devid_map()
      [all …]
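nfsd4_alloc_devid_map() builds a small lookup record: a variable-length fsid copied after the header, a monotonically increasing index, and insertion into a hash table keyed by that index so the device id can be resolved later. A compact user-space model of the allocate-and-hash step, with its own types, a trivial hash, and no locking or duplicate check (the kernel does both):

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEVID_HASH_SIZE 16            /* assumed bucket count */

struct devid_map {                    /* simplified stand-in for nfsd4_deviceid_map */
	struct devid_map *next;       /* hash-bucket chaining */
	uint64_t idx;                 /* sequence number handed out to clients */
	int fsid_type;
	uint32_t fsid[];              /* variable-length filesystem id */
};

static struct devid_map *devid_hash[DEVID_HASH_SIZE];
static uint64_t devid_seq;

static unsigned int devid_hashfn(uint64_t idx)
{
	return idx & (DEVID_HASH_SIZE - 1);
}

static struct devid_map *alloc_devid_map(int fsid_type, const uint32_t *fsid,
					 size_t fsid_len)
{
	struct devid_map *map = calloc(1, sizeof(*map) + fsid_len);

	if (!map)
		return NULL;

	map->fsid_type = fsid_type;
	memcpy(map->fsid, fsid, fsid_len);

	/* the kernel also looks for an existing entry with the same fsid here */
	map->idx = devid_seq++;
	map->next = devid_hash[devid_hashfn(map->idx)];
	devid_hash[devid_hashfn(map->idx)] = map;
	return map;
}
```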
|