/fs/afs/
  D | dir_edit.c
      194  unsigned int need_slots, nr_blocks, b;  in afs_edit_dir_add() local
      224  nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;  in afs_edit_dir_add()
      229  for (b = 0; b < nr_blocks + 1; b++) {  in afs_edit_dir_add()
      238  if (nr_blocks >= AFS_DIR_MAX_BLOCKS)  in afs_edit_dir_add()
      263  if (b == nr_blocks) {  in afs_edit_dir_add()
      303  nr_blocks = 1;  in afs_edit_dir_add()
      371  unsigned int need_slots, nr_blocks, b;  in afs_edit_dir_remove() local
      385  nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;  in afs_edit_dir_remove()
      403  for (b = 0; b < nr_blocks; b++) {  in afs_edit_dir_remove()
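The dir_edit.c hits all revolve around the same size-to-block-count arithmetic, plus the extra candidate block scanned at line 229. A minimal userspace sketch of that pattern (not the kernel code; the directory size is made up and AFS_DIR_BLOCK_SIZE is assumed to be the kernel's 2048):

    #include <stdio.h>

    #define AFS_DIR_BLOCK_SIZE 2048   /* assumed value of the kernel constant */

    int main(void)
    {
        unsigned long long i_size = 6144;        /* hypothetical directory size */
        unsigned int nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;

        /* Walk nr_blocks + 1 candidates, the way afs_edit_dir_add() does, so
         * that when every existing block is full the final iteration stands
         * for a freshly created block. */
        for (unsigned int b = 0; b < nr_blocks + 1; b++)
            printf("candidate block %u%s\n", b,
                   b == nr_blocks ? " (would be newly created)" : "");
        return 0;
    }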
/fs/crypto/
  D | inline_crypt.c
      472  u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)  in fscrypt_limit_io_blocks() argument
      478  return nr_blocks;  in fscrypt_limit_io_blocks()
      480  if (nr_blocks <= 1)  in fscrypt_limit_io_blocks()
      481  return nr_blocks;  in fscrypt_limit_io_blocks()
      486  return nr_blocks;  in fscrypt_limit_io_blocks()
      492  return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);  in fscrypt_limit_io_blocks()
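The interesting hit is line 492: the block count is clipped so that the 32-bit DUN (data unit number) does not wrap within a single I/O. A standalone sketch of that clamping arithmetic (plain C, not the fscrypt API):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t limit_io_blocks(uint32_t dun, uint64_t nr_blocks)
    {
        if (nr_blocks <= 1)
            return nr_blocks;
        /* DUNs still available before the 32-bit value would wrap around. */
        uint64_t remaining = (uint64_t)UINT32_MAX + 1 - dun;
        return nr_blocks < remaining ? nr_blocks : remaining;
    }

    int main(void)
    {
        /* With the first block's DUN 4 short of the 32-bit limit, only 4 of
         * the requested 16 blocks may go into one submission. */
        printf("%llu\n",
               (unsigned long long)limit_io_blocks(UINT32_MAX - 3, 16));
        return 0;
    }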
/fs/xfs/libxfs/
  D | xfs_btree_staging.c
      648  uint64_t nr_blocks = 0;  in xfs_btree_bload_compute_geometry() local
      683  nr_blocks++;  in xfs_btree_bload_compute_geometry()
      715  nr_blocks++;  in xfs_btree_bload_compute_geometry()
      723  nr_blocks += level_blocks;  in xfs_btree_bload_compute_geometry()
      732  bbl->nr_blocks = nr_blocks - 1;  in xfs_btree_bload_compute_geometry()
      734  bbl->nr_blocks = nr_blocks;  in xfs_btree_bload_compute_geometry()
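These hits show nr_blocks being accumulated level by level while the geometry of a bulk-loaded btree is computed. A rough, self-contained sketch of that accumulation pattern (the record and key capacities are example values, not XFS's real geometry):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nr_records = 100000;   /* hypothetical record count */
        unsigned int leaf_recs = 120;   /* records per leaf block (made up) */
        unsigned int node_keys = 200;   /* key/ptr pairs per node block (made up) */

        uint64_t nr_blocks = 0;
        uint64_t level_blocks = (nr_records + leaf_recs - 1) / leaf_recs;
        unsigned int level = 0;

        /* Size each level from the one below it and keep a running total,
         * the way the geometry pass above accumulates nr_blocks. */
        while (level_blocks > 1) {
            printf("level %u: %llu blocks\n", level,
                   (unsigned long long)level_blocks);
            nr_blocks += level_blocks;
            level_blocks = (level_blocks + node_keys - 1) / node_keys;
            level++;
        }
        nr_blocks += level_blocks;   /* the single root block */
        printf("root at level %u, total nr_blocks = %llu\n", level,
               (unsigned long long)nr_blocks);
        return 0;
    }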
  D | xfs_btree_staging.h
      109  uint64_t nr_blocks;  member
/fs/jffs2/
  D | build.c
      326  size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */  in jffs2_calc_trigger_levels()
      356  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);  in jffs2_calc_trigger_levels()
      380  c->nr_blocks = c->flash_size / c->sector_size;  in jffs2_do_mount_fs()
      381  size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;  in jffs2_do_mount_fs()
      391  for (i=0; i<c->nr_blocks; i++) {  in jffs2_do_mount_fs()
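Lines 380-381 are the defining use: the eraseblock count is simply flash size divided by eraseblock size, and it then sizes the in-memory eraseblock table, plus the ~100-bytes-per-block estimate in jffs2_calc_trigger_levels(). A small sketch of that arithmetic with made-up flash geometry and a stand-in struct:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct jffs2_eraseblock, only to give sizeof something
     * to work with. */
    struct eraseblock_stub {
        uint32_t offset;
        uint32_t free_size;
        uint32_t dirty_size;
    };

    int main(void)
    {
        uint32_t flash_size  = 64 * 1024 * 1024;   /* 64 MiB flash (example) */
        uint32_t sector_size = 128 * 1024;         /* 128 KiB eraseblocks (example) */

        uint32_t nr_blocks = flash_size / sector_size;
        size_t table_bytes = sizeof(struct eraseblock_stub) * nr_blocks;

        printf("nr_blocks = %u, eraseblock table = %zu bytes\n",
               nr_blocks, table_bytes);
        printf("trigger-level overhead estimate = %u bytes\n",
               (unsigned)(nr_blocks * 100));
        return 0;
    }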
  D | jffs2_fs_sb.h
      91  uint32_t nr_blocks;  member
  D | scan.c
      143  for (i=0; i<c->nr_blocks; i++) {  in jffs2_scan_medium()
      265  ((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {  in jffs2_scan_medium()
      268  empty_blocks, bad_blocks, c->nr_blocks);  in jffs2_scan_medium()
  D | debug.c
      285  if (nr_counted != c->nr_blocks) {  in __jffs2_dbg_superblock_counts()
      287  __func__, nr_counted, c->nr_blocks);  in __jffs2_dbg_superblock_counts()
  D | nodelist.c
      509  for (i=0; i<c->nr_blocks; i++) {  in jffs2_free_raw_node_refs()
  D | nodemgmt.c
      598  if (blocknr >= c->nr_blocks) {  in jffs2_mark_node_obsolete()
/fs/gfs2/
  D | inode.c
      532  return da->nr_blocks + gfs2_rg_blocks(dip, da->nr_blocks) +  in gfs2_trans_da_blks()
      540  struct gfs2_alloc_parms ap = { .target = da->nr_blocks, };  in link_dinode()
      543  if (da->nr_blocks) {  in link_dinode()
      991  if (da.nr_blocks) {  in gfs2_link()
      992  struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };  in gfs2_link()
      1030  if (da.nr_blocks)  in gfs2_link()
      1033  if (da.nr_blocks)  in gfs2_link()
      1390  struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, };  in gfs2_rename()
      1549  if (da.nr_blocks) {  in gfs2_rename()
      1550  struct gfs2_alloc_parms ap = { .target = da.nr_blocks, };  in gfs2_rename()
      [all …]
  D | dir.h
      20  unsigned nr_blocks;  member
  D | dir.c
      2165  da->nr_blocks = 0;  in gfs2_diradd_alloc_required()
      2171  da->nr_blocks = sdp->sd_max_dirres;  in gfs2_diradd_alloc_required()
      2174  da->nr_blocks = 1;  in gfs2_diradd_alloc_required()
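Taken together, the three gfs2 files show one pattern: gfs2_diradd_alloc_required() records in da->nr_blocks how many blocks a directory insertion might need (0, 1, or sd_max_dirres per the hits above), and the callers in inode.c only build an allocation request when that count is nonzero. A stand-in sketch of that flow, collapsed to two cases; the types and the worst-case value are invented, not gfs2's:

    #include <stdio.h>

    struct diradd_stub      { unsigned nr_blocks; };
    struct alloc_parms_stub { unsigned target; };

    /* Decide how many blocks the insertion may need: none if the name fits
     * into the existing directory block, otherwise a worst-case reservation. */
    static void diradd_alloc_required(struct diradd_stub *da, int fits,
                                      unsigned max_dirres)
    {
        da->nr_blocks = fits ? 0 : max_dirres;
    }

    int main(void)
    {
        struct diradd_stub da;

        diradd_alloc_required(&da, 0, 33);   /* 33 is a made-up worst case */
        if (da.nr_blocks) {
            struct alloc_parms_stub ap = { .target = da.nr_blocks };
            printf("reserving %u blocks before adding the entry\n", ap.target);
        }
        return 0;
    }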
/fs/iomap/
  D | buffered-io.c
      55  unsigned int nr_blocks = i_blocks_per_page(inode, page);  in iomap_page_create() local
      57  if (iop || nr_blocks <= 1)  in iomap_page_create()
      60  iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),  in iomap_page_create()
      64  bitmap_fill(iop->uptodate, nr_blocks);  in iomap_page_create()
      73  unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);  in iomap_page_release() local
      79  WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=  in iomap_page_release()
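Here nr_blocks is the number of filesystem blocks covered by one page; the early-out at line 57 means the per-page tracking structure, with its one-bit-per-block uptodate bitmap, is only allocated when a page spans more than one block. A userspace sketch of that sizing (the struct and helpers are stand-ins, not the iomap API):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct page_state_stub {
        unsigned int read_count;      /* placeholder bookkeeping field */
        unsigned long uptodate[];     /* one bit per block in the page */
    };

    int main(void)
    {
        unsigned int page_size = 65536, block_size = 4096;   /* example geometry */
        unsigned int nr_blocks = page_size / block_size;     /* 16 blocks/page */

        if (nr_blocks <= 1)
            return 0;   /* a single-block page needs no bitmap at all */

        size_t words = BITS_TO_LONGS(nr_blocks);
        struct page_state_stub *iop =
            calloc(1, sizeof(*iop) + words * sizeof(unsigned long));
        if (!iop)
            return 1;

        /* Set every bitmap word, roughly what bitmap_fill() does when the
         * whole page is already uptodate. */
        memset(iop->uptodate, 0xff, words * sizeof(unsigned long));
        printf("%u blocks tracked in %zu bitmap word(s)\n", nr_blocks, words);
        free(iop);
        return 0;
    }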
/fs/xfs/scrub/
  D | repair.h
      23  bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
  D | repair.c
      161  xfs_extlen_t nr_blocks,  in xrep_ag_has_space() argument
      166  pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;  in xrep_ag_has_space()
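The check at line 166 is a one-liner: the AG has room for a repair of nr_blocks blocks only if its free space exceeds the per-AG reservation plus the request. Sketched with example numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Example-only version of the comparison above: free blocks must cover
     * both the existing reservation and the blocks the repair wants. */
    static bool ag_has_space(uint32_t freeblks, uint32_t resv_needed,
                             uint32_t nr_blocks)
    {
        return freeblks > resv_needed + nr_blocks;
    }

    int main(void)
    {
        printf("%s\n", ag_has_space(5000, 1024, 256) ? "enough" : "not enough");
        printf("%s\n", ag_has_space(1200, 1024, 256) ? "enough" : "not enough");
        return 0;
    }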
/fs/ntfs/
  D | volume.h
      32  LCN nr_blocks;  /* Number of sb->s_blocksize bytes  member
  D | super.c
      667  sector_t nr_blocks = NTFS_SB(sb)->nr_blocks;  in read_ntfs_boot_sector() local
      687  if ((bh_backup = sb_bread(sb, nr_blocks - 1))) {  in read_ntfs_boot_sector()
      695  if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {  in read_ntfs_boot_sector()
      2781  vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>  in ntfs_fill_super()
      2820  vol->nr_blocks = i_size_read(sb->s_bdev->bd_inode) >>  in ntfs_fill_super()
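Lines 2781/2820 derive nr_blocks from the block device size, and lines 687/695 use it to look for the backup boot sector either in the very last block or halfway into the volume. The arithmetic, sketched with an invented disk size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int blocksize_bits = 9;    /* 512-byte blocks (example) */
        uint64_t device_bytes = 20ULL * 1024 * 1024 * 1024;   /* 20 GiB device */

        uint64_t nr_blocks = device_bytes >> blocksize_bits;

        printf("volume spans %llu blocks\n", (unsigned long long)nr_blocks);
        printf("backup boot sector candidates: block %llu (last) or %llu (middle)\n",
               (unsigned long long)(nr_blocks - 1),
               (unsigned long long)(nr_blocks >> 1));
        return 0;
    }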
  D | inode.c
      1758  unsigned int i, nr_blocks;  in ntfs_read_inode_mount() local
      1803  nr_blocks = vol->mft_record_size >> sb->s_blocksize_bits;  in ntfs_read_inode_mount()
      1804  if (!nr_blocks)  in ntfs_read_inode_mount()
      1805  nr_blocks = 1;  in ntfs_read_inode_mount()
      1808  for (i = 0; i < nr_blocks; i++) {  in ntfs_read_inode_mount()
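The !nr_blocks fallback at lines 1804-1805 covers the case where an MFT record is smaller than the device block size, so the shift would otherwise yield zero blocks to read. Sketch with assumed sizes, not values from a real volume:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mft_record_size = 1024;   /* 1 KiB MFT record (example) */
        unsigned int blocksize_bits  = 12;     /* 4 KiB device blocks (example) */

        unsigned int nr_blocks = mft_record_size >> blocksize_bits;
        if (!nr_blocks)
            nr_blocks = 1;   /* still have to read at least one block */

        printf("reading %u block(s) to pick up one MFT record\n", nr_blocks);
        return 0;
    }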
/fs/zonefs/
  D | super.c
      248  loff_t nr_blocks;  in zonefs_update_stats() local
      260  nr_blocks = (old_isize - new_isize) >> sb->s_blocksize_bits;  in zonefs_update_stats()
      261  if (sbi->s_used_blocks > nr_blocks)  in zonefs_update_stats()
      262  sbi->s_used_blocks -= nr_blocks;  in zonefs_update_stats()
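zonefs_update_stats() converts the shrink in file size into a block count and decrements the superblock's used-block counter, guarded so it cannot underflow (the branch taken when the guard fails is not shown in the snippet; the clamp below is an assumption). A standalone rendering with example sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int blocksize_bits = 12;   /* 4 KiB blocks (example) */
        int64_t old_isize = 1024 * 1024;    /* 1 MiB before the file shrank */
        int64_t new_isize = 256 * 1024;     /* 256 KiB afterwards */
        int64_t used_blocks = 300;          /* example per-sb counter */

        int64_t nr_blocks = (old_isize - new_isize) >> blocksize_bits;
        if (used_blocks > nr_blocks)
            used_blocks -= nr_blocks;
        else
            used_blocks = 0;                /* clamp instead of going negative */

        printf("freed %lld blocks, %lld still in use\n",
               (long long)nr_blocks, (long long)used_blocks);
        return 0;
    }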
/fs/ocfs2/
  D | file.c
      1547  u64 start_block, end_block, nr_blocks;  in ocfs2_zeroout_partial_cluster() local
      1558  nr_blocks = end_block - start_block;  in ocfs2_zeroout_partial_cluster()
      1559  if (!nr_blocks)  in ocfs2_zeroout_partial_cluster()
      1572  return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);  in ocfs2_zeroout_partial_cluster()
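ocfs2_zeroout_partial_cluster() turns a block range into a count, bails out when the count is zero, and otherwise hands it to sb_issue_zeroout(). The range arithmetic in isolation; the byte-to-block conversion here is invented for the example, since the snippet already starts from block numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int blkbits = 12;             /* 4 KiB blocks (example) */
        uint64_t start_byte  = 8192;           /* made-up byte range */
        uint64_t end_byte    = 8192 + 3 * 4096;

        uint64_t start_block = start_byte >> blkbits;
        uint64_t end_block   = end_byte >> blkbits;
        uint64_t nr_blocks   = end_block - start_block;

        if (!nr_blocks) {
            puts("range covers no whole block, nothing to zero");
            return 0;
        }
        printf("would zero %llu block(s) starting at block %llu\n",
               (unsigned long long)nr_blocks, (unsigned long long)start_block);
        return 0;
    }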
/fs/xfs/
  D | xfs_trace.h
      3943  uint64_t block_idx, uint64_t nr_blocks,
      3945  TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records),
      3951  __field(unsigned long long, nr_blocks)
      3961  __entry->nr_blocks = nr_blocks;
      3978  __entry->nr_blocks,