/fs/jfs/ |
D | jfs_dmap.c |
    65   int nblocks);
    72   static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
    74   int nblocks);
    76   int nblocks,
    79   int nblocks);
    80   static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
    83   static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
    85   static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
    87   static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
    92   int nblocks);
    [all …]
|
D | jfs_dmap.h |
    148  __le32 nblocks; /* 4: num blks covered by this dmap */   member
    284  extern int dbFree(struct inode *ipbmap, s64 blkno, s64 nblocks);
    287  int free, s64 blkno, s64 nblocks, struct tblock * tblk);
    291  extern int dbAlloc(struct inode *ipbmap, s64 hint, s64 nblocks, s64 * results);
    294  s64 blkno, s64 nblocks, s64 addnblocks, s64 * results);
    297  extern int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks);
    298  extern int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks);
|
D | jfs_discard.c |
    32   void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks)   in jfs_issue_discard()   argument
    37   r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0);   in jfs_issue_discard()
    41   (unsigned long long)nblocks, r);   in jfs_issue_discard()
    46   (unsigned long long)nblocks, r);   in jfs_issue_discard()
|
D | resize.c |
    61   s64 XAddress, XSize, nblocks, xoff, xaddr, t64;   in jfs_extendfs()   local
    313  nblocks = min(t64 - mapSize, XSize);   in jfs_extendfs()
    322  if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))   in jfs_extendfs()
    332  XSize -= nblocks;   in jfs_extendfs()
    382  xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);   in jfs_extendfs()
    387  if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {   in jfs_extendfs()
|
D | xattr.c |
    207  int nblocks;   in ea_write()   local
    225  nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;   in ea_write()
    228  rc = dquot_alloc_block(ip, nblocks);   in ea_write()
    232  rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);   in ea_write()
    235  dquot_free_block(ip, nblocks);   in ea_write()
    246  for (i = 0; i < nblocks; i += sbi->nbperpage) {   in ea_write()
    289  DXDlength(ea, nblocks);   in ea_write()
    300  dquot_free_block(ip, nblocks);   in ea_write()
    302  dbFree(ip, blkno, nblocks);   in ea_write()
    356  int nblocks;   in ea_read()   local
    [all …]
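The ea_write() hits above show the usual JFS sizing pattern: round the xattr byte length up to whole filesystem blocks, charge the quota with dquot_alloc_block(), allocate with dbAlloc(), and hand the same nblocks back to dquot_free_block()/dbFree() on the error paths. A minimal user-space model of just the rounding step (the helper name and the main() driver are illustrative, not kernel code):

    #include <stdio.h>

    /* Round a byte count up to whole blocks, mirroring
     * nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
     * blkbits is log2 of the block size (12 for 4 KiB blocks). */
    static unsigned long long bytes_to_blocks(unsigned long long size,
                                              unsigned int blkbits)
    {
        unsigned long long blocksize = 1ULL << blkbits;

        return (size + blocksize - 1) >> blkbits;
    }

    int main(void)
    {
        printf("%llu\n", bytes_to_blocks(1, 12));    /* 1: one byte still costs a block */
        printf("%llu\n", bytes_to_blocks(8192, 12)); /* 2: exact multiple, no rounding  */
        return 0;
    }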
|
D | jfs_extent.c |
    496  extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno)   in extBalloc()   argument
    517  if (*nblocks >= max && *nblocks > nbperpage)   in extBalloc()
    520  nb = nblks = *nblocks;   in extBalloc()
    538  *nblocks = nb;   in extBalloc()
|
D | jfs_discard.h | 10 extern void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks);
|
/fs/erofs/ |
D | data.c |
    53   erofs_blk_t nblocks, lastblk;   in erofs_map_blocks_flatmode()   local
    60   nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);   in erofs_map_blocks_flatmode()
    61   lastblk = nblocks - tailendpacking;   in erofs_map_blocks_flatmode()
    131  unsigned int nblocks,   in erofs_read_raw_page()   argument
    139  DBG_BUGON(!nblocks);   in erofs_read_raw_page()
    216  if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))   in erofs_read_raw_page()
    217  nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);   in erofs_read_raw_page()
    218  if (nblocks > BIO_MAX_PAGES)   in erofs_read_raw_page()
    219  nblocks = BIO_MAX_PAGES;   in erofs_read_raw_page()
    221  bio = bio_alloc(GFP_NOIO, nblocks);   in erofs_read_raw_page()
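erofs_read_raw_page() clamps nblocks twice before allocating the bio: first down to the number of pages actually covered by the mapped extent, then down to the per-bio page limit. A stand-alone sketch of that clamping; MAX_PAGES_PER_BIO is an invented stand-in for BIO_MAX_PAGES:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
    #define MAX_PAGES_PER_BIO    256   /* stand-in for BIO_MAX_PAGES */

    /* Mirror the two clamps at data.c lines 216-219. */
    static unsigned int pages_for_bio(unsigned long long extent_len,
                                      unsigned long page_size,
                                      unsigned int nblocks)
    {
        if (nblocks > DIV_ROUND_UP(extent_len, page_size))
            nblocks = DIV_ROUND_UP(extent_len, page_size);
        if (nblocks > MAX_PAGES_PER_BIO)
            nblocks = MAX_PAGES_PER_BIO;
        return nblocks;
    }

    int main(void)
    {
        printf("%u\n", pages_for_bio(4 << 20, 4096, 2048));   /* 256: capped by the per-bio limit */
        printf("%u\n", pages_for_bio(10 * 1024, 4096, 1024)); /* 3: capped by the extent length   */
        return 0;
    }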
|
/fs/nilfs2/ |
D | the_nilfs.c |
    397  u64 nsegments, nblocks;   in nilfs_store_disk_layout()   local
    451  nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>   in nilfs_store_disk_layout()
    453  if (nblocks) {   in nilfs_store_disk_layout()
    461  if (nblocks < min_block_count) {   in nilfs_store_disk_layout()
    465  (unsigned long long)nblocks);   in nilfs_store_disk_layout()
    733  sector_t start = 0, nblocks = 0;   in nilfs_discard_segments()   local
    743  if (!nblocks) {   in nilfs_discard_segments()
    745  nblocks = seg_end - seg_start + 1;   in nilfs_discard_segments()
    746  } else if (start + nblocks == seg_start) {   in nilfs_discard_segments()
    747  nblocks += seg_end - seg_start + 1;   in nilfs_discard_segments()
    [all …]
|
D | sufile.c |
    548   unsigned long nblocks, time64_t modtime)   in nilfs_sufile_set_segment_usage()   argument
    570   su->su_nblocks = cpu_to_le32(nblocks);   in nilfs_sufile_set_segment_usage()
    1067  sector_t start = 0, nblocks = 0;   in nilfs_sufile_trim_fs()   local
    1123  if (!nblocks) {   in nilfs_sufile_trim_fs()
    1126  nblocks = seg_end - seg_start + 1;   in nilfs_sufile_trim_fs()
    1130  if (start + nblocks == seg_start) {   in nilfs_sufile_trim_fs()
    1132  nblocks += seg_end - seg_start + 1;   in nilfs_sufile_trim_fs()
    1138  nblocks -= start_block - start;   in nilfs_sufile_trim_fs()
    1142  if (nblocks >= minlen) {   in nilfs_sufile_trim_fs()
    1147  nblocks * sects_per_block,   in nilfs_sufile_trim_fs()
    [all …]
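nilfs_discard_segments() (above) and nilfs_sufile_trim_fs() both walk the segment list accumulating contiguous segments into one (start, nblocks) extent and only issue a discard when the next segment does not abut the current run. A user-space sketch of that merging loop, with printf() standing in for the real discard request:

    #include <stdio.h>

    struct seg { unsigned long long start, end; };  /* inclusive block range */

    /* Merge adjacent segments into maximal extents:
     * extend while start + nblocks == next start, otherwise flush. */
    static void discard_segments(const struct seg *segs, int n)
    {
        unsigned long long start = 0, nblocks = 0;
        int i;

        for (i = 0; i < n; i++) {
            unsigned long long len = segs[i].end - segs[i].start + 1;

            if (!nblocks) {
                start = segs[i].start;
                nblocks = len;
            } else if (start + nblocks == segs[i].start) {
                nblocks += len;
            } else {
                printf("discard %llu +%llu\n", start, nblocks);
                start = segs[i].start;
                nblocks = len;
            }
        }
        if (nblocks)
            printf("discard %llu +%llu\n", start, nblocks);
    }

    int main(void)
    {
        const struct seg segs[] = {
            { 0, 127 }, { 128, 255 },  /* contiguous: merged into one extent */
            { 512, 639 },              /* gap: starts a second extent        */
        };

        discard_segments(segs, 3);     /* "discard 0 +256", "discard 512 +128" */
        return 0;
    }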
|
D | segbuf.h |
    34   unsigned long nblocks;   member
    131  return segbuf->sb_sum.nblocks == segbuf->sb_sum.nsumblk;   in nilfs_segbuf_empty()
    139  segbuf->sb_sum.nblocks++;   in nilfs_segbuf_add_segsum_buffer()
    148  segbuf->sb_sum.nblocks++;   in nilfs_segbuf_add_payload_buffer()
|
D | segbuf.c |
    83   segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;   in nilfs_segbuf_map_cont()
    120  segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);   in nilfs_segbuf_extend_payload()
    134  segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;   in nilfs_segbuf_reset()
    165  raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks);   in nilfs_segbuf_fill_in_segsum()
    411  wi->rest_blocks = segbuf->sb_sum.nblocks;   in nilfs_segbuf_prepare_write()
    517  segbuf->sb_sum.nblocks,   in nilfs_segbuf_wait()
|
D | recovery.c |
    327  unsigned long nblocks, ndatablk, nnodeblk;   in nilfs_scan_dsync_log()   local
    336  nblocks = le32_to_cpu(finfo->fi_nblocks);   in nilfs_scan_dsync_log()
    338  nnodeblk = nblocks - ndatablk;   in nilfs_scan_dsync_log()
    806  unsigned long nblocks;   in nilfs_search_super_root()   local
    842  nblocks = le32_to_cpu(sum->ss_nblocks);   in nilfs_search_super_root()
    843  pseg_end = pseg_start + nblocks - 1;   in nilfs_search_super_root()
    892  nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;   in nilfs_search_super_root()
    909  pseg_start += nblocks;   in nilfs_search_super_root()
|
/fs/ext4/ |
D | ext4_jbd2.h |
    298  #define ext4_journal_start_sb(sb, type, nblocks) \   argument
    299  __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
    301  #define ext4_journal_start(inode, type, nblocks) \   argument
    302  __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
    335  static inline int ext4_journal_extend(handle_t *handle, int nblocks)   in ext4_journal_extend()   argument
    338  return jbd2_journal_extend(handle, nblocks);   in ext4_journal_extend()
    342  static inline int ext4_journal_restart(handle_t *handle, int nblocks)   in ext4_journal_restart()   argument
    345  return jbd2_journal_restart(handle, nblocks);   in ext4_journal_restart()
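The ext4_journal_start*() macros exist mostly to splice the caller's __LINE__ into __ext4_journal_start_sb()/__ext4_journal_start() so a credit request can be traced back to its call site. The same wrapper-macro idiom reduced to a runnable toy; every name below is invented for the illustration:

    #include <stdio.h>

    /* The real helper would reserve 'nblocks' journal credits; this one
     * only reports which call site asked for them. */
    static int __demo_journal_start(const char *file, unsigned int line, int nblocks)
    {
        printf("%s:%u wants %d credits\n", file, line, nblocks);
        return 0;
    }

    /* Callers use the short form; the macro injects the call-site info,
     * just as ext4_journal_start() injects __LINE__. */
    #define demo_journal_start(nblocks) \
        __demo_journal_start(__FILE__, __LINE__, (nblocks))

    int main(void)
    {
        demo_journal_start(8);
        return 0;
    }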
|
D | ialloc.c |
    743  int nblocks)   in __ext4_new_inode()   argument
    791  nblocks += (S_ISDIR(mode) ? 2 : 1) *   in __ext4_new_inode()
    811  nblocks += num_security_xattrs *   in __ext4_new_inode()
    818  nblocks += __ext4_xattr_set_credits(sb,   in __ext4_new_inode()
    924  BUG_ON(nblocks <= 0);   in __ext4_new_inode()
    926  handle_type, nblocks,   in __ext4_new_inode()
|
/fs/reiserfs/ |
D | xattr.h |
    74   size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);   in reiserfs_xattr_jcreate_nblocks()   local
    77   nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);   in reiserfs_xattr_jcreate_nblocks()
    79   nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);   in reiserfs_xattr_jcreate_nblocks()
    82   return nblocks;   in reiserfs_xattr_jcreate_nblocks()
|
D | xattr_acl.c |
    364  int nblocks = 0;   in reiserfs_cache_default_acl()   local
    378  nblocks = reiserfs_xattr_jcreate_nblocks(inode);   in reiserfs_cache_default_acl()
    379  nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);   in reiserfs_cache_default_acl()
    384  nblocks += reiserfs_xattr_nblocks(inode, size) * 4;   in reiserfs_cache_default_acl()
    388  return nblocks;   in reiserfs_cache_default_acl()
|
/fs/jbd2/ |
D | transaction.c |
    431  static handle_t *new_handle(int nblocks)   in new_handle()   argument
    436  handle->h_buffer_credits = nblocks;   in new_handle()
    442  handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,   in jbd2__journal_start()   argument
    458  handle = new_handle(nblocks);   in jbd2__journal_start()
    485  line_no, nblocks);   in jbd2__journal_start()
    511  handle_t *jbd2_journal_start(journal_t *journal, int nblocks)   in jbd2_journal_start()   argument
    513  return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);   in jbd2_journal_start()
    601  int jbd2_journal_extend(handle_t *handle, int nblocks)   in jbd2_journal_extend()   argument
    619  "transaction not running\n", handle, nblocks);   in jbd2_journal_extend()
    624  wanted = atomic_add_return(nblocks,   in jbd2_journal_extend()
    [all …]
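A jbd2 handle starts with h_buffer_credits = nblocks, and jbd2_journal_extend() then tries to add more credits to the running transaction, failing when the journal cannot cover them, at which point the caller is expected to restart the handle instead. A simplified single-threaded model of that credit accounting; the structures and limits are invented for the sketch and ignore the locking and atomics of the real code:

    #include <stdio.h>

    struct toy_journal { int credits_outstanding, max_credits; };
    struct toy_handle  { int buffer_credits; };

    /* Grow a handle's reservation if the journal has room; a non-zero return
     * means "cannot extend, restart the handle", as with jbd2_journal_extend(). */
    static int toy_extend(struct toy_journal *j, struct toy_handle *h, int nblocks)
    {
        int wanted = j->credits_outstanding + nblocks;

        if (wanted > j->max_credits)
            return 1;
        j->credits_outstanding = wanted;
        h->buffer_credits += nblocks;
        return 0;
    }

    int main(void)
    {
        struct toy_journal j = { .credits_outstanding = 120, .max_credits = 128 };
        struct toy_handle  h = { .buffer_credits = 8 };

        printf("extend by 4:  %s\n", toy_extend(&j, &h, 4) ? "restart" : "ok");
        printf("extend by 32: %s\n", toy_extend(&j, &h, 32) ? "restart" : "ok");
        return 0;
    }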
|
D | checkpoint.c |
    89   int nblocks, space_left;   in __jbd2_log_wait_for_space()   local
    92   nblocks = jbd2_space_needed(journal);   in __jbd2_log_wait_for_space()
    93   while (jbd2_log_space_left(journal) < nblocks) {   in __jbd2_log_wait_for_space()
    115  if (space_left < nblocks) {   in __jbd2_log_wait_for_space()
    141  __func__, nblocks, space_left);   in __jbd2_log_wait_for_space()
|
/fs/ |
D | mpage.c |
    210  unsigned nblocks;   in do_mpage_readpage()   local
    235  nblocks = map_bh->b_size >> blkbits;   in do_mpage_readpage()
    238  block_in_file < (args->first_logical_block + nblocks)) {   in do_mpage_readpage()
    240  unsigned last = nblocks - map_offset;   in do_mpage_readpage()
    298  nblocks = map_bh->b_size >> blkbits;   in do_mpage_readpage()
    300  if (relative_block == nblocks) {   in do_mpage_readpage()
    357  nblocks = map_bh->b_size >> blkbits;   in do_mpage_readpage()
    358  if ((buffer_boundary(map_bh) && relative_block == nblocks) ||   in do_mpage_readpage()
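do_mpage_readpage() keeps the most recent get_block() mapping and recomputes nblocks = map_bh->b_size >> blkbits whenever it needs to know how far that mapping reaches; a later file block can reuse the cached mapping only if it falls inside [first_logical_block, first_logical_block + nblocks). The containment test as a stand-alone helper, with field names shortened for the sketch:

    #include <stdio.h>

    struct cached_map {
        unsigned long first_logical_block;  /* first file block covered */
        unsigned long b_size;               /* mapping length in bytes  */
    };

    /* Return non-zero if block_in_file is covered by the cached mapping,
     * mirroring the reuse test around mpage.c lines 235-240. */
    static int map_covers(const struct cached_map *m, unsigned long block_in_file,
                          unsigned int blkbits)
    {
        unsigned long nblocks = m->b_size >> blkbits;

        return block_in_file >= m->first_logical_block &&
               block_in_file < m->first_logical_block + nblocks;
    }

    int main(void)
    {
        struct cached_map m = { .first_logical_block = 100, .b_size = 16384 };

        /* 16384 bytes of 4 KiB blocks covers file blocks 100-103. */
        printf("%d %d\n", map_covers(&m, 103, 12), map_covers(&m, 104, 12)); /* 1 0 */
        return 0;
    }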
|
/fs/btrfs/ |
D | file-item.c |
    168  int nblocks;   in __btrfs_lookup_bio_sums()   local
    176  nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;   in __btrfs_lookup_bio_sums()
    178  if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {   in __btrfs_lookup_bio_sums()
    179  btrfs_bio->csum = kmalloc_array(nblocks, csum_size,   in __btrfs_lookup_bio_sums()
    219  csum, nblocks);   in __btrfs_lookup_bio_sums()
    268  count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>   in __btrfs_lookup_bio_sums()
    275  nblocks -= count;   in __btrfs_lookup_bio_sums()
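__btrfs_lookup_bio_sums() derives nblocks from the bio size and then decides whether the checksums for those blocks fit in the small inline buffer of the per-bio structure, calling kmalloc_array() only for larger bios. The size check modelled as a stand-alone helper; INLINE_CSUM_BYTES is an invented stand-in for BTRFS_BIO_INLINE_CSUM_SIZE:

    #include <stdio.h>

    #define INLINE_CSUM_BYTES 64  /* stand-in for BTRFS_BIO_INLINE_CSUM_SIZE */

    /* Non-zero when nblocks checksums of csum_size bytes each spill past
     * the inline buffer and need a heap allocation. */
    static int needs_heap_csums(unsigned int bio_bytes, unsigned int blocksize_bits,
                                unsigned int csum_size)
    {
        unsigned int nblocks = bio_bytes >> blocksize_bits;

        return nblocks * csum_size > INLINE_CSUM_BYTES;
    }

    int main(void)
    {
        /* 16 KiB bio, 4 KiB blocks, 4-byte crc32c sums: 16 bytes, inline is enough. */
        printf("%d\n", needs_heap_csums(16 * 1024, 12, 4));  /* 0 */
        /* 256 KiB bio: 64 sums * 4 bytes = 256 bytes, needs the heap. */
        printf("%d\n", needs_heap_csums(256 * 1024, 12, 4)); /* 1 */
        return 0;
    }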
|
/fs/gfs2/ |
D | rgrp.c |
    2391  int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,   in gfs2_alloc_blocks()   argument
    2412  (unsigned long long)ip->i_no_addr, error, *nblocks,   in gfs2_alloc_blocks()
    2418  gfs2_alloc_extent(&rbm, dinode, nblocks);   in gfs2_alloc_blocks()
    2422  gfs2_adjust_reservation(ip, &rbm, *nblocks);   in gfs2_alloc_blocks()
    2423  ndata = *nblocks;   in gfs2_alloc_blocks()
    2439  if (rbm.rgd->rd_free < *nblocks) {   in gfs2_alloc_blocks()
    2440  fs_warn(sdp, "nblocks=%u\n", *nblocks);   in gfs2_alloc_blocks()
    2444  rbm.rgd->rd_free -= *nblocks;   in gfs2_alloc_blocks()
    2455  gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);   in gfs2_alloc_blocks()
    2457  gfs2_trans_remove_revoke(sdp, block, *nblocks);   in gfs2_alloc_blocks()
    [all …]
|
/fs/minix/ |
D | itree_v1.c | 66 return nblocks(size, sb); in V1_minix_blocks()
|
D | itree_v2.c | 74 return nblocks(size, sb); in V2_minix_blocks()
|
/fs/ocfs2/ |
D | journal.c |
    413  int ocfs2_extend_trans(handle_t *handle, int nblocks)   in ocfs2_extend_trans()   argument
    418  BUG_ON(nblocks < 0);   in ocfs2_extend_trans()
    420  if (!nblocks)   in ocfs2_extend_trans()
    425  trace_ocfs2_extend_trans(old_nblocks, nblocks);   in ocfs2_extend_trans()
    430  status = jbd2_journal_extend(handle, nblocks);   in ocfs2_extend_trans()
    438  trace_ocfs2_extend_trans_restart(old_nblocks + nblocks);   in ocfs2_extend_trans()
    440  old_nblocks + nblocks);   in ocfs2_extend_trans()
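ocfs2_extend_trans() shows the common fallback when a running handle needs more credits: ask jbd2_journal_extend() for just the extra nblocks, and if the transaction cannot grow, restart the handle with old_nblocks + nblocks so the full requirement is reserved afresh. The decision logic as a small stand-alone sketch; the two stub_* helpers merely stand in for the real jbd2 calls:

    #include <stdio.h>

    /* Pretend the journal can absorb at most 8 extra credits in place. */
    static int stub_extend(int extra)
    {
        return extra <= 8 ? 0 : 1;
    }

    static int stub_restart(int credits)
    {
        printf("restart with %d credits\n", credits);
        return 0;
    }

    /* Mirror ocfs2_extend_trans(): try to grow in place, otherwise restart
     * with the combined old + new credit count. */
    static int extend_trans(int old_nblocks, int nblocks)
    {
        if (!nblocks)
            return 0;
        if (stub_extend(nblocks) == 0) {
            printf("extended by %d in place\n", nblocks);
            return 0;
        }
        return stub_restart(old_nblocks + nblocks);
    }

    int main(void)
    {
        extend_trans(32, 4);   /* small request: extended in place  */
        extend_trans(32, 20);  /* too big: restarts with 52 credits */
        return 0;
    }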
|