/fs/erofs/

zpvec.h
     34  unsigned int nr, index;  (member)
     51  unsigned int nr)  in z_erofs_pagevec_ctor_next_page()  (argument)
     59  for (index = 0; index < nr; ++index) {  in z_erofs_pagevec_ctor_next_page()
     66  DBG_BUGON(nr >= ctor->nr);  in z_erofs_pagevec_ctor_next_page()
     74  struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);  in z_erofs_pagevec_ctor_pagedown()
     83  ctor->nr = PAGE_SIZE / sizeof(struct page *);  in z_erofs_pagevec_ctor_pagedown()
     88  unsigned int nr,  in z_erofs_pagevec_ctor_init()  (argument)
     92  ctor->nr = nr;  in z_erofs_pagevec_ctor_init()
     96  if (i >= nr) {  in z_erofs_pagevec_ctor_init()
     97  i -= nr;  in z_erofs_pagevec_ctor_init()
    [all …]

/fs/minix/

minix.h
    107  #define minix_test_and_set_bit(nr, addr) \  (argument)
    108  __test_and_set_bit((nr), (unsigned long *)(addr))
    109  #define minix_set_bit(nr, addr) \  (argument)
    110  __set_bit((nr), (unsigned long *)(addr))
    111  #define minix_test_and_clear_bit(nr, addr) \  (argument)
    112  __test_and_clear_bit((nr), (unsigned long *)(addr))
    113  #define minix_test_bit(nr, addr) \  (argument)
    114  test_bit((nr), (unsigned long *)(addr))
    142  #define minix_test_and_set_bit(nr, addr) \  (argument)
    143  __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr))
    [all …]
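
The minix helpers above wrap the generic kernel bit operations, casting the bitmap to unsigned long * and, in the second variant, XOR-ing the bit number with 16 to cope with bitmap byte order. Below is a minimal userspace sketch of the underlying nr/addr convention; sketch_test_and_set_bit() is an invented name, not the kernel's __test_and_set_bit().

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Set bit "nr" in the bitmap at "addr" and report whether it was already set. */
static int sketch_test_and_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *word = addr + nr / BITS_PER_LONG;
	int old = (*word & mask) != 0;

	*word |= mask;
	return old;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	printf("%d\n", sketch_test_and_set_bit(70, bitmap));	/* 0: bit was clear */
	printf("%d\n", sketch_test_and_set_bit(70, bitmap));	/* 1: bit is now set */
	return 0;
}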

itree_common.c
     84  int nr = minix_new_block(inode);  in alloc_branch()  (local)
     85  if (!nr)  in alloc_branch()
     87  branch[n].key = cpu_to_block(nr);  in alloc_branch()
     90  minix_free_block(inode, nr);  in alloc_branch()
    102  parent = nr;  in alloc_branch()
    265  unsigned long nr;  in free_data()  (local)
    268  nr = block_to_cpu(*p);  in free_data()
    269  if (nr) {  in free_data()
    271  minix_free_block(inode, nr);  in free_data()
    279  unsigned long nr;  in free_branches()  (local)
    [all …]

/fs/ext4/

indirect.c
    927  ext4_fsblk_t nr; /* Current block # */  in ext4_free_data()  (local)
    942  nr = le32_to_cpu(*p);  in ext4_free_data()
    943  if (nr) {  in ext4_free_data()
    946  block_to_free = nr;  in ext4_free_data()
    949  } else if (nr == block_to_free + count) {  in ext4_free_data()
    957  block_to_free = nr;  in ext4_free_data()
   1007  ext4_fsblk_t nr;  in ext4_free_branches()  (local)
   1018  nr = le32_to_cpu(*p);  in ext4_free_branches()
   1019  if (!nr)  in ext4_free_branches()
   1022  if (!ext4_inode_block_valid(inode, nr, 1)) {  in ext4_free_branches()
    [all …]
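
A pattern visible in ext4_free_data() above is run coalescing: consecutive block numbers read from the indirect block are batched into (start, count) runs so each run can be freed in one go. The sketch below reproduces only that batching logic in userspace; free_run() is a stand-in, not an ext4 function.

#include <stdio.h>

static void free_run(unsigned long start, unsigned long count)
{
	printf("free %lu block(s) starting at %lu\n", count, start);
}

static void free_data_sketch(const unsigned long *blocks, int n)
{
	unsigned long block_to_free = 0, count = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned long nr = blocks[i];

		if (!nr)
			continue;		/* hole: nothing allocated here */
		if (count == 0) {
			block_to_free = nr;	/* start a new run */
			count = 1;
		} else if (nr == block_to_free + count) {
			count++;		/* extends the current run */
		} else {
			free_run(block_to_free, count);
			block_to_free = nr;	/* run broken: flush, restart */
			count = 1;
		}
	}
	if (count)
		free_run(block_to_free, count);
}

int main(void)
{
	const unsigned long blocks[] = { 100, 101, 102, 0, 200, 201, 300 };

	free_data_sketch(blocks, 7);	/* prints runs 100..102, 200..201, 300 */
	return 0;
}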

/fs/ocfs2/

blockcheck.c
     99  u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr)  in ocfs2_hamming_encode()  (argument)
    120  b = calc_code_bit(nr + i, &p);  in ocfs2_hamming_encode()
    158  void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr,  in ocfs2_hamming_fix()  (argument)
    176  if (fix >= calc_code_bit(nr + d, NULL))  in ocfs2_hamming_fix()
    184  b = calc_code_bit(nr, NULL);  in ocfs2_hamming_fix()
    443  void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr,  in ocfs2_block_check_compute_bhs()  (argument)
    449  BUG_ON(nr < 0);  in ocfs2_block_check_compute_bhs()
    451  if (!nr)  in ocfs2_block_check_compute_bhs()
    456  for (i = 0, crc = ~0, ecc = 0; i < nr; i++) {  in ocfs2_block_check_compute_bhs()
    487  int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,  in ocfs2_block_check_validate_bhs()  (argument)
    [all …]

buffer_head_io.c
     92  unsigned int nr, struct buffer_head *bhs[])  in ocfs2_read_blocks_sync()  (argument)
     99  trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);  in ocfs2_read_blocks_sync()
    101  if (!nr)  in ocfs2_read_blocks_sync()
    109  for (i = 0 ; i < nr ; i++) {  in ocfs2_read_blocks_sync()
    156  for (i = nr; i > 0; i--) {  in ocfs2_read_blocks_sync()
    195  int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,  in ocfs2_read_blocks()  (argument)
    206  trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);  in ocfs2_read_blocks()
    218  if (nr < 0) {  in ocfs2_read_blocks()
    219  mlog(ML_ERROR, "asked to read %d blocks!\n", nr);  in ocfs2_read_blocks()
    225  if (nr == 0) {  in ocfs2_read_blocks()
    [all …]

blockcheck.h
     37  struct buffer_head **bhs, int nr,
     40  struct buffer_head **bhs, int nr,
     49  void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr,
     51  int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
     78  unsigned int nr);
     89  void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr,

/fs/ubifs/

shrinker.c
     58  static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention)  in shrink_tnc()  (argument)
     81  while (znode && total_freed < nr &&  in shrink_tnc()
    144  static int shrink_tnc_trees(int nr, int age, int *contention)  in shrink_tnc_trees()  (argument)
    187  freed += shrink_tnc(c, nr, age, contention);  in shrink_tnc_trees()
    198  if (freed >= nr)  in shrink_tnc_trees()
    282  unsigned long nr = sc->nr_to_scan;  in ubifs_shrink_scan()  (local)
    299  freed = shrink_tnc_trees(nr, OLD_ZNODE_AGE, &contention);  in ubifs_shrink_scan()
    300  if (freed >= nr)  in ubifs_shrink_scan()
    304  freed += shrink_tnc_trees(nr - freed, YOUNG_ZNODE_AGE, &contention);  in ubifs_shrink_scan()
    305  if (freed >= nr)  in ubifs_shrink_scan()
    [all …]
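
The snippets above show the usual shrinker shape: ubifs_shrink_scan() takes its budget from sc->nr_to_scan and shrink_tnc_trees() keeps freeing until freed >= nr. Here is a hedged, userspace-only sketch of that loop; cache_t and cache_evict_one() are invented for illustration and are not UBIFS APIs.

#include <stdio.h>

typedef struct { unsigned long objects; } cache_t;

/* Evict one object if the cache has any; return how many were freed (0 or 1). */
static unsigned long cache_evict_one(cache_t *c)
{
	if (!c->objects)
		return 0;
	c->objects--;
	return 1;
}

static unsigned long shrink_scan_sketch(cache_t *caches, int ncaches,
					unsigned long nr_to_scan)
{
	unsigned long freed = 0;
	int i;

	for (i = 0; i < ncaches && freed < nr_to_scan; i++) {
		/* Drain this cache until the budget is met or it runs dry,
		 * then move on to the next one. */
		while (freed < nr_to_scan && cache_evict_one(&caches[i]))
			freed++;
	}
	return freed;
}

int main(void)
{
	cache_t caches[2] = { { 3 }, { 10 } };

	printf("freed %lu\n", shrink_scan_sketch(caches, 2, 8));	/* freed 8 */
	return 0;
}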

/fs/

aio.c
     59  unsigned nr; /* number of io_events */  (member)
     83  unsigned nr;  (member)
    341  for (i = 0; i < table->nr; i++) {  in aio_ring_mremap()
    549  ring->nr = nr_events; /* user copy */  in aio_setup_ring()
    656  for (i = 0; i < table->nr; i++)  in ioctx_add_table()
    672  new_nr = (table ? table->nr : 1) * 4;  in ioctx_add_table()
    680  table->nr = new_nr;  in ioctx_add_table()
    687  } else if (table->nr > old->nr) {  in ioctx_add_table()
    689  old->nr * sizeof(struct kioctx *));  in ioctx_add_table()
    700  static void aio_nr_sub(unsigned nr)  in aio_nr_sub()  (argument)
    [all …]
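
The ioctx_add_table() lines above grow the context-id table by a factor of four and copy the old slots into the new table. A minimal userspace sketch of that pattern, with struct ctx_table standing in for struct kioctx_table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx_table {
	unsigned nr;		/* number of slots in this table */
	void *slots[];		/* flexible array of context pointers */
};

/* Allocate a table four times larger than "old" (or 4 slots if old is NULL)
 * and carry the old entries over. */
static struct ctx_table *grow_table(const struct ctx_table *old)
{
	unsigned new_nr = (old ? old->nr : 1) * 4;
	struct ctx_table *table;

	table = calloc(1, sizeof(*table) + new_nr * sizeof(void *));
	if (!table)
		return NULL;

	table->nr = new_nr;
	if (old)
		memcpy(table->slots, old->slots, old->nr * sizeof(void *));
	return table;
}

int main(void)
{
	struct ctx_table *small = grow_table(NULL);
	struct ctx_table *big = small ? grow_table(small) : NULL;

	if (!small || !big)
		return 1;
	printf("%u -> %u slots\n", small->nr, big->nr);	/* 4 -> 16 slots */
	free(small);
	free(big);
	return 0;
}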

file.c
     46  #define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))  (argument)
     47  #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))  (argument)
    105  static struct fdtable * alloc_fdtable(unsigned int nr)  in alloc_fdtable()  (argument)
    117  nr /= (1024 / sizeof(struct file *));  in alloc_fdtable()
    118  nr = roundup_pow_of_two(nr + 1);  in alloc_fdtable()
    119  nr *= (1024 / sizeof(struct file *));  in alloc_fdtable()
    120  nr = ALIGN(nr, BITS_PER_LONG);  in alloc_fdtable()
    129  if (unlikely(nr > sysctl_nr_open))  in alloc_fdtable()
    130  nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;  in alloc_fdtable()
    135  fdt->max_fds = nr;  in alloc_fdtable()
    [all …]
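
The arithmetic in alloc_fdtable() above expands a requested descriptor count so the struct file * array fills a power-of-two number of kilobytes and the count is a multiple of BITS_PER_LONG, keeping the fd bitmaps whole words. The sketch below redoes that arithmetic in userspace; roundup_pow_of_two() and ALIGN() are re-implemented here, and sizeof(void *) stands in for sizeof(struct file *).

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Simple userspace replacement for the kernel helper of the same name. */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int fdtable_size(unsigned int nr)
{
	nr /= (1024 / sizeof(void *));	/* KB-sized chunks of pointers */
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(void *));
	return ALIGN(nr, BITS_PER_LONG);
}

int main(void)
{
	printf("%u\n", fdtable_size(300));	/* 512 on a 64-bit box */
	return 0;
}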

coredump.c
    354  int nr = 0;  in zap_process()  (local)
    366  nr++;  in zap_process()
    370  return nr;  in zap_process()
    378  int nr = -EAGAIN;  in zap_threads()  (local)
    384  nr = zap_process(tsk, exit_code, 0);  in zap_threads()
    388  if (unlikely(nr < 0))  in zap_threads()
    389  return nr;  in zap_threads()
    392  if (atomic_read(&mm->mm_users) == nr + 1)  in zap_threads()
    436  nr += zap_process(p, exit_code,  in zap_threads()
    445  atomic_set(&core_state->nr_threads, nr);  in zap_threads()
    [all …]

select.c
    382  #define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)  (argument)
    383  #define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))  (argument)
    389  int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)  in get_fd_set()  (argument)
    391  nr = FDS_BYTES(nr);  in get_fd_set()
    393  return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;  in get_fd_set()
    395  memset(fdset, 0, nr);  in get_fd_set()
    400  set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)  in set_fd_set()  (argument)
    403  return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));  in set_fd_set()
    408  void zero_fd_set(unsigned long nr, unsigned long *fdset)  in zero_fd_set()  (argument)
    410  memset(fdset, 0, FDS_BYTES(nr));  in zero_fd_set()
    [all …]
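
The macros above size an fd_set covering nr descriptors by rounding the bit count up to whole longs; get_fd_set() then copies exactly FDS_BYTES(nr) bytes. A compilable restatement of that arithmetic (the macro names mirror fs/select.c; the printed values assume a 64-bit userspace):

#include <stdio.h>

#define FDS_BITPERLONG	(8 * sizeof(long))
#define FDS_LONGS(nr)	(((nr) + FDS_BITPERLONG - 1) / FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr) * sizeof(long))

int main(void)
{
	/* With 64-bit longs: 1 fd -> 8 bytes, 65 fds -> 16 bytes, 1024 fds -> 128 bytes. */
	printf("%zu %zu %zu\n", FDS_BYTES(1), FDS_BYTES(65), FDS_BYTES(1024));
	return 0;
}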

/fs/f2fs/

shrinker.c
     82  unsigned long nr = sc->nr_to_scan;  in f2fs_shrink_scan()  (local)
    109  freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2);  in f2fs_shrink_scan()
    112  freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2);  in f2fs_shrink_scan()
    115  if (freed < nr)  in f2fs_shrink_scan()
    116  freed += f2fs_try_to_free_nats(sbi, nr - freed);  in f2fs_shrink_scan()
    119  if (freed < nr)  in f2fs_shrink_scan()
    120  freed += f2fs_try_to_free_nids(sbi, nr - freed);  in f2fs_shrink_scan()
    126  if (freed >= nr)  in f2fs_shrink_scan()

/fs/sysv/

itree.c
     54  static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr)  in block_to_cpu()  (argument)
     56  return sbi->s_block_base + fs32_to_cpu(sbi, nr);  in block_to_cpu()
    333  sysv_zone_t nr = *p;  in free_data()  (local)
    334  if (nr) {  in free_data()
    336  sysv_free_block(inode->i_sb, nr);  in free_data()
    350  sysv_zone_t nr = *p;  in free_branches()  (local)
    351  if (!nr)  in free_branches()
    354  block = block_to_cpu(SYSV_SB(sb), nr);  in free_branches()
    361  sysv_free_block(sb, nr);  in free_branches()
    374  sysv_zone_t nr = 0;  in sysv_truncate()  (local)
    [all …]

balloc.c
     43  void sysv_free_block(struct super_block * sb, sysv_zone_t nr)  in sysv_free_block()  (argument)
     49  unsigned block = fs32_to_cpu(sbi, nr);  in sysv_free_block()
     92  sbi->s_bcache[count++] = nr;  in sysv_free_block()
    104  sysv_zone_t nr;  in sysv_new_block()  (local)
    113  nr = sbi->s_bcache[--count];  in sysv_new_block()
    114  if (nr == 0) /* Applies only to Xenix FS, SystemV FS */  in sysv_new_block()
    117  block = fs32_to_cpu(sbi, nr);  in sysv_new_block()
    152  return nr;  in sysv_new_block()
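
sysv_free_block() and sysv_new_block() above treat sbi->s_bcache as a small stack of cached free block numbers: free pushes, allocate pops, and 0 means "no cached block". A hedged userspace sketch of just that stack behaviour (the slot count and struct below are illustrative, not the on-disk format):

#include <stdio.h>

#define BCACHE_SLOTS 50

struct bcache {
	unsigned count;
	unsigned long blocks[BCACHE_SLOTS];
};

static int cache_free_block(struct bcache *c, unsigned long nr)
{
	if (c->count == BCACHE_SLOTS)
		return -1;		/* cache full: caller must spill to disk */
	c->blocks[c->count++] = nr;
	return 0;
}

static unsigned long cache_new_block(struct bcache *c)
{
	if (!c->count)
		return 0;		/* empty: caller must refill from disk */
	return c->blocks[--c->count];
}

int main(void)
{
	struct bcache c = { 0 };

	cache_free_block(&c, 123);
	cache_free_block(&c, 456);
	printf("%lu %lu %lu\n", cache_new_block(&c), cache_new_block(&c),
	       cache_new_block(&c));	/* 456 123 0 */
	return 0;
}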

/fs/hpfs/

alloc.c
    120  unsigned nr = (near & 0x3fff) & ~(n - 1);  in alloc_in_bmp()  (local)
    134  if (!tstbits(bmp, nr, n + forward)) {  in alloc_in_bmp()
    135  ret = bs + nr;  in alloc_in_bmp()
    138  q = nr + n; b = 0;  in alloc_in_bmp()
    143  if (q>>5 != nr>>5) {  in alloc_in_bmp()
    145  q = nr & 0x1f;  in alloc_in_bmp()
    147  } else if (q > nr) break;  in alloc_in_bmp()
    153  nr >>= 5;  in alloc_in_bmp()
    155  i = nr;  in alloc_in_bmp()
    178  } while (i != nr);  in alloc_in_bmp()
    [all …]
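
The first line of alloc_in_bmp() above turns the allocation hint into a starting bit: the hint is reduced modulo one 16384-bit bitmap (mask 0x3fff) and rounded down to a multiple of n, which must be a power of two. A tiny sketch of that masking:

#include <stdio.h>

/* Reduce "near" to a bit offset inside a 16384-bit bitmap, aligned down to n. */
static unsigned align_hint(unsigned near, unsigned n)
{
	return (near & 0x3fff) & ~(n - 1);
}

int main(void)
{
	printf("%u\n", align_hint(20000, 4));	/* 20000 % 16384 = 3616, already 4-aligned */
	printf("%u\n", align_hint(3619, 4));	/* rounds down to 3616 */
	return 0;
}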

/fs/reiserfs/

lbalance.c
    315  int nr, free_space;  in leaf_copy_items_entirely()  (local)
    338  nr = blkh_nr_item(blkh);  in leaf_copy_items_entirely()
    345  dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;  in leaf_copy_items_entirely()
    355  memmove(ih + cpy_num, ih, (nr - dest_before) * IH_SIZE);  in leaf_copy_items_entirely()
    365  for (i = dest_before; i < nr + cpy_num; i++) {  in leaf_copy_items_entirely()
    371  last_loc = ih_location(&ih[nr + cpy_num - 1 - dest_before]);  in leaf_copy_items_entirely()
    389  set_blkh_nr_item(blkh, nr + cpy_num);  in leaf_copy_items_entirely()
    907  int nr, free_space;  in leaf_insert_into_buf()  (local)
    915  nr = blkh_nr_item(blkh);  in leaf_insert_into_buf()
    930  last_loc = nr ? ih_location(&ih[nr - before - 1]) : bh->b_size;  in leaf_insert_into_buf()
    [all …]

ibalance.c
    133  int nr;  in internal_insert_childs()  (local)
    143  nr = blkh_nr_item(blkh);  in internal_insert_childs()
    153  memmove(dc + count, dc, (nr + 1 - (to + 1)) * DC_SIZE);  in internal_insert_childs()
    167  (nr - to) * KEY_SIZE + (nr + 1 + count) * DC_SIZE);  in internal_insert_childs()
    210  int nr;  in internal_delete_pointers_items()  (local)
    228  nr = blkh_nr_item(blkh);  in internal_delete_pointers_items()
    230  if (first_p == 0 && del_num == nr + 1) {  in internal_delete_pointers_items()
    245  memmove(dc, dc + del_num, (nr + 1 - first_p - del_num) * DC_SIZE);  in internal_delete_pointers_items()
    248  (nr - first_i - del_num) * KEY_SIZE + (nr + 1 -  in internal_delete_pointers_items()
    435  int nr;  in internal_insert_key()  (local)
    [all …]

/fs/afs/

addr_list.c
     28  struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,  in afs_alloc_addrlist()  (argument)
     35  _enter("%u,%u,%u", nr, service, port);  in afs_alloc_addrlist()
     37  if (nr > AFS_MAX_ADDRESSES)  in afs_alloc_addrlist()
     38  nr = AFS_MAX_ADDRESSES;  in afs_alloc_addrlist()
     40  alist = kzalloc(struct_size(alist, addrs, nr), GFP_KERNEL);  in afs_alloc_addrlist()
     45  alist->max_addrs = nr;  in afs_alloc_addrlist()
     47  for (i = 0; i < nr; i++) {  in afs_alloc_addrlist()
     73  unsigned int nr = 0;  in afs_parse_text_addrs()  (local)
     95  nr++;  in afs_parse_text_addrs()
    118  _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES);  in afs_parse_text_addrs()
    [all …]
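
afs_alloc_addrlist() above clamps nr to AFS_MAX_ADDRESSES and allocates the list header plus a flexible array of nr address slots in a single kzalloc(struct_size(...)) call. The userspace sketch below mirrors that shape; MAX_ADDRESSES, addr_entry and the plain sizeof arithmetic are illustrative stand-ins, not the real AFS definitions.

#include <stdlib.h>

#define MAX_ADDRESSES 15	/* illustrative cap, playing the role of AFS_MAX_ADDRESSES */

struct addr_entry { unsigned char bytes[16]; };

struct addr_list {
	unsigned int max_addrs;
	struct addr_entry addrs[];	/* flexible array member */
};

static struct addr_list *alloc_addrlist(unsigned int nr)
{
	struct addr_list *alist;

	if (nr > MAX_ADDRESSES)
		nr = MAX_ADDRESSES;

	/* Header plus nr trailing entries in one zeroed allocation. */
	alist = calloc(1, sizeof(*alist) + nr * sizeof(alist->addrs[0]));
	if (!alist)
		return NULL;

	alist->max_addrs = nr;
	return alist;
}

int main(void)
{
	struct addr_list *alist = alloc_addrlist(100);	/* clamped to 15 entries */

	free(alist);
	return 0;
}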

/fs/fat/

cache.c
    232  int nr;  in fat_get_cluster()  (local)
    262  nr = -EIO;  in fat_get_cluster()
    266  nr = fat_ent_read(inode, &fatent, *dclus);  in fat_get_cluster()
    267  if (nr < 0)  in fat_get_cluster()
    269  else if (nr == FAT_ENT_FREE) {  in fat_get_cluster()
    273  nr = -EIO;  in fat_get_cluster()
    275  } else if (nr == FAT_ENT_EOF) {  in fat_get_cluster()
    280  *dclus = nr;  in fat_get_cluster()
    284  nr = 0;  in fat_get_cluster()
    288  return nr;  in fat_get_cluster()
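
fat_get_cluster() above walks the FAT chain one entry at a time: fat_ent_read() yields either the next cluster, FAT_ENT_FREE (a broken chain, treated as -EIO) or FAT_ENT_EOF. The sketch below walks an in-memory table the same way; the sentinel values are invented for illustration, not the on-disk FAT encodings.

#include <stdio.h>

#define ENT_FREE  0u
#define ENT_EOF   0xffffffffu

/* Follow the chain "hops" times starting at "start"; return the cluster reached,
 * ENT_EOF if the chain ends early, or ENT_FREE on a broken chain. */
static unsigned walk_chain(const unsigned *fat, unsigned start, unsigned hops)
{
	unsigned dclus = start;

	while (hops--) {
		unsigned nr = fat[dclus];

		if (nr == ENT_FREE)
			return ENT_FREE;	/* invalid: chain points at a free cluster */
		if (nr == ENT_EOF)
			return ENT_EOF;		/* chain shorter than requested */
		dclus = nr;
	}
	return dclus;
}

int main(void)
{
	/* Chain: cluster 2 -> 5 -> 9 -> EOF. */
	unsigned fat[16] = { 0 };

	fat[2] = 5; fat[5] = 9; fat[9] = ENT_EOF;
	printf("%u\n", walk_chain(fat, 2, 2));	/* 9 */
	return 0;
}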

/fs/btrfs/

print-tree.c
    210  u32 type, nr;  in btrfs_print_leaf()  (local)
    227  nr = btrfs_header_nritems(l);  in btrfs_print_leaf()
    231  btrfs_header_bytenr(l), btrfs_header_generation(l), nr,  in btrfs_print_leaf()
    234  for (i = 0 ; i < nr ; i++) {  in btrfs_print_leaf()
    367  int i; u32 nr;  in btrfs_print_tree()  (local)
    374  nr = btrfs_header_nritems(c);  in btrfs_print_tree()
    383  nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr,  in btrfs_print_tree()
    386  for (i = 0; i < nr; i++) {  in btrfs_print_tree()
    395  for (i = 0; i < nr; i++) {  in btrfs_print_tree()

ctree.h
   1618  int nr)  in btrfs_stripe_nr()  (argument)
   1622  offset += nr * sizeof(struct btrfs_stripe);  in btrfs_stripe_nr()
   1626  static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)  in btrfs_stripe_dev_uuid_nr()  (argument)
   1628  return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));  in btrfs_stripe_dev_uuid_nr()
   1632  struct btrfs_chunk *c, int nr)  in btrfs_stripe_offset_nr()  (argument)
   1634  return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));  in btrfs_stripe_offset_nr()
   1638  struct btrfs_chunk *c, int nr)  in btrfs_stripe_devid_nr()  (argument)
   1640  return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));  in btrfs_stripe_devid_nr()
   1777  static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr)  in btrfs_node_blockptr()  (argument)
   1781  sizeof(struct btrfs_key_ptr) * nr;  in btrfs_node_blockptr()
    [all …]
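
The ctree.h helpers above locate the nr-th entry of an on-disk trailing array by plain offset arithmetic: the offset of the array's first element plus nr times the element size (btrfs_stripe_nr(), and likewise btrfs_node_blockptr() with struct btrfs_key_ptr). A simplified sketch of that arithmetic with stand-in structures, not the real btrfs on-disk layout:

#include <stddef.h>
#include <stdio.h>

struct stripe { unsigned long long devid, offset; };

struct chunk {
	unsigned long long length;
	unsigned short num_stripes;
	struct stripe stripes[];	/* trailing array, like btrfs_chunk's stripe member */
};

/* Byte offset of the nr-th stripe from the start of the chunk item. */
static size_t stripe_offset_nr(int nr)
{
	return offsetof(struct chunk, stripes) + nr * sizeof(struct stripe);
}

int main(void)
{
	printf("%zu %zu\n", stripe_offset_nr(0), stripe_offset_nr(3));
	return 0;
}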

/fs/xfs/scrub/

refcount.c
    156  xfs_nlink_t nr;  in xchk_refcountbt_process_rmap_fragments()  (local)
    186  nr = 0;  in xchk_refcountbt_process_rmap_fragments()
    188  if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)  in xchk_refcountbt_process_rmap_fragments()
    194  nr++;  in xchk_refcountbt_process_rmap_fragments()
    201  if (nr != target_nr)  in xchk_refcountbt_process_rmap_fragments()
    206  nr = 0;  in xchk_refcountbt_process_rmap_fragments()
    217  nr++;  in xchk_refcountbt_process_rmap_fragments()
    228  nr--;  in xchk_refcountbt_process_rmap_fragments()
    229  if (nr == 0)  in xchk_refcountbt_process_rmap_fragments()
    239  if (nr)  in xchk_refcountbt_process_rmap_fragments()

/fs/ext2/

inode.c
   1106  unsigned long nr;  in ext2_free_data()  (local)
   1109  nr = le32_to_cpu(*p);  in ext2_free_data()
   1110  if (nr) {  in ext2_free_data()
   1115  else if (block_to_free == nr - count)  in ext2_free_data()
   1121  block_to_free = nr;  in ext2_free_data()
   1146  unsigned long nr;  in ext2_free_branches()  (local)
   1151  nr = le32_to_cpu(*p);  in ext2_free_branches()
   1152  if (!nr)  in ext2_free_branches()
   1155  bh = sb_bread(inode->i_sb, nr);  in ext2_free_branches()
   1163  inode->i_ino, nr);  in ext2_free_branches()
    [all …]

/fs/jffs2/

malloc.c
    202  struct jffs2_eraseblock *jeb, int nr)  in jffs2_prealloc_raw_node_refs()  (argument)
    205  int i = nr;  in jffs2_prealloc_raw_node_refs()
    207  dbg_memalloc("%d\n", nr);  in jffs2_prealloc_raw_node_refs()
    212  dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);  in jffs2_prealloc_raw_node_refs()
    233  jeb->allocated_refs = nr;  in jffs2_prealloc_raw_node_refs()
    236  nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,  in jffs2_prealloc_raw_node_refs()