Searched refs:curr (Results 1 – 15 of 15) sorted by relevance

/fs/hfsplus/
bitmap.c
25 __be32 *pptr, *curr, *end; in hfsplus_block_allocate() local
43 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; in hfsplus_block_allocate()
52 val = *curr; in hfsplus_block_allocate()
61 curr++; in hfsplus_block_allocate()
65 while (curr < end) { in hfsplus_block_allocate()
66 val = *curr; in hfsplus_block_allocate()
75 curr++; in hfsplus_block_allocate()
87 curr = pptr = kmap(page); in hfsplus_block_allocate()
98 start = offset + (curr - pptr) * 32 + i; in hfsplus_block_allocate()
115 *curr++ = cpu_to_be32(n); in hfsplus_block_allocate()
[all …]
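In this hfsplus excerpt (and the very similar hfs one below), curr is a cursor over big-endian 32-bit bitmap words: each word is converted with be32_to_cpu(), scanned for a clear bit, modified, and written back with cpu_to_be32(). What follows is a minimal userspace sketch of that scan pattern, not the kernel code; it uses glibc's <endian.h> conversions in place of the kernel helpers, and bitmap_alloc_bit() is an invented name.

/* Illustrative userspace sketch of the curr/end word scan used by the
 * hfsplus/hfs bitmap allocators: walk big-endian 32-bit words, find the
 * first clear bit (MSB-first, as on disk), set it, and return its index. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static long bitmap_alloc_bit(uint32_t *bitmap, unsigned long nbits)
{
    uint32_t *curr = bitmap;
    uint32_t *end = bitmap + (nbits + 31) / 32;

    while (curr < end) {
        uint32_t val = be32toh(*curr);      /* kernel: be32_to_cpu(*curr) */

        if (val != 0xffffffffU) {           /* this word has a clear bit */
            for (int i = 31; i >= 0; i--) {
                if (!(val & (1U << i))) {
                    val |= 1U << i;
                    *curr = htobe32(val);   /* kernel: cpu_to_be32(val) */
                    return (curr - bitmap) * 32 + (31 - i);
                }
            }
        }
        curr++;
    }
    return -1;                              /* bitmap is full */
}

int main(void)
{
    uint32_t map[2] = { htobe32(0xffffffffU), htobe32(0x80000000U) };

    printf("allocated bit %ld\n", bitmap_alloc_bit(map, 64)); /* prints 33 */
    return 0;
}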
/fs/hfs/
bitmap.c
31 __be32 *curr, *end; in hfs_find_set_zero_bits() local
40 curr = bitmap + (offset / 32); in hfs_find_set_zero_bits()
44 val = *curr; in hfs_find_set_zero_bits()
56 while (++curr < end) { in hfs_find_set_zero_bits()
57 val = *curr; in hfs_find_set_zero_bits()
70 start = (curr - bitmap) * 32 + i; in hfs_find_set_zero_bits()
85 *curr++ = cpu_to_be32(n); in hfs_find_set_zero_bits()
88 n = be32_to_cpu(*curr); in hfs_find_set_zero_bits()
95 *curr++ = cpu_to_be32(0xffffffff); in hfs_find_set_zero_bits()
107 *curr = cpu_to_be32(n); in hfs_find_set_zero_bits()
[all …]
/fs/btrfs/
delayed-inode.c
723 struct btrfs_delayed_item *curr, *next; in btrfs_batch_insert_items() local
755 curr = next; in btrfs_batch_insert_items()
756 next = __btrfs_next_delayed_item(curr); in btrfs_batch_insert_items()
760 if (!btrfs_is_continuous_delayed_item(curr, next)) in btrfs_batch_insert_items()
805 list_for_each_entry_safe(curr, next, &head, tree_list) { in btrfs_batch_insert_items()
807 write_extent_buffer(leaf, &curr->data, in btrfs_batch_insert_items()
809 curr->data_len); in btrfs_batch_insert_items()
812 btrfs_delayed_item_release_metadata(fs_info, curr); in btrfs_batch_insert_items()
814 list_del(&curr->tree_list); in btrfs_batch_insert_items()
815 btrfs_release_delayed_item(curr); in btrfs_batch_insert_items()
[all …]
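Here curr and next drive list_for_each_entry_safe(): each delayed item is copied into the leaf, then unlinked and released, so the next pointer has to be captured before curr is torn down. Below is a small userspace analogue of that delete-while-iterating pattern; struct item and release_all() are made-up names and a hand-rolled list stands in for <linux/list.h>.

/* Userspace analogue of list_for_each_entry_safe() followed by deletion:
 * remember the next node before the current one is freed. */
#include <stdio.h>
#include <stdlib.h>

struct item {
    int data;
    struct item *next;
};

static void release_all(struct item *head)
{
    struct item *curr, *next;

    for (curr = head; curr != NULL; curr = next) {
        next = curr->next;          /* save before curr is torn down */
        printf("flushing item %d\n", curr->data);
        free(curr);                 /* kernel code unlinks and drops a ref */
    }
}

int main(void)
{
    struct item *head = NULL;

    for (int i = 3; i > 0; i--) {   /* build 1 -> 2 -> 3 */
        struct item *it = malloc(sizeof(*it));
        it->data = i;
        it->next = head;
        head = it;
    }
    release_all(head);
    return 0;
}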
scrub.c
169 int curr; member
644 if (sctx->curr != -1) { in scrub_free_ctx()
645 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
686 sctx->curr = -1; in scrub_setup_ctx()
2297 if (sctx->curr == -1) in scrub_submit()
2300 sbio = sctx->bios[sctx->curr]; in scrub_submit()
2301 sctx->curr = -1; in scrub_submit()
2317 while (sctx->curr == -1) { in scrub_add_page_to_rd_bio()
2319 sctx->curr = sctx->first_free; in scrub_add_page_to_rd_bio()
2320 if (sctx->curr != -1) { in scrub_add_page_to_rd_bio()
[all …]
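scrub.c uses curr as an index into a fixed pool of in-flight scrub bios, with -1 meaning no bio is currently being filled: submission hands the current slot to the I/O path and resets curr to -1, and the add-page path claims a free slot whenever curr is -1. The toy sketch below shows only that sentinel-index bookkeeping; struct ctx, claim_slot(), and submit() are invented for the example and do no real I/O.

/* Toy version of the "curr == -1 means nothing in flight" pattern. */
#include <stdio.h>

#define NSLOTS 4

struct ctx {
    int curr;           /* index of the slot being filled, or -1 */
    int used[NSLOTS];   /* 1 if the slot is busy */
    int fill[NSLOTS];   /* pages accumulated in each slot */
};

static int claim_slot(struct ctx *c)
{
    if (c->curr == -1) {            /* nothing open: grab a free slot */
        for (int i = 0; i < NSLOTS; i++) {
            if (!c->used[i]) {
                c->used[i] = 1;
                c->fill[i] = 0;
                c->curr = i;
                break;
            }
        }
    }
    return c->curr;                 /* may still be -1 if the pool is full */
}

static void submit(struct ctx *c)
{
    if (c->curr == -1)              /* nothing to submit */
        return;
    printf("submitting slot %d with %d pages\n", c->curr, c->fill[c->curr]);
    c->curr = -1;                   /* slot now belongs to the I/O path */
}

int main(void)
{
    struct ctx c = { .curr = -1 };

    claim_slot(&c);
    c.fill[c.curr] += 3;
    submit(&c);
    return 0;
}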
volumes.c
7279 struct btrfs_device *curr, *next; in btrfs_update_commit_device_size() local
7286 list_for_each_entry_safe(curr, next, &fs_devices->resized_devices, in btrfs_update_commit_device_size()
7288 list_del_init(&curr->resized_list); in btrfs_update_commit_device_size()
7289 curr->commit_total_bytes = curr->disk_total_bytes; in btrfs_update_commit_device_size()
/fs/nilfs2/
cpfile.c
62 __u64 curr, in nilfs_cpfile_checkpoints_in_block() argument
67 nilfs_cpfile_get_offset(cpfile, curr), in nilfs_cpfile_checkpoints_in_block()
68 max - curr); in nilfs_cpfile_checkpoints_in_block()
492 __u64 curr = *cnop, next; in nilfs_cpfile_do_get_ssinfo() local
499 if (curr == 0) { in nilfs_cpfile_do_get_ssinfo()
505 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); in nilfs_cpfile_do_get_ssinfo()
508 if (curr == 0) { in nilfs_cpfile_do_get_ssinfo()
512 } else if (unlikely(curr == ~(__u64)0)) { in nilfs_cpfile_do_get_ssinfo()
517 curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); in nilfs_cpfile_do_get_ssinfo()
518 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); in nilfs_cpfile_do_get_ssinfo()
[all …]
alloc.c
430 unsigned long curr, unsigned long max) in nilfs_palloc_rest_groups_in_desc_block() argument
434 curr % nilfs_palloc_groups_per_desc_block(inode), in nilfs_palloc_rest_groups_in_desc_block()
435 max - curr + 1); in nilfs_palloc_rest_groups_in_desc_block()
sufile.c
73 nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr, in nilfs_sufile_segment_usages_in_block() argument
78 nilfs_sufile_get_offset(sufile, curr), in nilfs_sufile_segment_usages_in_block()
79 max - curr + 1); in nilfs_sufile_segment_usages_in_block()
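All three nilfs2 hits use curr as an absolute entry or group number and compute how many entries can still be handled before the current metadata block runs out: the smaller of "entries left in this block" and "entries left up to max" (inclusive in the alloc.c/sufile.c variants, hence the + 1). A tiny sketch of that computation, with an invented entries_per_block parameter:

/* Sketch of "how many entries fit before the current block ends",
 * in the spirit of the nilfs2 helpers above. */
#include <stdio.h>

static unsigned long entries_in_block(unsigned long curr, unsigned long max,
                                      unsigned long entries_per_block)
{
    unsigned long to_block_end = entries_per_block - curr % entries_per_block;
    unsigned long to_max = max - curr + 1;  /* max is inclusive here */

    return to_block_end < to_max ? to_block_end : to_max;
}

int main(void)
{
    /* entry 10 of 128 per block, scanning up to entry 200 inclusive */
    printf("%lu\n", entries_in_block(10, 200, 128));    /* prints 118 */
    return 0;
}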
/fs/udf/
super.c
1616 struct udf_vds_record *curr; in udf_process_sequence() local
1647 curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; in udf_process_sequence()
1648 if (vdsn >= curr->volDescSeqNum) { in udf_process_sequence()
1649 curr->volDescSeqNum = vdsn; in udf_process_sequence()
1650 curr->block = block; in udf_process_sequence()
1654 curr = &vds[VDS_POS_VOL_DESC_PTR]; in udf_process_sequence()
1655 if (vdsn >= curr->volDescSeqNum) { in udf_process_sequence()
1656 curr->volDescSeqNum = vdsn; in udf_process_sequence()
1657 curr->block = block; in udf_process_sequence()
1669 curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; in udf_process_sequence()
[all …]
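In udf_process_sequence(), curr points at the vds[] slot for the descriptor type just read, and the slot is only updated when the new volume descriptor sequence number is greater than or equal to the stored one, so the latest descriptor of each type wins. A condensed sketch of that keep-the-newest update; struct vds_record here is a simplified stand-in for the kernel's struct udf_vds_record.

/* Keep, per descriptor type, the block of the descriptor with the
 * highest sequence number seen so far. */
#include <stdint.h>
#include <stdio.h>

struct vds_record {
    uint32_t seq;   /* highest volDescSeqNum seen for this type */
    uint32_t block; /* block that descriptor was read from */
};

static void record_descriptor(struct vds_record *curr, uint32_t vdsn,
                              uint32_t block)
{
    if (vdsn >= curr->seq) {    /* newer (or equal) descriptor wins */
        curr->seq = vdsn;
        curr->block = block;
    }
}

int main(void)
{
    struct vds_record primary = { 0, 0 };

    record_descriptor(&primary, 3, 257);
    record_descriptor(&primary, 2, 300);    /* older sequence: ignored */
    printf("primary descriptor at block %u (seq %u)\n",
           primary.block, primary.seq);     /* block 257, seq 3 */
    return 0;
}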
inode.c
918 int curr = *c; in udf_split_extents() local
919 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + in udf_split_extents()
921 int8_t etype = (laarr[curr].extLength >> 30); in udf_split_extents()
926 laarr[curr + 2] = laarr[curr + 1]; in udf_split_extents()
927 laarr[curr + 1] = laarr[curr]; in udf_split_extents()
929 laarr[curr + 3] = laarr[curr + 1]; in udf_split_extents()
930 laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; in udf_split_extents()
936 &laarr[curr].extLocation, in udf_split_extents()
938 laarr[curr].extLength = in udf_split_extents()
941 laarr[curr].extLocation.logicalBlockNum = 0; in udf_split_extents()
[all …]
/fs/
coredump.c
450 struct core_thread *curr, *next; in coredump_finish() local
461 while ((curr = next) != NULL) { in coredump_finish()
462 next = curr->next; in coredump_finish()
463 task = curr->task; in coredump_finish()
469 curr->task = NULL; in coredump_finish()
/fs/afs/
dir.c
238 unsigned offset, next, curr; in afs_dir_iterate_block() local
244 curr = (ctx->pos - blkoff) / sizeof(union afs_dirent); in afs_dir_iterate_block()
258 if (offset >= curr) in afs_dir_iterate_block()
272 (offset < curr ? "skip" : "fill"), in afs_dir_iterate_block()
301 if (offset < curr) in afs_dir_iterate_block()
/fs/ocfs2/
suballoc.c
422 u16 curr, best; in ocfs2_find_smallest_chain() local
424 best = curr = 0; in ocfs2_find_smallest_chain()
425 while (curr < le16_to_cpu(cl->cl_count)) { in ocfs2_find_smallest_chain()
427 le32_to_cpu(cl->cl_recs[curr].c_total)) in ocfs2_find_smallest_chain()
428 best = curr; in ocfs2_find_smallest_chain()
429 curr++; in ocfs2_find_smallest_chain()
1403 u16 curr, best; in ocfs2_find_victim_chain() local
1407 best = curr = 0; in ocfs2_find_victim_chain()
1408 while (curr < le16_to_cpu(cl->cl_next_free_rec)) { in ocfs2_find_victim_chain()
1409 if (le32_to_cpu(cl->cl_recs[curr].c_free) > in ocfs2_find_victim_chain()
[all …]
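The two ocfs2 hits are plain selection loops: ocfs2_find_smallest_chain() walks the chain records with curr and keeps the index of the smallest c_total, and ocfs2_find_victim_chain() does the mirror-image search for the most free bits. A host-endian sketch of the first loop follows; the real fields are little-endian on disk and go through le16_to_cpu()/le32_to_cpu() in the kernel.

/* Sketch of an ocfs2_find_smallest_chain()-style loop: return the index
 * of the chain record with the smallest total, using a curr/best pair. */
#include <stdint.h>
#include <stdio.h>

struct chain_rec {
    uint32_t total; /* kernel: le32_to_cpu(cl->cl_recs[curr].c_total) */
    uint32_t free;
};

static uint16_t find_smallest_chain(const struct chain_rec *recs, uint16_t count)
{
    uint16_t curr, best;

    best = curr = 0;
    while (curr < count) {
        if (recs[best].total > recs[curr].total)
            best = curr;
        curr++;
    }
    return best;
}

int main(void)
{
    struct chain_rec recs[] = { { 40, 4 }, { 12, 3 }, { 30, 9 } };

    printf("smallest chain: %u\n", find_smallest_chain(recs, 3));  /* 1 */
    return 0;
}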
/fs/ext2/
balloc.c
1152 int curr = my_rsv->rsv_end - in ext2_try_to_allocate_with_rsv() local
1155 if (curr < *count) in ext2_try_to_allocate_with_rsv()
1157 *count - curr); in ext2_try_to_allocate_with_rsv()
/fs/xfs/libxfs/
xfs_da_btree.c
1456 struct xfs_da_blkinfo *curr; in xfs_da3_node_lookup_int() local
1492 curr = blk->bp->b_addr; in xfs_da3_node_lookup_int()
1493 blk->magic = be16_to_cpu(curr->magic); in xfs_da3_node_lookup_int()