/fs/hfsplus/
  bitmap.c
    in hfsplus_block_allocate():
       25  __be32 *pptr, *curr, *end;    (local)
       43  curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
       52  val = *curr;
       61  curr++;
       65  while (curr < end) {
       66  val = *curr;
       75  curr++;
       87  curr = pptr = kmap(page);
       98  start = offset + (curr - pptr) * 32 + i;
      115  *curr++ = cpu_to_be32(n);
    [all …]

/fs/hfs/
  bitmap.c
    in hfs_find_set_zero_bits():
       31  __be32 *curr, *end;    (local)
       40  curr = bitmap + (offset / 32);
       44  val = *curr;
       56  while (++curr < end) {
       57  val = *curr;
       70  start = (curr - bitmap) * 32 + i;
       85  *curr++ = cpu_to_be32(n);
       88  n = be32_to_cpu(*curr);
       95  *curr++ = cpu_to_be32(0xffffffff);
      107  *curr = cpu_to_be32(n);
    [all …]

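The two bitmap.c listings above are the same pattern: walk the allocation bitmap one 32-bit big-endian word at a time, locate a run of clear bits, set them, and report the starting bit number. Below is a minimal userspace sketch of that scan, assuming plain uint32_t words with bit 0 as the most significant bit of the first word (the orientation the kernel's masks imply); the helper name and the simplified single-pass logic are illustrative, not the kernel code.

/*
 * Minimal sketch of the HFS-style bitmap scan: find the first clear bit,
 * set up to "count" bits from there, return the starting bit number.
 */
#include <stdint.h>
#include <stdio.h>

static long find_and_set_zero_bits(uint32_t *bitmap, long size, long count)
{
	uint32_t *curr = bitmap;
	uint32_t *end = bitmap + (size + 31) / 32;
	long start, i, n;
	uint32_t val, mask;

	/* Skip words that are already completely allocated. */
	while (curr < end && *curr == 0xffffffffu)
		curr++;
	if (curr >= end)
		return -1;

	/* Locate the first clear bit in this word (bit 0 = MSB). */
	val = *curr;
	for (i = 0; i < 32; i++)
		if (!(val & (0x80000000u >> i)))
			break;

	start = (curr - bitmap) * 32 + i;
	if (start >= size)
		return -1;

	/* Set at most "count" bits, stopping at the next used bit. */
	n = 0;
	while (n < count && start + n < size) {
		curr = bitmap + (start + n) / 32;
		mask = 0x80000000u >> ((start + n) % 32);
		if (*curr & mask)
			break;
		*curr |= mask;
		n++;
	}
	return n ? start : -1;
}

int main(void)
{
	uint32_t map[2] = { 0xffc00000u, 0 };	/* first 10 bits used */
	long start = find_and_set_zero_bits(map, 64, 4);

	printf("allocated 4 bits starting at %ld\n", start);	/* 10 */
	return 0;
}
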
/fs/btrfs/
  delayed-inode.c
    in btrfs_batch_insert_items():
      698  struct btrfs_delayed_item *curr, *next;    (local)
      730  curr = next;
      731  next = __btrfs_next_delayed_item(curr);
      735  if (!btrfs_is_continuous_delayed_item(curr, next))
      777  list_for_each_entry_safe(curr, next, &head, tree_list) {
      779  write_extent_buffer(leaf, &curr->data,
      781  curr->data_len);
      784  btrfs_delayed_item_release_metadata(root, curr);
      786  list_del(&curr->tree_list);
      787  btrfs_release_delayed_item(curr);
    [all …]

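The tail of the btrfs_batch_insert_items() listing above is the classic list_for_each_entry_safe() idiom: the next node is remembered before the current one is unlinked and freed, so the list can be drained while iterating. A self-contained userspace sketch of that idiom follows; the mini list helpers and the item payload are stand-ins for <linux/list.h> and struct btrfs_delayed_item, not the real API.

/*
 * Sketch of "safe" list draining: grab ->next before unlinking and
 * freeing the current node.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *h)
{
	new->prev = h->prev;
	new->next = h;
	h->prev->next = new;
	h->prev = new;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct item {
	int data;
	struct list_head tree_list;
};

int main(void)
{
	struct list_head head;
	struct list_head *pos, *n;
	struct item *curr;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++) {
		curr = malloc(sizeof(*curr));
		curr->data = i;
		list_add_tail(&curr->tree_list, &head);
	}

	/* Hand-expanded list_for_each_entry_safe(): n caches the next node. */
	for (pos = head.next, n = pos->next; pos != &head;
	     pos = n, n = pos->next) {
		curr = container_of(pos, struct item, tree_list);
		printf("flushing item %d\n", curr->data);
		list_del(&curr->tree_list);
		free(curr);
	}
	return 0;
}
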
  scrub.c
      158  int curr;    (member)
    in scrub_free_ctx():
      546  if (sctx->curr != -1) {
      547  struct scrub_bio *sbio = sctx->bios[sctx->curr];
    in scrub_setup_ctx():
      587  sctx->curr = -1;
    in scrub_submit():
     2024  if (sctx->curr == -1)
     2027  sbio = sctx->bios[sctx->curr];
     2028  sctx->curr = -1;
    in scrub_add_page_to_rd_bio():
     2044  while (sctx->curr == -1) {
     2046  sctx->curr = sctx->first_free;
     2047  if (sctx->curr != -1) {
    [all …]

  volumes.c
    in btrfs_commit_device_sizes():
     7602  struct btrfs_device *curr, *next;    (local)
     7615  list_for_each_entry_safe(curr, next, &trans->dev_update_list,
     7617  list_del_init(&curr->post_commit_list);
     7618  curr->commit_total_bytes = curr->disk_total_bytes;
     7619  curr->commit_bytes_used = curr->bytes_used;

/fs/erofs/
  zpvec.h
       31  struct page *curr, *next;    (member)
    in z_erofs_pagevec_ctor_exit():
       40  if (!ctor->curr)
       46  kunmap(ctor->curr);
    in z_erofs_pagevec_ctor_pagedown():
       78  ctor->curr = next;
       81  kmap_atomic(ctor->curr) : kmap(ctor->curr);
    in z_erofs_pagevec_ctor_init():
       93  ctor->curr = ctor->next = NULL;

/fs/nilfs2/
  cpfile.c
    in nilfs_cpfile_checkpoints_in_block():
       53  __u64 curr,    (argument)
       58  nilfs_cpfile_get_offset(cpfile, curr),
       59  max - curr);
    in nilfs_cpfile_do_get_ssinfo():
      483  __u64 curr = *cnop, next;    (local)
      490  if (curr == 0) {
      496  curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
      499  if (curr == 0) {
      503  } else if (unlikely(curr == ~(__u64)0)) {
      508  curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
      509  ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
    [all …]

  alloc.c
    in nilfs_palloc_rest_groups_in_desc_block():
      421  unsigned long curr, unsigned long max)    (argument)
      425  curr % nilfs_palloc_groups_per_desc_block(inode),
      426  max - curr + 1);

  sufile.c
    in nilfs_sufile_segment_usages_in_block():
       64  nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,    (argument)
       69  nilfs_sufile_get_offset(sufile, curr),
       70  max - curr + 1);

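The three nilfs2 helpers above (checkpoints, palloc groups, segment usages) all answer the same question: of the entries the caller still wants, how many fit in the metadata block that holds entry curr? A tiny sketch of that clamp is below; the entries-per-block constant is invented, and the inclusive "max - curr + 1" bound follows the alloc.c/sufile.c variants (cpfile.c passes max - curr instead).

/*
 * Sketch of the "entries remaining in this block" clamp used by the
 * nilfs2 helpers above.  ENTRIES_PER_BLOCK is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_BLOCK 128

static unsigned long entries_in_block(uint64_t curr, uint64_t max)
{
	unsigned long in_block = ENTRIES_PER_BLOCK - (curr % ENTRIES_PER_BLOCK);
	unsigned long wanted = max - curr + 1;

	return in_block < wanted ? in_block : wanted;
}

int main(void)
{
	/* 6 entries remain in the block that holds entry 122. */
	printf("%lu\n", entries_in_block(122, 500));
	/* Only 3 entries are wanted, so that cap applies instead. */
	printf("%lu\n", entries_in_block(122, 124));
	return 0;
}
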
/fs/
  char_dev.c
    in __register_chrdev_region():
      100  struct char_device_struct *cd, *curr, *prev = NULL;    (local)
      134  for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
      135  if (curr->major < major)
      138  if (curr->major > major)
      141  if (curr->baseminor + curr->minorct <= baseminor)
      144  if (curr->baseminor >= baseminor + minorct)
      156  cd->next = curr;

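The __register_chrdev_region() hits above scan a bucket of registrations kept sorted by major and then base minor, looking for the insertion point and rejecting a minor range that overlaps an existing one. The userspace sketch below shows that scan over a single sorted singly linked list; the field names mirror char_device_struct, but the hash of buckets, locking, and real error handling are left out.

/*
 * Simplified sketch of sorted insert with overlap check, in the style of
 * __register_chrdev_region().  One global list stands in for one bucket.
 */
#include <stdio.h>
#include <stdlib.h>

struct chrdev_range {
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	struct chrdev_range *next;
};

static struct chrdev_range *head;

/* Returns 0 on success, -1 if [baseminor, baseminor+minorct) overlaps. */
static int register_range(unsigned int major, unsigned int baseminor,
			  int minorct)
{
	struct chrdev_range *cd, *curr, *prev = NULL;

	for (curr = head; curr; prev = curr, curr = curr->next) {
		if (curr->major < major)
			continue;		/* keep scanning */
		if (curr->major > major)
			break;			/* insertion point found */
		/* Same major: compare the minor ranges. */
		if (curr->baseminor + curr->minorct <= baseminor)
			continue;		/* existing range is below ours */
		if (curr->baseminor >= baseminor + minorct)
			break;			/* existing range is above ours */
		return -1;			/* ranges overlap */
	}

	cd = calloc(1, sizeof(*cd));
	if (!cd)
		return -1;
	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	cd->next = curr;
	if (prev)
		prev->next = cd;
	else
		head = cd;
	return 0;
}

int main(void)
{
	printf("%d\n", register_range(10, 0, 16));	/* 0: ok */
	printf("%d\n", register_range(10, 32, 16));	/* 0: ok */
	printf("%d\n", register_range(10, 8, 16));	/* -1: overlaps 0..15 */
	return 0;
}
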
  coredump.c
    in coredump_finish():
      476  struct core_thread *curr, *next;    (local)
      487  while ((curr = next) != NULL) {
      488  next = curr->next;
      489  task = curr->task;
      495  curr->task = NULL;

  io_uring.c
    in io_wake_function():
     2900  static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,    (argument)
     2903  struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
     2909  return autoremove_wake_function(curr, mode, wake_flags, key);

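io_wake_function() above is a custom wait-queue callback: the generic wait_queue_entry it receives is embedded in a larger io_wait_queue, and container_of() recovers the outer structure so the wake-up decision can consult io_uring state before deferring to autoremove_wake_function(). The recovery step is the reusable part; the sketch below demonstrates it outside the kernel with invented structures and a plain callback.

/*
 * Userspace sketch of the container_of() pattern: a callback receives a
 * pointer to an embedded member and recovers the enclosing structure.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct wait_queue_entry: only the callback matters here. */
struct wait_entry {
	int (*func)(struct wait_entry *entry, int mode);
};

/* Stand-in for struct io_wait_queue: the entry embedded in larger state. */
struct io_wait {
	struct wait_entry wq;
	unsigned int to_wait;
	unsigned int nr_events;
};

static int wake_function(struct wait_entry *entry, int mode)
{
	struct io_wait *iowq = container_of(entry, struct io_wait, wq);

	/* Only report a wake-up once enough events are available, loosely
	 * mirroring the should-wake check io_wake_function() makes before
	 * calling the default wake function. */
	if (iowq->nr_events < iowq->to_wait)
		return 0;
	return 1;
}

int main(void)
{
	struct io_wait w = { .wq.func = wake_function, .to_wait = 4 };

	w.nr_events = 2;
	printf("wake? %d\n", w.wq.func(&w.wq, 0));	/* 0 */
	w.nr_events = 5;
	printf("wake? %d\n", w.wq.func(&w.wq, 0));	/* 1 */
	return 0;
}
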
/fs/udf/
  inode.c
    in udf_split_extents():
      925  int curr = *c;    (local)
      926  int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
      928  int8_t etype = (laarr[curr].extLength >> 30);
      933  laarr[curr + 2] = laarr[curr + 1];
      934  laarr[curr + 1] = laarr[curr];
      936  laarr[curr + 3] = laarr[curr + 1];
      937  laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
      943  &laarr[curr].extLocation,
      945  laarr[curr].extLength =
      948  laarr[curr].extLocation.logicalBlockNum = 0;
    [all …]

  super.c
    in udf_process_sequence():
     1662  struct udf_vds_record *curr;    (local)
     1718  curr = get_volume_descriptor_record(ident, bh, &data);
     1719  if (IS_ERR(curr)) {
     1721  return PTR_ERR(curr);
     1724  if (!curr)
     1726  if (vdsn >= curr->volDescSeqNum) {
     1727  curr->volDescSeqNum = vdsn;
     1728  curr->block = block;

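The last three udf_process_sequence() lines above implement a simple rule: while walking the Volume Descriptor Sequence, the slot for each descriptor type keeps the copy with the highest volDescSeqNum. A small sketch of that rule, with a simplified record structure and an invented helper name:

/*
 * Sketch of "keep the newest descriptor": later copies with a higher or
 * equal sequence number replace what the record currently points at.
 */
#include <stdint.h>
#include <stdio.h>

struct vds_record {
	uint32_t volDescSeqNum;
	uint32_t block;
};

static void note_descriptor(struct vds_record *curr, uint32_t vdsn,
			    uint32_t block)
{
	if (vdsn >= curr->volDescSeqNum) {
		curr->volDescSeqNum = vdsn;
		curr->block = block;
	}
}

int main(void)
{
	struct vds_record pvd = { 0, 0 };

	note_descriptor(&pvd, 3, 257);
	note_descriptor(&pvd, 2, 260);	/* older copy, ignored */
	printf("use descriptor at block %u (seq %u)\n",
	       pvd.block, pvd.volDescSeqNum);
	return 0;
}
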
/fs/ocfs2/
  suballoc.c
    in ocfs2_find_smallest_chain():
      406  u16 curr, best;    (local)
      408  best = curr = 0;
      409  while (curr < le16_to_cpu(cl->cl_count)) {
      411  le32_to_cpu(cl->cl_recs[curr].c_total))
      412  best = curr;
      413  curr++;
    in ocfs2_find_victim_chain():
     1384  u16 curr, best;    (local)
     1388  best = curr = 0;
     1389  while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
     1390  if (le32_to_cpu(cl->cl_recs[curr].c_free) >
    [all …]

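Both ocfs2 helpers above are the same linear scan over the chain records: ocfs2_find_smallest_chain() keeps the index with the smallest c_total, ocfs2_find_victim_chain() the index with the largest c_free. A plain C sketch of that argmin/argmax walk follows; the record layout is simplified, and the kernel reads the on-disk counters through le16_to_cpu()/le32_to_cpu().

/*
 * Sketch of the ocfs2 chain-selection scans: walk an array of per-chain
 * counters and remember the "best" index seen so far.
 */
#include <stdint.h>
#include <stdio.h>

struct chain_rec {
	uint32_t c_free;	/* free bits in this chain */
	uint32_t c_total;	/* total bits in this chain */
};

/* Chain with the fewest total bits: the cheapest one to grow. */
static uint16_t find_smallest_chain(const struct chain_rec *recs, uint16_t count)
{
	uint16_t curr, best;

	best = curr = 0;
	while (curr < count) {
		if (recs[best].c_total > recs[curr].c_total)
			best = curr;
		curr++;
	}
	return best;
}

/* Chain with the most free bits: the preferred victim for an allocation. */
static uint16_t find_victim_chain(const struct chain_rec *recs, uint16_t count)
{
	uint16_t curr, best;

	best = curr = 0;
	while (curr < count) {
		if (recs[curr].c_free > recs[best].c_free)
			best = curr;
		curr++;
	}
	return best;
}

int main(void)
{
	struct chain_rec recs[] = {
		{ .c_free = 10, .c_total = 100 },
		{ .c_free = 40, .c_total = 80 },
		{ .c_free = 25, .c_total = 60 },
	};

	printf("smallest: %d\n", find_smallest_chain(recs, 3));	/* 2 */
	printf("victim:   %d\n", find_victim_chain(recs, 3));	/* 1 */
	return 0;
}
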
/fs/ext2/
  balloc.c
    in ext2_try_to_allocate_with_rsv():
     1150  int curr = my_rsv->rsv_end -    (local)
     1153  if (curr < *count)
     1155  *count - curr);

/fs/afs/
  dir.c
    in afs_dir_iterate_block():
      356  unsigned offset, next, curr;    (local)
      362  curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
      376  if (offset >= curr)
      390  (offset < curr ? "skip" : "fill"),
      419  if (offset < curr)

/fs/xfs/libxfs/
  xfs_da_btree.c
    in xfs_da3_node_lookup_int():
     1488  struct xfs_da_blkinfo *curr;    (local)
     1526  curr = blk->bp->b_addr;
     1527  magic = be16_to_cpu(curr->magic);