/fs/ocfs2/ |
D | move_extents.c |
      44  struct ocfs2_move_extents *range;  member
     297  context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;  in ocfs2_defrag_extent()
     468  struct ocfs2_move_extents *range)  in ocfs2_validate_and_adjust_move_goal() argument
     481  range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,  in ocfs2_validate_and_adjust_move_goal()
     482  range->me_goal);  in ocfs2_validate_and_adjust_move_goal()
     487  ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,  in ocfs2_validate_and_adjust_move_goal()
     500  if (range->me_goal == le64_to_cpu(bg->bg_blkno))  in ocfs2_validate_and_adjust_move_goal()
     501  range->me_goal += c_to_b;  in ocfs2_validate_and_adjust_move_goal()
     507  range->me_len) {  in ocfs2_validate_and_adjust_move_goal()
     516  range->me_goal);  in ocfs2_validate_and_adjust_move_goal()
    [all …]
|
D | ioctl.c |
     922  struct fstrim_range range;  in ocfs2_ioctl() local
     931  if (copy_from_user(&range, argp, sizeof(range)))  in ocfs2_ioctl()
     934  range.minlen = max_t(u64, q->limits.discard_granularity,  in ocfs2_ioctl()
     935  range.minlen);  in ocfs2_ioctl()
     936  ret = ocfs2_trim_fs(sb, &range);  in ocfs2_ioctl()
     940  if (copy_to_user(argp, &range, sizeof(range)))  in ocfs2_ioctl()
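
Note: the ocfs2_ioctl() hits above are the kernel side of the FITRIM ioctl (copy the user's fstrim_range in, clamp minlen to the discard granularity, trim, copy the result back). For reference, a minimal userspace caller of the same ioctl looks roughly like this; the mount point "/mnt" is only an example:

/*
 * Issue FITRIM on a mounted filesystem.  start/len/minlen are in bytes;
 * on success the kernel writes the number of bytes it discarded back
 * into range.len.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

int main(void)
{
    struct fstrim_range range = {
        .start  = 0,
        .len    = UINT64_MAX,   /* trim the whole filesystem */
        .minlen = 0,            /* kernel clamps this to the discard granularity */
    };
    int fd = open("/mnt", O_RDONLY);   /* example mount point */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, FITRIM, &range) < 0) {
        perror("FITRIM");
        close(fd);
        return 1;
    }
    printf("trimmed %llu bytes\n", (unsigned long long)range.len);
    close(fd);
    return 0;
}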
|
D | alloc.c |
    1792  u32 range;  in __ocfs2_find_path() local
    1819  range = le32_to_cpu(rec->e_cpos) +  in __ocfs2_find_path()
    1821  if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)  in __ocfs2_find_path()
    2326  unsigned int range;  in ocfs2_leftmost_rec_contains() local
    2340  range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);  in ocfs2_leftmost_rec_contains()
    2341  if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)  in ocfs2_leftmost_rec_contains()
    2531  u32 range;  in ocfs2_update_edge_lengths() local
    2547  range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);  in ocfs2_update_edge_lengths()
    2554  rec->e_int_clusters = cpu_to_le32(range);  in ocfs2_update_edge_lengths()
    3866  unsigned int range;  in ocfs2_insert_at_leaf() local
    [all …]
|
D | extent_map.c |
      54  unsigned int range;  in __ocfs2_extent_map_lookup() local
      60  range = emi->ei_cpos + emi->ei_clusters;  in __ocfs2_extent_map_lookup()
      62  if (cpos >= emi->ei_cpos && cpos < range) {  in __ocfs2_extent_map_lookup()
     108  unsigned int range;  in ocfs2_extent_map_trunc() local
     120  range = emi->ei_cpos + emi->ei_clusters;  in ocfs2_extent_map_trunc()
     121  if (range > cpos) {  in ocfs2_extent_map_trunc()
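
Note: the lookup hits above (and the alloc.c hits before them) treat an extent as the half-open cluster range [ei_cpos, ei_cpos + ei_clusters). A small standalone sketch of that containment test, with illustrative names rather than the ocfs2 ones:

#include <stdbool.h>
#include <stdio.h>

/* An extent starting at ext_cpos and ext_clusters long contains cpos
 * iff cpos >= start and cpos < start + length (half-open interval). */
static bool extent_contains(unsigned int ext_cpos, unsigned int ext_clusters,
                            unsigned int cpos)
{
    unsigned int range = ext_cpos + ext_clusters;  /* first cluster past the end */

    return cpos >= ext_cpos && cpos < range;
}

int main(void)
{
    /* extent starting at cluster 100, 8 clusters long: covers 100..107 */
    printf("%d %d %d\n",
           extent_contains(100, 8, 100),   /* 1: first cluster */
           extent_contains(100, 8, 107),   /* 1: last cluster */
           extent_contains(100, 8, 108));  /* 0: one past the end */
    return 0;
}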
|
/fs/xfs/ |
D | xfs_discard.c |
     157  struct fstrim_range range;  in xfs_ioc_trim() local
     175  if (copy_from_user(&range, urange, sizeof(range)))  in xfs_ioc_trim()
     178  range.minlen = max_t(u64, granularity, range.minlen);  in xfs_ioc_trim()
     179  minlen = BTOBB(range.minlen);  in xfs_ioc_trim()
     187  if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||  in xfs_ioc_trim()
     188  range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||  in xfs_ioc_trim()
     189  range.len < mp->m_sb.sb_blocksize)  in xfs_ioc_trim()
     192  start = BTOBB(range.start);  in xfs_ioc_trim()
     193  end = start + BTOBBT(range.len) - 1;  in xfs_ioc_trim()
     214  range.len = XFS_FSB_TO_B(mp, blocks_trimmed);  in xfs_ioc_trim()
    [all …]
|
/fs/ |
D | userfaultfd.c |
     112  struct userfaultfd_wake_range *range = key;  in userfaultfd_wake_function() local
     120  start = range->start;  in userfaultfd_wake_function()
     121  len = range->len;  in userfaultfd_wake_function()
     887  struct userfaultfd_wake_range range = { .len = 0, };  in userfaultfd_release() local
     941  __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);  in userfaultfd_release()
     942  __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);  in userfaultfd_release()
    1235  struct userfaultfd_wake_range *range)  in __wake_userfault() argument
    1241  range);  in __wake_userfault()
    1243  __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);  in __wake_userfault()
    1248  struct userfaultfd_wake_range *range)  in wake_userfault() argument
    [all …]
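
Note: the hits above match waiting faults against a {start, len} wake range, and userfaultfd_release() builds a zero-length range to wake every waiter. A standalone sketch of that matching rule; this is illustrative, not the kernel code itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wake_range {
    uint64_t start;
    uint64_t len;   /* 0 means: match every waiter (the release path) */
};

/* Does the wake range cover this waiter's fault address? */
static bool range_matches(const struct wake_range *range, uint64_t fault_address)
{
    if (range->len == 0)
        return true;
    return fault_address >= range->start &&
           fault_address < range->start + range->len;
}

int main(void)
{
    struct wake_range all = { .len = 0 };
    struct wake_range one_page = { .start = 0x1000, .len = 0x1000 };

    printf("%d %d %d\n",
           range_matches(&all, 0xdeadbeef),    /* 1: zero length wakes everyone */
           range_matches(&one_page, 0x1800),   /* 1: inside [0x1000, 0x2000) */
           range_matches(&one_page, 0x2000));  /* 0: one byte past the end */
    return 0;
}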
|
/fs/jfs/ |
D | ioctl.c |
     126  struct fstrim_range range;  in jfs_ioctl() local
     137  if (copy_from_user(&range, (struct fstrim_range __user *)arg,  in jfs_ioctl()
     138  sizeof(range)))  in jfs_ioctl()
     141  range.minlen = max_t(unsigned int, range.minlen,  in jfs_ioctl()
     144  ret = jfs_ioc_trim(inode, &range);  in jfs_ioctl()
     148  if (copy_to_user((struct fstrim_range __user *)arg, &range,  in jfs_ioctl()
     149  sizeof(range)))  in jfs_ioctl()
|
D | jfs_discard.c |
      65  int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range)  in jfs_ioc_trim() argument
      80  start = range->start >> sb->s_blocksize_bits;  in jfs_ioc_trim()
      81  end = start + (range->len >> sb->s_blocksize_bits) - 1;  in jfs_ioc_trim()
      82  minlen = range->minlen >> sb->s_blocksize_bits;  in jfs_ioc_trim()
      88  range->len < sb->s_blocksize)  in jfs_ioc_trim()
     103  range->len = trimmed << sb->s_blocksize_bits;  in jfs_ioc_trim()
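
Note: jfs_ioc_trim() above converts the byte-based fstrim_range into block units by shifting with s_blocksize_bits, computes an inclusive end block, and shifts the trimmed count back up when reporting. A worked example of that arithmetic, assuming 4 KiB blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const unsigned int blocksize_bits = 12;     /* 4096-byte blocks */
    uint64_t byte_start  = 1 * 1024 * 1024;     /* start 1 MiB into the fs */
    uint64_t byte_len    = 8 * 1024 * 1024;     /* trim 8 MiB */
    uint64_t byte_minlen = 64 * 1024;           /* skip free runs under 64 KiB */

    uint64_t start  = byte_start >> blocksize_bits;              /* 256 */
    uint64_t end    = start + (byte_len >> blocksize_bits) - 1;  /* 2303 (inclusive) */
    uint64_t minlen = byte_minlen >> blocksize_bits;             /* 16 blocks */

    uint64_t trimmed_blocks = 1024;             /* pretend result from the allocator */
    uint64_t reported_bytes = trimmed_blocks << blocksize_bits;  /* 4 MiB reported back */

    printf("blocks %llu..%llu, minlen %llu blocks, reported %llu bytes\n",
           (unsigned long long)start, (unsigned long long)end,
           (unsigned long long)minlen, (unsigned long long)reported_bytes);
    return 0;
}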
|
/fs/nfs/ |
D | pnfs.c |
      60  const struct pnfs_layout_range *range,
     416  struct pnfs_layout_range range = {  in nfs4_layout_refresh_old_stateid() local
     436  err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);  in nfs4_layout_refresh_old_stateid()
     439  *dst_range = range;  in nfs4_layout_refresh_old_stateid()
     460  struct pnfs_layout_range range = {  in pnfs_mark_layout_stateid_invalid() local
     471  pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);  in pnfs_mark_layout_stateid_invalid()
     505  struct pnfs_layout_range range = {  in pnfs_layout_io_set_failed() local
     514  pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);  in pnfs_layout_io_set_failed()
     541  const struct pnfs_layout_range *range,  in pnfs_init_lseg() argument
     550  lseg->pls_range = *range;  in pnfs_init_lseg()
    [all …]
|
/fs/f2fs/ |
D | file.c |
    2322  struct fstrim_range range;  in f2fs_ioc_fitrim() local
    2331  if (copy_from_user(&range, (struct fstrim_range __user *)arg,  in f2fs_ioc_fitrim()
    2332  sizeof(range)))  in f2fs_ioc_fitrim()
    2339  range.minlen = max((unsigned int)range.minlen,  in f2fs_ioc_fitrim()
    2341  ret = f2fs_trim_fs(F2FS_SB(sb), &range);  in f2fs_ioc_fitrim()
    2346  if (copy_to_user((struct fstrim_range __user *)arg, &range,  in f2fs_ioc_fitrim()
    2347  sizeof(range)))  in f2fs_ioc_fitrim()
    2505  static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)  in __f2fs_ioc_gc_range() argument
    2516  end = range->start + range->len;  in __f2fs_ioc_gc_range()
    2517  if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||  in __f2fs_ioc_gc_range()
    [all …]
|
/fs/nfs/blocklayout/ |
D | blocklayout.c |
     674  .mode = lgr->range.iomode,  in bl_alloc_lseg()
     675  .start = lgr->range.offset >> SECTOR_SHIFT,  in bl_alloc_lseg()
     676  .inval = lgr->range.offset >> SECTOR_SHIFT,  in bl_alloc_lseg()
     677  .cowread = lgr->range.offset >> SECTOR_SHIFT,  in bl_alloc_lseg()
     722  if (lgr->range.offset + lgr->range.length !=  in bl_alloc_lseg()
     769  struct pnfs_layout_range *range)  in bl_return_range() argument
     772  sector_t offset = range->offset >> SECTOR_SHIFT, end;  in bl_return_range()
     774  if (range->offset % 8) {  in bl_return_range()
     776  __func__, range->offset);  in bl_return_range()
     780  if (range->length != NFS4_MAX_UINT64) {  in bl_return_range()
    [all …]
|
/fs/nilfs2/ |
D | ioctl.c |
    1071  struct fstrim_range range;  in nilfs_ioctl_trim_fs() local
    1080  if (copy_from_user(&range, argp, sizeof(range)))  in nilfs_ioctl_trim_fs()
    1083  range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity);  in nilfs_ioctl_trim_fs()
    1086  ret = nilfs_sufile_trim_fs(nilfs->ns_sufile, &range);  in nilfs_ioctl_trim_fs()
    1092  if (copy_to_user(argp, &range, sizeof(range)))  in nilfs_ioctl_trim_fs()
    1112  __u64 range[2];  in nilfs_ioctl_set_alloc_range() local
    1121  if (copy_from_user(range, argp, sizeof(__u64[2])))  in nilfs_ioctl_set_alloc_range()
    1125  if (range[1] > i_size_read(inode->i_sb->s_bdev->bd_inode))  in nilfs_ioctl_set_alloc_range()
    1130  minseg = range[0] + segbytes - 1;  in nilfs_ioctl_set_alloc_range()
    1133  if (range[1] < 4096)  in nilfs_ioctl_set_alloc_range()
    [all …]
|
D | sufile.c |
    1059  int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)  in nilfs_sufile_trim_fs() argument
    1074  len = range->len >> nilfs->ns_blocksize_bits;  in nilfs_sufile_trim_fs()
    1075  minlen = range->minlen >> nilfs->ns_blocksize_bits;  in nilfs_sufile_trim_fs()
    1078  if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)  in nilfs_sufile_trim_fs()
    1081  start_block = (range->start + nilfs->ns_blocksize - 1) >>  in nilfs_sufile_trim_fs()
    1191  range->len = ndiscarded << nilfs->ns_blocksize_bits;  in nilfs_sufile_trim_fs()
|
/fs/incfs/ |
D | verity.c |
     160  return range(NULL, 0);  in incfs_get_verity_digest()
     221  return range(ERR_PTR(incfs_hash_alg), 0);  in incfs_calc_verity_digest_from_desc()
     225  return range((u8 *)hash_alg, 0);  in incfs_calc_verity_digest_from_desc()
     227  verity_file_digest = range(kzalloc(hash_alg->digest_size, GFP_KERNEL),  in incfs_calc_verity_digest_from_desc()
     230  return range(ERR_PTR(-ENOMEM), 0);  in incfs_calc_verity_digest_from_desc()
     248  verity_file_digest = range(ERR_PTR(err), 0);  in incfs_calc_verity_digest_from_desc()
     289  return range((u8 *)desc, 0);  in incfs_calc_verity_digest()
     428  struct mem_range hash = range(hash_buf, hash_size);  in incfs_add_signature_record()
     446  hash_tree = incfs_alloc_mtree(range((u8 *)&sig, sizeof(sig)),  in incfs_add_signature_record()
     462  range((u8 *)&sig, sizeof(sig)),  in incfs_add_signature_record()
    [all …]
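
Note: the incfs hits here and in the entries below build byte ranges with a small range() helper. This is a sketch of what such a {data, len} constructor looks like; the real definitions live in incfs's internal headers and may differ in detail:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* A pointer-plus-length pair, so callers can return a buffer (or an
 * error pointer with len == 0) as a single value. */
struct mem_range {
    unsigned char *data;
    size_t len;
};

static inline struct mem_range range(unsigned char *data, size_t len)
{
    return (struct mem_range){ .data = data, .len = len };
}

int main(void)
{
    unsigned char buf[16] = "incfs";
    struct mem_range r = range(buf, strlen((char *)buf));
    struct mem_range empty = range(NULL, 0);   /* "no digest" style return */

    printf("%zu %zu\n", r.len, empty.len);
    return 0;
}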
|
D | data_mgmt.c |
     736  range(buf, INCFS_DATA_FILE_BLOCK_SIZE),  in validate_hash_tree()
     737  range(calculated_digest, digest_size));  in validate_hash_tree()
     774  range(calculated_digest, digest_size));  in validate_hash_tree()
     846  static int copy_one_range(struct incfs_filled_range *range, void __user *buffer,  in copy_one_range() argument
     853  if (copy_to_user(((char __user *)buffer) + *size_out, range,  in copy_one_range()
     854  sizeof(*range)))  in copy_one_range()
     857  *size_out += sizeof(*range);  in copy_one_range()
     868  struct incfs_filled_range range;  in incfs_get_filled_blocks() local
     899  range = (struct incfs_filled_range){  in incfs_get_filled_blocks()
     904  error = copy_one_range(&range, buffer, size, size_out);  in incfs_get_filled_blocks()
    [all …]
|
D | integrity.c |
      79  static bool read_mem_range(u8 **p, u8 *top, struct mem_range *range)  in read_mem_range() argument
      86  range->len = len;  in read_mem_range()
      87  range->data = *p;  in read_mem_range()
|
D | pseudo_files.c |
     219  struct mem_range name = range(file_name, strlen(file_name));  in validate_name()
     282  return range(NULL, 0);  in incfs_copy_signature_info_from_user()
     285  return range(ERR_PTR(-EFAULT), 0);  in incfs_copy_signature_info_from_user()
     289  return range(ERR_PTR(-ENOMEM), 0);  in incfs_copy_signature_info_from_user()
     293  return range(ERR_PTR(-EFAULT), 0);  in incfs_copy_signature_info_from_user()
     296  return range(result, size);  in incfs_copy_signature_info_from_user()
     658  range(attr_value, args.file_attr_len),  in ioctl_create_file()
    1357  range((u8 *)dentry->d_name.name, dentry->d_name.len);  in dir_lookup_pseudo_files()
|
/fs/nfs/flexfilelayout/ |
D | flexfilelayoutdev.c |
     420  const struct pnfs_layout_range *range,  in ff_layout_get_ds_cred() argument
     426  cred = ff_layout_get_mirror_cred(mirror, range->iomode);  in ff_layout_get_ds_cred()
     507  const struct pnfs_layout_range *range,  in do_layout_fetch_ds_ioerr() argument
     520  range->offset,  in do_layout_fetch_ds_ioerr()
     521  pnfs_end_offset(range->offset, range->length)))  in do_layout_fetch_ds_ioerr()
     534  const struct pnfs_layout_range *range,  in ff_layout_fetch_ds_ioerr() argument
     540  ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);  in ff_layout_fetch_ds_ioerr()
     544  do_layout_fetch_ds_ioerr(lo, range, &discard, -1);  in ff_layout_fetch_ds_ioerr()
|
D | flexfilelayout.h |
     202  const struct pnfs_layout_range *range,
     221  const struct pnfs_layout_range *range,
|
/fs/hpfs/ |
D | super.c |
     208  struct fstrim_range range;  in hpfs_ioctl() local
     213  if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))  in hpfs_ioctl()
     215  …r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.…  in hpfs_ioctl()
     218  range.len = (u64)n_trimmed << 9;  in hpfs_ioctl()
     219  if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))  in hpfs_ioctl()
|
/fs/fat/ |
D | file.c |
     129  struct fstrim_range range;  in fat_ioctl_fitrim() local
     140  if (copy_from_user(&range, user_range, sizeof(range)))  in fat_ioctl_fitrim()
     143  range.minlen = max_t(unsigned int, range.minlen,  in fat_ioctl_fitrim()
     146  err = fat_trim_fs(inode, &range);  in fat_ioctl_fitrim()
     150  if (copy_to_user(user_range, &range, sizeof(range)))  in fat_ioctl_fitrim()
|
D | fatent.c |
     761  int fat_trim_fs(struct inode *inode, struct fstrim_range *range)  in fat_trim_fs() argument
     778  ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);  in fat_trim_fs()
     779  ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;  in fat_trim_fs()
     780  minlen = range->minlen >> sbi->cluster_bits;  in fat_trim_fs()
     782  if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)  in fat_trim_fs()
     844  range->len = trimmed << sbi->cluster_bits;  in fat_trim_fs()
|
/fs/fuse/ |
D | dax.c |
    1215  struct fuse_dax_mapping *range, *temp;  in fuse_free_dax_mem_ranges() local
    1218  list_for_each_entry_safe(range, temp, mem_list, list) {  in fuse_free_dax_mem_ranges()
    1219  list_del(&range->list);  in fuse_free_dax_mem_ranges()
    1220  if (!list_empty(&range->busy_list))  in fuse_free_dax_mem_ranges()
    1221  list_del(&range->busy_list);  in fuse_free_dax_mem_ranges()
    1222  kfree(range);  in fuse_free_dax_mem_ranges()
    1240  struct fuse_dax_mapping *range;  in fuse_dax_mem_range_init() local
    1264  range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL);  in fuse_dax_mem_range_init()
    1266  if (!range)  in fuse_dax_mem_range_init()
    1273  range->window_offset = i * FUSE_DAX_SZ;  in fuse_dax_mem_range_init()
    [all …]
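
Note: fuse_free_dax_mem_ranges() above walks the mapping list with list_for_each_entry_safe() because it frees each node as it goes. A userspace sketch of the same save-next-before-free idea, with stand-in types rather than the fuse structures:

#include <stdio.h>
#include <stdlib.h>

struct mapping {
    unsigned long window_offset;
    struct mapping *next;
};

/* Free every node: the next pointer must be read before free(),
 * which is exactly what the kernel's _safe list iterator does. */
static void free_all(struct mapping *head)
{
    struct mapping *cur = head;

    while (cur) {
        struct mapping *next = cur->next;   /* save before freeing */

        free(cur);
        cur = next;
    }
}

int main(void)
{
    struct mapping *head = NULL;

    /* build a few dummy ranges, newest first */
    for (int i = 0; i < 4; i++) {
        struct mapping *m = calloc(1, sizeof(*m));

        if (!m)
            return 1;
        m->window_offset = i * 0x200000UL;  /* e.g. 2 MiB windows */
        m->next = head;
        head = m;
    }
    free_all(head);
    printf("freed all ranges\n");
    return 0;
}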
|
/fs/btrfs/ |
D | ioctl.c |
     510  struct fstrim_range range;  in btrfs_ioctl_fitrim() local
     544  if (copy_from_user(&range, arg, sizeof(range)))  in btrfs_ioctl_fitrim()
     552  if (range.len < fs_info->sb->s_blocksize)  in btrfs_ioctl_fitrim()
     555  range.minlen = max(range.minlen, minlen);  in btrfs_ioctl_fitrim()
     556  ret = btrfs_trim_fs(fs_info, &range);  in btrfs_ioctl_fitrim()
     560  if (copy_to_user(arg, &range, sizeof(range)))  in btrfs_ioctl_fitrim()
    1472  struct btrfs_ioctl_defrag_range_args *range,  in btrfs_defrag_file() argument
    1483  u64 newer_off = range->start;  in btrfs_defrag_file()
    1489  u32 extent_thresh = range->extent_thresh;  in btrfs_defrag_file()
    1494  bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;  in btrfs_defrag_file()
    [all …]
|
D | file.c |
     278  struct btrfs_ioctl_defrag_range_args range;  in __btrfs_run_defrag_inode() local
     298  memset(&range, 0, sizeof(range));  in __btrfs_run_defrag_inode()
     299  range.len = (u64)-1;  in __btrfs_run_defrag_inode()
     300  range.start = defrag->last_offset;  in __btrfs_run_defrag_inode()
     303  num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,  in __btrfs_run_defrag_inode()
     312  defrag->last_offset = range.start;  in __btrfs_run_defrag_inode()
    3023  struct falloc_range *range = NULL;  in add_falloc_range() local
    3038  range = kmalloc(sizeof(*range), GFP_KERNEL);  in add_falloc_range()
    3039  if (!range)  in add_falloc_range()
    3041  range->start = start;  in add_falloc_range()
    [all …]
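
Note: __btrfs_run_defrag_inode() above zeroes a btrfs_ioctl_defrag_range_args, sets len to (u64)-1 for "to the end of the file", and resumes from last_offset. A userspace sketch that fills the same structure for BTRFS_IOC_DEFRAG_RANGE, assuming the UAPI header <linux/btrfs.h> on a reasonably recent kernel; the file path is only an example:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>   /* BTRFS_IOC_DEFRAG_RANGE, struct btrfs_ioctl_defrag_range_args */

int main(void)
{
    struct btrfs_ioctl_defrag_range_args range;
    int fd = open("/mnt/btrfs/file", O_RDWR);   /* example path, opened read-write */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&range, 0, sizeof(range));
    range.start = 0;              /* the kernel-side autodefrag resumes from last_offset */
    range.len = (uint64_t)-1;     /* defragment to the end of the file */
    range.extent_thresh = 0;      /* 0 = use the kernel's default extent threshold */

    if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
        perror("BTRFS_IOC_DEFRAG_RANGE");
    close(fd);
    return 0;
}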
|