
Searched refs:start (Results 1 – 25 of 396) sorted by relevance

/fs/btrfs/
extent-io-tree.c
46 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
54 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
55 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
58 u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
70 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); in __btrfs_debug_check_extent_io_range()
90 u64 start; member
190 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
191 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
247 if (offset < entry->start) in tree_search_for_insert()
295 if (offset < entry->start) in tree_search_prev_next()
[all …]
extent-io-tree.h
87 u64 start; member
106 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
109 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
115 u64 *start, u64 search_end,
119 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
121 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
123 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
127 static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start, in clear_extent_bit() argument
131 return __clear_extent_bit(tree, start, end, bits, cached, in clear_extent_bit()
135 static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, in unlock_extent() argument
[all …]
subpage.c
228 struct page *page, u64 start, u32 len) in btrfs_subpage_assert() argument
232 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && in btrfs_subpage_assert()
239 ASSERT(page_offset(page) <= start && in btrfs_subpage_assert()
240 start + len <= page_offset(page) + PAGE_SIZE); in btrfs_subpage_assert()
244 struct page *page, u64 start, u32 len) in btrfs_subpage_start_reader() argument
249 btrfs_subpage_assert(fs_info, page, start, len); in btrfs_subpage_start_reader()
255 struct page *page, u64 start, u32 len) in btrfs_subpage_end_reader() argument
262 btrfs_subpage_assert(fs_info, page, start, len); in btrfs_subpage_end_reader()
278 static void btrfs_subpage_clamp_range(struct page *page, u64 *start, u32 *len) in btrfs_subpage_clamp_range() argument
280 u64 orig_start = *start; in btrfs_subpage_clamp_range()
[all …]
extent_map.c
85 static u64 range_end(u64 start, u64 len) in range_end() argument
87 if (start + len < start) in range_end()
89 return start + len; in range_end()
98 u64 end = range_end(em->start, em->len); in tree_insert()
105 if (em->start < entry->start) { in tree_insert()
107 } else if (em->start >= extent_map_end(entry)) { in tree_insert()
116 while (parent && em->start >= extent_map_end(entry)) { in tree_insert()
121 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
126 while (parent && em->start < entry->start) { in tree_insert()
131 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
[all …]
free-space-tree.c
41 cache->start); in set_free_space_tree_thresholds()
74 key.objectid = block_group->start; in add_new_free_space_info()
106 key.objectid = block_group->start; in search_free_space_info()
115 block_group->start); in search_free_space_info()
179 static void le_bitmap_set(unsigned long *map, unsigned int start, int len) in le_bitmap_set() argument
181 u8 *p = ((u8 *)map) + BIT_BYTE(start); in le_bitmap_set()
182 const unsigned int size = start + len; in le_bitmap_set()
183 int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE); in le_bitmap_set()
184 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start); in le_bitmap_set()
211 u64 start, end; in convert_free_space_to_bitmaps() local
[all …]
extent_io.c
77 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
186 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
188 unsigned long index = start >> PAGE_SHIFT; in extent_range_clear_dirty_for_io()
201 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
204 unsigned long index = start >> PAGE_SHIFT; in extent_range_redirty_for_io()
228 unsigned long page_ops, u64 start, u64 end) in process_one_page() argument
232 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_page()
233 len = end + 1 - start; in process_one_page()
236 btrfs_page_clamp_set_ordered(fs_info, page, start, len); in process_one_page()
238 btrfs_page_clamp_set_error(fs_info, page, start, len); in process_one_page()
[all …]
extent_io.h
55 #define BITMAP_FIRST_BYTE_MASK(start) \ argument
56 ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
79 u64 start; member
152 int extent_write_locked_range(struct inode *inode, u64 start, u64 end);
159 u64 start, u64 len);
164 u64 start, u64 owner_root, int level);
166 u64 start, unsigned long len);
168 u64 start);
171 u64 start);
202 unsigned long start, unsigned long len);
[all …]
subpage.h
96 struct page *page, u64 start, u32 len);
98 struct page *page, u64 start, u32 len);
101 struct page *page, u64 start, u32 len);
103 struct page *page, u64 start, u32 len);
105 struct page *page, u64 start, u32 len);
107 struct page *page, u64 start, u32 len);
125 struct page *page, u64 start, u32 len); \
127 struct page *page, u64 start, u32 len); \
129 struct page *page, u64 start, u32 len); \
131 struct page *page, u64 start, u32 len); \
[all …]
inode.c
119 u64 start, u64 end, int *page_started,
122 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
429 drop_args.start = 0; in cow_file_range_inline()
476 u64 start; member
488 u64 start; member
503 u64 start, u64 ram_size, in add_async_extent() argument
513 async_extent->start = start; in add_async_extent()
527 static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, in inode_need_compress() argument
565 if (!PAGE_ALIGNED(start) || in inode_need_compress()
582 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); in inode_need_compress()
[all …]
delalloc-space.c
130 struct extent_changeset **reserved, u64 start, in btrfs_check_data_free_space() argument
138 len = round_up(start + len, fs_info->sectorsize) - in btrfs_check_data_free_space()
139 round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
140 start = round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
152 ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); in btrfs_check_data_free_space()
190 struct extent_changeset *reserved, u64 start, u64 len) in btrfs_free_reserved_data_space() argument
195 len = round_up(start + len, fs_info->sectorsize) - in btrfs_free_reserved_data_space()
196 round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space()
197 start = round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space()
200 btrfs_qgroup_free_data(inode, reserved, start, len, NULL); in btrfs_free_reserved_data_space()
[all …]
file.c
204 u64 search_start = args->start; in btrfs_drop_extents()
209 u64 last_end = args->start; in btrfs_drop_extents()
235 btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false); in btrfs_drop_extents()
237 if (args->start >= inode->disk_i_size && !args->replace_extent) in btrfs_drop_extents()
247 if (ret > 0 && path->slots[0] > 0 && search_start == args->start) { in btrfs_drop_extents()
322 search_start = max(key.offset, args->start); in btrfs_drop_extents()
333 if (args->start > key.offset && args->end < extent_end) { in btrfs_drop_extents()
341 new_key.offset = args->start; in btrfs_drop_extents()
355 args->start - key.offset); in btrfs_drop_extents()
360 extent_offset += args->start - key.offset; in btrfs_drop_extents()
[all …]
/fs/btrfs/tests/
extent-io-tests.c
20 static noinline int process_page_range(struct inode *inode, u64 start, u64 end, in process_page_range() argument
25 unsigned long index = start >> PAGE_SHIFT; in process_page_range()
53 start, end, ret); in process_page_range()
102 test_msg(" start=%llu len=%llu flags=%s", state->start, in dump_extent_io_tree()
103 state->end + 1 - state->start, flags_str); in dump_extent_io_tree()
118 u64 start, end, test_start; in test_find_delalloc() local
163 start = 0; in test_find_delalloc()
164 end = start + PAGE_SIZE - 1; in test_find_delalloc()
165 found = find_lock_delalloc_range(inode, locked_page, &start, in test_find_delalloc()
171 if (start != 0 || end != (sectorsize - 1)) { in test_find_delalloc()
[all …]
inode-tests.c
15 static void insert_extent(struct btrfs_root *root, u64 start, u64 len, in insert_extent() argument
34 key.offset = start; in insert_extent()
288 if (em->start != 0 || em->len != 5) { in test_btrfs_get_extent()
291 em->start, em->len); in test_btrfs_get_extent()
298 offset = em->start + em->len; in test_btrfs_get_extent()
311 if (em->start != offset || em->len != (sectorsize - 5)) { in test_btrfs_get_extent()
314 offset, em->start, em->len); in test_btrfs_get_extent()
326 offset = em->start + em->len; in test_btrfs_get_extent()
338 if (em->start != offset || em->len != 4) { in test_btrfs_get_extent()
341 offset, em->start, em->len); in test_btrfs_get_extent()
[all …]
extent-map-tests.c
28 em->start, em->len, em->block_start, in free_extent_map_tree()
59 u64 start = 0; in test_case_1() local
70 em->start = 0; in test_case_1()
91 em->start = SZ_16K; in test_case_1()
112 em->start = start; in test_case_1()
114 em->block_start = start; in test_case_1()
117 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); in test_case_1()
120 test_err("case1 [%llu %llu]: ret %d", start, start + len, ret); in test_case_1()
124 (em->start != 0 || extent_map_end(em) != SZ_16K || in test_case_1()
128 start, start + len, ret, em->start, em->len, in test_case_1()
[all …]
free-space-tree-tests.c
15 u64 start; member
51 end = cache->start + cache->length; in __check_free_space_extents()
64 extent_start != extents[i].start || in __check_free_space_extents()
75 extent_start != extents[i].start || in __check_free_space_extents()
90 key.objectid != extents[i].start || in __check_free_space_extents()
156 {cache->start, cache->length}, in test_empty_block_group()
173 cache->start, in test_remove_all()
191 {cache->start + alignment, cache->length - alignment}, in test_remove_beginning()
196 cache->start, alignment); in test_remove_beginning()
214 {cache->start, cache->length - alignment}, in test_remove_end()
[all …]
/fs/pstore/
ram_core.c
35 atomic_t start; member
49 return atomic_read(&prz->buffer->start); in buffer_start()
62 old = atomic_read(&prz->buffer->start); in buffer_start_add()
66 atomic_set(&prz->buffer->start, new); in buffer_start_add()
123 unsigned int start, unsigned int count) in persistent_ram_update_ecc() argument
136 block = buffer->data + (start & ~(ecc_block_size - 1)); in persistent_ram_update_ecc()
137 par = prz->par_buffer + (start / ecc_block_size) * ecc_size; in persistent_ram_update_ecc()
145 } while (block < buffer->data + start + count); in persistent_ram_update_ecc()
275 const void *s, unsigned int start, unsigned int count) in persistent_ram_update() argument
278 memcpy_toio(buffer->data + start, s, count); in persistent_ram_update()
[all …]
/fs/xfs/scrub/
bitmap.c
23 uint64_t start, in xbitmap_set() argument
33 bmr->start = start; in xbitmap_set()
75 if (ap->start > bp->start) in xbitmap_range_cmp()
77 if (ap->start < bp->start) in xbitmap_range_cmp()
137 while (sub_br->start + sub_br->len <= br->start) { in xbitmap_disunion()
142 if (sub_br->start >= br->start + br->len) { in xbitmap_disunion()
148 sub_start = sub_br->start; in xbitmap_disunion()
150 if (sub_br->start < br->start) { in xbitmap_disunion()
151 sub_len -= br->start - sub_br->start; in xbitmap_disunion()
152 sub_start = br->start; in xbitmap_disunion()
[all …]
/fs/afs/
write.c
17 static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
189 loff_t start, loff_t len) in afs_kill_pages() argument
193 pgoff_t index = start / PAGE_SIZE; in afs_kill_pages()
194 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in afs_kill_pages()
197 vnode->fid.vid, vnode->fid.vnode, len, start); in afs_kill_pages()
227 loff_t start, loff_t len) in afs_redirty_pages() argument
231 pgoff_t index = start / PAGE_SIZE; in afs_redirty_pages()
232 pgoff_t last = (start + len - 1) / PAGE_SIZE, next; in afs_redirty_pages()
235 vnode->fid.vid, vnode->fid.vnode, len, start); in afs_redirty_pages()
238 _debug("redirty %llx @%llx", len, start); in afs_redirty_pages()
[all …]
/fs/xfs/libxfs/
xfs_rtbitmap.c
97 xfs_rtblock_t start, /* starting block to look at */ in xfs_rtfind_back() argument
118 block = XFS_BITTOBLOCK(mp, start); in xfs_rtfind_back()
127 word = XFS_BITTOWORD(mp, start); in xfs_rtfind_back()
129 bit = (int)(start & (XFS_NBWORD - 1)); in xfs_rtfind_back()
130 len = start - limit + 1; in xfs_rtfind_back()
158 *rtblock = start - i + 1; in xfs_rtfind_back()
204 *rtblock = start - i + 1; in xfs_rtfind_back()
251 *rtblock = start - i + 1; in xfs_rtfind_back()
260 *rtblock = start - i + 1; in xfs_rtfind_back()
272 xfs_rtblock_t start, /* starting block to look at */ in xfs_rtfind_forw() argument
[all …]
/fs/
userfaultfd.c
86 unsigned long start; member
99 unsigned long start; member
132 unsigned long start, len; in userfaultfd_wake_function() local
137 start = range->start; in userfaultfd_wake_function()
139 if (len && (start > uwq->msg.arg.pagefault.address || in userfaultfd_wake_function()
140 start + len <= uwq->msg.arg.pagefault.address)) in userfaultfd_wake_function()
779 unsigned long start, unsigned long end) in userfaultfd_remove() argument
796 ewq.msg.arg.remove.start = start; in userfaultfd_remove()
805 unsigned long start, unsigned long end) in has_unmap_ctx() argument
810 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && in has_unmap_ctx()
[all …]
/fs/squashfs/
xattr.c
32 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) in squashfs_listxattr() local
50 err = squashfs_read_metadata(sb, &entry, &start, &offset, in squashfs_listxattr()
69 err = squashfs_read_metadata(sb, buffer, &start, in squashfs_listxattr()
80 err = squashfs_read_metadata(sb, NULL, &start, in squashfs_listxattr()
88 err = squashfs_read_metadata(sb, &val, &start, &offset, in squashfs_listxattr()
93 err = squashfs_read_metadata(sb, NULL, &start, &offset, in squashfs_listxattr()
110 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) in squashfs_xattr_get() local
127 err = squashfs_read_metadata(sb, &entry, &start, &offset, in squashfs_xattr_get()
137 err = squashfs_read_metadata(sb, target, &start, in squashfs_xattr_get()
140 err = squashfs_read_metadata(sb, NULL, &start, in squashfs_xattr_get()
[all …]
xattr_id.c
65 u64 start, end; in squashfs_read_xattr_id_table() local
89 start = table_start + sizeof(*id_table); in squashfs_read_xattr_id_table()
92 if (len != (end - start)) in squashfs_read_xattr_id_table()
95 table = squashfs_read_table(sb, start, len); in squashfs_read_xattr_id_table()
109 start = le64_to_cpu(table[n]); in squashfs_read_xattr_id_table()
112 if (start >= end || (end - start) > in squashfs_read_xattr_id_table()
119 start = le64_to_cpu(table[indexes - 1]); in squashfs_read_xattr_id_table()
120 if (start >= table_start || (table_start - start) > in squashfs_read_xattr_id_table()
/fs/proc/
vmcore.c
242 static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size) in vmcoredd_copy_dumps() argument
252 if (start < offset + dump->size) { in vmcoredd_copy_dumps()
253 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_copy_dumps()
254 buf = dump->buf + start - offset; in vmcoredd_copy_dumps()
261 start += tsz; in vmcoredd_copy_dumps()
277 u64 start, size_t size) in vmcoredd_mmap_dumps() argument
287 if (start < offset + dump->size) { in vmcoredd_mmap_dumps()
288 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_mmap_dumps()
289 buf = dump->buf + start - offset; in vmcoredd_mmap_dumps()
297 start += tsz; in vmcoredd_mmap_dumps()
[all …]
/fs/hfs/
bitmap.c
32 u32 mask, start, len, n; in hfs_find_set_zero_bits() local
70 start = (curr - bitmap) * 32 + i; in hfs_find_set_zero_bits()
71 if (start >= size) in hfs_find_set_zero_bits()
72 return start; in hfs_find_set_zero_bits()
74 len = min(size - start, len); in hfs_find_set_zero_bits()
108 *max = (curr - bitmap) * 32 + i - start; in hfs_find_set_zero_bits()
109 return start; in hfs_find_set_zero_bits()
193 int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count) in hfs_clear_vbm_bits() argument
203 hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count); in hfs_clear_vbm_bits()
205 if ((start + count) > HFS_SB(sb)->fs_ablocks) in hfs_clear_vbm_bits()
[all …]
/fs/hfsplus/
extents.c
312 u32 count, start; in hfsplus_add_extent() local
319 start = be32_to_cpu(extent->start_block); in hfsplus_add_extent()
320 if (alloc_block != start + count) { in hfsplus_add_extent()
341 u32 count, start; in hfsplus_free_extents() local
361 start = be32_to_cpu(extent->start_block); in hfsplus_free_extents()
363 err = hfsplus_block_free(sb, start, count); in hfsplus_free_extents()
367 start, count); in hfsplus_free_extents()
374 err = hfsplus_block_free(sb, start + count, block_nr); in hfsplus_free_extents()
378 start, count); in hfsplus_free_extents()
401 u32 total_blocks, blocks, start; in hfsplus_free_fork() local
[all …]
