Searched refs:start (Results 1 – 25 of 350) sorted by relevance


/fs/btrfs/
extent_io.h
87 #define BITMAP_FIRST_BYTE_MASK(start) \ argument
88 ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
110 struct page *page, u64 start, u64 end,
140 u64 start; member
159 u64 start; member
259 u64 start, u64 len,
268 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
271 static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in lock_extent() argument
273 return lock_extent_bits(tree, start, end, NULL); in lock_extent()
276 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
[all …]
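
The BITMAP_FIRST_BYTE_MASK() macro quoted above builds the mask for the leading partial byte of a bit range: every bit of one byte from position (start % 8) upward. A minimal standalone sketch, assuming BYTE_MASK is 0xff and BITS_PER_BYTE is 8 (neither definition appears in the excerpt):

#include <stdio.h>

/* Assumed values; neither definition appears in the excerpt. */
#define BITS_PER_BYTE 8
#define BYTE_MASK 0xffU

/* Mask covering bits (start % 8) .. 7 of the byte containing bit `start`. */
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)

int main(void)
{
	/* start = 11 sits at bit 3 of its byte, so the mask is 0b11111000. */
	printf("mask for start=11: 0x%02x\n", BITMAP_FIRST_BYTE_MASK(11));
	/* prints: mask for start=11: 0xf8 */
	return 0;
}
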
extent_io.c
71 state->start, state->end, state->state, in btrfs_leak_debug_check()
81 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags); in btrfs_leak_debug_check()
87 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
88 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
90 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
102 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); in __btrfs_debug_check_extent_io_range()
113 u64 start; member
142 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
143 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
348 if (offset < entry->start) in tree_insert()
[all …]
extent_map.c
87 static u64 range_end(u64 start, u64 len) in range_end() argument
89 if (start + len < start) in range_end()
91 return start + len; in range_end()
100 u64 end = range_end(em->start, em->len); in tree_insert()
107 if (em->start < entry->start) { in tree_insert()
109 } else if (em->start >= extent_map_end(entry)) { in tree_insert()
118 while (parent && em->start >= extent_map_end(entry)) { in tree_insert()
123 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
128 while (parent && em->start < entry->start) { in tree_insert()
133 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
[all …]
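
range_end() above (like extent_map_end() further down) guards the unsigned start + len addition against wraparound before using it as an end offset. The excerpt cuts off the overflow branch, so the clamp to the maximum u64 below is an assumption; the shape of the check itself is taken from the quoted lines:

#include <stdint.h>
#include <stdio.h>

/* Overflow-safe end-of-range computation, as suggested by range_end() above. */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)	/* unsigned addition wrapped around */
		return (uint64_t)-1;	/* assumed clamp value */
	return start + len;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)range_end(10, 20));		/* 30 */
	printf("%llu\n", (unsigned long long)range_end(UINT64_MAX, 16));	/* clamped */
	return 0;
}
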
delalloc-space.c
139 struct extent_changeset **reserved, u64 start, u64 len) in btrfs_check_data_free_space() argument
145 len = round_up(start + len, fs_info->sectorsize) - in btrfs_check_data_free_space()
146 round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
147 start = round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
154 ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); in btrfs_check_data_free_space()
156 btrfs_free_reserved_data_space_noquota(inode, start, len); in btrfs_check_data_free_space()
170 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, in btrfs_free_reserved_data_space_noquota() argument
177 len = round_up(start + len, fs_info->sectorsize) - in btrfs_free_reserved_data_space_noquota()
178 round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space_noquota()
179 start = round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space_noquota()
[all …]
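
btrfs_check_data_free_space() and btrfs_free_reserved_data_space_noquota() both widen the byte range [start, start + len) to whole sectors before reserving or releasing space. A small sketch of that rounding, with hypothetical round_up()/round_down() helpers standing in for the kernel macros and an assumed 4 KiB sector size:

#include <stdint.h>
#include <stdio.h>

/* Power-of-two rounding helpers mirroring the kernel's round_up()/round_down(). */
#define round_down(x, a)	((x) & ~((uint64_t)(a) - 1))
#define round_up(x, a)		round_down((x) + (a) - 1, a)

int main(void)
{
	/* Hypothetical 4 KiB sector size; the kernel code takes it from fs_info. */
	uint64_t sectorsize = 4096, start = 5000, len = 100;

	/* Same order as the excerpt: widen len first, then align start. */
	len = round_up(start + len, sectorsize) - round_down(start, sectorsize);
	start = round_down(start, sectorsize);

	printf("aligned range: [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)(start + len));
	/* prints: aligned range: [4096, 8192) */
	return 0;
}
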
free-space-tree.c
161 static void le_bitmap_set(unsigned long *map, unsigned int start, int len) in le_bitmap_set() argument
163 u8 *p = ((u8 *)map) + BIT_BYTE(start); in le_bitmap_set()
164 const unsigned int size = start + len; in le_bitmap_set()
165 int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE); in le_bitmap_set()
166 u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start); in le_bitmap_set()
193 u64 start, end; in convert_free_space_to_bitmaps() local
208 start = block_group->key.objectid; in convert_free_space_to_bitmaps()
234 ASSERT(found_key.objectid >= start); in convert_free_space_to_bitmaps()
238 first = div_u64(found_key.objectid - start, in convert_free_space_to_bitmaps()
240 last = div_u64(found_key.objectid + found_key.offset - start, in convert_free_space_to_bitmaps()
[all …]
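
le_bitmap_set() above sets a run of len bits starting at bit start, working byte by byte and using BITMAP_FIRST_BYTE_MASK() for the leading partial byte. Only the function's first few lines appear in the excerpt, so the loop and the trailing-byte mask below are reconstructed as assumptions:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr)  ((nr) / BITS_PER_BYTE)
/* Masks for the leading and trailing partial bytes of a bit range (assumed). */
#define BITMAP_FIRST_BYTE_MASK(start) \
	((0xffU << ((start) & (BITS_PER_BYTE - 1))) & 0xffU)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(0xffU >> (-(nbits) & (BITS_PER_BYTE - 1)))

static void le_bitmap_set(uint8_t *map, unsigned int start, int len)
{
	uint8_t *p = map + BIT_BYTE(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
	uint8_t mask_to_set = BITMAP_FIRST_BYTE_MASK(start);

	/* Leading partial byte and all whole bytes first ... */
	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = 0xff;
		p++;
	}
	/* ... then the trailing partial byte, if the range ends mid-byte. */
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

int main(void)
{
	uint8_t bitmap[4];

	memset(bitmap, 0, sizeof(bitmap));
	le_bitmap_set(bitmap, 5, 13);	/* set bits 5..17 */
	printf("%02x %02x %02x %02x\n",
	       bitmap[0], bitmap[1], bitmap[2], bitmap[3]);
	/* prints: e0 ff 03 00 */
	return 0;
}
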
file.c
310 range.start = defrag->last_offset; in __btrfs_run_defrag_inode()
322 defrag->last_offset = range.start; in __btrfs_run_defrag_inode()
467 const u64 start, in btrfs_find_new_delalloc_bytes() argument
471 u64 search_start = start; in btrfs_find_new_delalloc_bytes()
472 const u64 end = start + len - 1; in btrfs_find_new_delalloc_bytes()
489 if (em->start < search_start) in btrfs_find_new_delalloc_bytes()
490 em_len -= search_start - em->start; in btrfs_find_new_delalloc_bytes()
587 void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, in btrfs_drop_extent_cache() argument
594 u64 len = end - start + 1; in btrfs_drop_extent_cache()
602 WARN_ON(end < start); in btrfs_drop_extent_cache()
[all …]
inode.c
85 u64 start, u64 end, int *page_started,
87 static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
169 u64 start, size_t size, size_t compressed_size, in insert_inline_extent() argument
195 key.offset = start; in insert_inline_extent()
235 start >> PAGE_SHIFT); in insert_inline_extent()
238 offset = offset_in_page(start); in insert_inline_extent()
268 static noinline int cow_file_range_inline(struct inode *inode, u64 start, in cow_file_range_inline() argument
278 u64 inline_len = actual_end - start; in cow_file_range_inline()
289 if (start > 0 || in cow_file_range_inline()
318 start, aligned_end, NULL, in cow_file_range_inline()
[all …]
extent_map.h
34 u64 start; member
72 if (em->start + em->len < em->start) in extent_map_end()
74 return em->start + em->len; in extent_map_end()
86 u64 start, u64 len);
99 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
102 u64 start, u64 len);
105 struct extent_map **em_in, u64 start, u64 len);
/fs/btrfs/tests/
extent-io-tests.c
19 static noinline int process_page_range(struct inode *inode, u64 start, u64 end, in process_page_range() argument
24 unsigned long index = start >> PAGE_SHIFT; in process_page_range()
52 start, end, nr_pages, ret); in process_page_range()
69 u64 start, end, test_start; in test_find_delalloc() local
114 start = 0; in test_find_delalloc()
116 found = find_lock_delalloc_range(inode, locked_page, &start, in test_find_delalloc()
122 if (start != 0 || end != (sectorsize - 1)) { in test_find_delalloc()
124 sectorsize - 1, start, end); in test_find_delalloc()
127 unlock_extent(tmp, start, end); in test_find_delalloc()
145 start = test_start; in test_find_delalloc()
[all …]
extent-map-tests.c
24 em->start, em->len, em->block_start, in free_extent_map_tree()
54 u64 start = 0; in test_case_1() local
65 em->start = 0; in test_case_1()
86 em->start = SZ_16K; in test_case_1()
107 em->start = start; in test_case_1()
109 em->block_start = start; in test_case_1()
112 ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len); in test_case_1()
115 test_err("case1 [%llu %llu]: ret %d", start, start + len, ret); in test_case_1()
119 (em->start != 0 || extent_map_end(em) != SZ_16K || in test_case_1()
123 start, start + len, ret, em->start, em->len, in test_case_1()
[all …]
inode-tests.c
15 static void insert_extent(struct btrfs_root *root, u64 start, u64 len, in insert_extent() argument
34 key.offset = start; in insert_extent()
295 if (em->start != 0 || em->len != 5) { in test_btrfs_get_extent()
298 em->start, em->len); in test_btrfs_get_extent()
305 offset = em->start + em->len; in test_btrfs_get_extent()
318 if (em->start != offset || em->len != (sectorsize - 5)) { in test_btrfs_get_extent()
321 offset, em->start, em->len); in test_btrfs_get_extent()
333 offset = em->start + em->len; in test_btrfs_get_extent()
345 if (em->start != offset || em->len != 4) { in test_btrfs_get_extent()
348 offset, em->start, em->len); in test_btrfs_get_extent()
[all …]
/fs/xfs/scrub/
bitmap.c
23 uint64_t start, in xfs_bitmap_set() argument
33 bmr->start = start; in xfs_bitmap_set()
75 if (ap->start > bp->start) in xfs_bitmap_range_cmp()
77 if (ap->start < bp->start) in xfs_bitmap_range_cmp()
137 while (sub_br->start + sub_br->len <= br->start) { in xfs_bitmap_disunion()
142 if (sub_br->start >= br->start + br->len) { in xfs_bitmap_disunion()
148 sub_start = sub_br->start; in xfs_bitmap_disunion()
150 if (sub_br->start < br->start) { in xfs_bitmap_disunion()
151 sub_len -= br->start - sub_br->start; in xfs_bitmap_disunion()
152 sub_start = br->start; in xfs_bitmap_disunion()
[all …]
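
xfs_bitmap_disunion() above walks two sorted lists of start/length ranges, skipping subtrahend ranges that end before the current range, stopping at ones that begin after it, and trimming the ones that overlap. A minimal sketch of just the overlap computation that logic relies on (the struct and helper names here are illustrative, not XFS API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A start/length range, the same shape the excerpt's bitmap ranges use. */
struct range {
	uint64_t start;
	uint64_t len;
};

/* Store the intersection of a and b in out; return false if they are disjoint. */
static bool range_overlap(const struct range *a, const struct range *b,
			  struct range *out)
{
	uint64_t lo = a->start > b->start ? a->start : b->start;
	uint64_t a_end = a->start + a->len;
	uint64_t b_end = b->start + b->len;
	uint64_t hi = a_end < b_end ? a_end : b_end;

	if (lo >= hi)
		return false;
	out->start = lo;
	out->len = hi - lo;
	return true;
}

int main(void)
{
	struct range br = { .start = 100, .len = 50 };	/* [100, 150) */
	struct range sub = { .start = 90, .len = 30 };	/* [90, 120)  */
	struct range hit;

	if (range_overlap(&br, &sub, &hit))
		printf("overlap: [%llu, %llu)\n",
		       (unsigned long long)hit.start,
		       (unsigned long long)(hit.start + hit.len));
	/* prints: overlap: [100, 120) */
	return 0;
}
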
/fs/pstore/
ram_core.c
35 atomic_t start; member
49 return atomic_read(&prz->buffer->start); in buffer_start()
62 old = atomic_read(&prz->buffer->start); in buffer_start_add()
66 atomic_set(&prz->buffer->start, new); in buffer_start_add()
123 unsigned int start, unsigned int count) in persistent_ram_update_ecc() argument
136 block = buffer->data + (start & ~(ecc_block_size - 1)); in persistent_ram_update_ecc()
137 par = prz->par_buffer + (start / ecc_block_size) * ecc_size; in persistent_ram_update_ecc()
145 } while (block < buffer->data + start + count); in persistent_ram_update_ecc()
275 const void *s, unsigned int start, unsigned int count) in persistent_ram_update() argument
278 memcpy_toio(buffer->data + start, s, count); in persistent_ram_update()
[all …]
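
The buffer_start_add() lines above advance the persistent ring buffer's start position between an atomic_read() and an atomic_set(). A minimal sketch of that advance with wraparound: plain ints stand in for atomic_t, and the step between the two atomic calls is reconstructed as an assumption, since those lines are not part of the excerpt.

#include <stdio.h>
#include <stddef.h>

struct prz_buffer {
	int start;		/* read position inside the ring buffer */
	size_t buffer_size;
};

static int buffer_start_add(struct prz_buffer *buf, size_t a)
{
	int old = buf->start;			/* atomic_read() in the kernel */
	int new = old + (int)a;

	while (new >= (int)buf->buffer_size)	/* wrap instead of running off the end */
		new -= (int)buf->buffer_size;

	buf->start = new;			/* atomic_set() in the kernel */
	return old;
}

int main(void)
{
	struct prz_buffer buf = { .start = 1000, .buffer_size = 1024 };
	int old = buffer_start_add(&buf, 100);

	printf("old start = %d, new start = %d\n", old, buf.start);
	/* prints: old start = 1000, new start = 76 */
	return 0;
}
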
/fs/xfs/libxfs/
xfs_rtbitmap.c
97 xfs_rtblock_t start, /* starting block to look at */ in xfs_rtfind_back() argument
118 block = XFS_BITTOBLOCK(mp, start); in xfs_rtfind_back()
127 word = XFS_BITTOWORD(mp, start); in xfs_rtfind_back()
129 bit = (int)(start & (XFS_NBWORD - 1)); in xfs_rtfind_back()
130 len = start - limit + 1; in xfs_rtfind_back()
158 *rtblock = start - i + 1; in xfs_rtfind_back()
204 *rtblock = start - i + 1; in xfs_rtfind_back()
251 *rtblock = start - i + 1; in xfs_rtfind_back()
260 *rtblock = start - i + 1; in xfs_rtfind_back()
272 xfs_rtblock_t start, /* starting block to look at */ in xfs_rtfind_forw() argument
[all …]
/fs/squashfs/
xattr.c
32 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) in squashfs_listxattr() local
50 err = squashfs_read_metadata(sb, &entry, &start, &offset, in squashfs_listxattr()
69 err = squashfs_read_metadata(sb, buffer, &start, in squashfs_listxattr()
80 err = squashfs_read_metadata(sb, NULL, &start, in squashfs_listxattr()
88 err = squashfs_read_metadata(sb, &val, &start, &offset, in squashfs_listxattr()
93 err = squashfs_read_metadata(sb, NULL, &start, &offset, in squashfs_listxattr()
110 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr) in squashfs_xattr_get() local
127 err = squashfs_read_metadata(sb, &entry, &start, &offset, in squashfs_xattr_get()
137 err = squashfs_read_metadata(sb, target, &start, in squashfs_xattr_get()
140 err = squashfs_read_metadata(sb, NULL, &start, in squashfs_xattr_get()
[all …]
/fs/
userfaultfd.c
89 unsigned long start; member
102 unsigned long start; member
112 unsigned long start, len; in userfaultfd_wake_function() local
117 start = range->start; in userfaultfd_wake_function()
119 if (len && (start > uwq->msg.arg.pagefault.address || in userfaultfd_wake_function()
120 start + len <= uwq->msg.arg.pagefault.address)) in userfaultfd_wake_function()
791 unsigned long start, unsigned long end) in userfaultfd_remove() argument
808 ewq.msg.arg.remove.start = start; in userfaultfd_remove()
817 unsigned long start, unsigned long end) in has_unmap_ctx() argument
822 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && in has_unmap_ctx()
[all …]
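
userfaultfd_wake_function() above only wakes a waiter whose faulting address lies inside the half-open range [start, start + len), with len == 0 meaning "match everything". Restated as a standalone predicate (the helper name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* True when addr falls in [start, start + len); len == 0 matches everything. */
static bool addr_in_wake_range(unsigned long addr,
			       unsigned long start, unsigned long len)
{
	if (!len)
		return true;
	return addr >= start && addr < start + len;
}

int main(void)
{
	printf("%d\n", addr_in_wake_range(0x1fff, 0x1000, 0x1000));	/* 1 */
	printf("%d\n", addr_in_wake_range(0x2000, 0x1000, 0x1000));	/* 0 */
	printf("%d\n", addr_in_wake_range(0x2000, 0x1000, 0));		/* 1 */
	return 0;
}
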
/fs/proc/
vmcore.c
220 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf) in vmcoredd_copy_dumps() argument
230 if (start < offset + dump->size) { in vmcoredd_copy_dumps()
231 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_copy_dumps()
232 buf = dump->buf + start - offset; in vmcoredd_copy_dumps()
239 start += tsz; in vmcoredd_copy_dumps()
256 u64 start, size_t size) in vmcoredd_mmap_dumps() argument
266 if (start < offset + dump->size) { in vmcoredd_mmap_dumps()
267 tsz = min(offset + (u64)dump->size - start, (u64)size); in vmcoredd_mmap_dumps()
268 buf = dump->buf + start - offset; in vmcoredd_mmap_dumps()
275 start += tsz; in vmcoredd_mmap_dumps()
[all …]
kcore.c
149 unsigned long start, end; in get_sparsemem_vmemmap_info() local
153 start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK; in get_sparsemem_vmemmap_info()
160 if (start < tmp->addr + tmp->size) in get_sparsemem_vmemmap_info()
164 if (start < end) { in get_sparsemem_vmemmap_info()
168 vmm->addr = start; in get_sparsemem_vmemmap_info()
169 vmm->size = end - start; in get_sparsemem_vmemmap_info()
322 unsigned long start; in read_kcore() local
467 start = kc_offset_to_vaddr(*fpos - data_offset); in read_kcore()
468 if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) in read_kcore()
477 if (!m || start < m->addr || start >= m->addr + m->size) { in read_kcore()
[all …]
/fs/hfs/
bitmap.c
32 u32 mask, start, len, n; in hfs_find_set_zero_bits() local
70 start = (curr - bitmap) * 32 + i; in hfs_find_set_zero_bits()
71 if (start >= size) in hfs_find_set_zero_bits()
72 return start; in hfs_find_set_zero_bits()
74 len = min(size - start, len); in hfs_find_set_zero_bits()
108 *max = (curr - bitmap) * 32 + i - start; in hfs_find_set_zero_bits()
109 return start; in hfs_find_set_zero_bits()
193 int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count) in hfs_clear_vbm_bits() argument
203 hfs_dbg(BITMAP, "clear_bits: %u,%u\n", start, count); in hfs_clear_vbm_bits()
205 if ((start + count) > HFS_SB(sb)->fs_ablocks) in hfs_clear_vbm_bits()
[all …]
extent.c
223 u16 count, start; in hfs_add_extent() local
230 start = be16_to_cpu(extent->block); in hfs_add_extent()
231 if (alloc_block != start + count) { in hfs_add_extent()
251 u16 count, start; in hfs_free_extents() local
267 start = be16_to_cpu(extent->block); in hfs_free_extents()
269 hfs_clear_vbm_bits(sb, start, count); in hfs_free_extents()
275 hfs_clear_vbm_bits(sb, start + count, block_nr); in hfs_free_extents()
290 u32 total_blocks, blocks, start; in hfs_free_fork() local
323 start = be16_to_cpu(fd.key->ext.FABN); in hfs_free_fork()
324 hfs_free_extents(sb, extent, total_blocks - start, total_blocks); in hfs_free_fork()
[all …]
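
hfs_add_extent() above can only grow the last extent record when the newly allocated block is exactly contiguous with it, i.e. alloc_block == start + count; otherwise a new extent record is needed. A tiny sketch of that contiguity test (the struct and helper are illustrative, not the on-disk HFS extent layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative in-memory extent record: first block plus block count. */
struct hfs_ext {
	uint16_t block;
	uint16_t count;
};

/* The new block can be folded into this extent only if it is the block
 * immediately following it, i.e. alloc_block == block + count. */
static bool can_extend(const struct hfs_ext *ext, uint16_t alloc_block)
{
	return alloc_block == (uint16_t)(ext->block + ext->count);
}

int main(void)
{
	struct hfs_ext ext = { .block = 120, .count = 8 };

	printf("%d\n", can_extend(&ext, 128));	/* 1: contiguous, just bump count */
	printf("%d\n", can_extend(&ext, 130));	/* 0: a new extent record is needed */
	return 0;
}
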
/fs/adfs/
map.c
72 unsigned int start = dm->dm_startbit; in lookup_zone() local
77 frag = GET_FRAG_ID(map, start, idmask); in lookup_zone()
78 mapptr = start + idlen; in lookup_zone()
99 start = mapptr; in lookup_zone()
105 frag, start, mapptr); in lookup_zone()
110 int length = mapptr - start; in lookup_zone()
116 return start + *offset; in lookup_zone()
133 unsigned int start = 8, mapptr; in scan_free_map() local
140 frag = GET_FRAG_ID(map, start, idmask); in scan_free_map()
150 start += frag; in scan_free_map()
[all …]
/fs/hfsplus/
extents.c
312 u32 count, start; in hfsplus_add_extent() local
319 start = be32_to_cpu(extent->start_block); in hfsplus_add_extent()
320 if (alloc_block != start + count) { in hfsplus_add_extent()
341 u32 count, start; in hfsplus_free_extents() local
361 start = be32_to_cpu(extent->start_block); in hfsplus_free_extents()
363 err = hfsplus_block_free(sb, start, count); in hfsplus_free_extents()
367 start, count); in hfsplus_free_extents()
374 err = hfsplus_block_free(sb, start + count, block_nr); in hfsplus_free_extents()
378 start, count); in hfsplus_free_extents()
401 u32 total_blocks, blocks, start; in hfsplus_free_fork() local
[all …]
bitmap.c
26 u32 mask, start, len, n; in hfsplus_block_allocate() local
39 start = size; in hfsplus_block_allocate()
84 start = size; in hfsplus_block_allocate()
94 start = size; in hfsplus_block_allocate()
98 start = offset + (curr - pptr) * 32 + i; in hfsplus_block_allocate()
99 if (start >= size) { in hfsplus_block_allocate()
104 len = min(size - start, len); in hfsplus_block_allocate()
135 start = size; in hfsplus_block_allocate()
155 *max = offset + (curr - pptr) * 32 + i - start; in hfsplus_block_allocate()
158 hfs_dbg(BITMAP, "-> %u,%u\n", start, *max); in hfsplus_block_allocate()
[all …]
/fs/hpfs/
alloc.c
100 int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) in hpfs_chk_sectors() argument
102 if (start + len < start || start < 0x12 || in hpfs_chk_sectors()
103 start + len > hpfs_sb(s)->sb_fs_size) { in hpfs_chk_sectors()
104 hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start); in hpfs_chk_sectors()
110 if (chk_if_allocated(s, start + i, msg)) return 1; in hpfs_chk_sectors()
503 static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit… in do_trim() argument
509 end = start + len; in do_trim()
510 if (start < limit_start) in do_trim()
511 start = limit_start; in do_trim()
514 if (start >= end) in do_trim()
[all …]
/fs/xfs/
xfs_discard.c
25 xfs_daddr_t start, in xfs_trim_extents() argument
97 if (dbno + dlen < start || dbno > end) { in xfs_trim_extents()
153 xfs_daddr_t start, end, minlen; in xfs_ioc_trim() local
182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || in xfs_ioc_trim()
187 start = BTOBB(range.start); in xfs_ioc_trim()
188 end = start + BTOBBT(range.len) - 1; in xfs_ioc_trim()
193 start_agno = xfs_daddr_to_agno(mp, start); in xfs_ioc_trim()
197 error = xfs_trim_extents(mp, agno, start, end, minlen, in xfs_ioc_trim()
