/mm/
D | truncate.c |
    35   pgoff_t index, void *entry) in __clear_shadow_entry() argument
    37   XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
    46   static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
    50   __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
    84   pgoff_t index = indices[i]; in truncate_exceptional_pvec_entries() local
    91   if (index >= end) in truncate_exceptional_pvec_entries()
    95   dax_delete_mapping_entry(mapping, index); in truncate_exceptional_pvec_entries()
    99   __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
   112   pgoff_t index, void *entry) in invalidate_exceptional_entry() argument
   117   clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry()
   [all …]
D | readahead.c |
    99   if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages()
   188   unsigned long index = readahead_index(ractl); in page_cache_ra_unbounded() local
   209   struct page *page = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
   211   BUG_ON(index + i != ractl->_index + ractl->_nr_pages); in page_cache_ra_unbounded()
   230   page->index = index + i; in page_cache_ra_unbounded()
   232   } else if (add_to_page_cache_lru(page, mapping, index + i, in page_cache_ra_unbounded()
   263   unsigned long index = readahead_index(ractl); in do_page_cache_ra() local
   271   if (index > end_index) in do_page_cache_ra()
   274   if (nr_to_read > end_index - index) in do_page_cache_ra()
   275   nr_to_read = end_index - index + 1; in do_page_cache_ra()
   [all …]
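The do_page_cache_ra() fragments above clamp a readahead request so it never runs past the file's last page. A minimal user-space sketch of that clamp; the function name clamp_ra() and the values are invented, only the arithmetic mirrors the fragments:

#include <stdio.h>

/* Trim a request of nr_to_read pages starting at 'index' so it does not
 * go past the last page of the file (end_index), as in do_page_cache_ra(). */
static unsigned long clamp_ra(unsigned long index, unsigned long nr_to_read,
                              unsigned long end_index)
{
        if (index > end_index)                  /* start is already past EOF */
                return 0;
        if (nr_to_read > end_index - index)
                nr_to_read = end_index - index + 1;     /* include end_index */
        return nr_to_read;
}

int main(void)
{
        /* file ends at page 9; a 32-page request at page 5 shrinks to 5 pages */
        printf("%lu\n", clamp_ra(5, 32, 9));    /* prints 5 */
        printf("%lu\n", clamp_ra(12, 8, 9));    /* prints 0: start beyond EOF */
        return 0;
}
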
D | shmem.c |
   149   struct shmem_inode_info *info, pgoff_t index);
   150   static int shmem_swapin_page(struct inode *inode, pgoff_t index,
   154   static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
   159   int shmem_getpage(struct inode *inode, pgoff_t index, in shmem_getpage() argument
   162   return shmem_getpage_gfp(inode, index, pagep, sgp, in shmem_getpage()
   422   pgoff_t index, void *expected, void *replacement) in shmem_replace_entry() argument
   424   XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
   444   pgoff_t index, swp_entry_t swap) in shmem_confirm_swap() argument
   446   return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
   681   pgoff_t index, void *expected, gfp_t gfp, in shmem_add_to_page_cache() argument
   [all …]
D | filemap.c |
   128   XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
   135   xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
   303   XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
   324   VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
   331   if (page->index == xas.xa_index) in page_cache_delete_batch()
   340   if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
   512   pgoff_t index = start_byte >> PAGE_SHIFT; in __filemap_fdatawait_range() local
   521   while (index <= end) { in __filemap_fdatawait_range()
   524   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
   799   pgoff_t offset = old->index; in replace_page_cache_page()
   [all …]
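The __filemap_fdatawait_range() fragments convert a byte range into page cache indexes with start_byte >> PAGE_SHIFT and then walk index up to the end page. A small user-space sketch of that conversion, assuming 4 KiB pages and made-up byte offsets:

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumption: 4 KiB pages */

int main(void)
{
        unsigned long start_byte = 5000, end_byte = 20000;
        unsigned long index = start_byte >> PAGE_SHIFT; /*  5000 / 4096 -> 1 */
        unsigned long end   = end_byte >> PAGE_SHIFT;   /* 20000 / 4096 -> 4 */

        while (index <= end) {
                printf("would wait on page index %lu\n", index);
                index++;        /* the kernel advances via pagevec lookups instead */
        }
        return 0;
}
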
D | khugepaged.c |
   1693   pgoff_t index, end = start + HPAGE_PMD_NR; in collapse_file() local
   1733   new_page->index = start; in collapse_file()
   1743   for (index = start; index < end; index++) { in collapse_file()
   1746   VM_BUG_ON(index != xas.xa_index); in collapse_file()
   1754   if (index == start) { in collapse_file()
   1759   xas_set(&xas, index); in collapse_file()
   1773   if (shmem_getpage(mapping->host, index, &page, in collapse_file()
   1789   file, index, in collapse_file()
   1790   end - index); in collapse_file()
   1793   page = find_lock_page(mapping, index); in collapse_file()
   [all …]
D | page_pinner.c |
    43   unsigned int index; member
   154   idx = lt_pinner.index++; in check_longterm_pin()
   155   lt_pinner.index %= LONGTERM_PIN_BUCKETS; in check_longterm_pin()
   355   idx = acf_pinner.index++; in __page_pinner_migration_failed()
   356   acf_pinner.index %= LONGTERM_PIN_BUCKETS; in __page_pinner_migration_failed()
   401   idx = (lt_pinner.index - 1 - i + LONGTERM_PIN_BUCKETS) % in read_longterm_page_pinner()
   436   idx = (acf_pinner.index - 1 - i + LONGTERM_PIN_BUCKETS) % in read_alloc_contig_failed()
   461   lt_pinner.index = 0; in pp_threshold_set()
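The check_longterm_pin() and read_longterm_page_pinner() fragments use index as a wrapping cursor into a fixed number of record buckets: writers take the current slot and advance modulo LONGTERM_PIN_BUCKETS, readers walk backwards from index - 1. A self-contained sketch of that indexing scheme; the bucket count and record layout are invented here:

#include <stdio.h>

#define LONGTERM_PIN_BUCKETS 4          /* assumption: the real constant is larger */

static struct {
        int value[LONGTERM_PIN_BUCKETS];
        unsigned int index;             /* next slot to overwrite */
} lt_pinner;

/* store: use the current slot, then advance and wrap, as in check_longterm_pin() */
static void record(int v)
{
        unsigned int idx = lt_pinner.index++;

        lt_pinner.index %= LONGTERM_PIN_BUCKETS;
        lt_pinner.value[idx] = v;
}

int main(void)
{
        for (int v = 1; v <= 6; v++)
                record(v);              /* 5 and 6 overwrite the two oldest slots */

        /* walk newest-first, mirroring read_longterm_page_pinner() */
        for (int i = 0; i < LONGTERM_PIN_BUCKETS; i++) {
                unsigned int idx = (lt_pinner.index - 1 - i + LONGTERM_PIN_BUCKETS) %
                                   LONGTERM_PIN_BUCKETS;
                printf("%d ", lt_pinner.value[idx]);    /* prints: 6 5 4 3 */
        }
        printf("\n");
        return 0;
}
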
D | page_ext.c |
   119   static inline struct page_ext *get_entry(void *base, unsigned long index) in get_entry() argument
   121   return base + page_ext_size * index; in get_entry()
   178   unsigned long index; in lookup_page_ext() local
   191   index = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
   193   return get_entry(base, index); in lookup_page_ext()
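get_entry() locates a page's extension record purely by arithmetic: base plus page_ext_size times an index derived from the pfn. A user-space sketch of the same flat-array lookup, with a made-up record type, pfn range, and sizes:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of a page_ext record; the real layout is variable. */
struct ext_record { unsigned long flags; };

static size_t record_size = sizeof(struct ext_record);  /* page_ext_size analogue */

/* base + record_size * index, as in get_entry() */
static struct ext_record *get_entry(void *base, unsigned long index)
{
        return (struct ext_record *)((char *)base + record_size * index);
}

int main(void)
{
        unsigned long start_pfn = 0x1000, nr = 16;
        void *base = calloc(nr, record_size);

        unsigned long pfn = 0x1005;
        unsigned long index = pfn - start_pfn;  /* offset into this node's array */

        get_entry(base, index)->flags = 0x1;
        printf("record %lu flags=%lx\n", index, get_entry(base, index)->flags);
        free(base);
        return 0;
}
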
D | zsmalloc.c |
    212   unsigned int index; member
    606   if (class->index != i) in zs_stats_size_show()
    875   return page->index; in obj_to_head()
   1391   zspage->first_page->index = handle; in obj_malloc()
   1455   set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
   1611   int index = *obj_idx; in find_alloced_obj() local
   1616   offset += class->size * index; in find_alloced_obj()
   1628   index++; in find_alloced_obj()
   1633   *obj_idx = index; in find_alloced_obj()
   1739   set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
   [all …]
D | cleancache.c |
   195   ret = cleancache_ops->get_page(pool_id, key, page->index, page); in __cleancache_get_page()
   229   cleancache_ops->put_page(pool_id, key, page->index, page); in __cleancache_put_page()
   257   key, page->index); in __cleancache_invalidate_page()
D | percpu.c |
    254   page->index = (unsigned long)pcpu; in pcpu_set_page_chunk()
    260   return (struct pcpu_chunk *)page->index; in pcpu_get_page_chunk()
    284   static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) in pcpu_index_alloc_map() argument
    287   (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); in pcpu_index_alloc_map()
    300   static unsigned long pcpu_block_off_to_off(int index, int off) in pcpu_block_off_to_off() argument
    302   return index * PCPU_BITMAP_BLOCK_BITS + off; in pcpu_block_off_to_off()
    730   static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) in pcpu_block_refresh_hint() argument
    732   struct pcpu_block_md *block = chunk->md_blocks + index; in pcpu_block_refresh_hint()
    733   unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); in pcpu_block_refresh_hint()
   1120   unsigned long index, end, i, area_off, area_bits; in pcpu_find_zero_area() local
   [all …]
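pcpu_index_alloc_map() and pcpu_block_off_to_off() translate a bitmap-block index into, respectively, the word of the chunk's allocation map where that block starts and a flat bit offset. A sketch of the same arithmetic with an illustrative block size (the real PCPU_BITMAP_BLOCK_BITS depends on the build configuration):

#include <stdio.h>

#define PCPU_BITMAP_BLOCK_BITS 1024             /* assumption: illustrative value */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* (block index, offset-in-block) -> flat bit offset, as in pcpu_block_off_to_off() */
static unsigned long block_off_to_off(int index, int off)
{
        return (unsigned long)index * PCPU_BITMAP_BLOCK_BITS + off;
}

/* block index -> first word of that block in the allocation bitmap,
 * as in pcpu_index_alloc_map() */
static unsigned long *index_alloc_map(unsigned long *alloc_map, int index)
{
        return alloc_map + (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

int main(void)
{
        static unsigned long alloc_map[4 * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG];

        printf("block 2, bit 5 -> flat bit %lu\n", block_off_to_off(2, 5));     /* 2053 */
        printf("block 2 starts %td words into the map\n",                       /* 32 on 64-bit */
               index_alloc_map(alloc_map, 2) - alloc_map);
        return 0;
}
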
D | internal.h |
    77   struct file *file, pgoff_t index, unsigned long nr_to_read) in force_page_cache_readahead() argument
    79   DEFINE_READAHEAD(ractl, file, mapping, index); in force_page_cache_readahead()
    83   struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
    84   struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);
D | madvise.c |
   194   unsigned long index; in swapin_walk_pmd_entry() local
   199   for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
   206   pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
   216   vma, index, false); in swapin_walk_pmd_entry()
D | slab_common.c |
   651   unsigned int index; in kmalloc_slab() local
   658   index = size_index[size_index_elem(size)]; in kmalloc_slab()
   662   index = fls(size - 1); in kmalloc_slab()
   665   trace_android_vh_kmalloc_slab(index, flags, &s); in kmalloc_slab()
   669   return kmalloc_caches[kmalloc_type(flags)][index]; in kmalloc_slab()
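kmalloc_slab() maps an allocation size to a cache index: small sizes go through the size_index[] table, everything else uses fls(size - 1), i.e. the size rounded up to the next power of two selects kmalloc-2^index. A user-space sketch of the fls path only; fls() is reimplemented here with a compiler builtin and the sample sizes are arbitrary:

#include <stdio.h>

/* 1-based position of the highest set bit, like the kernel's fls() */
static int fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int sizes[] = { 200, 256, 257, 4096, 5000 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                int index = fls(sizes[i] - 1);

                /* e.g. 257 -> index 9 -> kmalloc-512 */
                printf("size %4u -> index %2d (kmalloc-%u)\n",
                       sizes[i], index, 1u << index);
        }
        return 0;
}
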
D | page-writeback.c |
   2183   pgoff_t index; in write_cache_pages() local
   2191   index = mapping->writeback_index; /* prev offset */ in write_cache_pages()
   2194   index = wbc->range_start >> PAGE_SHIFT; in write_cache_pages()
   2200   tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
   2205   done_index = index; in write_cache_pages()
   2206   while (!done && (index <= end)) { in write_cache_pages()
   2209   nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
   2217   done_index = page->index; in write_cache_pages()
   2271   done_index = page->index + 1; in write_cache_pages()
D | vmstat.c |
   2086   int index; in unusable_show_print() local
   2094   index = unusable_free_index(order, &info); in unusable_show_print()
   2095   seq_printf(m, "%d.%03d ", index / 1000, index % 1000); in unusable_show_print()
   2136   int index; in extfrag_show_print() local
   2146   index = __fragmentation_index(order, &info); in extfrag_show_print()
   2147   seq_printf(m, "%d.%03d ", index / 1000, index % 1000); in extfrag_show_print()
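Both callers keep the index as an integer in thousandths and print it as a decimal using index / 1000 and index % 1000. A trivial sketch of that fixed-point formatting with sample values:

#include <stdio.h>

int main(void)
{
        int indexes[] = { 0, 7, 500, 999, 1000 };       /* values in 1/1000 units */

        for (unsigned int i = 0; i < sizeof(indexes) / sizeof(indexes[0]); i++)
                printf("%d.%03d\n", indexes[i] / 1000, indexes[i] % 1000);
        /* prints 0.000, 0.007, 0.500, 0.999, 1.000 */
        return 0;
}
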
D | gup.c |
   301   unsigned long index; in unpin_user_pages_dirty_lock() local
   314   for (index = 0; index < npages; index++) { in unpin_user_pages_dirty_lock()
   315   struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
   354   unsigned long index; in unpin_user_pages() local
   368   for (index = 0; index < npages; index++) in unpin_user_pages()
   369   unpin_user_page(pages[index]); in unpin_user_pages()
D | mincore.c |
    51   static unsigned char mincore_page(struct address_space *mapping, pgoff_t index) in mincore_page() argument
    62   page = find_get_incore_page(mapping, index); in mincore_page()
D | swap.c |
   1288   struct address_space *mapping, pgoff_t *index, pgoff_t end, in pagevec_lookup_range_tag() argument
   1291   pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_tag()
   1298   struct address_space *mapping, pgoff_t *index, pgoff_t end, in pagevec_lookup_range_nr_tag() argument
   1301   pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_nr_tag()
D | migrate.c |
    201   new = page - pvmw.page->index + in remove_migration_pte()
    397   newpage->index = page->index; in migrate_page_move_mapping()
    423   newpage->index = page->index; in migrate_page_move_mapping()
    522   newpage->index = page->index; in migrate_huge_page_move_mapping()
   2151   new_page->index = page->index; in migrate_misplaced_transhuge_page()
D | swap_state.c |
   429   struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index) in find_get_incore_page() argument
   433   struct page *page = find_get_entry(mapping, index); in find_get_incore_page()
   438   return find_subpage(page, index); in find_get_incore_page()
D | swapfile.c |
    605   cluster_set_null(&percpu_cluster->index); in scan_swap_map_ssd_cluster_conflict()
    622   if (cluster_is_null(&cluster->index)) { in scan_swap_map_try_ssd_cluster()
    624   cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
    625   cluster->next = cluster_next(&cluster->index) * in scan_swap_map_try_ssd_cluster()
    647   (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); in scan_swap_map_try_ssd_cluster()
    658   cluster_set_null(&cluster->index); in scan_swap_map_try_ssd_cluster()
   3335   cluster_set_null(&cluster->index); in SYSCALL_DEFINE2()
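In scan_swap_map_try_ssd_cluster(), a per-CPU cluster index is turned into a range of swap slots by multiplying with SWAPFILE_CLUSTER. A sketch of that index-to-offset conversion; treat the slot count and the cluster number as illustrative:

#include <stdio.h>

#define SWAPFILE_CLUSTER 256    /* slots per cluster */

int main(void)
{
        unsigned long cluster_index = 3;
        unsigned long first = cluster_index * SWAPFILE_CLUSTER;
        unsigned long limit = (cluster_index + 1) * SWAPFILE_CLUSTER;

        /* the per-CPU allocator scans slots in [first, limit) */
        printf("cluster %lu covers swap offsets %lu..%lu\n",
               cluster_index, first, limit - 1);        /* 768..1023 */
        return 0;
}
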
D | memfd.c |
    50   xas_set(xas, page->index + cache_count); in memfd_tag_pins()
D | huge_memory.c |
   2426   page_tail->index = head->index + tail; in __split_huge_page_tail()
   2485   if (head[i].index >= end) { in __split_huge_page()
   2492   __xa_store(&head->mapping->i_pages, head[i].index, in __split_huge_page()
/mm/kfence/ |
D | core.c |
   140   long index; in addr_to_metadata() local
   152   index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; in addr_to_metadata()
   153   if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) in addr_to_metadata()
   156   return &kfence_metadata[index]; in addr_to_metadata()
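addr_to_metadata() maps an address inside the KFENCE pool to an object index: each object occupies a two-page slot (object page plus guard page), and the pool's first two pages hold no object, hence the "- 1" and the bounds check. A user-space sketch of the same calculation with an invented pool address and object count:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define KFENCE_NUM_OBJECTS 8    /* assumption: the CONFIG value varies per build */

/* Offset into the pool, divided by the 2-page slot size, minus the leading
 * non-object slot; out-of-range results mean "not a KFENCE object address". */
static long addr_to_index(unsigned long addr, unsigned long pool_start)
{
        long index = (long)((addr - pool_start) / (PAGE_SIZE * 2)) - 1;

        if (index < 0 || index >= KFENCE_NUM_OBJECTS)
                return -1;
        return index;
}

int main(void)
{
        unsigned long pool = 0x100000UL;        /* hypothetical __kfence_pool base */

        printf("%ld\n", addr_to_index(pool + 2 * PAGE_SIZE, pool));     /* 0 */
        printf("%ld\n", addr_to_index(pool + 5 * PAGE_SIZE, pool));     /* 1 */
        printf("%ld\n", addr_to_index(pool + PAGE_SIZE / 2, pool));     /* -1: below the first slot */
        return 0;
}
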
/mm/damon/ |
D | Kconfig |
    12   See https://damonitor.github.io/doc/html/latest-damon/index.html for