
Searched refs:index (Results 1 – 25 of 25) sorted by relevance

/mm/
truncate.c
35 pgoff_t index, void *entry) in __clear_shadow_entry() argument
37 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
46 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
50 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
84 pgoff_t index = indices[i]; in truncate_exceptional_pvec_entries() local
91 if (index >= end) in truncate_exceptional_pvec_entries()
95 dax_delete_mapping_entry(mapping, index); in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
112 pgoff_t index, void *entry) in invalidate_exceptional_entry() argument
117 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry()
[all …]
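The truncate.c hits share one pattern: the page cache index positions an XArray cursor, and a shadow (non-page) entry is erased only if it is still the one previously looked up. A minimal sketch of that pattern follows; clear_shadow_entry_sketch is a hypothetical name, and the node-accounting details of the real __clear_shadow_entry() are elided.

#include <linux/fs.h>
#include <linux/xarray.h>

/* Hypothetical sketch: erase the shadow entry at @index, but only if
 * it is still the @entry we looked up earlier, so a racing insertion
 * at the same index is left untouched. */
static void clear_shadow_entry_sketch(struct address_space *mapping,
				      pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_lock_irq(&xas);
	if (xas_load(&xas) == entry)	/* unchanged since lookup? */
		xas_store(&xas, NULL);	/* storing NULL erases the slot */
	xas_unlock_irq(&xas);
}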
shmem.c
141 struct shmem_inode_info *info, pgoff_t index);
142 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
146 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
151 int shmem_getpage(struct inode *inode, pgoff_t index, in shmem_getpage() argument
154 return shmem_getpage_gfp(inode, index, pagep, sgp, in shmem_getpage()
354 pgoff_t index, void *expected, void *replacement) in shmem_replace_entry() argument
356 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
376 pgoff_t index, swp_entry_t swap) in shmem_confirm_swap() argument
378 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
608 pgoff_t index, void *expected, gfp_t gfp) in shmem_add_to_page_cache() argument
[all …]
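shmem stores swapped-out pages in the same i_pages XArray as value entries that encode the swap slot, which is why shmem_confirm_swap() is a single load-and-compare. A minimal sketch, assuming only the standard XArray and swapops helpers (confirm_swap_sketch is a hypothetical name):

#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/xarray.h>

/* Hypothetical sketch: is the cache slot at @index still the value
 * entry for this swap slot? swp_to_radix_entry() packs the swp_entry_t
 * into an XArray value entry, so one load and compare suffices. */
static bool confirm_swap_sketch(struct address_space *mapping,
				pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}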
filemap.c
122 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
129 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
297 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
318 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
325 if (page->index == xas.xa_index) in page_cache_delete_batch()
334 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
506 pgoff_t index = start_byte >> PAGE_SHIFT; in __filemap_fdatawait_range() local
515 while (index <= end) { in __filemap_fdatawait_range()
518 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
815 pgoff_t offset = old->index; in replace_page_cache_page()
[all …]
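__filemap_fdatawait_range() shows the standard way a byte range becomes an index range: shift the byte offsets down by PAGE_SHIFT, then batch-fetch pages tagged as under writeback until the index walks past the end. A minimal sketch of that loop (fdatawait_sketch is a hypothetical name; the real function also collects per-page errors):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical sketch: wait for writeback to finish on every page in
 * a byte range. pagevec_lookup_range_tag() advances @index past each
 * batch it returns, so the while condition makes forward progress. */
static void fdatawait_sketch(struct address_space *mapping,
			     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	unsigned int nr_pages, i;

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						end, PAGECACHE_TAG_WRITEBACK);
		if (!nr_pages)
			break;
		for (i = 0; i < nr_pages; i++)
			wait_on_page_writeback(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}
}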
khugepaged.c
1500 pgoff_t index, end = start + HPAGE_PMD_NR; in collapse_file() local
1540 new_page->index = start; in collapse_file()
1550 for (index = start; index < end; index++) { in collapse_file()
1553 VM_BUG_ON(index != xas.xa_index); in collapse_file()
1561 if (index == start) { in collapse_file()
1566 xas_set(&xas, index); in collapse_file()
1580 if (shmem_getpage(mapping->host, index, &page, in collapse_file()
1596 file, index, in collapse_file()
1600 page = find_lock_page(mapping, index); in collapse_file()
1662 unmap_mapping_pages(mapping, index, 1, false); in collapse_file()
[all …]
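Within collapse_file(), each index in [start, end) is resolved to a page either through shmem_getpage() or find_lock_page(), and the page arrives locked with a reference held. A minimal sketch of that lookup discipline for the regular-file case (lookup_locked_sketch is a hypothetical name):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical sketch: pin and lock whatever page the cache holds at
 * @index, operate on it, then undo both. find_lock_page() returns the
 * page locked and with a reference, or NULL if nothing is cached. */
static void lookup_locked_sketch(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page)
		return;		/* nothing cached at this index */
	/* ... inspect or modify the locked page here ... */
	unlock_page(page);
	put_page(page);		/* drop find_lock_page()'s reference */
}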
page_ext.c
102 static inline struct page_ext *get_entry(void *base, unsigned long index) in get_entry() argument
104 return base + page_ext_size * index; in get_entry()
118 unsigned long index; in lookup_page_ext() local
130 index = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
132 return get_entry(base, index); in lookup_page_ext()
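get_entry() cannot use ordinary array indexing because a page_ext record's size is only known at boot, once the compiled-in debugging clients have registered. A minimal sketch of the byte-arithmetic lookup; entry_size is a hypothetical stand-in for the kernel's global page_ext_size:

#include <linux/page_ext.h>
#include <linux/types.h>

/* Hypothetical sketch: page_ext records are laid out back to back,
 * each entry_size bytes long, so entry i lives at base + i * size. */
static inline struct page_ext *get_entry_sketch(void *base,
						unsigned long index,
						size_t entry_size)
{
	return base + entry_size * index;
}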
madvise.c
188 unsigned long index; in swapin_walk_pmd_entry() local
193 for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
200 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
210 vma, index, false); in swapin_walk_pmd_entry()
226 pgoff_t index; in force_shm_swapin_readahead() local
231 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in force_shm_swapin_readahead()
233 page = find_get_entry(mapping, index); in force_shm_swapin_readahead()
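force_shm_swapin_readahead() translates a user virtual address into the backing file's page cache index; the same linear mapping appears in the ksm.c and memory.c hits below via linear_page_index(). A minimal sketch of the computation (addr_to_index_sketch is a hypothetical name):

#include <linux/mm.h>

/* Hypothetical sketch: which page cache index of the backing file does
 * user address @addr in @vma correspond to? Offset within the VMA in
 * pages, plus the VMA's starting file offset. */
static pgoff_t addr_to_index_sketch(struct vm_area_struct *vma,
				    unsigned long addr)
{
	return ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}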
zsmalloc.c
212 unsigned int index; member
610 if (class->index != i) in zs_stats_size_show()
879 return page->index; in obj_to_head()
1437 zspage->first_page->index = handle; in obj_malloc()
1501 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1657 int index = *obj_idx; in find_alloced_obj() local
1662 offset += class->size * index; in find_alloced_obj()
1674 index++; in find_alloced_obj()
1679 *obj_idx = index; in find_alloced_obj()
1785 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
[all …]
percpu.c
247 page->index = (unsigned long)pcpu; in pcpu_set_page_chunk()
253 return (struct pcpu_chunk *)page->index; in pcpu_get_page_chunk()
304 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) in pcpu_index_alloc_map() argument
307 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); in pcpu_index_alloc_map()
320 static unsigned long pcpu_block_off_to_off(int index, int off) in pcpu_block_off_to_off() argument
322 return index * PCPU_BITMAP_BLOCK_BITS + off; in pcpu_block_off_to_off()
748 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) in pcpu_block_refresh_hint() argument
750 struct pcpu_block_md *block = chunk->md_blocks + index; in pcpu_block_refresh_hint()
751 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); in pcpu_block_refresh_hint()
1139 unsigned long index, end, i, area_off, area_bits; in pcpu_find_zero_area() local
[all …]
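The first two percpu.c hits repurpose struct page's index field: pages backing a percpu chunk never sit in a page cache, so ->index is free to carry a back-pointer to the owning chunk. A minimal sketch with a hypothetical stand-in type for struct pcpu_chunk:

#include <linux/mm_types.h>

struct demo_chunk;	/* hypothetical stand-in for struct pcpu_chunk */

/* Hypothetical sketch: stash the owning chunk in the page's unused
 * ->index field, and read it back when only the page is at hand. */
static void set_page_chunk_sketch(struct page *page, struct demo_chunk *chunk)
{
	page->index = (unsigned long)chunk;
}

static struct demo_chunk *get_page_chunk_sketch(struct page *page)
{
	return (struct demo_chunk *)page->index;
}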
cleancache.c
195 ret = cleancache_ops->get_page(pool_id, key, page->index, page); in __cleancache_get_page()
229 cleancache_ops->put_page(pool_id, key, page->index, page); in __cleancache_put_page()
257 key, page->index); in __cleancache_invalidate_page()
gup.c
57 unsigned long index; in put_user_pages_dirty_lock() local
70 for (index = 0; index < npages; index++) { in put_user_pages_dirty_lock()
71 struct page *page = compound_head(pages[index]); in put_user_pages_dirty_lock()
110 unsigned long index; in put_user_pages() local
117 for (index = 0; index < npages; index++) in put_user_pages()
118 put_user_page(pages[index]); in put_user_pages()
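put_user_pages() is the release side of get_user_pages*(): every page in the array holds a reference that must be dropped exactly once. (Later kernels renamed this API to unpin_user_pages().) A minimal sketch of the loop (release_pinned_sketch is a hypothetical name):

#include <linux/mm.h>

/* Hypothetical sketch: drop the reference that get_user_pages*() took
 * on each page of a pinned range, exactly once per page. */
static void release_pinned_sketch(struct page **pages, unsigned long npages)
{
	unsigned long index;

	for (index = 0; index < npages; index++)
		put_user_page(pages[index]);
}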
page-writeback.c
2168 pgoff_t index; in write_cache_pages() local
2177 index = writeback_index; in write_cache_pages()
2180 index = wbc->range_start >> PAGE_SHIFT; in write_cache_pages()
2190 tag_pages_for_writeback(mapping, index, end); in write_cache_pages()
2191 done_index = index; in write_cache_pages()
2192 while (!done && (index <= end)) { in write_cache_pages()
2195 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2203 done_index = page->index; in write_cache_pages()
2257 done_index = page->index + 1; in write_cache_pages()
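write_cache_pages() picks its starting index from mapping->writeback_index when the walk is cyclic, otherwise from the byte range in the writeback_control, and done_index records how far the pass got so it can resume. A minimal sketch of just the index setup (start_index_sketch is a hypothetical helper):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>

/* Hypothetical sketch: compute the [index, end] page range that a
 * write_cache_pages()-style walk should cover. */
static void start_index_sketch(struct address_space *mapping,
			       struct writeback_control *wbc,
			       pgoff_t *index, pgoff_t *end)
{
	if (wbc->range_cyclic) {
		*index = mapping->writeback_index; /* resume previous walk */
		*end = -1;			   /* through the last page */
	} else {
		*index = wbc->range_start >> PAGE_SHIFT;
		*end = wbc->range_end >> PAGE_SHIFT;
	}
}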
vmstat.c
2029 int index; in unusable_show_print() local
2037 index = unusable_free_index(order, &info); in unusable_show_print()
2038 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); in unusable_show_print()
2089 int index; in extfrag_show_print() local
2099 index = __fragmentation_index(order, &info); in extfrag_show_print()
2100 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); in extfrag_show_print()
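Both vmstat.c hits print an index kept as an integer scaled by 1000, so "%d.%03d" renders it with three decimals: a value of 437 prints as "0.437". A minimal sketch (print_index_sketch is a hypothetical name; the real code writes to a seq_file):

#include <linux/printk.h>

/* Hypothetical sketch: print a fragmentation index stored as an
 * integer scaled by 1000 (range 0..1000), e.g. 437 -> "0.437". */
static void print_index_sketch(int index)
{
	pr_info("%d.%03d\n", index / 1000, index % 1000);
}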
readahead.c
97 if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages()
135 if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) in read_pages()
199 page->index = page_offset; in __do_page_cache_readahead()
swap.c
1051 struct address_space *mapping, pgoff_t *index, pgoff_t end, in pagevec_lookup_range_tag() argument
1054 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_tag()
1061 struct address_space *mapping, pgoff_t *index, pgoff_t end, in pagevec_lookup_range_nr_tag() argument
1064 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_nr_tag()
migrate.c
222 new = page - pvmw.page->index + in remove_migration_pte()
411 newpage->index = page->index; in migrate_page_move_mapping()
437 newpage->index = page->index; in migrate_page_move_mapping()
529 newpage->index = page->index; in migrate_huge_page_move_mapping()
2036 new_page->index = page->index; in migrate_misplaced_transhuge_page()
slab_common.c
1139 unsigned int index; in kmalloc_slab() local
1145 index = size_index[size_index_elem(size)]; in kmalloc_slab()
1149 index = fls(size - 1); in kmalloc_slab()
1152 return kmalloc_caches[kmalloc_type(flags)][index]; in kmalloc_slab()
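Above the small-size lookup table, kmalloc_slab() derives the cache index as fls(size - 1), i.e. the bit position of the request size rounded up to a power of two. A minimal sketch with a worked value (kmalloc_index_sketch is a hypothetical name):

#include <linux/bitops.h>

/* Hypothetical sketch: map an allocation size to a kmalloc cache
 * index. fls(100 - 1) == 7, so a 100-byte request lands in the
 * 2^7 = 128-byte cache. Valid only above the small-size lookup
 * table that kmalloc_slab() consults first. */
static unsigned int kmalloc_index_sketch(unsigned int size)
{
	return fls(size - 1);
}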
swapfile.c
591 cluster_set_null(&percpu_cluster->index); in scan_swap_map_ssd_cluster_conflict()
609 if (cluster_is_null(&cluster->index)) { in scan_swap_map_try_ssd_cluster()
611 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
612 cluster->next = cluster_next(&cluster->index) * in scan_swap_map_try_ssd_cluster()
634 (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); in scan_swap_map_try_ssd_cluster()
636 cluster_set_null(&cluster->index); in scan_swap_map_try_ssd_cluster()
649 cluster_set_null(&cluster->index); in scan_swap_map_try_ssd_cluster()
3221 cluster_set_null(&cluster->index); in SYSCALL_DEFINE2()
huge_memory.c
2475 page_tail->index = head->index + tail; in __split_huge_page_tail()
2533 if (head[i].index >= end) { in __split_huge_page()
2540 __xa_store(&head->mapping->i_pages, head[i].index, in __split_huge_page()
ksm.c
2572 page->index == linear_page_index(vma, address)) { in ksm_might_need_to_copy()
2674 page->index = linear_page_index(vma, address); in reuse_ksm_page()
memory.c
508 pgoff_t index; in print_bad_pte() local
533 index = linear_page_index(vma, addr); in print_bad_pte()
541 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
memory-failure.c
1217 start = (page->index << PAGE_SHIFT) & ~(size - 1); in memory_failure_dev_pagemap()
rmap.c
1046 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
slab.c
1176 static void __init set_up_node(struct kmem_cache *cachep, int index) in set_up_node() argument
1181 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
hugetlb.c
1441 pgoff_t index = page_index(page_head); in __basepage_index() local
1452 return (index << compound_order(page_head)) + compound_idx; in __basepage_index()
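__basepage_index() converts a huge page's cache index, which counts in huge-page units, into a base-page index: shift up by the compound order, then add the offset inside the compound page. A minimal sketch with hypothetical parameter names:

#include <linux/types.h>

/* Hypothetical sketch: the i-th base page inside a huge page has
 * index (huge_index << order) + i. E.g. huge_index 3, order 9
 * (2MB pages): 3 * 512 + 5 = 1541 for the 5th base page. */
static pgoff_t basepage_index_sketch(pgoff_t huge_index,
				     unsigned int order,
				     unsigned long offset_in_huge)
{
	return (huge_index << order) + offset_in_huge;
}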
page_alloc.c
198 return page->index; in get_pcppage_migratetype()
203 page->index = migratetype; in set_pcppage_migratetype()
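These last two hits use the same ->index repurposing trick as percpu.c above: a page parked on a per-cpu free list is not mapped anywhere, so ->index caches its migratetype and spares a pageblock bitmap lookup on the hot alloc/free path. A minimal sketch mirroring the two accessors:

#include <linux/mm_types.h>

/* Hypothetical sketch of the accessor pair: stash a small integer in
 * the otherwise-unused ->index of a free page, and read it back. */
static int get_pcp_migratetype_sketch(struct page *page)
{
	return page->index;
}

static void set_pcp_migratetype_sketch(struct page *page, int migratetype)
{
	page->index = migratetype;
}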