/mm/

truncate.c
      27  pgoff_t index, void *entry)    in clear_exceptional_entry() argument
      42  if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))    in clear_exceptional_entry()
     153  (loff_t)page->index << PAGE_CACHE_SHIFT,    in truncate_inode_page()
     227  pgoff_t index;    in truncate_inode_pages_range() local
     256  index = start;    in truncate_inode_pages_range()
     257  while (index < end && pagevec_lookup_entries(&pvec, mapping, index,    in truncate_inode_pages_range()
     258  min(end - index, (pgoff_t)PAGEVEC_SIZE),    in truncate_inode_pages_range()
     264  index = indices[i];    in truncate_inode_pages_range()
     265  if (index >= end)    in truncate_inode_pages_range()
     269  clear_exceptional_entry(mapping, index, page);    in truncate_inode_pages_range()
     [all …]

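The truncate_inode_pages_range() matches use the page-cache index in both directions: pagevec_lookup_entries() walks it forward in PAGEVEC_SIZE batches, while truncate_inode_page() converts an index back to a byte offset. Below is a minimal user-space model of that conversion, assuming 4 KiB pages (PAGE_CACHE_SHIFT = 12); the index value is invented:

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12

    int main(void)
    {
        unsigned long index = 300;     /* page->index: file offset in pages */
        long long offset = (long long)index << PAGE_CACHE_SHIFT;

        /* The cast mirrors the (loff_t) cast in truncate_inode_page():
         * on 32-bit, an unsigned long index shifted by 12 would overflow
         * for large files. */
        printf("page %lu starts at byte offset %lld\n", index, offset);
        return 0;
    }
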
shmem.c
     123  struct shmem_inode_info *info, pgoff_t index);
     124  static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
     127  static inline int shmem_getpage(struct inode *inode, pgoff_t index,    in shmem_getpage() argument
     130  return shmem_getpage_gfp(inode, index, pagep, sgp,    in shmem_getpage()
     256  pgoff_t index, void *expected, void *replacement)    in shmem_radix_tree_replace() argument
     263  pslot = radix_tree_lookup_slot(&mapping->page_tree, index);    in shmem_radix_tree_replace()
     281  pgoff_t index, swp_entry_t swap)    in shmem_confirm_swap() argument
     286  item = radix_tree_lookup(&mapping->page_tree, index);    in shmem_confirm_swap()
     296  pgoff_t index, void *expected)    in shmem_add_to_page_cache() argument
     305  page->index = index;    in shmem_add_to_page_cache()
     [all …]

filemap.c
     119  error = __radix_tree_create(&mapping->page_tree, page->index,    in page_cache_tree_insert()
     158  unsigned long index;    in page_cache_tree_delete() local
     165  __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);    in page_cache_tree_delete()
     195  index = page->index;    in page_cache_tree_delete()
     196  offset = index & RADIX_TREE_MAP_MASK;    in page_cache_tree_delete()
     199  radix_tree_tag_clear(&mapping->page_tree, index, tag);    in page_cache_tree_delete()
     388  pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;    in __filemap_fdatawait_range() local
     398  while ((index <= end) &&    in __filemap_fdatawait_range()
     399  (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,    in __filemap_fdatawait_range()
     401  min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {    in __filemap_fdatawait_range()
     [all …]

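__filemap_fdatawait_range() derives its page range from byte bounds and clamps each tagged lookup to one pagevec. A user-space sketch of that batching arithmetic follows; PAGEVEC_SIZE = 14 and PAGE_CACHE_SHIFT = 12 are assumed values typical of kernels of this era, not taken from this listing:

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12
    #define PAGEVEC_SIZE     14UL

    int main(void)
    {
        unsigned long long start_byte = 0, end_byte = 1 << 20;  /* 1 MiB */
        unsigned long index = start_byte >> PAGE_CACHE_SHIFT;
        unsigned long end   = end_byte >> PAGE_CACHE_SHIFT;

        while (index <= end) {
            /* Same clamp as min(end - index, PAGEVEC_SIZE-1) + 1: never
             * ask for more slots than remain in the range, and never
             * more than one pagevec holds. */
            unsigned long batch = (end - index < PAGEVEC_SIZE - 1)
                                      ? end - index + 1 : PAGEVEC_SIZE;
            printf("lookup %lu pages starting at index %lu\n", batch, index);
            index += batch;   /* the kernel advances index via &index */
        }
        return 0;
    }
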
zsmalloc.c
     214  unsigned int index;    member
     517  if (class->index != i)    in zs_stats_size_show()
     852  off = page->index;    in obj_idx_to_offset()
     931  page->index = off;    in init_zspage()
    1419  set_zspage_mapping(first_page, class->index, ZS_EMPTY);    in zs_malloc()
    1572  static unsigned long find_alloced_obj(struct page *page, int index,    in find_alloced_obj() argument
    1581  offset = page->index;    in find_alloced_obj()
    1582  offset += class->size * index;    in find_alloced_obj()
    1594  index++;    in find_alloced_obj()
    1609  int index;    member
     [all …]

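In zsmalloc, page->index does not name a file offset at all: init_zspage() stores the byte offset of the first object in the page there, and find_alloced_obj() steps through objects by adding class->size multiples to it. A small model of that layout arithmetic, with invented numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long first_obj_off = 48;   /* stands in for page->index */
        int class_size = 208;               /* stands in for class->size */

        for (int index = 0; index < 4; index++) {
            /* offset = page->index; offset += class->size * index; */
            unsigned long offset =
                first_obj_off + (unsigned long)class_size * index;
            printf("object %d lives at page offset %lu\n", index, offset);
        }
        return 0;
    }
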
madvise.c
     145  unsigned long index;    in swapin_walk_pmd_entry() local
     150  for (index = start; index != end; index += PAGE_SIZE) {    in swapin_walk_pmd_entry()
     157  pte = *(orig_pte + ((index - start) / PAGE_SIZE));    in swapin_walk_pmd_entry()
     167  vma, index);    in swapin_walk_pmd_entry()
     193  pgoff_t index;    in force_shm_swapin_readahead() local
     198  index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;    in force_shm_swapin_readahead()
     200  page = find_get_entry(mapping, index);    in force_shm_swapin_readahead()

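force_shm_swapin_readahead() translates a user virtual address into a file page index by rebasing against the vma start, converting bytes to pages, and adding vm_pgoff (the file offset at which the mapping begins). The same computation, modelled in user space with made-up addresses:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long vm_start = 0x7f0000000000UL;  /* vma->vm_start */
        unsigned long vm_pgoff = 16;                /* vma->vm_pgoff */
        unsigned long addr     = vm_start + 5 * (1UL << PAGE_SHIFT);

        unsigned long index = ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
        printf("address %#lx maps to file page index %lu\n", addr, index);
        return 0;   /* prints index 21: page 5 of the vma, 16 pages in */
    }
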
readahead.c
      92  if (add_to_page_cache_lru(page, mapping, page->index,    in read_cache_pages()
     130  if (!add_to_page_cache_lru(page, mapping, page->index,    in read_pages()
     187  page->index = page_offset;    in __do_page_cache_readahead()
     544  pgoff_t index, unsigned long nr)    in do_readahead() argument
     549  return force_page_cache_readahead(mapping, filp, index, nr);    in do_readahead()

cleancache.c
     196  ret = cleancache_ops->get_page(pool_id, key, page->index, page);    in __cleancache_get_page()
     230  cleancache_ops->put_page(pool_id, key, page->index, page);    in __cleancache_put_page()
     258  key, page->index);    in __cleancache_invalidate_page()

page-writeback.c
    2165  pgoff_t index;    in write_cache_pages() local
    2174  index = writeback_index;    in write_cache_pages()
    2177  index = wbc->range_start >> PAGE_CACHE_SHIFT;    in write_cache_pages()
    2187  tag_pages_for_writeback(mapping, index, end);    in write_cache_pages()
    2188  done_index = index;    in write_cache_pages()
    2189  while (!done && (index <= end)) {    in write_cache_pages()
    2192  nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,    in write_cache_pages()
    2193  min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);    in write_cache_pages()
    2207  if (page->index > end) {    in write_cache_pages()
    2216  done_index = page->index;    in write_cache_pages()
     [all …]

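write_cache_pages() keeps two cursors: index, where the next tagged lookup starts (the saved writeback_index in cyclic mode, otherwise range_start converted to pages), and done_index, the last page actually visited, which is written back on exit so the next cyclic pass resumes there. A rough stand-alone sketch of just the cursor setup, not the full loop:

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_CACHE_SHIFT 12

    int main(void)
    {
        bool range_cyclic = true;
        unsigned long writeback_index = 42;   /* saved per-mapping cursor */
        unsigned long long range_start = 0;   /* wbc->range_start, bytes */

        unsigned long index = range_cyclic
            ? writeback_index
            : (unsigned long)(range_start >> PAGE_CACHE_SHIFT);
        unsigned long done_index = index;

        /* ... the pagevec loop would run here, setting
         *     done_index = page->index for each page visited ... */
        printf("scan starts at index %lu (done_index=%lu)\n",
               index, done_index);
        return 0;
    }
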
vmstat.c
    1631  int index;    in unusable_show_print() local
    1639  index = unusable_free_index(order, &info);    in unusable_show_print()
    1640  seq_printf(m, "%d.%03d ", index / 1000, index % 1000);    in unusable_show_print()
    1691  int index;    in extfrag_show_print() local
    1701  index = __fragmentation_index(order, &info);    in extfrag_show_print()
    1702  seq_printf(m, "%d.%03d ", index / 1000, index % 1000);    in extfrag_show_print()

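The two seq_printf() calls above print a fixed-point fraction: the index is stored scaled by 1000, so integer division and modulo unpack it into whole and fractional parts. For a non-negative value (the 437 below is invented) the idiom gives three fixed decimals:

    #include <stdio.h>

    int main(void)
    {
        int index = 437;   /* e.g. a result of unusable_free_index() */
        printf("%d.%03d\n", index / 1000, index % 1000);   /* -> 0.437 */
        return 0;
    }
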
migrate.c
     330  newpage->index = page->index;    in migrate_page_move_mapping()
     377  newpage->index = page->index;    in migrate_page_move_mapping()
     463  newpage->index = page->index;    in migrate_huge_page_move_mapping()
    1792  new_page->index = page->index;    in migrate_misplaced_transhuge_page()

slab_common.c
     857  int index;    in kmalloc_slab() local
     868  index = size_index[size_index_elem(size)];    in kmalloc_slab()
     870  index = fls(size - 1);    in kmalloc_slab()
     874  return kmalloc_dma_caches[index];    in kmalloc_slab()
     877  return kmalloc_caches[index];    in kmalloc_slab()

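For sizes above the size_index table, kmalloc_slab() computes the cache index as fls(size - 1), the position of the highest set bit, which works out to the ceiling of log2(size): requests of 257..512 bytes all land in the 512-byte cache at index 9. A stand-alone model; the fls() below is a portable substitute for the kernel's bit-scan helper:

    #include <stdio.h>

    static int fls(unsigned int x)   /* highest set bit, 1-based; fls(0)==0 */
    {
        int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        unsigned int sizes[] = { 256, 257, 512, 513, 4096 };
        for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("kmalloc(%u) -> kmalloc_caches[%d]\n",
                   sizes[i], fls(sizes[i] - 1));
        return 0;
    }
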
rmap.c
    1083  VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);    in page_move_anon_rmap()
    1121  page->index = linear_page_index(vma, address);    in __page_set_anon_rmap()
    1147  BUG_ON(page->index != linear_page_index(vma, address));    in __page_check_anon_rmap()
    1771  page->index = linear_page_index(vma, address);    in __hugepage_set_anon_rmap()

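For anonymous memory, page->index is repurposed to cache linear_page_index(vma, address), which the rmap assertions above (and the ksm.c and memory.c matches further down) recompute to validate it. A simplified user-space model of the non-hugetlb computation, with invented layout values:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long linear_page_index(unsigned long vm_start,
                                           unsigned long vm_pgoff,
                                           unsigned long address)
    {
        /* page offset within the vma, plus the vma's starting pgoff */
        unsigned long pgoff = (address - vm_start) >> PAGE_SHIFT;
        return pgoff + vm_pgoff;
    }

    int main(void)
    {
        unsigned long vm_start = 0x400000, vm_pgoff = 0;
        unsigned long addr = 0x403000;   /* third page of the vma */

        printf("page->index = %lu\n",
               linear_page_index(vm_start, vm_pgoff, addr));
        return 0;
    }
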
swapfile.c
     429  cluster_set_null(&percpu_cluster->index);    in scan_swap_map_ssd_cluster_conflict()
     446  if (cluster_is_null(&cluster->index)) {    in scan_swap_map_try_ssd_cluster()
     448  cluster->index = si->free_cluster_head;    in scan_swap_map_try_ssd_cluster()
     449  cluster->next = cluster_next(&cluster->index) *    in scan_swap_map_try_ssd_cluster()
     470  while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) *    in scan_swap_map_try_ssd_cluster()
     479  cluster_set_null(&cluster->index);    in scan_swap_map_try_ssd_cluster()
    2508  cluster_set_null(&cluster->index);    in SYSCALL_DEFINE2()

debug.c
      87  page->mapping, page->index);    in dump_page_badflags()

swap.c
    1134  pgoff_t *index, int tag, unsigned nr_pages)    in pagevec_lookup_tag() argument
    1136  pvec->nr = find_get_pages_tag(mapping, index, tag,    in pagevec_lookup_tag()

percpu.c
     233  page->index = (unsigned long)pcpu;    in pcpu_set_page_chunk()
     239  return (struct pcpu_chunk *)page->index;    in pcpu_get_page_chunk()

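Here page->index is plain scratch storage: percpu stashes a pointer to the owning chunk in it and casts it back on lookup. A minimal model with stand-in types (the real struct page and struct pcpu_chunk are of course much larger):

    #include <stdio.h>

    struct pcpu_chunk { int id; };
    struct page { unsigned long index; };   /* index doubles as storage */

    static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
    {
        page->index = (unsigned long)pcpu;
    }

    static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
    {
        return (struct pcpu_chunk *)page->index;
    }

    int main(void)
    {
        struct pcpu_chunk chunk = { .id = 7 };
        struct page page;

        pcpu_set_page_chunk(&page, &chunk);
        printf("chunk id recovered from page->index: %d\n",
               pcpu_get_page_chunk(&page)->id);
        return 0;
    }
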
huge_memory.c
    1825  page_tail->index = page->index + i;    in __split_huge_page_refcount()
    1946  pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);    in __split_huge_page()

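When __split_huge_page_refcount() splits a huge page, tail page i inherits page->index + i, so the resulting base pages stay contiguous in the mapping. A sketch of that fan-out, assuming the usual x86 HPAGE_PMD_NR of 512:

    #include <stdio.h>

    #define HPAGE_PMD_NR 512

    int main(void)
    {
        unsigned long head_index = 1024;   /* page->index of the huge page */

        for (int i = 1; i < HPAGE_PMD_NR; i++) {
            unsigned long tail_index = head_index + i;
            if (i <= 2 || i == HPAGE_PMD_NR - 1)   /* print a few samples */
                printf("tail %3d -> index %lu\n", i, tail_index);
        }
        return 0;
    }
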
memory.c
     645  pgoff_t index;    in print_bad_pte() local
     671  index = linear_page_index(vma, addr);    in print_bad_pte()
     681  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);    in print_bad_pte()
    2051  vmf.pgoff = page->index;    in do_page_mkwrite()

ksm.c
    1924  page->index == linear_page_index(vma, address)) {    in ksm_might_need_to_copy()

slab.c
    1368  static void __init set_up_node(struct kmem_cache *cachep, int index)    in set_up_node() argument
    1373  cachep->node[node] = &init_kmem_cache_node[index + node];    in set_up_node()

hugetlb.c
    1352  pgoff_t index = page_index(page_head);    in __basepage_index() local
    1363  return (index << compound_order(page_head)) + compound_idx;    in __basepage_index()

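__basepage_index() converts a compound page's index (counted in huge-page units) plus a position inside the compound page into a base-page index: shift by the compound order, then add the intra-page offset. A worked example with invented values, assuming x86 2 MiB huge pages (order 9, 512 base pages each):

    #include <stdio.h>

    int main(void)
    {
        unsigned long index = 3;           /* page_index(page_head) */
        unsigned int compound_order = 9;   /* 512 base pages per huge page */
        unsigned long compound_idx = 17;   /* base page within the huge page */

        unsigned long basepage_index = (index << compound_order) + compound_idx;
        printf("base-page index = %lu\n", basepage_index);  /* 3*512+17 = 1553 */
        return 0;
    }
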
page_alloc.c
     138  return page->index;    in get_pcppage_migratetype()
     143  page->index = migratetype;    in set_pcppage_migratetype()

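page_alloc.c reuses page->index one more way: while a page sits on a per-cpu free list its mapping index is meaningless, so the migratetype is parked there. A user-space model of the accessor pair; the struct page and enum values below are invented stand-ins:

    #include <stdio.h>

    enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };
    struct page { unsigned long index; };   /* index reused as scratch */

    static void set_pcppage_migratetype(struct page *page, int migratetype)
    {
        page->index = migratetype;
    }

    static int get_pcppage_migratetype(struct page *page)
    {
        return page->index;
    }

    int main(void)
    {
        struct page page;

        set_pcppage_migratetype(&page, MIGRATE_MOVABLE);
        printf("migratetype = %d\n", get_pcppage_migratetype(&page));
        return 0;
    }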