/mm/

swap.c
     60  static void __page_cache_release(struct page *page)
     62          if (PageLRU(page)) {
     63                  pg_data_t *pgdat = page_pgdat(page);
     68                  lruvec = mem_cgroup_page_lruvec(page, pgdat);
     69                  VM_BUG_ON_PAGE(!PageLRU(page), page);
     70                  __ClearPageLRU(page);
     71                  del_page_from_lru_list(page, lruvec, page_off_lru(page));
     74          __ClearPageWaiters(page);
     77  static void __put_single_page(struct page *page)
     79          __page_cache_release(page);
     [all …]

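The release path above runs when a page's reference count drops to zero. A minimal sketch of the caller-side contract, under assumed names (demo_hold_page is hypothetical; get_page()/put_page() are the real APIs):

    #include <linux/mm.h>

    /* Sketch: the final put_page() is what funnels a page into release
     * paths such as __page_cache_release() above. */
    static void demo_hold_page(struct page *page)
    {
            get_page(page);                 /* take our own reference */
            /* ... safely use the page here ... */
            put_page(page);                 /* may be the final reference */
    }
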
migrate.c
     85  int isolate_movable_page(struct page *page, isolate_mode_t mode)
     98          if (unlikely(!get_page_unless_zero(page)))
    106          if (unlikely(!__PageMovable(page)))
    119          if (unlikely(!trylock_page(page)))
    122          if (!PageMovable(page) || PageIsolated(page))
    125          mapping = page_mapping(page);
    126          VM_BUG_ON_PAGE(!mapping, page);
    128          if (!mapping->a_ops->isolate_page(page, mode))
    132          WARN_ON_ONCE(PageIsolated(page));
    133          __SetPageIsolated(page);
    [all …]

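isolate_movable_page() shows the speculative-reference idiom: take a refcount only if the page is still live, then trylock and re-check state under the lock. A hedged, self-contained sketch of that idiom (demo_try_isolate is hypothetical; the real function also consults the address_space ops):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static bool demo_try_isolate(struct page *page)
    {
            /* Grab a reference only if the page is not mid-free. */
            if (unlikely(!get_page_unless_zero(page)))
                    return false;
            /* Never block here; callers may hold other locks. */
            if (unlikely(!trylock_page(page))) {
                    put_page(page);
                    return false;
            }
            /* State may have changed before we got the lock: re-check,
             * then do the real isolation work under the page lock. */
            unlock_page(page);
            return true;    /* caller drops the reference with put_page() */
    }
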
filemap.c
    120                  struct page *page, void *shadow)    (in page_cache_delete)
    122          XA_STATE(xas, &mapping->i_pages, page->index);
    128          if (!PageHuge(page)) {
    129                  xas_set_order(&xas, page->index, compound_order(page));
    130                  nr = compound_nr(page);
    133          VM_BUG_ON_PAGE(!PageLocked(page), page);
    134          VM_BUG_ON_PAGE(PageTail(page), page);
    135          VM_BUG_ON_PAGE(nr != 1 && shadow, page);
    140          page->mapping = NULL;
    157                  struct page *page)    (in unaccount_page_cache_page)
    [all …]

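page_cache_delete() manipulates mapping->i_pages, which is an XArray; the excerpt uses the low-level xas_* cursor API. A hedged sketch of the simpler high-level calls (demo_xa and demo_xarray_usage are hypothetical stand-ins for i_pages and its users):

    #include <linux/kernel.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_xa);          /* stand-in for mapping->i_pages */

    static int demo_xarray_usage(void *item)
    {
            void *old = xa_store(&demo_xa, 42, item, GFP_KERNEL);

            if (xa_is_err(old))
                    return xa_err(old);     /* e.g. -ENOMEM */
            WARN_ON(xa_load(&demo_xa, 42) != item);
            xa_erase(&demo_xa, 42);         /* what deletion boils down to */
            return 0;
    }
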
rmap.c
    465  struct anon_vma *page_get_anon_vma(struct page *page)
    471          anon_mapping = (unsigned long)READ_ONCE(page->mapping);
    474          if (!page_mapped(page))
    490          if (!page_mapped(page)) {
    508  struct anon_vma *page_lock_anon_vma_read(struct page *page)
    515          anon_mapping = (unsigned long)READ_ONCE(page->mapping);
    518          if (!page_mapped(page))
    529          if (!page_mapped(page)) {
    542          if (!page_mapped(page)) {
    688  unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
    [all …]

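Both functions read page->mapping with READ_ONCE() because the field overloads its low bits: when PAGE_MAPPING_ANON is set, it holds a struct anon_vma rather than an address_space. A simplified sketch of just the decode step (demo_decode_anon_vma is hypothetical; the real code additionally runs under RCU and re-checks page_mapped()):

    #include <linux/mm.h>
    #include <linux/page-flags.h>
    #include <linux/rmap.h>

    static struct anon_vma *demo_decode_anon_vma(struct page *page)
    {
            unsigned long anon_mapping =
                    (unsigned long)READ_ONCE(page->mapping);

            if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                    return NULL;            /* file-backed or KSM page */
            return (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
    }
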
truncate.c
     83                  struct page *page = pvec->pages[i];    (in truncate_exceptional_pvec_entries)
     86                  if (!xa_is_value(page)) {
     87                          pvec->pages[j++] = page;
     99                  __clear_shadow_entry(mapping, index, page);
    152  void do_invalidatepage(struct page *page, unsigned int offset,
    155          void (*invalidatepage)(struct page *, unsigned int, unsigned int);
    157          invalidatepage = page->mapping->a_ops->invalidatepage;
    163          (*invalidatepage)(page, offset, length);
    177  truncate_cleanup_page(struct address_space *mapping, struct page *page)
    179          if (page_mapped(page)) {
    [all …]

page_io.c
     31                  struct page *page, bio_end_io_t end_io)    (in get_swap_bio)
     39          bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
     44          bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
     51          struct page *page = bio_first_page_all(bio);    (in end_swap_bio_write)
     54                  SetPageError(page);
     63                  set_page_dirty(page);
     67                  ClearPageReclaim(page);
     69          end_page_writeback(page);
     73  static void swap_slot_free_notify(struct page *page)
     85          if (unlikely(!PageSwapCache(page)))
    [all …]

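get_swap_bio() builds a one-page bio and end_swap_bio_write() completes it. A hedged sketch of that construction, assuming the 5.x-era block API the listing reflects (demo_write_page and demo_end_io are hypothetical; error handling is trimmed):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static void demo_end_io(struct bio *bio)
    {
            struct page *page = bio_first_page_all(bio);

            if (bio->bi_status)
                    SetPageError(page);     /* record the I/O failure */
            end_page_writeback(page);
            bio_put(bio);
    }

    static int demo_write_page(struct block_device *bdev, sector_t sector,
                               struct page *page)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);   /* one bio_vec */

            if (!bio)
                    return -ENOMEM;
            bio_set_dev(bio, bdev);
            bio->bi_iter.bi_sector = sector;
            bio->bi_opf = REQ_OP_WRITE;
            bio->bi_end_io = demo_end_io;
            bio_add_page(bio, page, PAGE_SIZE, 0);
            submit_bio(bio);
            return 0;
    }
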
page_isolation.c
     18  static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
     26          zone = page_zone(page);
     35          if (is_migrate_isolate_page(page))
     38          pfn = page_to_pfn(page);
     62          if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
     74                  int mt = get_pageblock_migratetype(page);
     76                  set_pageblock_migratetype(page, MIGRATE_ISOLATE);
     78                  nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
     90  static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
     97          struct page *buddy;
    [all …]

page_alloc.c
    196  static inline int get_pcppage_migratetype(struct page *page)
    198          return page->index;
    201  static inline void set_pcppage_migratetype(struct page *page, int migratetype)
    203          page->index = migratetype;
    248  static void __free_pages_ok(struct page *page, unsigned int order);
    395  static inline void kasan_free_nondeferred_pages(struct page *page, int order)
    398          kasan_free_pages(page, order);
    461  static inline unsigned long *get_pageblock_bitmap(struct page *page,
    467          return page_zone(page)->pageblock_flags;
    471  static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
    [all …]

mlock.c
     59  void clear_page_mlock(struct page *page)
     61          if (!TestClearPageMlocked(page))
     64          mod_zone_page_state(page_zone(page), NR_MLOCK,
     65                              -hpage_nr_pages(page));
     73          if (!isolate_lru_page(page)) {
     74                  putback_lru_page(page);
     79                  if (PageUnevictable(page))
     88  void mlock_vma_page(struct page *page)
     91          BUG_ON(!PageLocked(page));
     93          VM_BUG_ON_PAGE(PageTail(page), page);
     [all …]

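Line 61 is the atomic test-and-clear idiom: TestClearPageMlocked() reads and clears the flag in one step, so exactly one CPU wins the race and adjusts NR_MLOCK. A hedged sketch of that accounting step in isolation (demo_clear_mlock is hypothetical; the real function also rescues the page onto the correct LRU list):

    #include <linux/mm.h>
    #include <linux/huge_mm.h>
    #include <linux/vmstat.h>

    static void demo_clear_mlock(struct page *page)
    {
            if (!TestClearPageMlocked(page))
                    return;         /* another CPU already accounted it */
            /* Subtract one unit per base page (THPs count as several). */
            mod_zone_page_state(page_zone(page), NR_MLOCK,
                                -hpage_nr_pages(page));
    }
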
memory-failure.c
     81  static int hwpoison_filter_dev(struct page *p)
    111  static int hwpoison_filter_flags(struct page *p)
    136  static int hwpoison_filter_task(struct page *p)
    147  static int hwpoison_filter_task(struct page *p) { return 0; }
    150  int hwpoison_filter(struct page *p)
    167  int hwpoison_filter(struct page *p)
    241  void shake_page(struct page *p, int access)
    264  static unsigned long dev_pagemap_mapping_shift(struct page *page,
    267          unsigned long address = vma_address(page, vma);
    308  static void add_to_kill(struct task_struct *tsk, struct page *p,
    [all …]

internal.h
     70  static inline void set_page_refcounted(struct page *page)
     72          VM_BUG_ON_PAGE(PageTail(page), page);
     73          VM_BUG_ON_PAGE(page_ref_count(page), page);
     74          set_page_count(page, 1);
     88  extern int isolate_lru_page(struct page *page);
     89  extern void putback_lru_page(struct page *page);
    147  extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
    150  static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
    159  extern int __isolate_free_page(struct page *page, unsigned int order);
    160  extern void memblock_free_pages(struct page *page, unsigned long pfn,
    [all …]

slub.c
    352  static __always_inline void slab_lock(struct page *page)
    354          VM_BUG_ON_PAGE(PageTail(page), page);
    355          bit_spin_lock(PG_locked, &page->flags);
    358  static __always_inline void slab_unlock(struct page *page)
    360          VM_BUG_ON_PAGE(PageTail(page), page);
    361          __bit_spin_unlock(PG_locked, &page->flags);
    365  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
    374                  if (cmpxchg_double(&page->freelist, &page->counters,
    381                  slab_lock(page);
    382                  if (page->freelist == freelist_old &&
    [all …]

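slab_lock() shows that any bit of any word can serve as a spinlock via the bit-spinlock API; SLUB reuses PG_locked in page->flags so a slab needs no separate lock field. A minimal sketch of the idiom on a caller-owned word, which is safer for a demo than a live struct page (names are hypothetical):

    #include <linux/bit_spinlock.h>

    static unsigned long demo_lock_word;    /* bit 0 acts as a spinlock */

    static void demo_locked_update(void)
    {
            bit_spin_lock(0, &demo_lock_word);      /* spin until bit 0 is ours */
            /* ... update the state this bit protects ... */
            bit_spin_unlock(0, &demo_lock_word);
    }
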
huge_memory.c
     63  struct page *huge_zero_page __read_mostly;
     80  static struct page *get_huge_zero_page(void)
     82          struct page *zero_page;
    116  struct page *mm_get_huge_zero_page(struct mm_struct *mm)
    147          struct page *zero_page = xchg(&huge_zero_page, NULL);    (in shrink_huge_zero_page_scan)
    500  static inline struct deferred_split *get_deferred_split_queue(struct page *page)
    502          struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
    503          struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
    511  static inline struct deferred_split *get_deferred_split_queue(struct page *page)
    513          struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
    [all …]

gup.c
     54  void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
     71                  struct page *page = compound_head(pages[index]);
     92                  if (!PageDirty(page))
     93                          set_page_dirty_lock(page);
     94                  put_user_page(page);
    108  void put_user_pages(struct page **pages, unsigned long npages)
    123  static struct page *no_page_table(struct vm_area_struct *vma,
    173  static struct page *follow_page_pte(struct vm_area_struct *vma,
    178          struct page *page;
    213          page = vm_normal_page(vma, address, pte);
    [all …]

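The typical caller-side pairing for put_user_pages_dirty_lock() is: pin with get_user_pages_fast(), let the device DMA into the pages, then release so written pages are dirtied exactly once. A hedged sketch under the 5.4-era signatures the listing shows (demo_pin_and_release is hypothetical):

    #include <linux/mm.h>

    static int demo_pin_and_release(unsigned long uaddr, int nr,
                                    struct page **pages)
    {
            int pinned = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

            if (pinned <= 0)
                    return pinned ? pinned : -EFAULT;
            /* ... device DMAs into the pinned pages here ... */
            /* make_dirty=true: mark each page dirty before dropping it */
            put_user_pages_dirty_lock(pages, pinned, true);
            return pinned;
    }
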
swap_state.c
    114  int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
    118          XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
    119          unsigned long i, nr = compound_nr(page);
    121          VM_BUG_ON_PAGE(!PageLocked(page), page);
    122          VM_BUG_ON_PAGE(PageSwapCache(page), page);
    123          VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
    125          page_ref_add(page, nr);
    126          SetPageSwapCache(page);
    134                  VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
    135                  set_page_private(page + i, entry.val + i);
    [all …]

balloon_compaction.c
     15                  struct page *page)    (in balloon_page_enqueue_one)
     23          BUG_ON(!trylock_page(page));
     24          balloon_page_insert(b_dev_info, page);
     25          unlock_page(page);
     43          struct page *page, *tmp;    (in balloon_page_list_enqueue)
     48          list_for_each_entry_safe(page, tmp, pages, lru) {
     49                  list_del(&page->lru);
     50                  balloon_page_enqueue_one(b_dev_info, page);
     79          struct page *page, *tmp;    (in balloon_page_list_dequeue)
     84          list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
    [all …]

hugetlb.c
    866  static void enqueue_huge_page(struct hstate *h, struct page *page)
    868          int nid = page_to_nid(page);
    869          list_move(&page->lru, &h->hugepage_freelists[nid]);
    874  static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
    876          struct page *page;
    878          list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
    879                  if (!PageHWPoison(page))
    885          if (&h->hugepage_freelists[nid] == &page->lru)
    887          list_move(&page->lru, &h->hugepage_activelist);
    888          set_page_refcounted(page);
    [all …]

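Line 885 is a classic list idiom: when list_for_each_entry() runs off the end, the cursor's embedded list_head points back at the list head, which is how "no suitable page" is detected. A self-contained sketch with a hypothetical item type:

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_item {
            struct list_head lru;
            bool usable;
    };

    static struct demo_item *demo_dequeue(struct list_head *head)
    {
            struct demo_item *item;

            list_for_each_entry(item, head, lru)
                    if (item->usable)
                            break;          /* found one */
            if (&item->lru == head)         /* loop completed: no match */
                    return NULL;
            list_del_init(&item->lru);      /* take it off the free list */
            return item;
    }
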
page_idle.c
     31  static struct page *page_idle_get_page(unsigned long pfn)
     33          struct page *page;
     39          page = pfn_to_page(pfn);
     40          if (!page || !PageLRU(page) ||
     41              !get_page_unless_zero(page))
     44          pgdat = page_pgdat(page);
     46          if (unlikely(!PageLRU(page))) {
     47                  put_page(page);
     48                  page = NULL;
     51          return page;
    [all …]

z3fold.c
    298  static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
    301          struct z3fold_header *zhdr = page_address(page);
    304          INIT_LIST_HEAD(&page->lru);
    305          clear_bit(PAGE_HEADLESS, &page->private);
    306          clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
    307          clear_bit(NEEDS_COMPACTING, &page->private);
    308          clear_bit(PAGE_STALE, &page->private);
    309          clear_bit(PAGE_CLAIMED, &page->private);
    333  static void free_z3fold_page(struct page *page, bool headless)
    336                  lock_page(page);
    [all …]

vmscan.c
    140          struct page *prev;                                      \
    154          struct page *prev;                                      \
    766  static inline int is_page_cache_freeable(struct page *page)
    773          int page_cache_pins = PageTransHuge(page) && PageSwapCache(page) ?
    775          return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
    802                  struct page *page, int error)    (in handle_write_error)
    804          lock_page(page);
    805          if (page_mapping(page) == mapping)
    807          unlock_page(page);
    826  static pageout_t pageout(struct page *page, struct address_space *mapping,
    [all …]

shmem.c
    139  static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
    140  static int shmem_replace_page(struct page **pagep, gfp_t gfp,
    143                  struct page **pagep, enum sgp_type sgp,
    147                  struct page **pagep, enum sgp_type sgp,
    152                  struct page **pagep, enum sgp_type sgp)    (in shmem_getpage)
    467          struct page *page;    (in shmem_unused_huge_shrink)
    519                  page = find_get_page(inode->i_mapping,
    521                  if (!page)
    525                  if (!PageTransHuge(page)) {
    526                          put_page(page);
    [all …]

compaction.c
     55          struct page *page, *next;    (in release_freepages)
     58          list_for_each_entry_safe(page, next, freelist, lru) {
     59                  unsigned long pfn = page_to_pfn(page);
     60                  list_del(&page->lru);
     61                  __free_page(page);
     72          struct page *page, *next;    (in split_map_pages)
     75          list_for_each_entry_safe(page, next, list, lru) {
     76                  list_del(&page->lru);
     78                  order = page_private(page);
     81                  post_alloc_hook(page, order, __GFP_MOVABLE);
    [all …]

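Both loops above use list_for_each_entry_safe() because each iteration deletes the current node; the _safe variant caches the next pointer before the body runs. A minimal sketch with a hypothetical node type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_node {
            struct list_head lru;
    };

    static unsigned long demo_release_all(struct list_head *list)
    {
            struct demo_node *node, *next;
            unsigned long count = 0;

            list_for_each_entry_safe(node, next, list, lru) {
                    list_del(&node->lru);   /* safe: 'next' already cached */
                    kfree(node);
                    count++;
            }
            return count;
    }
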
ksm.c
    472          struct page *page;    (in break_ksm)
    477          page = follow_page(vma, addr,
    479          if (IS_ERR_OR_NULL(page))
    481          if (PageKsm(page))
    486          put_page(page);
    552  static struct page *get_mergeable_page(struct rmap_item *rmap_item)
    557          struct page *page;
    564          page = follow_page(vma, addr, FOLL_GET);
    565          if (IS_ERR_OR_NULL(page))
    567          if (PageAnon(page)) {
    [all …]

dmapool.c
     70          struct dma_page *page;    (in show_pools)
     86                  list_for_each_entry(page, &pool->page_list, page_list) {
     88                          blocks += page->in_use;
    205  static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
    216                  *(int *)(page->vaddr + offset) = next;
    223          struct dma_page *page;    (in pool_alloc_page)
    225          page = kmalloc(sizeof(*page), mem_flags);
    226          if (!page)
    228          page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
    229                                           &page->dma, mem_flags);
    [all …]

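The consumer-side view of the pool code above: create a pool of fixed-size coherent DMA buffers, allocate one, free it, destroy the pool. A hedged sketch; "dev" is assumed to be a live struct device from a driver's probe path:

    #include <linux/dmapool.h>

    static int demo_dmapool_usage(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            /* 64-byte blocks, 8-byte aligned, no boundary constraint */
            pool = dma_pool_create("demo", dev, 64, 8, 0);
            if (!pool)
                    return -ENOMEM;
            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (vaddr) {
                    /* ... hand 'dma' to the device, CPU uses 'vaddr' ... */
                    dma_pool_free(pool, vaddr, dma);
            }
            dma_pool_destroy(pool);
            return vaddr ? 0 : -ENOMEM;
    }
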
highmem.c
     58  static inline unsigned int get_pkmap_color(struct page *page)
    150  struct page *kmap_to_page(void *vaddr)
    171          struct page *page;    (in flush_all_zero_pkmaps)
    193                  page = pte_page(pkmap_page_table[i]);
    196                  set_page_address(page, NULL);
    213  static inline unsigned long map_new_virtual(struct page *page)
    218          unsigned int color = get_pkmap_color(page);
    250          if (page_address(page))
    251                  return (unsigned long)page_address(page);
    259                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
    [all …]

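The caller-side pairing for the pkmap machinery above: kmap() gives a highmem page a temporary kernel virtual address (map_new_virtual() is its slow path) and kunmap() releases it. A minimal hedged sketch (demo_zero_page is hypothetical):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void demo_zero_page(struct page *page)
    {
            void *addr = kmap(page);        /* may sleep waiting for a slot */

            memset(addr, 0, PAGE_SIZE);
            kunmap(page);                   /* slot reclaimed lazily */
    }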