Lines matching refs:page
(Cross-reference hits for the identifier "page", apparently in mm/huge_memory.c of an Android common kernel based on v5.10; the trace_android_vh_* entries below are Android vendor hooks. Each hit shows the source line number, the matching line, and the enclosing function. Hedged sketches of the surrounding code are interleaved below for context; they are reconstructions from the mainline sources of that era, not verbatim copies of this exact tree.)
63 struct page *huge_zero_page __read_mostly;
90 static struct page *get_huge_zero_page(void) in get_huge_zero_page()
92 struct page *zero_page; in get_huge_zero_page()
127 struct page *mm_get_huge_zero_page(struct mm_struct *mm) in mm_get_huge_zero_page()
158 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
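
The three functions above manage the shared huge zero page: it is allocated lazily, handed out per-mm against a reference count, and torn down by a shrinker that detaches the global pointer with xchg(). A minimal sketch of the shrinker scan side, assuming the mainline v5.10 shape (huge_zero_refcount is the mainline name; one reference belongs to the allocator, the rest to mms):

    static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
                                                    struct shrink_control *sc)
    {
        /* Free only if no mm still holds a reference. */
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
            struct page *zero_page = xchg(&huge_zero_page, NULL);
            BUG_ON(zero_page == NULL);
            __free_pages(zero_page, compound_order(zero_page));
            return HPAGE_PMD_NR;
        }

        return 0;
    }
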
487 static inline struct deferred_split *get_deferred_split_queue(struct page *page) in get_deferred_split_queue() argument
489 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; in get_deferred_split_queue()
490 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
498 static inline struct deferred_split *get_deferred_split_queue(struct page *page) in get_deferred_split_queue() argument
500 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
506 void prep_transhuge_page(struct page *page) in prep_transhuge_page() argument
513 INIT_LIST_HEAD(page_deferred_list(page)); in prep_transhuge_page()
514 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); in prep_transhuge_page()
517 bool is_transparent_hugepage(struct page *page) in is_transparent_hugepage() argument
519 if (!PageCompound(page)) in is_transparent_hugepage()
522 page = compound_head(page); in is_transparent_hugepage()
523 return is_huge_zero_page(page) || in is_transparent_hugepage()
524 page[1].compound_dtor == TRANSHUGE_PAGE_DTOR; in is_transparent_hugepage()
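
The hits at 506-524 are nearly the whole of both helpers, so they can be reconstructed with some confidence (this matches mainline v5.10; the tree at hand may differ in detail):

    void prep_transhuge_page(struct page *page)
    {
        /* The deferred-split list_head is overlaid on a tail page. */
        INIT_LIST_HEAD(page_deferred_list(page));
        set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
    }

    bool is_transparent_hugepage(struct page *page)
    {
        if (!PageCompound(page))
            return false;

        page = compound_head(page);
        /* The huge zero page counts as a THP but has no THP destructor. */
        return is_huge_zero_page(page) ||
               page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
    }
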
582 struct page *page, gfp_t gfp) in __do_huge_pmd_anonymous_page() argument
589 VM_BUG_ON_PAGE(!PageCompound(page), page); in __do_huge_pmd_anonymous_page()
591 if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
592 put_page(page); in __do_huge_pmd_anonymous_page()
597 cgroup_throttle_swaprate(page, gfp); in __do_huge_pmd_anonymous_page()
605 clear_huge_page(page, vmf->address, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
611 __SetPageUptodate(page); in __do_huge_pmd_anonymous_page()
628 put_page(page); in __do_huge_pmd_anonymous_page()
635 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
637 page_add_new_anon_rmap(page, vma, haddr, true); in __do_huge_pmd_anonymous_page()
638 lru_cache_add_inactive_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
654 put_page(page); in __do_huge_pmd_anonymous_page()
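
The hits at 582-654 outline the anonymous THP fault slow path: charge the freshly allocated compound page to the memcg (dropping it on failure), zero it, publish PageUptodate, then install the huge PMD together with rmap and LRU state under the page-table lock. A condensed sketch of the success path, with error unwinding and the pgtable deposit omitted:

    clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
    /*
     * __SetPageUptodate() contains the barrier that orders the
     * clearing stores before the set_pmd_at() that publishes them.
     */
    __SetPageUptodate(page);

    entry = mk_huge_pmd(page, vma->vm_page_prot);
    entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    page_add_new_anon_rmap(page, vma, haddr, true);
    lru_cache_add_inactive_or_unevictable(page, vma);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
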
697 struct page *zero_page) in set_huge_zero_page()
715 struct page *page; in do_huge_pmd_anonymous_page() local
728 struct page *zero_page; in do_huge_pmd_anonymous_page()
763 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
764 if (unlikely(!page)) { in do_huge_pmd_anonymous_page()
768 prep_transhuge_page(page); in do_huge_pmd_anonymous_page()
769 return __do_huge_pmd_anonymous_page(vmf, page, gfp); in do_huge_pmd_anonymous_page()
964 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd()
969 struct page *page; in follow_devmap_pmd() local
1006 page = pfn_to_page(pfn); in follow_devmap_pmd()
1007 if (!try_grab_page(page, flags)) in follow_devmap_pmd()
1008 page = ERR_PTR(-ENOMEM); in follow_devmap_pmd()
1010 return page; in follow_devmap_pmd()
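
follow_devmap_pmd() ends in the standard GUP idiom: the caller must intend to take a reference (FOLL_GET or FOLL_PIN), the device pagemap must be pinned, and try_grab_page() may fail, turning the return value into an ERR_PTR rather than a page. Sketch of the tail, assuming the v5.10 shape (follow_devmap_pud() at 1174-1176 below is the same apart from the mask):

    /* Device pages are only returned if the caller manages the refcount. */
    if (!(flags & (FOLL_GET | FOLL_PIN)))
        return ERR_PTR(-EEXIST);

    pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
    *pgmap = get_dev_pagemap(pfn, *pgmap);
    if (!*pgmap)
        return ERR_PTR(-EFAULT);
    page = pfn_to_page(pfn);
    if (!try_grab_page(page, flags))
        page = ERR_PTR(-ENOMEM);

    return page;
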
1018 struct page *src_page; in copy_huge_pmd()
1136 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud()
1141 struct page *page; in follow_devmap_pud() local
1174 page = pfn_to_page(pfn); in follow_devmap_pud()
1175 if (!try_grab_page(page, flags)) in follow_devmap_pud()
1176 page = ERR_PTR(-ENOMEM); in follow_devmap_pud()
1178 return page; in follow_devmap_pud()
1274 struct page *page; in do_huge_pmd_wp_page() local
1290 page = pmd_page(orig_pmd); in do_huge_pmd_wp_page()
1291 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); in do_huge_pmd_wp_page()
1294 if (!trylock_page(page)) { in do_huge_pmd_wp_page()
1295 get_page(page); in do_huge_pmd_wp_page()
1297 lock_page(page); in do_huge_pmd_wp_page()
1301 unlock_page(page); in do_huge_pmd_wp_page()
1302 put_page(page); in do_huge_pmd_wp_page()
1305 put_page(page); in do_huge_pmd_wp_page()
1312 if (reuse_swap_page(page, NULL)) { in do_huge_pmd_wp_page()
1318 unlock_page(page); in do_huge_pmd_wp_page()
1323 unlock_page(page); in do_huge_pmd_wp_page()
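
The 1294-1305 hits are the classic trylock-or-retry dance: lock_page() can sleep, so on trylock failure the page-table lock is dropped, the page locked the slow way under an extra reference, and the PMD revalidated with pmd_same() before continuing. Sketch (v5.10 shape; the same dance reappears in __split_huge_pmd() at 2259-2271 below):

    if (!trylock_page(page)) {
        get_page(page);
        spin_unlock(vmf->ptl);
        lock_page(page);
        spin_lock(vmf->ptl);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
            /* Lost a race: the PMD changed while we slept. */
            spin_unlock(vmf->ptl);
            unlock_page(page);
            put_page(page);
            return 0;
        }
        put_page(page);
    }
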
1340 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd()
1346 struct page *page = NULL; in follow_trans_huge_pmd() local
1361 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1362 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); in follow_trans_huge_pmd()
1364 if (!try_grab_page(page, flags)) in follow_trans_huge_pmd()
1392 if (PageAnon(page) && compound_mapcount(page) != 1) in follow_trans_huge_pmd()
1394 if (PageDoubleMap(page) || !page->mapping) in follow_trans_huge_pmd()
1396 if (!trylock_page(page)) in follow_trans_huge_pmd()
1398 if (page->mapping && !PageDoubleMap(page)) in follow_trans_huge_pmd()
1399 mlock_vma_page(page); in follow_trans_huge_pmd()
1400 unlock_page(page); in follow_trans_huge_pmd()
1403 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; in follow_trans_huge_pmd()
1404 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); in follow_trans_huge_pmd()
1407 return page; in follow_trans_huge_pmd()
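
The 1392-1404 cluster is FOLL_MLOCK handling: a PMD-mapped THP is only mlocked when it is mapped exactly once and not simultaneously PTE-mapped (PageDoubleMap), so mlocked pages cannot leak into non-VM_LOCKED VMAs; the closing pointer arithmetic then selects the subpage for the exact faulting address. Condensed sketch:

    if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
        /* Never mlock shared or pte-mapped THPs. */
        if (PageAnon(page) && compound_mapcount(page) != 1)
            goto skip_mlock;
        if (PageDoubleMap(page) || !page->mapping)
            goto skip_mlock;
        if (!trylock_page(page))
            goto skip_mlock;
        /* Recheck under the page lock: the mapping may be gone. */
        if (page->mapping && !PageDoubleMap(page))
            mlock_vma_page(page);
        unlock_page(page);
    }
    skip_mlock:
    page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
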
1415 struct page *page; in do_huge_pmd_numa_page() local
1434 page = pmd_page(*vmf->pmd); in do_huge_pmd_numa_page()
1435 if (!get_page_unless_zero(page)) in do_huge_pmd_numa_page()
1438 put_and_wait_on_page_locked(page); in do_huge_pmd_numa_page()
1442 page = pmd_page(pmd); in do_huge_pmd_numa_page()
1443 BUG_ON(is_huge_zero_page(page)); in do_huge_pmd_numa_page()
1444 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
1445 last_cpupid = page_cpupid_last(page); in do_huge_pmd_numa_page()
1460 page_locked = trylock_page(page); in do_huge_pmd_numa_page()
1461 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1471 if (!get_page_unless_zero(page)) in do_huge_pmd_numa_page()
1474 put_and_wait_on_page_locked(page); in do_huge_pmd_numa_page()
1482 get_page(page); in do_huge_pmd_numa_page()
1484 anon_vma = page_lock_anon_vma_read(page, NULL); in do_huge_pmd_numa_page()
1489 unlock_page(page); in do_huge_pmd_numa_page()
1490 put_page(page); in do_huge_pmd_numa_page()
1497 put_page(page); in do_huge_pmd_numa_page()
1535 vmf->pmd, pmd, vmf->address, page, target_nid); in do_huge_pmd_numa_page()
1544 BUG_ON(!PageLocked(page)); in do_huge_pmd_numa_page()
1552 unlock_page(page); in do_huge_pmd_numa_page()
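
Throughout do_huge_pmd_numa_page() the page is pinned with get_page_unless_zero() before any sleeping wait, and put_and_wait_on_page_locked() both waits for an in-flight migration and drops that pin; the pattern appears twice above (1434-1438 and 1471-1474). Hedged sketch of the first instance:

    /* Wait out a concurrent migration without touching NUMA state. */
    if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
        page = pmd_page(*vmf->pmd);
        if (!get_page_unless_zero(page))
            goto out_unlock;
        spin_unlock(vmf->ptl);
        put_and_wait_on_page_locked(page);
        goto out;
    }
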
1576 struct page *page; in madvise_free_huge_pmd() local
1596 page = pmd_page(orig_pmd); in madvise_free_huge_pmd()
1601 if (total_mapcount(page) != 1) in madvise_free_huge_pmd()
1604 if (!trylock_page(page)) in madvise_free_huge_pmd()
1612 get_page(page); in madvise_free_huge_pmd()
1614 split_huge_page(page); in madvise_free_huge_pmd()
1615 unlock_page(page); in madvise_free_huge_pmd()
1616 put_page(page); in madvise_free_huge_pmd()
1620 if (PageDirty(page)) in madvise_free_huge_pmd()
1621 ClearPageDirty(page); in madvise_free_huge_pmd()
1622 unlock_page(page); in madvise_free_huge_pmd()
1633 mark_page_lazyfree(page); in madvise_free_huge_pmd()
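
The MADV_FREE path at 1596-1633 only lazy-frees a THP that is mapped exactly once; a partial-range request splits the page instead, and a full-range one clears PageDirty so reclaim may discard the contents, then marks the page lazyfree. Condensed sketch (label and variable names as in mainline v5.10):

    page = pmd_page(orig_pmd);
    /* A THP shared with another mapping cannot be discarded here. */
    if (total_mapcount(page) != 1)
        goto out;

    if (!trylock_page(page))
        goto out;

    /* Freeing only part of the range requires splitting first. */
    if (next - addr != HPAGE_PMD_SIZE) {
        get_page(page);
        spin_unlock(ptl);
        split_huge_page(page);
        unlock_page(page);
        put_page(page);
        goto out_unlocked;
    }

    if (PageDirty(page))
        ClearPageDirty(page);
    unlock_page(page);
    /* ... the PMD's dirty/young bits are cleared here ... */
    mark_page_lazyfree(page);
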
1681 struct page *page = NULL; in zap_huge_pmd() local
1685 page = pmd_page(orig_pmd); in zap_huge_pmd()
1686 page_remove_rmap(page, true); in zap_huge_pmd()
1687 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); in zap_huge_pmd()
1688 VM_BUG_ON_PAGE(!PageHead(page), page); in zap_huge_pmd()
1694 page = migration_entry_to_page(entry); in zap_huge_pmd()
1699 if (PageAnon(page)) { in zap_huge_pmd()
1705 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); in zap_huge_pmd()
1710 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); in zap_huge_pmd()
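
In zap_huge_pmd() the page comes either from a present PMD (pmd_page()) or from a PMD migration entry; either way the RSS counter drops by a full HPAGE_PMD_NR, and only a present page needs its rmap removed and a TLB flush. Sketch of the accounting tail (v5.10 shape):

    if (PageAnon(page)) {
        zap_deposited_table(tlb->mm, pmd);
        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
    } else {
        if (arch_needs_pgtable_deposit())
            zap_deposited_table(tlb->mm, pmd);
        add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
    }

    spin_unlock(ptl);
    if (flush_needed)
        tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
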
2032 struct page *page; in __split_huge_pmd_locked() local
2062 page = migration_entry_to_page(entry); in __split_huge_pmd_locked()
2064 page = pmd_page(old_pmd); in __split_huge_pmd_locked()
2065 if (!PageDirty(page) && pmd_dirty(old_pmd)) in __split_huge_pmd_locked()
2066 set_page_dirty(page); in __split_huge_pmd_locked()
2067 if (!PageReferenced(page) && pmd_young(old_pmd)) in __split_huge_pmd_locked()
2068 SetPageReferenced(page); in __split_huge_pmd_locked()
2069 page_remove_rmap(page, true); in __split_huge_pmd_locked()
2070 put_page(page); in __split_huge_pmd_locked()
2072 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); in __split_huge_pmd_locked()
2116 page = migration_entry_to_page(entry); in __split_huge_pmd_locked()
2122 page = pmd_page(old_pmd); in __split_huge_pmd_locked()
2124 SetPageDirty(page); in __split_huge_pmd_locked()
2130 VM_BUG_ON_PAGE(!page_count(page), page); in __split_huge_pmd_locked()
2131 page_ref_add(page, HPAGE_PMD_NR - 1); in __split_huge_pmd_locked()
2149 swp_entry = make_migration_entry(page + i, write); in __split_huge_pmd_locked()
2156 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2171 trace_android_vh_update_page_mapcount(&page[i], true, in __split_huge_pmd_locked()
2174 atomic_inc(&page[i]._mapcount); in __split_huge_pmd_locked()
2184 if (compound_mapcount(page) > 1 && in __split_huge_pmd_locked()
2185 !TestSetPageDoubleMap(page)) { in __split_huge_pmd_locked()
2187 trace_android_vh_update_page_mapcount(&page[i], true, in __split_huge_pmd_locked()
2190 atomic_inc(&page[i]._mapcount); in __split_huge_pmd_locked()
2194 lock_page_memcg(page); in __split_huge_pmd_locked()
2195 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { in __split_huge_pmd_locked()
2197 __dec_lruvec_page_state(page, NR_ANON_THPS); in __split_huge_pmd_locked()
2198 if (TestClearPageDoubleMap(page)) { in __split_huge_pmd_locked()
2201 trace_android_vh_update_page_mapcount(&page[i], in __split_huge_pmd_locked()
2204 atomic_dec(&page[i]._mapcount); in __split_huge_pmd_locked()
2208 unlock_page_memcg(page); in __split_huge_pmd_locked()
2216 page_remove_rmap(page + i, false); in __split_huge_pmd_locked()
2217 put_page(page + i); in __split_huge_pmd_locked()
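
Remapping a PMD-mapped THP with PTEs (2130-2217) first takes HPAGE_PMD_NR - 1 extra page references, one per new PTE beyond the one the PMD already held, then installs either migration entries (when freezing for a page split) or ordinary PTEs while raising each subpage's _mapcount; afterwards the compound mapcount is dropped and, if the page was DoubleMap, the inflated subpage counts are corrected again (2194-2208). In this tree each mapcount update is additionally wrapped by the trace_android_vh_update_page_mapcount vendor hook. Condensed sketch of the installation loop (soft-dirty and uffd-wp propagation omitted; write, young, _pmd and friends come from earlier in the real function):

    page_ref_add(page, HPAGE_PMD_NR - 1);

    for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
        pte_t entry, *pte;

        if (freeze || pmd_migration) {
            swp_entry_t swp_entry;

            swp_entry = make_migration_entry(page + i, write);
            entry = swp_entry_to_pte(swp_entry);
        } else {
            entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
            entry = maybe_mkwrite(entry, vma);
            if (!write)
                entry = pte_wrprotect(entry);
            if (!young)
                entry = pte_mkold(entry);
        }
        pte = pte_offset_map(&_pmd, addr);
        BUG_ON(!pte_none(*pte));
        set_pte_at(mm, addr, pte, entry);
        if (!pmd_migration)
            atomic_inc(&page[i]._mapcount);
        pte_unmap(pte);
    }
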
2223 unsigned long address, bool freeze, struct page *page) in __split_huge_pmd() argument
2240 VM_BUG_ON(freeze && !page); in __split_huge_pmd()
2241 if (page) { in __split_huge_pmd()
2242 VM_WARN_ON_ONCE(!PageLocked(page)); in __split_huge_pmd()
2243 if (page != pmd_page(*pmd)) in __split_huge_pmd()
2249 if (!page) { in __split_huge_pmd()
2250 page = pmd_page(*pmd); in __split_huge_pmd()
2258 if (PageAnon(page)) { in __split_huge_pmd()
2259 if (unlikely(!trylock_page(page))) { in __split_huge_pmd()
2260 get_page(page); in __split_huge_pmd()
2263 lock_page(page); in __split_huge_pmd()
2266 unlock_page(page); in __split_huge_pmd()
2267 put_page(page); in __split_huge_pmd()
2268 page = NULL; in __split_huge_pmd()
2271 put_page(page); in __split_huge_pmd()
2276 if (PageMlocked(page)) in __split_huge_pmd()
2277 clear_page_mlock(page); in __split_huge_pmd()
2284 unlock_page(page); in __split_huge_pmd()
2302 bool freeze, struct page *page) in split_huge_pmd_address() argument
2323 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
2367 static void unmap_page(struct page *page) in unmap_page() argument
2372 VM_BUG_ON_PAGE(!PageHead(page), page); in unmap_page()
2374 if (PageAnon(page)) in unmap_page()
2377 try_to_unmap(page, ttu_flags); in unmap_page()
2379 VM_WARN_ON_ONCE_PAGE(page_mapped(page), page); in unmap_page()
2382 static void remap_page(struct page *page, unsigned int nr) in remap_page() argument
2385 if (PageTransHuge(page)) { in remap_page()
2386 remove_migration_ptes(page, page, true); in remap_page()
2389 remove_migration_ptes(page + i, page + i, true); in remap_page()
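
Both halves of the split protocol are short; the TTU flags moved around across stable releases, so treat this as a sketch of the v5.10-era shape rather than this tree's exact text:

    static void unmap_page(struct page *page)
    {
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_SYNC |
            TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;

        VM_BUG_ON_PAGE(!PageHead(page), page);

        /* Anon mappings are frozen behind migration entries. */
        if (PageAnon(page))
            ttu_flags |= TTU_SPLIT_FREEZE;

        try_to_unmap(page, ttu_flags);

        VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
    }

    static void remap_page(struct page *page, unsigned int nr)
    {
        int i;

        /* If the page is still compound, the split never happened. */
        if (PageTransHuge(page)) {
            remove_migration_ptes(page, page, true);
        } else {
            for (i = 0; i < nr; i++)
                remove_migration_ptes(page + i, page + i, true);
        }
    }
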
2393 static void __split_huge_page_tail(struct page *head, int tail, in __split_huge_page_tail()
2396 struct page *page_tail = head + tail; in __split_huge_page_tail()
2458 static void __split_huge_page(struct page *page, struct list_head *list, in __split_huge_page() argument
2461 struct page *head = compound_head(page); in __split_huge_page()
2491 } else if (!PageAnon(page)) { in __split_huge_page()
2530 struct page *subpage = head + i; in __split_huge_page()
2531 if (subpage == page) in __split_huge_page()
2546 int total_mapcount(struct page *page) in total_mapcount() argument
2550 VM_BUG_ON_PAGE(PageTail(page), page); in total_mapcount()
2552 if (likely(!PageCompound(page))) in total_mapcount()
2553 return atomic_read(&page->_mapcount) + 1; in total_mapcount()
2555 compound = compound_mapcount(page); in total_mapcount()
2556 nr = compound_nr(page); in total_mapcount()
2557 if (PageHuge(page)) in total_mapcount()
2561 ret += atomic_read(&page[i]._mapcount) + 1; in total_mapcount()
2563 if (!PageAnon(page)) in total_mapcount()
2565 if (PageDoubleMap(page)) in total_mapcount()
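
The 2546-2565 hits are most of total_mapcount(). Two subtleties drive the arithmetic: file THPs carry the compound mapcount duplicated into each subpage's _mapcount, and PageDoubleMap means every subpage count is inflated by one. Reconstruction matching mainline v5.10 (page_trans_huge_mapcount() just below plays a similar game, reporting the maximum per-subpage count instead of the sum):

    int total_mapcount(struct page *page)
    {
        int i, compound, nr, ret;

        VM_BUG_ON_PAGE(PageTail(page), page);

        if (likely(!PageCompound(page)))
            return atomic_read(&page->_mapcount) + 1;

        compound = compound_mapcount(page);
        nr = compound_nr(page);
        if (PageHuge(page))
            return compound;
        ret = compound;
        for (i = 0; i < nr; i++)
            ret += atomic_read(&page[i]._mapcount) + 1;
        /* File pages have compound_mapcount included in _mapcount. */
        if (!PageAnon(page))
            return ret - compound * nr;
        if (PageDoubleMap(page))
            ret -= nr;
        return ret;
    }
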
2594 int page_trans_huge_mapcount(struct page *page, int *total_mapcount) in page_trans_huge_mapcount() argument
2599 VM_BUG_ON_PAGE(PageHuge(page), page); in page_trans_huge_mapcount()
2601 if (likely(!PageTransCompound(page))) { in page_trans_huge_mapcount()
2602 mapcount = atomic_read(&page->_mapcount) + 1; in page_trans_huge_mapcount()
2608 page = compound_head(page); in page_trans_huge_mapcount()
2611 for (i = 0; i < thp_nr_pages(page); i++) { in page_trans_huge_mapcount()
2612 mapcount = atomic_read(&page[i]._mapcount) + 1; in page_trans_huge_mapcount()
2616 if (PageDoubleMap(page)) { in page_trans_huge_mapcount()
2618 _total_mapcount -= thp_nr_pages(page); in page_trans_huge_mapcount()
2620 mapcount = compound_mapcount(page); in page_trans_huge_mapcount()
2629 bool can_split_huge_page(struct page *page, int *pextra_pins) in can_split_huge_page() argument
2634 if (PageAnon(page)) in can_split_huge_page()
2635 extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0; in can_split_huge_page()
2637 extra_pins = thp_nr_pages(page); in can_split_huge_page()
2640 return total_mapcount(page) == page_count(page) - extra_pins - 1; in can_split_huge_page()
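
The split precondition is a pure refcount identity: every mapping contributes one reference to page_count(), so after subtracting the expected extra pins (swap-cache entries for anon pages, page-cache references for file pages) and the caller's own reference, the remainder must equal total_mapcount(), otherwise someone else holds a pin. Reconstruction (v5.10 shape):

    bool can_split_huge_page(struct page *page, int *pextra_pins)
    {
        int extra_pins;

        /* Additional pins from the page cache or swap cache. */
        if (PageAnon(page))
            extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0;
        else
            extra_pins = thp_nr_pages(page);
        if (pextra_pins)
            *pextra_pins = extra_pins;
        return total_mapcount(page) == page_count(page) - extra_pins - 1;
    }
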
2662 int split_huge_page_to_list(struct page *page, struct list_head *list) in split_huge_page_to_list() argument
2664 struct page *head = compound_head(page); in split_huge_page_to_list()
2760 __split_huge_page(page, list, end, flags); in split_huge_page_to_list()
2784 void free_transhuge_page(struct page *page) in free_transhuge_page() argument
2786 struct deferred_split *ds_queue = get_deferred_split_queue(page); in free_transhuge_page()
2790 if (!list_empty(page_deferred_list(page))) { in free_transhuge_page()
2792 list_del(page_deferred_list(page)); in free_transhuge_page()
2795 free_compound_page(page); in free_transhuge_page()
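
The destructor that prep_transhuge_page() installed has one duty beyond freeing: unlink the page from its deferred-split queue under the queue lock, keeping split_queue_len in step. Reconstruction (v5.10 shape):

    void free_transhuge_page(struct page *page)
    {
        struct deferred_split *ds_queue = get_deferred_split_queue(page);
        unsigned long flags;

        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (!list_empty(page_deferred_list(page))) {
            ds_queue->split_queue_len--;
            list_del(page_deferred_list(page));
        }
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
        free_compound_page(page);
    }
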
2798 void deferred_split_huge_page(struct page *page) in deferred_split_huge_page() argument
2800 struct deferred_split *ds_queue = get_deferred_split_queue(page); in deferred_split_huge_page()
2802 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; in deferred_split_huge_page()
2806 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in deferred_split_huge_page()
2818 if (PageSwapCache(page)) in deferred_split_huge_page()
2821 if (!list_empty(page_deferred_list(page))) in deferred_split_huge_page()
2825 if (list_empty(page_deferred_list(page))) { in deferred_split_huge_page()
2827 list_add_tail(page_deferred_list(page), &ds_queue->split_queue); in deferred_split_huge_page()
2831 memcg_set_shrinker_bit(memcg, page_to_nid(page), in deferred_split_huge_page()
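
Note the double check at 2821 and 2825: an unlocked list_empty() fast path skips already-queued pages, and the test is repeated under split_queue_lock before the page is actually queued; the PageSwapCache() bail-out at 2818 avoids racing with reclaim, which reaches this path via try_to_unmap() after adding the page to the swap cache. Hedged sketch of the queueing step:

    spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
    if (list_empty(page_deferred_list(page))) {
        count_vm_event(THP_DEFERRED_SPLIT_PAGE);
        list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
        ds_queue->split_queue_len++;
        /* Let the memcg-aware shrinker know this queue has work. */
        if (memcg)
            memcg_set_shrinker_bit(memcg, page_to_nid(page),
                                   deferred_split_shrinker.id);
    }
    spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
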
2858 struct page *page; in deferred_split_scan() local
2869 page = list_entry((void *)pos, struct page, mapping); in deferred_split_scan()
2870 page = compound_head(page); in deferred_split_scan()
2871 if (get_page_unless_zero(page)) { in deferred_split_scan()
2872 list_move(page_deferred_list(page), &list); in deferred_split_scan()
2875 list_del_init(page_deferred_list(page)); in deferred_split_scan()
2884 page = list_entry((void *)pos, struct page, mapping); in deferred_split_scan()
2885 if (!trylock_page(page)) in deferred_split_scan()
2888 if (!split_huge_page(page)) in deferred_split_scan()
2890 unlock_page(page); in deferred_split_scan()
2892 put_page(page); in deferred_split_scan()
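
The odd-looking list_entry((void *)pos, struct page, mapping) works because page_deferred_list() overlays a list_head on the ->mapping/->index words of a tail page, so the cast lands on that tail page and compound_head() (line 2870) recovers the head. The scan pins every entry with get_page_unless_zero() under the queue lock, then splits what it can without it. Sketch of the second loop (v5.10 shape):

    list_for_each_safe(pos, next, &list) {
        page = list_entry((void *)pos, struct page, mapping);
        if (!trylock_page(page))
            goto next;
        /* split_huge_page() removes the page from the list on success. */
        if (!split_huge_page(page))
            split++;
        unlock_page(page);
    next:
        put_page(page);
    }
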
2920 struct page *page; in split_huge_pages_set() local
2933 page = pfn_to_page(pfn); in split_huge_pages_set()
2934 if (!get_page_unless_zero(page)) in split_huge_pages_set()
2937 if (zone != page_zone(page)) in split_huge_pages_set()
2940 if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) in split_huge_pages_set()
2944 lock_page(page); in split_huge_pages_set()
2945 if (!split_huge_page(page)) in split_huge_pages_set()
2947 unlock_page(page); in split_huge_pages_set()
2949 put_page(page); in split_huge_pages_set()
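
The debugfs knob behind these hits walks every valid PFN, keeps only pinned LRU head pages of the right zone, and tries to split each one; the get_page_unless_zero()/put_page() pair brackets every candidate so nothing is freed mid-inspection. Condensed sketch of the inner loop (v5.10 shape):

    for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
        if (!pfn_valid(pfn))
            continue;

        page = pfn_to_page(pfn);
        if (!get_page_unless_zero(page))
            continue;

        /* The page may have been freed and reused meanwhile. */
        if (zone != page_zone(page))
            goto next;

        if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
            goto next;

        total++;
        lock_page(page);
        if (!split_huge_page(page))
            split++;
        unlock_page(page);
    next:
        put_page(page);
    }
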
2971 struct page *page) in set_pmd_migration_entry() argument
2986 set_page_dirty(page); in set_pmd_migration_entry()
2987 entry = make_migration_entry(page, pmd_write(pmdval)); in set_pmd_migration_entry()
2992 page_remove_rmap(page, true); in set_pmd_migration_entry()
2993 put_page(page); in set_pmd_migration_entry()
2996 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) in remove_migration_pmd()
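
Closing out the file, PMD migration: set_pmd_migration_entry() tears the mapping down, carrying the hardware dirty bit over to the page before the PMD disappears, and remove_migration_pmd() rebuilds a huge PMD on the destination page. A condensed, hedged sketch of the teardown (soft-dirty propagation and the pvmw guards omitted; exact helpers may differ in this tree):

    flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
    pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
    /* The PMD is gone; preserve its dirty state on the page. */
    if (pmd_dirty(pmdval))
        set_page_dirty(page);
    entry = make_migration_entry(page, pmd_write(pmdval));
    pmdswp = swp_entry_to_pmd(entry);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    page_remove_rmap(page, true);
    put_page(page);
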