Lines matching references to `page` in mm/hugetlb.c. Each entry shows the source line number, the matching line, and the enclosing function; the trailing `argument` and `local` notes mark how the identifier is used at that line.
84 static inline bool PageHugeFreed(struct page *head) in PageHugeFreed()
89 static inline void SetPageHugeFreed(struct page *head) in SetPageHugeFreed()
94 static inline void ClearPageHugeFreed(struct page *head) in ClearPageHugeFreed()
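The three helpers above (lines 84-94) track whether a huge page is sitting on a free list: the flag is set when the page is enqueued (line 1076 below) and cleared when it is dequeued (line 1093) or freshly prepared (line 1555). The flag lives in an otherwise-unused word of one of the pages behind the head. A minimal user-space model of the trick follows; the subpage index and sentinel value here are illustrative, not the kernel's exact encoding:

```c
#include <stdbool.h>

/* Toy stand-in for struct page, keeping only the word the trick reuses. */
struct toy_page {
	unsigned long private_data;	/* unused on this subpage */
};

#define FREED_SENTINEL	(~0UL)

/* 'head' must be followed in memory by the compound page's subpages,
 * just as struct pages are contiguous in the kernel's memmap. */
static inline bool toy_page_huge_freed(struct toy_page *head)
{
	return head[4].private_data == FREED_SENTINEL;	/* index 4 is illustrative */
}

static inline void toy_set_page_huge_freed(struct toy_page *head)
{
	head[4].private_data = FREED_SENTINEL;
}

static inline void toy_clear_page_huge_freed(struct toy_page *head)
{
	head[4].private_data = 0;
}
```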
1070 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
1072 int nid = page_to_nid(page); in enqueue_huge_page()
1073 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1076 SetPageHugeFreed(page); in enqueue_huge_page()
1079 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact()
1081 struct page *page; in dequeue_huge_page_node_exact() local
1084 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
1085 if (nocma && is_migrate_cma_page(page)) in dequeue_huge_page_node_exact()
1088 if (PageHWPoison(page)) in dequeue_huge_page_node_exact()
1091 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
1092 set_page_refcounted(page); in dequeue_huge_page_node_exact()
1093 ClearPageHugeFreed(page); in dequeue_huge_page_node_exact()
1096 return page; in dequeue_huge_page_node_exact()
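enqueue_huge_page() (lines 1070-1076) moves a freed page onto its node's free list and sets the freed flag; dequeue_huge_page_node_exact() (lines 1079-1096) walks that list, skips CMA-backed pages when CMA use is forbidden and hardware-poisoned pages always, then moves the first usable page to the active list, restores its refcount, and clears the freed flag. A compact user-space sketch of the skip-and-unlink discipline, with a singly linked list standing in for the kernel's list_head:

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_page {
	struct toy_page *next;	/* free-list linkage (stands in for page->lru) */
	bool is_cma;		/* stands in for is_migrate_cma_page() */
	bool hwpoison;		/* stands in for PageHWPoison() */
};

struct toy_hstate {
	struct toy_page *freelist[2];	/* one list per NUMA node (two here) */
	unsigned long free_pages[2];
};

/* Mirrors dequeue_huge_page_node_exact(): return the first usable page
 * on the node's free list, or NULL if every entry must be skipped. */
static struct toy_page *toy_dequeue_node_exact(struct toy_hstate *h, int nid,
					       bool nocma)
{
	struct toy_page **pp;

	for (pp = &h->freelist[nid]; *pp; pp = &(*pp)->next) {
		struct toy_page *page = *pp;

		if (nocma && page->is_cma)
			continue;
		if (page->hwpoison)
			continue;

		*pp = page->next;	/* unlink: the list_move() to the active list */
		h->free_pages[nid]--;
		return page;
	}
	return NULL;
}
```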
1102 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask()
1116 struct page *page; in dequeue_huge_page_nodemask() local
1128 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
1129 if (page) in dequeue_huge_page_nodemask()
1130 return page; in dequeue_huge_page_nodemask()
1138 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma()
1143 struct page *page; in dequeue_huge_page_vma() local
1164 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1165 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) { in dequeue_huge_page_vma()
1166 SetPagePrivate(page); in dequeue_huge_page_vma()
1171 return page; in dequeue_huge_page_vma()
1249 static void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
1254 struct page *p = page + 1; in destroy_compound_gigantic_page()
1256 atomic_set(compound_mapcount_ptr(page), 0); in destroy_compound_gigantic_page()
1257 atomic_set(compound_pincount_ptr(page), 0); in destroy_compound_gigantic_page()
1259 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in destroy_compound_gigantic_page()
1264 set_compound_order(page, 0); in destroy_compound_gigantic_page()
1265 page[1].compound_nr = 0; in destroy_compound_gigantic_page()
1266 __ClearPageHead(page); in destroy_compound_gigantic_page()
1269 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1276 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) in free_gigantic_page()
1280 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1284 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1293 struct page *page; in alloc_gigantic_page() local
1297 page = cma_alloc(hugetlb_cma[nid], nr_pages, in alloc_gigantic_page()
1300 if (page) in alloc_gigantic_page()
1301 return page; in alloc_gigantic_page()
1309 page = cma_alloc(hugetlb_cma[node], nr_pages, in alloc_gigantic_page()
1312 if (page) in alloc_gigantic_page()
1313 return page; in alloc_gigantic_page()
1323 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1331 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1336 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1337 static inline void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
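With CONFIG_CONTIG_ALLOC, alloc_gigantic_page() (lines 1284-1313) tries the preferred node's hugetlb_cma area first, then the CMA areas of the other allowed nodes, before falling back to a generic contiguous allocation; the stubs at lines 1323-1337 cover configurations without gigantic-page support. A sketch of that preference order, assuming a hypothetical per-node pool allocator in place of cma_alloc():

```c
#include <stdbool.h>
#include <stddef.h>

#define TOY_MAX_NODES 4

/* Hypothetical stand-in for cma_alloc() on hugetlb_cma[node]; it always
 * fails here only to keep the sketch self-contained. */
static void *toy_cma_alloc(int node, size_t nr_pages)
{
	(void)node;
	(void)nr_pages;
	return NULL;
}

/* Mirrors the node-preference order in alloc_gigantic_page(). */
static void *toy_alloc_gigantic(int preferred, const bool *allowed,
				size_t nr_pages)
{
	void *p;
	int node;

	if (allowed[preferred]) {
		p = toy_cma_alloc(preferred, nr_pages);
		if (p)
			return p;
	}
	for (node = 0; node < TOY_MAX_NODES; node++) {
		if (node == preferred || !allowed[node])
			continue;
		p = toy_cma_alloc(node, nr_pages);
		if (p)
			return p;
	}
	return NULL;	/* the real code falls back to a generic contiguous allocation */
}
```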
1341 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1344 struct page *subpage = page; in update_and_free_page()
1350 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1352 i++, subpage = mem_map_next(subpage, page, i)) { in update_and_free_page()
1358 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); in update_and_free_page()
1359 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page); in update_and_free_page()
1360 set_compound_page_dtor(page, NULL_COMPOUND_DTOR); in update_and_free_page()
1361 set_page_refcounted(page); in update_and_free_page()
1368 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1369 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1372 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1393 bool page_huge_active(struct page *page) in page_huge_active() argument
1395 return PageHeadHuge(page) && PagePrivate(&page[1]); in page_huge_active()
1399 void set_page_huge_active(struct page *page) in set_page_huge_active() argument
1401 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in set_page_huge_active()
1402 SetPagePrivate(&page[1]); in set_page_huge_active()
1405 static void clear_page_huge_active(struct page *page) in clear_page_huge_active() argument
1407 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in clear_page_huge_active()
1408 ClearPagePrivate(&page[1]); in clear_page_huge_active()
1415 static inline bool PageHugeTemporary(struct page *page) in PageHugeTemporary() argument
1417 if (!PageHuge(page)) in PageHugeTemporary()
1420 return (unsigned long)page[2].mapping == -1U; in PageHugeTemporary()
1423 static inline void SetPageHugeTemporary(struct page *page) in SetPageHugeTemporary() argument
1425 page[2].mapping = (void *)-1U; in SetPageHugeTemporary()
1428 static inline void ClearPageHugeTemporary(struct page *page) in ClearPageHugeTemporary() argument
1430 page[2].mapping = NULL; in ClearPageHugeTemporary()
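PageHugeTemporary (lines 1415-1430) marks huge pages allocated outside the normal pool accounting (for example, migration targets); rather than consuming a real page flag, it stores a sentinel in the unused mapping pointer of the second subpage. Note that both the store (line 1425) and the test (line 1420) use -1U, a 32-bit all-ones constant, so the two sides agree. The same trick in a self-contained sketch:

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_page {
	void *mapping;	/* unused on this subpage, so it can carry a flag */
};

/* The sentinel mirrors the kernel's (void *)-1U: store and test must
 * use the same constant for the comparison to be meaningful. */
#define TEMPORARY_SENTINEL	((void *)(unsigned long)-1U)

static inline bool toy_page_temporary(struct toy_page *head)
{
	return head[2].mapping == TEMPORARY_SENTINEL;
}

static inline void toy_set_page_temporary(struct toy_page *head)
{
	head[2].mapping = TEMPORARY_SENTINEL;
}

static inline void toy_clear_page_temporary(struct toy_page *head)
{
	head[2].mapping = NULL;
}
```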
1433 static void __free_huge_page(struct page *page) in __free_huge_page() argument
1439 struct hstate *h = page_hstate(page); in __free_huge_page()
1440 int nid = page_to_nid(page); in __free_huge_page()
1442 (struct hugepage_subpool *)page_private(page); in __free_huge_page()
1445 VM_BUG_ON_PAGE(page_count(page), page); in __free_huge_page()
1446 VM_BUG_ON_PAGE(page_mapcount(page), page); in __free_huge_page()
1448 set_page_private(page, 0); in __free_huge_page()
1449 page->mapping = NULL; in __free_huge_page()
1450 restore_reserve = PagePrivate(page); in __free_huge_page()
1451 ClearPagePrivate(page); in __free_huge_page()
1473 clear_page_huge_active(page); in __free_huge_page()
1475 pages_per_huge_page(h), page); in __free_huge_page()
1477 pages_per_huge_page(h), page); in __free_huge_page()
1481 if (PageHugeTemporary(page)) { in __free_huge_page()
1482 list_del(&page->lru); in __free_huge_page()
1483 ClearPageHugeTemporary(page); in __free_huge_page()
1484 update_and_free_page(h, page); in __free_huge_page()
1487 list_del(&page->lru); in __free_huge_page()
1488 update_and_free_page(h, page); in __free_huge_page()
1492 arch_clear_hugepage_flags(page); in __free_huge_page()
1493 enqueue_huge_page(h, page); in __free_huge_page()
1513 struct page *page; in free_hpage_workfn() local
1518 page = container_of((struct address_space **)node, in free_hpage_workfn()
1519 struct page, mapping); in free_hpage_workfn()
1521 __free_huge_page(page); in free_hpage_workfn()
1526 void free_huge_page(struct page *page) in free_huge_page() argument
1537 if (llist_add((struct llist_node *)&page->mapping, in free_huge_page()
1543 __free_huge_page(page); in free_huge_page()
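free_huge_page() (lines 1526-1543) cannot always take hugetlb_lock directly: when called from a context where that is unsafe, it pushes the page onto a lock-free llist, reusing the page's own mapping field as the llist_node (hence the container_of() at lines 1518-1519), and lets free_hpage_workfn() drain the list from a workqueue. A user-space analogue of that deferral, using C11 atomics in place of llist_add()/llist_del_all():

```c
#include <stdatomic.h>
#include <stddef.h>

struct deferred {
	struct deferred *next;	/* overlaid on an unused pointer field */
};

static _Atomic(struct deferred *) free_list;

/* Lock-free push, like llist_add(): safe from any context because it
 * never blocks, only retries a compare-and-swap. Returns nonzero when
 * the list was empty, i.e. the caller should schedule the worker. */
static int defer_free(struct deferred *d)
{
	struct deferred *first = atomic_load(&free_list);

	do {
		d->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, d));

	return first == NULL;
}

/* Worker side, like free_hpage_workfn(): detach the whole list in one
 * exchange (llist_del_all()) and free each entry under normal locking. */
static void drain_deferred(void (*do_free)(struct deferred *))
{
	struct deferred *node = atomic_exchange(&free_list, NULL);

	while (node) {
		struct deferred *next = node->next;

		do_free(node);
		node = next;
	}
}
```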
1546 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1548 INIT_LIST_HEAD(&page->lru); in prep_new_huge_page()
1549 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); in prep_new_huge_page()
1550 set_hugetlb_cgroup(page, NULL); in prep_new_huge_page()
1551 set_hugetlb_cgroup_rsvd(page, NULL); in prep_new_huge_page()
1555 ClearPageHugeFreed(page); in prep_new_huge_page()
1559 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
1563 struct page *p = page + 1; in prep_compound_gigantic_page()
1566 set_compound_order(page, order); in prep_compound_gigantic_page()
1567 __ClearPageReserved(page); in prep_compound_gigantic_page()
1568 __SetPageHead(page); in prep_compound_gigantic_page()
1569 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in prep_compound_gigantic_page()
1584 set_compound_head(p, page); in prep_compound_gigantic_page()
1586 atomic_set(compound_mapcount_ptr(page), -1); in prep_compound_gigantic_page()
1587 atomic_set(compound_pincount_ptr(page), 0); in prep_compound_gigantic_page()
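prep_compound_gigantic_page() (lines 1559-1587) stitches the constituent pages into one compound page: the head gets the order and head flag, every tail is pointed back at the head via set_compound_head(), and the compound mapcount/pincount are initialized; destroy_compound_gigantic_page() (lines 1249-1266) undoes exactly this before the pages are handed back. A toy model of the two mirrored walks:

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_page {
	bool head;			/* stands in for PageHead */
	struct toy_page *compound;	/* a tail's pointer back to its head */
	unsigned int order;		/* meaningful on the head page only */
};

/* Mirrors prep_compound_gigantic_page(): flag the head, then make
 * every tail page point back at it. */
static void toy_prep_compound(struct toy_page *page, unsigned int order)
{
	size_t i, nr = 1UL << order;

	page->head = true;
	page->order = order;
	for (i = 1; i < nr; i++)
		page[i].compound = page;
}

/* Mirrors destroy_compound_gigantic_page(): sever the tail links and
 * clear the head marking so each page stands alone again. */
static void toy_destroy_compound(struct toy_page *page)
{
	size_t i, nr = 1UL << page->order;

	for (i = 1; i < nr; i++)
		page[i].compound = NULL;
	page->order = 0;
	page->head = false;
}
```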
1595 int PageHuge(struct page *page) in PageHuge() argument
1597 if (!PageCompound(page)) in PageHuge()
1600 page = compound_head(page); in PageHuge()
1601 return page[1].compound_dtor == HUGETLB_PAGE_DTOR; in PageHuge()
1609 int PageHeadHuge(struct page *page_head) in PageHeadHuge()
1624 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) in hugetlb_page_mapping_lock_write()
1637 pgoff_t hugetlb_basepage_index(struct page *page) in hugetlb_basepage_index() argument
1639 struct page *page_head = compound_head(page); in hugetlb_basepage_index()
1644 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in hugetlb_basepage_index()
1646 compound_idx = page - page_head; in hugetlb_basepage_index()
1651 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page()
1656 struct page *page; in alloc_buddy_huge_page() local
1673 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask); in alloc_buddy_huge_page()
1674 if (page) in alloc_buddy_huge_page()
1684 if (node_alloc_noretry && page && !alloc_try_hard) in alloc_buddy_huge_page()
1692 if (node_alloc_noretry && !page && alloc_try_hard) in alloc_buddy_huge_page()
1695 return page; in alloc_buddy_huge_page()
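alloc_buddy_huge_page() (lines 1651-1695) normally allocates with retries enabled; callers growing the pool in bulk can pass a per-node bitmap, and the function sets a node's bit after a failed try-hard attempt (line 1692) and clears it after a later success (line 1684), so one struggling node stops triggering expensive reclaim retries on every iteration. The bookkeeping in isolation, with a toy fixed-width mask instead of a nodemask:

```c
#include <stdbool.h>
#include <stddef.h>

/* Toy 64-node mask standing in for the kernel's nodemask_t. */
struct noretry_mask {
	unsigned long long bits;	/* bit set => skip try-hard on that node */
};

static bool node_should_try_hard(const struct noretry_mask *m, int nid)
{
	return m == NULL || !(m->bits & (1ULL << nid));
}

static void node_record_result(struct noretry_mask *m, int nid,
			       bool tried_hard, bool success)
{
	if (m == NULL)
		return;
	if (tried_hard && !success)
		m->bits |= 1ULL << nid;		/* stop trying hard here */
	else if (!tried_hard && success)
		m->bits &= ~(1ULL << nid);	/* node recovered; retry again */
}
```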
1702 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page()
1706 struct page *page; in alloc_fresh_huge_page() local
1709 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
1711 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
1713 if (!page) in alloc_fresh_huge_page()
1717 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
1718 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1720 return page; in alloc_fresh_huge_page()
1730 struct page *page; in alloc_pool_huge_page() local
1735 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
1737 if (page) in alloc_pool_huge_page()
1741 if (!page) in alloc_pool_huge_page()
1744 put_page(page); /* free it into the hugepage allocator */ in alloc_pool_huge_page()
1768 struct page *page = in free_pool_huge_page() local
1770 struct page, lru); in free_pool_huge_page()
1771 list_del(&page->lru); in free_pool_huge_page()
1778 update_and_free_page(h, page); in free_pool_huge_page()
1797 int dissolve_free_huge_page(struct page *page) in dissolve_free_huge_page() argument
1803 if (!PageHuge(page)) in dissolve_free_huge_page()
1807 if (!PageHuge(page)) { in dissolve_free_huge_page()
1812 if (!page_count(page)) { in dissolve_free_huge_page()
1813 struct page *head = compound_head(page); in dissolve_free_huge_page()
1842 if (PageHWPoison(head) && page != head) { in dissolve_free_huge_page()
1843 SetPageHWPoison(page); in dissolve_free_huge_page()
1869 struct page *page; in dissolve_free_huge_pages() local
1876 page = pfn_to_page(pfn); in dissolve_free_huge_pages()
1877 rc = dissolve_free_huge_page(page); in dissolve_free_huge_pages()
1888 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page()
1891 struct page *page = NULL; in alloc_surplus_huge_page() local
1901 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
1902 if (!page) in alloc_surplus_huge_page()
1914 SetPageHugeTemporary(page); in alloc_surplus_huge_page()
1916 put_page(page); in alloc_surplus_huge_page()
1920 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
1926 return page; in alloc_surplus_huge_page()
1929 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page()
1932 struct page *page; in alloc_migrate_huge_page() local
1937 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
1938 if (!page) in alloc_migrate_huge_page()
1945 SetPageHugeTemporary(page); in alloc_migrate_huge_page()
1947 return page; in alloc_migrate_huge_page()
1954 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol()
1957 struct page *page; in alloc_buddy_huge_page_with_mpol() local
1964 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
1967 return page; in alloc_buddy_huge_page_with_mpol()
1971 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask()
1976 struct page *page; in alloc_huge_page_nodemask() local
1978 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1979 if (page) { in alloc_huge_page_nodemask()
1981 return page; in alloc_huge_page_nodemask()
1990 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma()
1995 struct page *page; in alloc_huge_page_vma() local
2001 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); in alloc_huge_page_vma()
2004 return page; in alloc_huge_page_vma()
2015 struct page *page, *tmp; in gather_surplus_pages() local
2033 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
2035 if (!page) { in gather_surplus_pages()
2039 list_add(&page->lru, &surplus_list); in gather_surplus_pages()
2074 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
2081 put_page_testzero(page); in gather_surplus_pages()
2082 VM_BUG_ON_PAGE(page_count(page), page); in gather_surplus_pages()
2083 enqueue_huge_page(h, page); in gather_surplus_pages()
2089 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
2090 put_page(page); in gather_surplus_pages()
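gather_surplus_pages() (lines 2015-2090) runs in two phases: it drops the pool lock, allocates surplus pages onto a private list until the shortfall is covered, then retakes the lock, enqueues only as many pages as are still needed (other CPUs may have freed pages in the meantime), and releases the rest. A simplified sketch of that shape; the toy_* hooks are hypothetical stand-ins for the real allocation, enqueue, and recheck steps:

```c
#include <stddef.h>

struct toy_page { struct toy_page *next; };

/* Hypothetical hooks: alloc_surplus_huge_page(), enqueue_huge_page(),
 * put_page(), and the locked recomputation of the shortfall. */
struct toy_page *toy_alloc_surplus(void);
void toy_enqueue(struct toy_page *p);
void toy_release(struct toy_page *p);
long toy_recheck_shortfall(void);

static int toy_gather_surplus(long needed)
{
	struct toy_page *list = NULL, *p;
	long allocated = 0;

	/* Phase 1: allocate optimistically, rechecking the target because
	 * other CPUs may free (or take) pages while the lock is dropped. */
	while (allocated < needed) {
		p = toy_alloc_surplus();
		if (!p)
			goto fail;
		p->next = list;
		list = p;
		allocated++;
		needed = toy_recheck_shortfall();
	}

	/* Phase 2: commit what is still needed, shed the excess. */
	while ((p = list) != NULL) {
		list = p->next;
		if (needed-- > 0)
			toy_enqueue(p);
		else
			toy_release(p);
	}
	return 0;

fail:
	while ((p = list) != NULL) {	/* nothing was committed */
		list = p->next;
		toy_release(p);
	}
	return -1;	/* -ENOMEM in the real code */
}
```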
2289 struct page *page) in restore_reserve_on_error() argument
2291 if (unlikely(PagePrivate(page))) { in restore_reserve_on_error()
2306 ClearPagePrivate(page); in restore_reserve_on_error()
2314 ClearPagePrivate(page); in restore_reserve_on_error()
2320 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page()
2325 struct page *page; in alloc_huge_page() local
2388 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2389 if (!page) { in alloc_huge_page()
2391 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2392 if (!page) in alloc_huge_page()
2396 SetPagePrivate(page); in alloc_huge_page()
2399 list_add(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2402 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2408 h_cg, page); in alloc_huge_page()
2413 set_page_private(page, (unsigned long)spool); in alloc_huge_page()
2432 pages_per_huge_page(h), page); in alloc_huge_page()
2434 return page; in alloc_huge_page()
2492 struct page *page = virt_to_page(m); in gather_bootmem_prealloc() local
2496 WARN_ON(page_count(page) != 1); in gather_bootmem_prealloc()
2497 prep_compound_gigantic_page(page, huge_page_order(h)); in gather_bootmem_prealloc()
2498 WARN_ON(PageReserved(page)); in gather_bootmem_prealloc()
2499 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2500 put_page(page); /* free it into the hugepage allocator */ in gather_bootmem_prealloc()
2507 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc()
2599 struct page *page, *next; in try_to_free_low() local
2601 list_for_each_entry_safe(page, next, freel, lru) { in try_to_free_low()
2604 if (PageHighMem(page)) in try_to_free_low()
2606 list_del(&page->lru); in try_to_free_low()
2607 update_and_free_page(h, page); in try_to_free_low()
2609 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
3760 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3766 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, in make_huge_pte()
3769 entry = huge_pte_wrprotect(mk_huge_pte(page, in make_huge_pte()
3774 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
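make_huge_pte() (lines 3760-3774) composes the new entry from the page and the VMA's protection: a writable mapping gets a writable, pre-dirtied PTE, a read-only one gets a write-protected PTE, and an architecture hook gets the last word. The shape of that composition with illustrative bit definitions (real PTE layouts are architecture-specific):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative PTE bits; not any real architecture's layout. */
#define TOY_PTE_PRESENT	(1u << 0)
#define TOY_PTE_WRITE	(1u << 1)
#define TOY_PTE_DIRTY	(1u << 2)
#define TOY_PTE_HUGE	(1u << 3)

static uint64_t toy_make_huge_pte(uint64_t pfn, bool writable)
{
	uint64_t pte = (pfn << 12) | TOY_PTE_PRESENT | TOY_PTE_HUGE;

	if (writable)
		pte |= TOY_PTE_WRITE | TOY_PTE_DIRTY;	/* mkdirty + mkwrite */
	else
		pte &= ~(uint64_t)TOY_PTE_WRITE;	/* wrprotect */

	/* the kernel would pass the result through arch_make_huge_pte() */
	return pte;
}
```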
3819 struct page *ptepage; in copy_hugetlb_page_range()
3928 struct page *ref_page) in __unmap_hugepage_range()
3935 struct page *page; in __unmap_hugepage_range() local
3989 page = pte_page(pte); in __unmap_hugepage_range()
3996 if (page != ref_page) { in __unmap_hugepage_range()
4011 set_page_dirty(page); in __unmap_hugepage_range()
4014 page_remove_rmap(page, true); in __unmap_hugepage_range()
4017 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
4046 unsigned long end, struct page *ref_page) in __unmap_hugepage_range_final()
4064 unsigned long end, struct page *ref_page) in unmap_hugepage_range()
4094 struct page *page, unsigned long address) in unmap_ref_private() argument
4138 address + huge_page_size(h), page); in unmap_ref_private()
4151 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow()
4155 struct page *old_page, *new_page; in hugetlb_cow()
4294 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page()
4315 struct page *page; in hugetlbfs_pagecache_present() local
4320 page = find_get_page(mapping, idx); in hugetlbfs_pagecache_present()
4321 if (page) in hugetlbfs_pagecache_present()
4322 put_page(page); in hugetlbfs_pagecache_present()
4323 return page != NULL; in hugetlbfs_pagecache_present()
4326 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, in huge_add_to_page_cache() argument
4331 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); in huge_add_to_page_cache()
4335 ClearPagePrivate(page); in huge_add_to_page_cache()
4341 set_page_dirty(page); in huge_add_to_page_cache()
4390 struct page *page; in hugetlb_no_page() local
4418 page = find_lock_page(mapping, idx); in hugetlb_no_page()
4419 if (!page) { in hugetlb_no_page()
4428 page = alloc_huge_page(vma, haddr, 0); in hugetlb_no_page()
4429 if (IS_ERR(page)) { in hugetlb_no_page()
4449 ret = vmf_error(PTR_ERR(page)); in hugetlb_no_page()
4452 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
4453 __SetPageUptodate(page); in hugetlb_no_page()
4457 int err = huge_add_to_page_cache(page, mapping, idx); in hugetlb_no_page()
4459 put_page(page); in hugetlb_no_page()
4465 lock_page(page); in hugetlb_no_page()
4478 if (unlikely(PageHWPoison(page))) { in hugetlb_no_page()
4486 unlock_page(page); in hugetlb_no_page()
4487 put_page(page); in hugetlb_no_page()
4516 ClearPagePrivate(page); in hugetlb_no_page()
4517 hugepage_add_new_anon_rmap(page, vma, haddr); in hugetlb_no_page()
4519 page_dup_rmap(page, true); in hugetlb_no_page()
4520 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
4527 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); in hugetlb_no_page()
4538 set_page_huge_active(page); in hugetlb_no_page()
4540 unlock_page(page); in hugetlb_no_page()
4549 unlock_page(page); in hugetlb_no_page()
4550 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4551 put_page(page); in hugetlb_no_page()
4587 struct page *page = NULL; in hugetlb_fault() local
4588 struct page *pagecache_page = NULL; in hugetlb_fault()
4690 page = pte_page(entry); in hugetlb_fault()
4691 if (page != pagecache_page) in hugetlb_fault()
4692 if (!trylock_page(page)) { in hugetlb_fault()
4697 get_page(page); in hugetlb_fault()
4712 if (page != pagecache_page) in hugetlb_fault()
4713 unlock_page(page); in hugetlb_fault()
4714 put_page(page); in hugetlb_fault()
4733 wait_on_page_locked(page); in hugetlb_fault()
4748 struct page **pagep) in hugetlb_mcopy_atomic_pte()
4759 struct page *page; in hugetlb_mcopy_atomic_pte() local
4767 page = find_lock_page(mapping, idx); in hugetlb_mcopy_atomic_pte()
4768 if (!page) in hugetlb_mcopy_atomic_pte()
4780 page = alloc_huge_page(dst_vma, dst_addr, 0); in hugetlb_mcopy_atomic_pte()
4781 if (IS_ERR(page)) { in hugetlb_mcopy_atomic_pte()
4786 ret = copy_huge_page_from_user(page, in hugetlb_mcopy_atomic_pte()
4793 *pagep = page; in hugetlb_mcopy_atomic_pte()
4798 page = *pagep; in hugetlb_mcopy_atomic_pte()
4807 __SetPageUptodate(page); in hugetlb_mcopy_atomic_pte()
4822 ret = huge_add_to_page_cache(page, mapping, idx); in hugetlb_mcopy_atomic_pte()
4849 page_dup_rmap(page, true); in hugetlb_mcopy_atomic_pte()
4851 ClearPagePrivate(page); in hugetlb_mcopy_atomic_pte()
4852 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
4861 _dst_pte = make_huge_pte(dst_vma, page, writable); in hugetlb_mcopy_atomic_pte()
4877 set_page_huge_active(page); in hugetlb_mcopy_atomic_pte()
4879 unlock_page(page); in hugetlb_mcopy_atomic_pte()
4886 unlock_page(page); in hugetlb_mcopy_atomic_pte()
4888 put_page(page); in hugetlb_mcopy_atomic_pte()
4894 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
4908 struct page *page; in follow_hugetlb_page() local
5006 page = pte_page(huge_ptep_get(pte)); in follow_hugetlb_page()
5024 pages[i] = mem_map_offset(page, pfn_offset); in follow_hugetlb_page()
5626 struct page * __weak
5633 struct page * __weak
5641 struct page * __weak
5646 struct page *page = NULL; in follow_huge_pmd_pte() local
5663 page = pte_page(pte) + in follow_huge_pmd_pte()
5673 if (WARN_ON_ONCE(!try_grab_page(page, flags))) { in follow_huge_pmd_pte()
5674 page = NULL; in follow_huge_pmd_pte()
5690 return page; in follow_huge_pmd_pte()
5693 struct page * __weak
5703 struct page * __weak
5712 int isolate_hugetlb(struct page *page, struct list_head *list) in isolate_hugetlb() argument
5717 if (!PageHeadHuge(page) || !page_huge_active(page) || in isolate_hugetlb()
5718 !get_page_unless_zero(page)) { in isolate_hugetlb()
5722 clear_page_huge_active(page); in isolate_hugetlb()
5723 list_move_tail(&page->lru, list); in isolate_hugetlb()
5729 void putback_active_hugepage(struct page *page) in putback_active_hugepage() argument
5731 VM_BUG_ON_PAGE(!PageHead(page), page); in putback_active_hugepage()
5733 set_page_huge_active(page); in putback_active_hugepage()
5734 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); in putback_active_hugepage()
5736 put_page(page); in putback_active_hugepage()
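isolate_hugetlb() (lines 5712-5723) only succeeds for the head of an active huge page whose refcount can still be raised from nonzero; it then clears the active flag so concurrent isolators back off, and moves the page to the caller's list. putback_active_hugepage() (lines 5729-5736) reverses both steps and drops the reference. The flag-plus-refcount handshake in miniature (a single lock is assumed to guard it, as hugetlb_lock does in the kernel):

```c
#include <stdbool.h>

struct toy_page {
	int refcount;
	bool active;	/* stands in for page_huge_active() */
};

/* Mirrors isolate_hugetlb(): fail if the page is not active or is
 * already on its way to being freed (refcount zero). */
static bool toy_isolate(struct toy_page *page)
{
	if (!page->active || page->refcount == 0)
		return false;		/* racing with free/dissolve: back off */
	page->refcount++;		/* get_page_unless_zero() */
	page->active = false;		/* hide from other isolators */
	return true;
}

/* Mirrors putback_active_hugepage(): re-expose the page and drop the
 * isolation reference. */
static void toy_putback(struct toy_page *page)
{
	page->active = true;
	page->refcount--;		/* put_page() */
}
```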
5739 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) in move_hugetlb_state()