
Lines of mm/hugetlb.c matching refs:page

866 static void enqueue_huge_page(struct hstate *h, struct page *page)  in enqueue_huge_page()  argument
868 int nid = page_to_nid(page); in enqueue_huge_page()
869 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
874 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact()
876 struct page *page; in dequeue_huge_page_node_exact() local
878 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node_exact()
879 if (!PageHWPoison(page)) in dequeue_huge_page_node_exact()
885 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node_exact()
887 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
888 set_page_refcounted(page); in dequeue_huge_page_node_exact()
891 return page; in dequeue_huge_page_node_exact()
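Taken together, the two functions above implement the pool's per-NUMA-node free lists: enqueue_huge_page() files a freed page on its node's list, and dequeue_huge_page_node_exact() pops the first entry that is not hardware-poisoned, moves it to the shared active list, and gives it an initial reference. Below is a minimal userspace model of that pattern; every name in it (hp_state, hp_page, and so on) is a hypothetical stand-in, not kernel API, and bookkeeping the listing elides is reduced to a single counter.

```c
#include <stdbool.h>
#include <stddef.h>

#define MAX_NODES 4

/* Intrusive doubly linked list node, modeled on struct list_head;
 * prev/next play the role of page->lru. */
struct hp_page {
    struct hp_page *prev, *next;
    bool hwpoison;               /* stands in for PageHWPoison() */
    int refcount;
};

struct hp_state {
    struct hp_page freelist[MAX_NODES]; /* sentinel head per node */
    struct hp_page activelist;
    unsigned long free_pages_node[MAX_NODES];
};

static void list_init(struct hp_page *h) { h->prev = h->next = h; }

/* list_move(): unlink from the current list, relink at the new head.
 * Like the kernel's, it assumes the entry is already on some list
 * (self-linked counts, which is how list_init() leaves it). */
static void list_move(struct hp_page *p, struct hp_page *head)
{
    p->prev->next = p->next;
    p->next->prev = p->prev;
    p->next = head->next;
    p->prev = head;
    head->next->prev = p;
    head->next = p;
}

/* enqueue_huge_page(), condensed. */
static void hp_enqueue(struct hp_state *h, struct hp_page *page, int nid)
{
    list_move(page, &h->freelist[nid]);
    h->free_pages_node[nid]++;
}

/* dequeue_huge_page_node_exact(), condensed: skip poisoned entries;
 * the sentinel comparison detects an exhausted list, exactly like the
 * `&h->hugepage_freelists[nid] == &page->lru` test in the listing. */
static struct hp_page *hp_dequeue_exact(struct hp_state *h, int nid)
{
    struct hp_page *page;

    for (page = h->freelist[nid].next; page != &h->freelist[nid];
         page = page->next)
        if (!page->hwpoison)
            break;
    if (page == &h->freelist[nid])
        return NULL;

    list_move(page, &h->activelist);
    page->refcount = 1;          /* set_page_refcounted() */
    h->free_pages_node[nid]--;
    return page;
}

int main(void)
{
    struct hp_state h;
    struct hp_page pg = { .hwpoison = false, .refcount = 0 };
    int nid;

    for (nid = 0; nid < MAX_NODES; nid++) {
        list_init(&h.freelist[nid]);
        h.free_pages_node[nid] = 0;
    }
    list_init(&h.activelist);
    list_init(&pg);

    hp_enqueue(&h, &pg, 0);
    return hp_dequeue_exact(&h, 0) == &pg ? 0 : 1;
}
```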
894 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask()
908 struct page *page; in dequeue_huge_page_nodemask() local
920 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
921 if (page) in dequeue_huge_page_nodemask()
922 return page; in dequeue_huge_page_nodemask()
939 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma()
944 struct page *page; in dequeue_huge_page_vma() local
965 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
966 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) { in dequeue_huge_page_vma()
967 SetPagePrivate(page); in dequeue_huge_page_vma()
972 return page; in dequeue_huge_page_vma()
1050 static void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
1055 struct page *p = page + 1; in destroy_compound_gigantic_page()
1057 atomic_set(compound_mapcount_ptr(page), 0); in destroy_compound_gigantic_page()
1058 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in destroy_compound_gigantic_page()
1063 set_compound_order(page, 0); in destroy_compound_gigantic_page()
1064 __ClearPageHead(page); in destroy_compound_gigantic_page()
1067 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1069 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1085 struct page *page; in pfn_range_valid_gigantic() local
1088 page = pfn_to_online_page(i); in pfn_range_valid_gigantic()
1089 if (!page) in pfn_range_valid_gigantic()
1092 if (page_zone(page) != z) in pfn_range_valid_gigantic()
1095 if (PageReserved(page)) in pfn_range_valid_gigantic()
1098 if (page_count(page) > 0) in pfn_range_valid_gigantic()
1101 if (PageHuge(page)) in pfn_range_valid_gigantic()
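pfn_range_valid_gigantic() is the filter run before a contiguous (gigantic) allocation is attempted: every pfn in the candidate range must map to an online page that sits in the target zone and is unreserved, unreferenced, and not already part of a huge page. A hedged userspace sketch of that scan follows; struct mpage and all field names are illustrative stand-ins for the kernel's page-level predicates.

```c
#include <stdbool.h>
#include <stddef.h>

/* Each entry models one page frame in the candidate range. */
struct mpage {
    bool online;    /* pfn_to_online_page() != NULL */
    int zone;       /* page_zone() */
    bool reserved;  /* PageReserved() */
    int refcount;   /* page_count() */
    bool is_huge;   /* PageHuge() */
};

static bool range_valid_gigantic(const struct mpage *map, size_t map_len,
                                 int zone, size_t start, size_t count)
{
    size_t i;

    if (start + count > map_len)
        return false;
    for (i = start; i < start + count; i++) {
        const struct mpage *p = &map[i];

        if (!p->online)        /* memory hole or offline section */
            return false;
        if (p->zone != zone)   /* range must not straddle zones */
            return false;
        if (p->reserved)       /* reserved at boot; never allocatable */
            return false;
        if (p->refcount > 0)   /* currently in use */
            return false;
        if (p->is_huge)        /* already belongs to a huge page */
            return false;
    }
    return true;
}

int main(void)
{
    struct mpage map[8] = {0};
    size_t i;

    for (i = 0; i < 8; i++) {
        map[i].online = true;
        map[i].zone = 1;
    }
    map[5].refcount = 1;  /* one busy frame invalidates ranges covering it */
    return range_valid_gigantic(map, 8, 1, 0, 4) &&
           !range_valid_gigantic(map, 8, 1, 3, 4) ? 0 : 1;
}
```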
1115 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1154 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1155 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1157 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1165 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page()
1170 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1171 static inline void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
1175 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1183 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1185 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | in update_and_free_page()
1190 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); in update_and_free_page()
1191 set_compound_page_dtor(page, NULL_COMPOUND_DTOR); in update_and_free_page()
1192 set_page_refcounted(page); in update_and_free_page()
1194 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1195 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1197 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1218 bool page_huge_active(struct page *page) in page_huge_active() argument
1220 VM_BUG_ON_PAGE(!PageHuge(page), page); in page_huge_active()
1221 return PageHead(page) && PagePrivate(&page[1]); in page_huge_active()
1225 static void set_page_huge_active(struct page *page) in set_page_huge_active() argument
1227 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in set_page_huge_active()
1228 SetPagePrivate(&page[1]); in set_page_huge_active()
1231 static void clear_page_huge_active(struct page *page) in clear_page_huge_active() argument
1233 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in clear_page_huge_active()
1234 ClearPagePrivate(&page[1]); in clear_page_huge_active()
1241 static inline bool PageHugeTemporary(struct page *page) in PageHugeTemporary() argument
1243 if (!PageHuge(page)) in PageHugeTemporary()
1246 return (unsigned long)page[2].mapping == -1U; in PageHugeTemporary()
1249 static inline void SetPageHugeTemporary(struct page *page) in SetPageHugeTemporary() argument
1251 page[2].mapping = (void *)-1U; in SetPageHugeTemporary()
1254 static inline void ClearPageHugeTemporary(struct page *page) in ClearPageHugeTemporary() argument
1256 page[2].mapping = NULL; in ClearPageHugeTemporary()
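The three accessor pairs above illustrate a recurring hugetlb trick: only the head page's own state is spoken for, so spare fields in the tail pages carry extra per-hugepage flags. "Huge active" lives in the Private bit of the first tail (page[1]), and "temporary" is encoded by storing -1 in the second tail's mapping field, a value no real address_space pointer can take. A small runnable model of both encodings; struct mpage and the names below are hypothetical.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A compound page modeled as an array: index 0 is the head, the rest
 * are tails whose spare fields carry extra hugepage state. */
struct mpage {
    unsigned long flags;   /* bit 0 plays the role of PG_private */
    void *mapping;         /* tails may reuse this field freely */
};

#define PG_PRIVATE 0x1UL

/* page_huge_active() and friends: the flag lives in the first tail. */
static bool huge_active(struct mpage *page)       { return page[1].flags & PG_PRIVATE; }
static void set_huge_active(struct mpage *page)   { page[1].flags |= PG_PRIVATE; }
static void clear_huge_active(struct mpage *page) { page[1].flags &= ~PG_PRIVATE; }

/* PageHugeTemporary() and friends: -1 in the second tail's mapping
 * marks a page to free outright instead of returning to the pool. */
static bool huge_temporary(struct mpage *page)
{
    return (uintptr_t)page[2].mapping == (uintptr_t)-1;
}
static void set_huge_temporary(struct mpage *page)   { page[2].mapping = (void *)-1; }
static void clear_huge_temporary(struct mpage *page) { page[2].mapping = NULL; }

int main(void)
{
    struct mpage huge[8] = {0};   /* head plus seven tails */

    set_huge_active(huge);
    set_huge_temporary(huge);
    assert(huge_active(huge) && huge_temporary(huge));
    clear_huge_active(huge);
    clear_huge_temporary(huge);
    assert(!huge_active(huge) && !huge_temporary(huge));
    return 0;
}
```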
1259 static void __free_huge_page(struct page *page) in __free_huge_page() argument
1265 struct hstate *h = page_hstate(page); in __free_huge_page()
1266 int nid = page_to_nid(page); in __free_huge_page()
1268 (struct hugepage_subpool *)page_private(page); in __free_huge_page()
1271 VM_BUG_ON_PAGE(page_count(page), page); in __free_huge_page()
1272 VM_BUG_ON_PAGE(page_mapcount(page), page); in __free_huge_page()
1274 set_page_private(page, 0); in __free_huge_page()
1275 page->mapping = NULL; in __free_huge_page()
1276 restore_reserve = PagePrivate(page); in __free_huge_page()
1277 ClearPagePrivate(page); in __free_huge_page()
1299 clear_page_huge_active(page); in __free_huge_page()
1301 pages_per_huge_page(h), page); in __free_huge_page()
1305 if (PageHugeTemporary(page)) { in __free_huge_page()
1306 list_del(&page->lru); in __free_huge_page()
1307 ClearPageHugeTemporary(page); in __free_huge_page()
1308 update_and_free_page(h, page); in __free_huge_page()
1311 list_del(&page->lru); in __free_huge_page()
1312 update_and_free_page(h, page); in __free_huge_page()
1316 arch_clear_hugepage_flags(page); in __free_huge_page()
1317 enqueue_huge_page(h, page); in __free_huge_page()
1337 struct page *page; in free_hpage_workfn() local
1342 page = container_of((struct address_space **)node, in free_hpage_workfn()
1343 struct page, mapping); in free_hpage_workfn()
1345 __free_huge_page(page); in free_hpage_workfn()
1350 void free_huge_page(struct page *page) in free_huge_page() argument
1361 if (llist_add((struct llist_node *)&page->mapping, in free_huge_page()
1367 __free_huge_page(page); in free_huge_page()
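free_huge_page() can be reached from contexts where the hugetlb spinlock must not be taken, so the pair of functions above defers the real work: the page is pushed onto a lock-free singly linked list (reusing page->mapping as the llist_node) and a workqueue item drains it later. The kernel's llist_add() reports whether the list was previously empty, which is exactly when the work item needs scheduling. A compressed userspace model of that push/drain pattern using C11 atomics; dnode and the function names are stand-ins.

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct dnode { struct dnode *next; };  /* stands in for struct llist_node */

static _Atomic(struct dnode *) deferred = NULL;

/* Modeled llist_add(): lock-free push onto a singly linked stack.
 * Returns true if the list was empty, i.e. the caller must schedule
 * the drain work (the kernel's queue_work() call). */
static bool defer_push(struct dnode *n)
{
    struct dnode *head = atomic_load(&deferred);

    do {
        n->next = head;
    } while (!atomic_compare_exchange_weak(&deferred, &head, n));
    return head == NULL;
}

/* Modeled free_hpage_workfn(): detach the entire list in one atomic
 * exchange, then free each entry at leisure, outside the original
 * (possibly atomic) context. */
static void drain(void (*free_one)(struct dnode *))
{
    struct dnode *n = atomic_exchange(&deferred, NULL);

    while (n) {
        struct dnode *next = n->next;  /* read before freeing */
        free_one(n);
        n = next;
    }
}

static int freed;
static void count_free(struct dnode *n) { (void)n; freed++; }

int main(void)
{
    struct dnode a, b;

    assert(defer_push(&a));    /* list was empty: schedule the worker */
    assert(!defer_push(&b));   /* already pending: nothing to schedule */
    drain(count_free);
    assert(freed == 2);
    return 0;
}
```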
1370 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1372 INIT_LIST_HEAD(&page->lru); in prep_new_huge_page()
1373 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); in prep_new_huge_page()
1375 set_hugetlb_cgroup(page, NULL); in prep_new_huge_page()
1381 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
1385 struct page *p = page + 1; in prep_compound_gigantic_page()
1388 set_compound_order(page, order); in prep_compound_gigantic_page()
1389 __ClearPageReserved(page); in prep_compound_gigantic_page()
1390 __SetPageHead(page); in prep_compound_gigantic_page()
1391 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in prep_compound_gigantic_page()
1406 set_compound_head(p, page); in prep_compound_gigantic_page()
1408 atomic_set(compound_mapcount_ptr(page), -1); in prep_compound_gigantic_page()
1416 int PageHuge(struct page *page) in PageHuge() argument
1418 if (!PageCompound(page)) in PageHuge()
1421 page = compound_head(page); in PageHuge()
1422 return page[1].compound_dtor == HUGETLB_PAGE_DTOR; in PageHuge()
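PageHuge() works because earlier setup left two breadcrumbs: every tail page stores the head's address with bit 0 set (set_compound_head()), and the first tail records a destructor tag (set_compound_page_dtor() with HUGETLB_PAGE_DTOR). Identification is then "resolve the head, compare the tag". A runnable model of both encodings; the struct, enum values, and names are illustrative, and the PageCompound() pre-check from the listing is assumed rather than modeled.

```c
#include <assert.h>
#include <stdint.h>

enum mdtor { MDTOR_NONE, MDTOR_COMPOUND, MDTOR_HUGETLB }; /* illustrative */

struct mpage {
    uintptr_t compound_head;  /* head address | 1 on tails, 0 on heads */
    enum mdtor dtor;          /* meaningful only in page[1] of a head */
};

/* Condensed prep: link every tail back to the head, as
 * set_compound_head() does, then tag the first tail the way
 * set_compound_page_dtor(page, HUGETLB_PAGE_DTOR) does. */
static void prep_huge(struct mpage *page, unsigned int order)
{
    unsigned long i, nr = 1UL << order;

    for (i = 1; i < nr; i++)
        page[i].compound_head = (uintptr_t)page | 1;
    page[1].dtor = MDTOR_HUGETLB;
}

/* compound_head(): strip the tail bit to reach the head page. */
static struct mpage *head_of(struct mpage *p)
{
    if (p->compound_head & 1)
        return (struct mpage *)(p->compound_head - 1);
    return p;
}

/* PageHuge(), modeled; the kernel first checks PageCompound(), which
 * this model assumes of its argument. */
static int is_huge(struct mpage *p)
{
    return head_of(p)[1].dtor == MDTOR_HUGETLB;
}

int main(void)
{
    static struct mpage pages[1 << 3];

    prep_huge(pages, 3);
    assert(is_huge(&pages[0]));  /* head */
    assert(is_huge(&pages[5]));  /* any tail resolves to the same head */
    return 0;
}
```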
1430 int PageHeadHuge(struct page *page_head) in PageHeadHuge()
1438 pgoff_t __basepage_index(struct page *page) in __basepage_index() argument
1440 struct page *page_head = compound_head(page); in __basepage_index()
1445 return page_index(page); in __basepage_index()
1448 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in __basepage_index()
1450 compound_idx = page - page_head; in __basepage_index()
1455 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page()
1460 struct page *page; in alloc_buddy_huge_page() local
1477 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask); in alloc_buddy_huge_page()
1478 if (page) in alloc_buddy_huge_page()
1488 if (node_alloc_noretry && page && !alloc_try_hard) in alloc_buddy_huge_page()
1496 if (node_alloc_noretry && !page && alloc_try_hard) in alloc_buddy_huge_page()
1499 return page; in alloc_buddy_huge_page()
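alloc_buddy_huge_page() adapts its effort per node: attempts normally use __GFP_RETRY_MAYFAIL ("try hard"), but when the caller supplies a node_alloc_noretry bitmap, a node whose hard attempt just failed is marked so subsequent attempts there stay cheap, and a later success clears the mark. A small model of that feedback loop; the bitmap type and fake_alloc() are hypothetical, and nid is assumed to be below 64.

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* One bit per node (nid < 64 assumed): set means "a recent try-hard
 * allocation failed here, don't retry hard for now". */
struct noretry_mask { unsigned long long bits; };

static bool node_marked(struct noretry_mask *m, int nid) { return (m->bits >> nid) & 1; }
static void node_set(struct noretry_mask *m, int nid)    { m->bits |= 1ULL << nid; }
static void node_clear(struct noretry_mask *m, int nid)  { m->bits &= ~(1ULL << nid); }

/* try_hard corresponds to allocating with __GFP_RETRY_MAYFAIL; the
 * callback is a hypothetical allocator hook. */
static void *alloc_with_feedback(struct noretry_mask *m, int nid,
                                 void *(*try_alloc)(int nid, bool try_hard))
{
    bool try_hard = !node_marked(m, nid);
    void *page = try_alloc(nid, try_hard);

    if (!page && try_hard)
        node_set(m, nid);    /* hard attempt failed: go easy next time */
    else if (page && !try_hard)
        node_clear(m, nid);  /* easy attempt succeeded: node recovered */
    return page;
}

/* Fake allocator: pretend expensive attempts fail and cheap ones
 * succeed, purely to exercise the bookkeeping. */
static void *fake_alloc(int nid, bool try_hard)
{
    (void)nid;
    return try_hard ? NULL : (void *)0x1;
}

int main(void)
{
    struct noretry_mask m = {0};

    assert(!alloc_with_feedback(&m, 0, fake_alloc)); /* fails hard */
    assert(node_marked(&m, 0));                      /* ... remembered */
    assert(alloc_with_feedback(&m, 0, fake_alloc));  /* cheap retry works */
    assert(!node_marked(&m, 0));                     /* ... mark cleared */
    return 0;
}
```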
1506 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page()
1510 struct page *page; in alloc_fresh_huge_page() local
1513 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
1515 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
1517 if (!page) in alloc_fresh_huge_page()
1521 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
1522 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1524 return page; in alloc_fresh_huge_page()
1534 struct page *page; in alloc_pool_huge_page() local
1539 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
1541 if (page) in alloc_pool_huge_page()
1545 if (!page) in alloc_pool_huge_page()
1548 put_page(page); /* free it into the hugepage allocator */ in alloc_pool_huge_page()
1572 struct page *page = in free_pool_huge_page() local
1574 struct page, lru); in free_pool_huge_page()
1575 list_del(&page->lru); in free_pool_huge_page()
1582 update_and_free_page(h, page); in free_pool_huge_page()
1601 int dissolve_free_huge_page(struct page *page) in dissolve_free_huge_page() argument
1606 if (!PageHuge(page)) in dissolve_free_huge_page()
1610 if (!PageHuge(page)) { in dissolve_free_huge_page()
1615 if (!page_count(page)) { in dissolve_free_huge_page()
1616 struct page *head = compound_head(page); in dissolve_free_huge_page()
1625 if (PageHWPoison(head) && page != head) { in dissolve_free_huge_page()
1626 SetPageHWPoison(page); in dissolve_free_huge_page()
1652 struct page *page; in dissolve_free_huge_pages() local
1659 page = pfn_to_page(pfn); in dissolve_free_huge_pages()
1660 rc = dissolve_free_huge_page(page); in dissolve_free_huge_pages()
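dissolve_free_huge_page() demonstrates the optimistic-check/recheck idiom: PageHuge() is tested once without hugetlb_lock as a cheap fast path, then tested again under the lock, because the page can be freed back to the buddy allocator in the window between the two; only an unreferenced free huge page is actually dissolved. A condensed pthread model of the pattern; the state enum and names are hypothetical.

```c
#include <pthread.h>

enum pstate { PS_BUDDY, PS_HUGE_FREE, PS_HUGE_INUSE };

struct mpage {
    enum pstate state;
    int refcount;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 when the page is (or becomes) ordinary buddy memory,
 * -1 when it is a huge page still in use and cannot be dissolved. */
static int dissolve(struct mpage *page)
{
    int rc = -1;

    if (page->state == PS_BUDDY)     /* unlocked fast path */
        return 0;

    pthread_mutex_lock(&pool_lock);
    if (page->state == PS_BUDDY) {   /* recheck: we raced with a free */
        rc = 0;
        goto out;
    }
    if (page->state == PS_HUGE_FREE && page->refcount == 0) {
        /* unlink from the free list and hand the memory back, the
         * role update_and_free_page() plays in the listing */
        page->state = PS_BUDDY;
        rc = 0;
    }
out:
    pthread_mutex_unlock(&pool_lock);
    return rc;
}

int main(void)
{
    struct mpage free_huge = { PS_HUGE_FREE, 0 };
    struct mpage busy_huge = { PS_HUGE_INUSE, 2 };

    return dissolve(&free_huge) == 0 && dissolve(&busy_huge) == -1 ? 0 : 1;
}
```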
1671 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page()
1674 struct page *page = NULL; in alloc_surplus_huge_page() local
1684 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
1685 if (!page) in alloc_surplus_huge_page()
1697 SetPageHugeTemporary(page); in alloc_surplus_huge_page()
1699 put_page(page); in alloc_surplus_huge_page()
1703 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
1709 return page; in alloc_surplus_huge_page()
1712 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page()
1715 struct page *page; in alloc_migrate_huge_page() local
1720 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
1721 if (!page) in alloc_migrate_huge_page()
1728 SetPageHugeTemporary(page); in alloc_migrate_huge_page()
1730 return page; in alloc_migrate_huge_page()
1737 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol()
1740 struct page *page; in alloc_buddy_huge_page_with_mpol() local
1747 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
1750 return page; in alloc_buddy_huge_page_with_mpol()
1754 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node()
1757 struct page *page = NULL; in alloc_huge_page_node() local
1764 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL); in alloc_huge_page_node()
1767 if (!page) in alloc_huge_page_node()
1768 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); in alloc_huge_page_node()
1770 return page; in alloc_huge_page_node()
1774 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask()
1781 struct page *page; in alloc_huge_page_nodemask() local
1783 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1784 if (page) { in alloc_huge_page_nodemask()
1786 return page; in alloc_huge_page_nodemask()
1795 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma()
1800 struct page *page; in alloc_huge_page_vma() local
1806 page = alloc_huge_page_nodemask(h, node, nodemask); in alloc_huge_page_vma()
1809 return page; in alloc_huge_page_vma()
1819 struct page *page, *tmp; in gather_surplus_pages() local
1837 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
1839 if (!page) { in gather_surplus_pages()
1843 list_add(&page->lru, &surplus_list); in gather_surplus_pages()
1878 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
1885 put_page_testzero(page); in gather_surplus_pages()
1886 VM_BUG_ON_PAGE(page_count(page), page); in gather_surplus_pages()
1887 enqueue_huge_page(h, page); in gather_surplus_pages()
1893 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
1894 put_page(page); in gather_surplus_pages()
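gather_surplus_pages() (the cluster ending above) allocates surplus pages with the lock dropped, so by the time the lock is retaken demand may have shrunk and the function can be holding more fresh pages than the pool still wants; the final loops enqueue just enough and put_page() the excess. A sketch of that allocate-then-reconcile shape, with the surplus list flattened to an array and all names hypothetical.

```c
#include <stddef.h>

/* Hypothetical surplus pool. `needed` models demand that can change
 * while hugetlb_lock is dropped during allocation. */
struct pool {
    size_t free_count;
    size_t needed;
};

/* alloc_one()/free_one()/enqueue() stand in for
 * alloc_surplus_huge_page(), put_page() and enqueue_huge_page().
 * `target` is the demand snapshot taken before dropping the lock. */
static size_t gather_surplus(struct pool *p, size_t target,
                             void *(*alloc_one)(void),
                             void (*enqueue)(struct pool *, void *),
                             void (*free_one)(void *))
{
    void *batch[64];
    size_t got = 0, i, want;

    /* Phase 1 ("lock dropped"): allocate optimistically up to target. */
    while (got < target && got < 64) {
        void *page = alloc_one();
        if (!page)
            break;                 /* partial success still counts */
        batch[got++] = page;
    }

    /* Phase 2 ("lock retaken"): demand may have shrunk meanwhile;
     * keep what is still needed, release the excess immediately. */
    want = p->needed < got ? p->needed : got;
    for (i = 0; i < want; i++)
        enqueue(p, batch[i]);
    for (; i < got; i++)
        free_one(batch[i]);
    return want;
}

static size_t allocs;
static void *fake_alloc(void) { return ++allocs <= 5 ? &allocs : NULL; }
static void fake_enqueue(struct pool *p, void *pg) { (void)pg; p->free_count++; }
static void fake_free(void *pg) { (void)pg; }

int main(void)
{
    struct pool p = { .free_count = 0, .needed = 3 }; /* demand shrank */
    size_t kept = gather_surplus(&p, 5, fake_alloc, fake_enqueue, fake_free);

    return kept == 3 && p.free_count == 3 ? 0 : 1;
}
```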
2083 struct page *page) in restore_reserve_on_error() argument
2085 if (unlikely(PagePrivate(page))) { in restore_reserve_on_error()
2100 ClearPagePrivate(page); in restore_reserve_on_error()
2108 ClearPagePrivate(page); in restore_reserve_on_error()
2114 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page()
2119 struct page *page; in alloc_huge_page() local
2171 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2172 if (!page) { in alloc_huge_page()
2174 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2175 if (!page) in alloc_huge_page()
2178 SetPagePrivate(page); in alloc_huge_page()
2182 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2185 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2188 set_page_private(page, (unsigned long)spool); in alloc_huge_page()
2206 return page; in alloc_huge_page()
2251 static void __init prep_compound_huge_page(struct page *page, in prep_compound_huge_page() argument
2255 prep_compound_gigantic_page(page, order); in prep_compound_huge_page()
2257 prep_compound_page(page, order); in prep_compound_huge_page()
2266 struct page *page = virt_to_page(m); in gather_bootmem_prealloc() local
2269 WARN_ON(page_count(page) != 1); in gather_bootmem_prealloc()
2270 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
2271 WARN_ON(PageReserved(page)); in gather_bootmem_prealloc()
2272 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2273 put_page(page); /* free it into the hugepage allocator */ in gather_bootmem_prealloc()
2282 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
2370 struct page *page, *next; in try_to_free_low() local
2372 list_for_each_entry_safe(page, next, freel, lru) { in try_to_free_low()
2375 if (PageHighMem(page)) in try_to_free_low()
2377 list_del(&page->lru); in try_to_free_low()
2378 update_and_free_page(h, page); in try_to_free_low()
2380 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
3364 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3370 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, in make_huge_pte()
3373 entry = huge_pte_wrprotect(mk_huge_pte(page, in make_huge_pte()
3378 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
3423 struct page *ptepage; in copy_hugetlb_page_range()
3521 struct page *ref_page) in __unmap_hugepage_range()
3528 struct page *page; in __unmap_hugepage_range() local
3583 page = pte_page(pte); in __unmap_hugepage_range()
3590 if (page != ref_page) { in __unmap_hugepage_range()
3605 set_page_dirty(page); in __unmap_hugepage_range()
3608 page_remove_rmap(page, true); in __unmap_hugepage_range()
3611 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
3624 unsigned long end, struct page *ref_page) in __unmap_hugepage_range_final()
3642 unsigned long end, struct page *ref_page) in unmap_hugepage_range()
3672 struct page *page, unsigned long address) in unmap_ref_private() argument
3716 address + huge_page_size(h), page); in unmap_ref_private()
3729 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow()
3733 struct page *old_page, *new_page; in hugetlb_cow()
3852 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page()
3873 struct page *page; in hugetlbfs_pagecache_present() local
3878 page = find_get_page(mapping, idx); in hugetlbfs_pagecache_present()
3879 if (page) in hugetlbfs_pagecache_present()
3880 put_page(page); in hugetlbfs_pagecache_present()
3881 return page != NULL; in hugetlbfs_pagecache_present()
3884 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, in huge_add_to_page_cache() argument
3889 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); in huge_add_to_page_cache()
3893 ClearPagePrivate(page); in huge_add_to_page_cache()
3899 set_page_dirty(page); in huge_add_to_page_cache()
3916 struct page *page; in hugetlb_no_page() local
3938 page = find_lock_page(mapping, idx); in hugetlb_no_page()
3939 if (!page) { in hugetlb_no_page()
3974 page = alloc_huge_page(vma, haddr, 0); in hugetlb_no_page()
3975 if (IS_ERR(page)) { in hugetlb_no_page()
3995 ret = vmf_error(PTR_ERR(page)); in hugetlb_no_page()
3998 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
3999 __SetPageUptodate(page); in hugetlb_no_page()
4003 int err = huge_add_to_page_cache(page, mapping, idx); in hugetlb_no_page()
4005 put_page(page); in hugetlb_no_page()
4011 lock_page(page); in hugetlb_no_page()
4024 if (unlikely(PageHWPoison(page))) { in hugetlb_no_page()
4056 ClearPagePrivate(page); in hugetlb_no_page()
4057 hugepage_add_new_anon_rmap(page, vma, haddr); in hugetlb_no_page()
4059 page_dup_rmap(page, true); in hugetlb_no_page()
4060 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
4067 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); in hugetlb_no_page()
4078 set_page_huge_active(page); in hugetlb_no_page()
4080 unlock_page(page); in hugetlb_no_page()
4087 unlock_page(page); in hugetlb_no_page()
4088 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4089 put_page(page); in hugetlb_no_page()
4127 struct page *page = NULL; in hugetlb_fault() local
4128 struct page *pagecache_page = NULL; in hugetlb_fault()
4210 page = pte_page(entry); in hugetlb_fault()
4211 if (page != pagecache_page) in hugetlb_fault()
4212 if (!trylock_page(page)) { in hugetlb_fault()
4217 get_page(page); in hugetlb_fault()
4232 if (page != pagecache_page) in hugetlb_fault()
4233 unlock_page(page); in hugetlb_fault()
4234 put_page(page); in hugetlb_fault()
4252 wait_on_page_locked(page); in hugetlb_fault()
4265 struct page **pagep) in hugetlb_mcopy_atomic_pte()
4275 struct page *page; in hugetlb_mcopy_atomic_pte() local
4279 page = alloc_huge_page(dst_vma, dst_addr, 0); in hugetlb_mcopy_atomic_pte()
4280 if (IS_ERR(page)) in hugetlb_mcopy_atomic_pte()
4283 ret = copy_huge_page_from_user(page, in hugetlb_mcopy_atomic_pte()
4290 *pagep = page; in hugetlb_mcopy_atomic_pte()
4295 page = *pagep; in hugetlb_mcopy_atomic_pte()
4304 __SetPageUptodate(page); in hugetlb_mcopy_atomic_pte()
4324 ret = huge_add_to_page_cache(page, mapping, idx); in hugetlb_mcopy_atomic_pte()
4351 page_dup_rmap(page, true); in hugetlb_mcopy_atomic_pte()
4353 ClearPagePrivate(page); in hugetlb_mcopy_atomic_pte()
4354 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
4357 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); in hugetlb_mcopy_atomic_pte()
4372 set_page_huge_active(page); in hugetlb_mcopy_atomic_pte()
4374 unlock_page(page); in hugetlb_mcopy_atomic_pte()
4381 unlock_page(page); in hugetlb_mcopy_atomic_pte()
4383 put_page(page); in hugetlb_mcopy_atomic_pte()
4388 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
4402 struct page *page; in follow_hugetlb_page() local
4497 page = pte_page(huge_ptep_get(pte)); in follow_hugetlb_page()
4503 if (unlikely(page_count(page) <= 0)) { in follow_hugetlb_page()
4513 pages[i] = mem_map_offset(page, pfn_offset); in follow_hugetlb_page()
5052 struct page * __weak
5059 struct page * __weak
5067 struct page * __weak
5071 struct page *page = NULL; in follow_huge_pmd() local
5085 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); in follow_huge_pmd()
5087 get_page(page); in follow_huge_pmd()
5101 return page; in follow_huge_pmd()
5104 struct page * __weak
5114 struct page * __weak
5123 bool isolate_huge_page(struct page *page, struct list_head *list) in isolate_huge_page() argument
5127 VM_BUG_ON_PAGE(!PageHead(page), page); in isolate_huge_page()
5129 if (!page_huge_active(page) || !get_page_unless_zero(page)) { in isolate_huge_page()
5133 clear_page_huge_active(page); in isolate_huge_page()
5134 list_move_tail(&page->lru, list); in isolate_huge_page()
5140 void putback_active_hugepage(struct page *page) in putback_active_hugepage() argument
5142 VM_BUG_ON_PAGE(!PageHead(page), page); in putback_active_hugepage()
5144 set_page_huge_active(page); in putback_active_hugepage()
5145 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); in putback_active_hugepage()
5147 put_page(page); in putback_active_hugepage()
5150 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) in move_hugetlb_state()