
Lines matching references to h

94 static int hugetlb_acct_memory(struct hstate *h, long delta);
130 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
142 spool->hstate = h; in hugepage_new_subpool()
145 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
466 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
473 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
487 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
491 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
548 long to, struct hstate *h, struct hugetlb_cgroup *cg, in hugetlb_resv_map_add() argument
555 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); in hugetlb_resv_map_add()
574 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
614 iter->from, h, h_cg, in add_reservation_in_range()
627 t, h, h_cg, regions_needed); in add_reservation_in_range()
706 long in_regions_needed, struct hstate *h, in region_add() argument
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
942 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
944 if (!hugetlb_acct_memory(h, 1)) in hugetlb_fix_reserve_counts()
989 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
992 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
993 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
1073 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
1076 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
1082 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
1083 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1317 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) in enqueue_hugetlb_folio() argument
1324 list_move(&folio->lru, &h->hugepage_freelists[nid]); in enqueue_hugetlb_folio()
1325 h->free_huge_pages++; in enqueue_hugetlb_folio()
1326 h->free_huge_pages_node[nid]++; in enqueue_hugetlb_folio()
1330 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, in dequeue_hugetlb_folio_node_exact() argument
1337 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { in dequeue_hugetlb_folio_node_exact()
1344 list_move(&folio->lru, &h->hugepage_activelist); in dequeue_hugetlb_folio_node_exact()
1347 h->free_huge_pages--; in dequeue_hugetlb_folio_node_exact()
1348 h->free_huge_pages_node[nid]--; in dequeue_hugetlb_folio_node_exact()
1355 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, in dequeue_hugetlb_folio_nodemask() argument
1381 folio = dequeue_hugetlb_folio_node_exact(h, node); in dequeue_hugetlb_folio_nodemask()
1391 static unsigned long available_huge_pages(struct hstate *h) in available_huge_pages() argument
1393 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
1396 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, in dequeue_hugetlb_folio_vma() argument
1412 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1416 if (avoid_reserve && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1419 gfp_mask = htlb_alloc_mask(h); in dequeue_hugetlb_folio_vma()
1423 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1431 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1436 h->resv_huge_pages--; in dequeue_hugetlb_folio_vma()
1474 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1481 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1482 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1493 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1499 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1500 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1571 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1575 unsigned long nr_pages = pages_per_huge_page(h); in alloc_gigantic_folio()
1585 huge_page_order(h), true); in alloc_gigantic_folio()
1596 huge_page_order(h), true); in alloc_gigantic_folio()
1609 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1617 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1628 static inline void __clear_hugetlb_destructor(struct hstate *h, in __clear_hugetlb_destructor() argument
1646 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, in __remove_hugetlb_folio() argument
1656 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __remove_hugetlb_folio()
1662 h->free_huge_pages--; in __remove_hugetlb_folio()
1663 h->free_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1666 h->surplus_huge_pages--; in __remove_hugetlb_folio()
1667 h->surplus_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1676 __clear_hugetlb_destructor(h, folio); in __remove_hugetlb_folio()
1685 h->nr_huge_pages--; in __remove_hugetlb_folio()
1686 h->nr_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1689 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, in remove_hugetlb_folio() argument
1692 __remove_hugetlb_folio(h, folio, adjust_surplus, false); in remove_hugetlb_folio()
1695 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio, in remove_hugetlb_folio_for_demote() argument
1698 __remove_hugetlb_folio(h, folio, adjust_surplus, true); in remove_hugetlb_folio_for_demote()
1701 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, in add_hugetlb_folio() argument
1712 h->nr_huge_pages++; in add_hugetlb_folio()
1713 h->nr_huge_pages_node[nid]++; in add_hugetlb_folio()
1716 h->surplus_huge_pages++; in add_hugetlb_folio()
1717 h->surplus_huge_pages_node[nid]++; in add_hugetlb_folio()
1744 enqueue_hugetlb_folio(h, folio); in add_hugetlb_folio()
1747 static void __update_and_free_hugetlb_folio(struct hstate *h, in __update_and_free_hugetlb_folio() argument
1750 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __update_and_free_hugetlb_folio()
1760 if (hugetlb_vmemmap_restore(h, &folio->page)) { in __update_and_free_hugetlb_folio()
1767 add_hugetlb_folio(h, folio, true); in __update_and_free_hugetlb_folio()
1778 __clear_hugetlb_destructor(h, folio); in __update_and_free_hugetlb_folio()
1793 if (hstate_is_gigantic(h) || in __update_and_free_hugetlb_folio()
1794 hugetlb_cma_folio(folio, huge_page_order(h))) { in __update_and_free_hugetlb_folio()
1795 destroy_compound_gigantic_folio(folio, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1796 free_gigantic_folio(folio, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1798 __free_pages(&folio->page, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1823 struct hstate *h; in free_hpage_workfn() local
1835 h = size_to_hstate(page_size(page)); in free_hpage_workfn()
1837 __update_and_free_hugetlb_folio(h, page_folio(page)); in free_hpage_workfn()
1844 static inline void flush_free_hpage_work(struct hstate *h) in flush_free_hpage_work() argument
1846 if (hugetlb_vmemmap_optimizable(h)) in flush_free_hpage_work()
1850 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, in update_and_free_hugetlb_folio() argument
1854 __update_and_free_hugetlb_folio(h, folio); in update_and_free_hugetlb_folio()
1869 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) in update_and_free_pages_bulk() argument
1876 update_and_free_hugetlb_folio(h, folio, false); in update_and_free_pages_bulk()
1883 struct hstate *h; in size_to_hstate() local
1885 for_each_hstate(h) { in size_to_hstate()
1886 if (huge_page_size(h) == size) in size_to_hstate()
1887 return h; in size_to_hstate()
1898 struct hstate *h = folio_hstate(folio); in free_huge_folio() local
1935 hugetlb_cgroup_uncharge_folio(hstate_index(h), in free_huge_folio()
1936 pages_per_huge_page(h), folio); in free_huge_folio()
1937 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in free_huge_folio()
1938 pages_per_huge_page(h), folio); in free_huge_folio()
1940 h->resv_huge_pages++; in free_huge_folio()
1943 remove_hugetlb_folio(h, folio, false); in free_huge_folio()
1945 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1946 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_folio()
1948 remove_hugetlb_folio(h, folio, true); in free_huge_folio()
1950 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1953 enqueue_hugetlb_folio(h, folio); in free_huge_folio()
1961 static void __prep_account_new_huge_page(struct hstate *h, int nid) in __prep_account_new_huge_page() argument
1964 h->nr_huge_pages++; in __prep_account_new_huge_page()
1965 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1968 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) in __prep_new_hugetlb_folio() argument
1970 hugetlb_vmemmap_optimize(h, &folio->page); in __prep_new_hugetlb_folio()
1978 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) in prep_new_hugetlb_folio() argument
1980 __prep_new_hugetlb_folio(h, folio); in prep_new_hugetlb_folio()
1982 __prep_account_new_huge_page(h, nid); in prep_new_hugetlb_folio()
2109 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, in alloc_buddy_hugetlb_folio() argument
2113 int order = huge_page_order(h); in alloc_buddy_hugetlb_folio()
2179 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, in alloc_fresh_hugetlb_folio() argument
2187 if (hstate_is_gigantic(h)) in alloc_fresh_hugetlb_folio()
2188 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); in alloc_fresh_hugetlb_folio()
2190 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, in alloc_fresh_hugetlb_folio()
2194 if (hstate_is_gigantic(h)) { in alloc_fresh_hugetlb_folio()
2195 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { in alloc_fresh_hugetlb_folio()
2200 free_gigantic_folio(folio, huge_page_order(h)); in alloc_fresh_hugetlb_folio()
2208 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); in alloc_fresh_hugetlb_folio()
2217 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
2222 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
2224 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
2225 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, in alloc_pool_huge_page()
2243 static struct page *remove_pool_huge_page(struct hstate *h, in remove_pool_huge_page() argument
2252 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in remove_pool_huge_page()
2257 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_huge_page()
2258 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_huge_page()
2259 page = list_entry(h->hugepage_freelists[node].next, in remove_pool_huge_page()
2262 remove_hugetlb_folio(h, folio, acct_surplus); in remove_pool_huge_page()
2301 struct hstate *h = folio_hstate(folio); in dissolve_free_huge_page() local
2302 if (!available_huge_pages(h)) in dissolve_free_huge_page()
2324 remove_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2325 h->max_huge_pages--; in dissolve_free_huge_page()
2336 rc = hugetlb_vmemmap_restore(h, &folio->page); in dissolve_free_huge_page()
2338 update_and_free_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2341 add_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2342 h->max_huge_pages++; in dissolve_free_huge_page()
2367 struct hstate *h; in dissolve_free_huge_pages() local
2373 for_each_hstate(h) in dissolve_free_huge_pages()
2374 order = min(order, huge_page_order(h)); in dissolve_free_huge_pages()
2389 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, in alloc_surplus_hugetlb_folio() argument
2394 if (hstate_is_gigantic(h)) in alloc_surplus_hugetlb_folio()
2398 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_hugetlb_folio()
2402 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_hugetlb_folio()
2414 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_hugetlb_folio()
2421 h->surplus_huge_pages++; in alloc_surplus_hugetlb_folio()
2422 h->surplus_huge_pages_node[folio_nid(folio)]++; in alloc_surplus_hugetlb_folio()
2430 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_hugetlb_folio() argument
2435 if (hstate_is_gigantic(h)) in alloc_migrate_hugetlb_folio()
2438 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_hugetlb_folio()
2457 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, in alloc_buddy_hugetlb_folio_with_mpol() argument
2462 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_hugetlb_folio_with_mpol()
2471 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2478 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2484 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, in alloc_hugetlb_folio_nodemask() argument
2488 if (available_huge_pages(h)) { in alloc_hugetlb_folio_nodemask()
2491 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in alloc_hugetlb_folio_nodemask()
2500 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); in alloc_hugetlb_folio_nodemask()
2504 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_hugetlb_folio_vma() argument
2513 gfp_mask = htlb_alloc_mask(h); in alloc_hugetlb_folio_vma()
2515 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); in alloc_hugetlb_folio_vma()
2542 static int gather_surplus_pages(struct hstate *h, long delta) in gather_surplus_pages() argument
2552 nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); in gather_surplus_pages()
2555 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2557 h->resv_huge_pages += delta; in gather_surplus_pages()
2570 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), in gather_surplus_pages()
2590 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2591 (h->free_huge_pages + allocated); in gather_surplus_pages()
2611 h->resv_huge_pages += delta; in gather_surplus_pages()
2619 enqueue_hugetlb_folio(h, folio); in gather_surplus_pages()
2643 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2652 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2654 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in return_unused_surplus_pages()
2661 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2672 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); in return_unused_surplus_pages()
2681 update_and_free_pages_bulk(h, &page_list); in return_unused_surplus_pages()
2722 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2735 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2802 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2805 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2808 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2811 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2814 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2817 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2820 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2823 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2826 static long vma_del_reservation(struct hstate *h, in vma_del_reservation() argument
2829 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
2852 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, in restore_reserve_on_error() argument
2855 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2872 (void)vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2874 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2885 rc = vma_del_reservation(h, vma, address); in restore_reserve_on_error()
2921 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2933 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, in alloc_and_dissolve_hugetlb_folio() argument
2936 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_and_dissolve_hugetlb_folio()
2948 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); in alloc_and_dissolve_hugetlb_folio()
2951 __prep_new_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2989 remove_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
2995 __prep_account_new_huge_page(h, nid); in alloc_and_dissolve_hugetlb_folio()
2996 enqueue_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
3002 update_and_free_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
3011 update_and_free_hugetlb_folio(h, new_folio, false); in alloc_and_dissolve_hugetlb_folio()
3018 struct hstate *h; in isolate_or_dissolve_huge_page() local
3029 h = folio_hstate(folio); in isolate_or_dissolve_huge_page()
3041 if (hstate_is_gigantic(h)) in isolate_or_dissolve_huge_page()
3047 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); in isolate_or_dissolve_huge_page()
3056 struct hstate *h = hstate_vma(vma); in alloc_hugetlb_folio() local
3064 idx = hstate_index(h); in alloc_hugetlb_folio()
3070 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_hugetlb_folio()
3084 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
3105 idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3110 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3120 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_hugetlb_folio()
3123 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); in alloc_hugetlb_folio()
3129 h->resv_huge_pages--; in alloc_hugetlb_folio()
3131 list_add(&folio->lru, &h->hugepage_activelist); in alloc_hugetlb_folio()
3136 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); in alloc_hugetlb_folio()
3141 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3149 map_commit = vma_commit_reservation(h, vma, addr); in alloc_hugetlb_folio()
3163 hugetlb_acct_memory(h, -rsv_adjust); in alloc_hugetlb_folio()
3166 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in alloc_hugetlb_folio()
3167 pages_per_huge_page(h), folio); in alloc_hugetlb_folio()
3174 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_hugetlb_folio()
3177 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3182 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
3186 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3188 int __alloc_bootmem_huge_page(struct hstate *h, int nid) in __alloc_bootmem_huge_page() argument
3195 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3202 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
3204 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3220 m->hstate = h; in __alloc_bootmem_huge_page()
3235 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
3237 VM_BUG_ON(!hstate_is_gigantic(h)); in gather_bootmem_prealloc()
3239 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { in gather_bootmem_prealloc()
3241 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); in gather_bootmem_prealloc()
3245 free_gigantic_folio(folio, huge_page_order(h)); in gather_bootmem_prealloc()
3253 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc()
3257 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) in hugetlb_hstate_alloc_pages_onenode() argument
3262 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3263 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages_onenode()
3264 if (!alloc_bootmem_huge_page(h, nid)) in hugetlb_hstate_alloc_pages_onenode()
3268 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in hugetlb_hstate_alloc_pages_onenode()
3270 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, in hugetlb_hstate_alloc_pages_onenode()
3278 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3281 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages_onenode()
3283 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3284 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3285 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3288 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
3295 if (hstate_is_gigantic(h) && hugetlb_cma_size) { in hugetlb_hstate_alloc_pages()
3302 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages()
3303 hugetlb_hstate_alloc_pages_onenode(h, i); in hugetlb_hstate_alloc_pages()
3312 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3330 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
3331 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3332 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) in hugetlb_hstate_alloc_pages()
3334 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
3340 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
3343 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
3345 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
3346 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
3353 struct hstate *h, *h2; in hugetlb_init_hstates() local
3355 for_each_hstate(h) { in hugetlb_init_hstates()
3357 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
3358 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
3368 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in hugetlb_init_hstates()
3370 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3373 if (h2 == h) in hugetlb_init_hstates()
3375 if (h2->order < h->order && in hugetlb_init_hstates()
3376 h2->order > h->demote_order) in hugetlb_init_hstates()
3377 h->demote_order = h2->order; in hugetlb_init_hstates()
3384 struct hstate *h; in report_hugepages() local
3386 for_each_hstate(h) { in report_hugepages()
3389 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
3391 buf, h->free_huge_pages); in report_hugepages()
3393 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); in report_hugepages()
3398 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3405 if (hstate_is_gigantic(h)) in try_to_free_low()
3413 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3415 if (count >= h->nr_huge_pages) in try_to_free_low()
3419 remove_hugetlb_folio(h, page_folio(page), false); in try_to_free_low()
3426 update_and_free_pages_bulk(h, &page_list); in try_to_free_low()
3430 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3441 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
3450 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3451 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3455 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3456 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3457 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3464 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3465 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3469 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
3470 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
3492 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3493 flush_free_hpage_work(h); in set_max_huge_pages()
3505 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
3523 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
3524 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3526 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3544 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3545 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3549 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3560 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
3586 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3588 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
3593 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
3594 page = remove_pool_huge_page(h, nodes_allowed, 0); in set_max_huge_pages()
3602 update_and_free_pages_bulk(h, &page_list); in set_max_huge_pages()
3603 flush_free_hpage_work(h); in set_max_huge_pages()
3606 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
3607 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
3611 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3613 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3620 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) in demote_free_hugetlb_folio() argument
3628 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); in demote_free_hugetlb_folio()
3630 remove_hugetlb_folio_for_demote(h, folio, false); in demote_free_hugetlb_folio()
3633 rc = hugetlb_vmemmap_restore(h, &folio->page); in demote_free_hugetlb_folio()
3638 add_hugetlb_folio(h, folio, false); in demote_free_hugetlb_folio()
3646 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); in demote_free_hugetlb_folio()
3657 for (i = 0; i < pages_per_huge_page(h); in demote_free_hugetlb_folio()
3678 h->max_huge_pages--; in demote_free_hugetlb_folio()
3680 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); in demote_free_hugetlb_folio()
3685 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in demote_pool_huge_page() argument
3694 if (!h->demote_order) { in demote_pool_huge_page()
3699 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in demote_pool_huge_page()
3700 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { in demote_pool_huge_page()
3703 return demote_free_hugetlb_folio(h, folio); in demote_pool_huge_page()
3745 struct hstate *h; in nr_hugepages_show_common() local
3749 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
3751 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3753 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3759 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
3765 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
3786 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
3795 struct hstate *h; in nr_hugepages_store_common() local
3804 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
3805 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
3846 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
3847 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
3855 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
3857 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
3865 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
3875 struct hstate *h; in free_hugepages_show() local
3879 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
3881 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
3883 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
3892 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
3893 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
3900 struct hstate *h; in surplus_hugepages_show() local
3904 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
3906 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
3908 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
3920 struct hstate *h; in demote_store() local
3927 h = kobj_to_hstate(kobj, &nid); in demote_store()
3937 mutex_lock(&h->resize_lock); in demote_store()
3946 nr_available = h->free_huge_pages_node[nid]; in demote_store()
3948 nr_available = h->free_huge_pages; in demote_store()
3949 nr_available -= h->resv_huge_pages; in demote_store()
3953 err = demote_pool_huge_page(h, n_mask); in demote_store()
3961 mutex_unlock(&h->resize_lock); in demote_store()
3972 struct hstate *h = kobj_to_hstate(kobj, NULL); in demote_size_show() local
3973 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
3982 struct hstate *h, *demote_hstate; in demote_size_store() local
3996 h = kobj_to_hstate(kobj, NULL); in demote_size_store()
3997 if (demote_order >= h->order) in demote_size_store()
4001 mutex_lock(&h->resize_lock); in demote_size_store()
4002 h->demote_order = demote_order; in demote_size_store()
4003 mutex_unlock(&h->resize_lock); in demote_size_store()
4035 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
4040 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
4042 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
4053 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
4057 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
4127 struct hstate *h; in hugetlb_unregister_node() local
4133 for_each_hstate(h) { in hugetlb_unregister_node()
4134 int idx = hstate_index(h); in hugetlb_unregister_node()
4139 if (h->demote_order) in hugetlb_unregister_node()
4157 struct hstate *h; in hugetlb_register_node() local
4172 for_each_hstate(h) { in hugetlb_register_node()
4173 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
4178 h->name, node->dev.id); in hugetlb_register_node()
4221 struct hstate *h; in hugetlb_sysfs_init() local
4228 for_each_hstate(h) { in hugetlb_sysfs_init()
4229 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
4232 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4328 struct hstate *h; in hugetlb_add_hstate() local
4336 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
4337 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key); in hugetlb_add_hstate()
4338 h->order = order; in hugetlb_add_hstate()
4339 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4341 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4342 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4343 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4344 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4345 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4346 huge_page_size(h)/SZ_1K); in hugetlb_add_hstate()
4348 parsed_hstate = h; in hugetlb_add_hstate()
4471 struct hstate *h; in hugepagesz_setup() local
4481 h = size_to_hstate(size); in hugepagesz_setup()
4482 if (h) { in hugepagesz_setup()
4490 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
4501 parsed_hstate = h; in hugepagesz_setup()
4560 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
4565 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4566 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
4598 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
4599 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4611 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
4637 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
4644 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4646 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
4656 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4705 struct hstate *h; in hugetlb_report_meminfo() local
4711 for_each_hstate(h) { in hugetlb_report_meminfo()
4712 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4714 total += huge_page_size(h) * count; in hugetlb_report_meminfo()
4716 if (h == &default_hstate) in hugetlb_report_meminfo()
4724 h->free_huge_pages, in hugetlb_report_meminfo()
4725 h->resv_huge_pages, in hugetlb_report_meminfo()
4726 h->surplus_huge_pages, in hugetlb_report_meminfo()
4727 huge_page_size(h) / SZ_1K); in hugetlb_report_meminfo()
4735 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
4744 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4745 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4746 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4751 struct hstate *h; in hugetlb_show_meminfo_node() local
4756 for_each_hstate(h) in hugetlb_show_meminfo_node()
4759 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4760 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4761 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4762 huge_page_size(h) / SZ_1K); in hugetlb_show_meminfo_node()
4774 struct hstate *h; in hugetlb_total_pages() local
4777 for_each_hstate(h) in hugetlb_total_pages()
4778 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
4782 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
4814 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
4817 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
4818 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
4825 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
4872 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
4884 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
4885 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
4895 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
5037 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range() local
5038 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
5039 unsigned long npages = pages_per_huge_page(h); in copy_hugetlb_page_range()
5061 last_addr_mask = hugetlb_mask_last_page(h); in copy_hugetlb_page_range()
5088 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5089 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5168 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5169 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5173 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
5222 struct hstate *h = hstate_vma(vma); in move_huge_pte() local
5227 dst_ptl = huge_pte_lock(h, mm, dst_pte); in move_huge_pte()
5228 src_ptl = huge_pte_lockptr(h, mm, src_pte); in move_huge_pte()
5250 struct hstate *h = hstate_vma(vma); in move_hugetlb_page_tables() local
5252 unsigned long sz = huge_page_size(h); in move_hugetlb_page_tables()
5270 last_addr_mask = hugetlb_mask_last_page(h); in move_hugetlb_page_tables()
5319 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
5320 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
5325 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
5326 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
5335 last_addr_mask = hugetlb_mask_last_page(h); in __unmap_hugepage_range()
5344 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
5401 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
5410 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
5414 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
5507 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
5516 address = address & huge_page_mask(h); in unmap_ref_private()
5549 address + huge_page_size(h), page, 0); in unmap_ref_private()
5566 struct hstate *h = hstate_vma(vma); in hugetlb_wp() local
5571 unsigned long haddr = address & huge_page_mask(h); in hugetlb_wp()
5666 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_wp()
5676 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_wp()
5708 haddr + huge_page_size(h)); in hugetlb_wp()
5716 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_wp()
5726 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); in hugetlb_wp()
5739 restore_reserve_on_error(h, vma, haddr, new_folio); in hugetlb_wp()
5753 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
5757 pgoff_t idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
5771 struct hstate *h = hstate_inode(inode); in hugetlb_add_to_page_cache() local
5790 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
5834 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, in hugetlb_pte_stable() argument
5840 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_pte_stable()
5853 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
5860 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
5883 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
5905 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5929 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) in hugetlb_no_page()
5935 clear_huge_page(&folio->page, address, pages_per_huge_page(h)); in hugetlb_no_page()
5949 restore_reserve_on_error(h, vma, haddr, folio); in hugetlb_no_page()
5970 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
5979 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5996 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
6001 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
6004 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
6022 set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h)); in hugetlb_no_page()
6024 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
6050 restore_reserve_on_error(h, vma, haddr, folio); in hugetlb_no_page()
6091 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
6094 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
6108 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
6118 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); in hugetlb_fault()
6170 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
6183 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
6188 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
6195 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
6289 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mfill_atomic_pte() local
6291 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6302 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6312 huge_page_size(h)); in hugetlb_mfill_atomic_pte()
6332 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6352 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6358 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6372 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6404 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mfill_atomic_pte()
6421 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6463 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h)); in hugetlb_mfill_atomic_pte()
6465 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mfill_atomic_pte()
6484 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6494 struct hstate *h = hstate_vma(vma); in hugetlb_follow_page_mask() local
6496 unsigned long haddr = address & huge_page_mask(h); in hugetlb_follow_page_mask()
6503 pte = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_follow_page_mask()
6507 ptl = huge_pte_lock(h, mm, pte); in hugetlb_follow_page_mask()
6525 page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT)); in hugetlb_follow_page_mask()
6543 *page_mask = (1U << huge_page_order(h)) - 1; in hugetlb_follow_page_mask()
6555 !hugetlbfs_pagecache_present(h, vma, address)) in hugetlb_follow_page_mask()
6569 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
6570 long pages = 0, psize = huge_page_size(h); in hugetlb_change_protection()
6592 last_addr_mask = hugetlb_mask_last_page(h); in hugetlb_change_protection()
6611 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
6705 return pages > 0 ? (pages << h->order) : pages; in hugetlb_change_protection()
6715 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
6771 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6772 chg * pages_per_huge_page(h), &h_cg) < 0) in hugetlb_reserve_pages()
6779 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
6795 if (hugetlb_acct_memory(h, gbl_reserve) < 0) in hugetlb_reserve_pages()
6810 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
6813 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
6830 hstate_index(h), in hugetlb_reserve_pages()
6831 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6835 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
6852 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6853 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6872 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
6894 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
6905 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
7176 unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7178 unsigned long hp_size = huge_page_size(h); in hugetlb_mask_last_page()
7191 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7194 if (huge_page_size(h) == PMD_SIZE) in hugetlb_mask_last_page()
7265 struct hstate *h = folio_hstate(old_folio); in move_hugetlb_state() local
7295 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7296 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7297 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7307 struct hstate *h = hstate_vma(vma); in hugetlb_unshare_pmds() local
7308 unsigned long sz = huge_page_size(h); in hugetlb_unshare_pmds()
7335 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_unshare_pmds()
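
For context, the functions indexed above are exercised whenever a task maps hugetlb memory. The following is a minimal userspace sketch (not part of the listing itself) that drives that path: mmap() with MAP_HUGETLB takes a reservation through hugetlb_reserve_pages(), and the first write faults in a huge page via hugetlb_fault() and alloc_hugetlb_folio(). It assumes an x86-64 system with a 2 MiB default hugepage size and a pre-populated pool (for example via /proc/sys/vm/nr_hugepages).

/*
 * Minimal userspace sketch: exercise the hugetlb allocation path listed
 * above. Assumes a 2 MiB default hugepage size (x86-64) and a non-empty
 * pool, e.g. "echo 8 > /proc/sys/vm/nr_hugepages".
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LENGTH	(2UL * 1024 * 1024)	/* one default-sized huge page */

int main(void)
{
	/* Reservation is taken at mmap() time (hugetlb_reserve_pages()). */
	void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* empty pool or no hugetlb support */
		return 1;
	}

	/* First touch faults via hugetlb_fault() -> alloc_hugetlb_folio(). */
	memset(addr, 0, LENGTH);

	munmap(addr, LENGTH);
	return 0;
}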