Lines matching references to: h

98 static int hugetlb_acct_memory(struct hstate *h, long delta);
117 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
129 spool->hstate = h; in hugepage_new_subpool()
132 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
279 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
286 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
300 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
304 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
370 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
409 record_hugetlb_cgroup_uncharge_info(h_cg, h, in add_reservation_in_range()
428 record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg); in add_reservation_in_range()
515 long in_regions_needed, struct hstate *h, in region_add() argument
553 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
752 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
754 if (!hugetlb_acct_memory(h, 1)) in hugetlb_fix_reserve_counts()
799 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
802 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
803 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
878 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
881 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
887 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
888 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1068 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
1071 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1072 h->free_huge_pages++; in enqueue_huge_page()
1073 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
1077 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact() argument
1082 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
1089 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
1092 h->free_huge_pages--; in dequeue_huge_page_node_exact()
1093 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
1100 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
1126 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
1136 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
1153 h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
1157 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
1160 gfp_mask = htlb_alloc_mask(h); in dequeue_huge_page_vma()
1162 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1165 h->resv_huge_pages--; in dequeue_huge_page_vma()
1203 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1210 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1211 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1222 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1228 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1229 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1282 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1285 unsigned long nr_pages = 1UL << huge_page_order(h); in alloc_gigantic_page()
1296 huge_page_order(h), true); in alloc_gigantic_page()
1307 huge_page_order(h), true); in alloc_gigantic_page()
1319 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1327 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1337 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1342 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in update_and_free_page()
1345 h->nr_huge_pages--; in update_and_free_page()
1346 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1347 for (i = 0; i < pages_per_huge_page(h); in update_and_free_page()
1374 if (hstate_is_gigantic(h)) { in update_and_free_page()
1381 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1382 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1386 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1392 struct hstate *h; in size_to_hstate() local
1394 for_each_hstate(h) { in size_to_hstate()
1395 if (huge_page_size(h) == size) in size_to_hstate()
1396 return h; in size_to_hstate()
1453 struct hstate *h = page_hstate(page); in __free_huge_page() local
1488 hugetlb_cgroup_uncharge_page(hstate_index(h), in __free_huge_page()
1489 pages_per_huge_page(h), page); in __free_huge_page()
1490 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in __free_huge_page()
1491 pages_per_huge_page(h), page); in __free_huge_page()
1493 h->resv_huge_pages++; in __free_huge_page()
1498 update_and_free_page(h, page); in __free_huge_page()
1499 } else if (h->surplus_huge_pages_node[nid]) { in __free_huge_page()
1502 update_and_free_page(h, page); in __free_huge_page()
1503 h->surplus_huge_pages--; in __free_huge_page()
1504 h->surplus_huge_pages_node[nid]--; in __free_huge_page()
1507 enqueue_huge_page(h, page); in __free_huge_page()
1560 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1567 h->nr_huge_pages++; in prep_new_huge_page()
1568 h->nr_huge_pages_node[nid]++; in prep_new_huge_page()
1665 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page() argument
1669 int order = huge_page_order(h); in alloc_buddy_huge_page()
1716 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page() argument
1722 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1723 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
1725 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
1730 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1731 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
1732 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1741 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
1746 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
1748 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
1749 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
1769 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1775 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1780 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in free_pool_huge_page()
1781 !list_empty(&h->hugepage_freelists[node])) { in free_pool_huge_page()
1783 list_entry(h->hugepage_freelists[node].next, in free_pool_huge_page()
1786 h->free_huge_pages--; in free_pool_huge_page()
1787 h->free_huge_pages_node[node]--; in free_pool_huge_page()
1789 h->surplus_huge_pages--; in free_pool_huge_page()
1790 h->surplus_huge_pages_node[node]--; in free_pool_huge_page()
1792 update_and_free_page(h, page); in free_pool_huge_page()
1828 struct hstate *h = page_hstate(head); in dissolve_free_huge_page() local
1830 if (h->free_huge_pages - h->resv_huge_pages == 0) in dissolve_free_huge_page()
1861 h->free_huge_pages--; in dissolve_free_huge_page()
1862 h->free_huge_pages_node[nid]--; in dissolve_free_huge_page()
1863 h->max_huge_pages--; in dissolve_free_huge_page()
1864 update_and_free_page(h, head); in dissolve_free_huge_page()
1902 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page() argument
1907 if (hstate_is_gigantic(h)) in alloc_surplus_huge_page()
1911 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_huge_page()
1915 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
1927 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_huge_page()
1933 h->surplus_huge_pages++; in alloc_surplus_huge_page()
1934 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
1943 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page() argument
1948 if (hstate_is_gigantic(h)) in alloc_migrate_huge_page()
1951 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
1968 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol() argument
1973 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_huge_page_with_mpol()
1978 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
1985 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask() argument
1989 if (h->free_huge_pages - h->resv_huge_pages > 0) { in alloc_huge_page_nodemask()
1992 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
2000 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
2004 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma() argument
2013 gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_vma()
2015 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); in alloc_huge_page_vma()
2025 static int gather_surplus_pages(struct hstate *h, long delta) in gather_surplus_pages() argument
2035 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2037 h->resv_huge_pages += delta; in gather_surplus_pages()
2048 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
2064 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2065 (h->free_huge_pages + allocated); in gather_surplus_pages()
2085 h->resv_huge_pages += delta; in gather_surplus_pages()
2098 enqueue_huge_page(h, page); in gather_surplus_pages()
2125 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2131 if (hstate_is_gigantic(h)) in return_unused_surplus_pages()
2138 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2153 h->resv_huge_pages--; in return_unused_surplus_pages()
2155 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) in return_unused_surplus_pages()
2162 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2196 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2209 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2267 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2270 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2273 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2276 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2279 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2282 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2285 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2288 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2302 static void restore_reserve_on_error(struct hstate *h, in restore_reserve_on_error() argument
2307 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2323 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2331 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2339 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
2347 idx = hstate_index(h); in alloc_huge_page()
2353 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2367 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2388 idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2393 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2403 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2406 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2412 h->resv_huge_pages--; in alloc_huge_page()
2414 list_add(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2417 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2422 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2430 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2444 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
2446 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in alloc_huge_page()
2447 pages_per_huge_page(h), page); in alloc_huge_page()
2452 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
2455 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2460 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2464 int alloc_bootmem_huge_page(struct hstate *h)
2466 int __alloc_bootmem_huge_page(struct hstate *h) in __alloc_bootmem_huge_page() argument
2471 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
2475 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
2490 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in __alloc_bootmem_huge_page()
2494 m->hstate = h; in __alloc_bootmem_huge_page()
2508 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
2510 VM_BUG_ON(!hstate_is_gigantic(h)); in gather_bootmem_prealloc()
2512 prep_compound_gigantic_page(page, huge_page_order(h)); in gather_bootmem_prealloc()
2514 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2522 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc()
2527 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
2532 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2550 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
2551 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2556 if (!alloc_bootmem_huge_page(h)) in hugetlb_hstate_alloc_pages()
2558 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
2564 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
2567 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
2569 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
2570 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
2578 struct hstate *h; in hugetlb_init_hstates() local
2580 for_each_hstate(h) { in hugetlb_init_hstates()
2581 if (minimum_order > huge_page_order(h)) in hugetlb_init_hstates()
2582 minimum_order = huge_page_order(h); in hugetlb_init_hstates()
2585 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
2586 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
2593 struct hstate *h; in report_hugepages() local
2595 for_each_hstate(h) { in report_hugepages()
2598 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
2600 buf, h->free_huge_pages); in report_hugepages()
2605 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2610 if (hstate_is_gigantic(h)) in try_to_free_low()
2615 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
2617 if (count >= h->nr_huge_pages) in try_to_free_low()
2622 update_and_free_page(h, page); in try_to_free_low()
2623 h->free_huge_pages--; in try_to_free_low()
2624 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
2629 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2640 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
2648 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2649 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
2653 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2654 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
2655 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
2662 h->surplus_huge_pages += delta; in adjust_pool_surplus()
2663 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
2667 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
2668 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
2695 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
2713 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
2714 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2733 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
2734 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
2738 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2749 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
2775 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
2777 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
2778 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
2779 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
2783 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
2784 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
2788 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
2825 struct hstate *h; in nr_hugepages_show_common() local
2829 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
2831 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
2833 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
2839 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
2845 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
2866 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
2875 struct hstate *h; in nr_hugepages_store_common() local
2884 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
2885 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
2925 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
2926 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
2934 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
2936 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
2944 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
2954 struct hstate *h; in free_hugepages_show() local
2958 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
2960 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
2962 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
2971 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
2972 return sprintf(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
2979 struct hstate *h; in surplus_hugepages_show() local
2983 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
2985 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
2987 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
3009 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
3014 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
3016 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
3031 struct hstate *h; in hugetlb_sysfs_init() local
3038 for_each_hstate(h) { in hugetlb_sysfs_init()
3039 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
3042 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
3104 struct hstate *h; in hugetlb_unregister_node() local
3110 for_each_hstate(h) { in hugetlb_unregister_node()
3111 int idx = hstate_index(h); in hugetlb_unregister_node()
3129 struct hstate *h; in hugetlb_register_node() local
3141 for_each_hstate(h) { in hugetlb_register_node()
3142 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
3147 h->name, node->dev.id); in hugetlb_register_node()
3264 struct hstate *h; in hugetlb_add_hstate() local
3272 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
3273 h->order = order; in hugetlb_add_hstate()
3274 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); in hugetlb_add_hstate()
3275 h->nr_huge_pages = 0; in hugetlb_add_hstate()
3276 h->free_huge_pages = 0; in hugetlb_add_hstate()
3278 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
3279 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
3280 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
3281 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
3282 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
3283 huge_page_size(h)/1024); in hugetlb_add_hstate()
3285 parsed_hstate = h; in hugetlb_add_hstate()
3349 struct hstate *h; in hugepagesz_setup() local
3359 h = size_to_hstate(size); in hugepagesz_setup()
3360 if (h) { in hugepagesz_setup()
3368 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
3379 parsed_hstate = h; in hugepagesz_setup()
3434 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
3439 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
3440 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
3474 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
3475 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
3487 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
3513 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
3520 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
3522 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
3532 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
3543 struct hstate *h; in hugetlb_report_meminfo() local
3549 for_each_hstate(h) { in hugetlb_report_meminfo()
3550 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
3552 total += (PAGE_SIZE << huge_page_order(h)) * count; in hugetlb_report_meminfo()
3554 if (h == &default_hstate) in hugetlb_report_meminfo()
3562 h->free_huge_pages, in hugetlb_report_meminfo()
3563 h->resv_huge_pages, in hugetlb_report_meminfo()
3564 h->surplus_huge_pages, in hugetlb_report_meminfo()
3565 (PAGE_SIZE << huge_page_order(h)) / 1024); in hugetlb_report_meminfo()
3573 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
3582 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3583 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3584 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
3589 struct hstate *h; in hugetlb_show_meminfo() local
3596 for_each_hstate(h) in hugetlb_show_meminfo()
3599 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo()
3600 h->free_huge_pages_node[nid], in hugetlb_show_meminfo()
3601 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo()
3602 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_show_meminfo()
3614 struct hstate *h; in hugetlb_total_pages() local
3617 for_each_hstate(h) in hugetlb_total_pages()
3618 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
3622 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
3651 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
3654 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
3655 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
3662 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
3689 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
3698 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
3699 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3709 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
3818 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range() local
3819 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
3865 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
3866 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
3908 hugetlb_count_add(pages_per_huge_page(h), dst); in copy_hugetlb_page_range()
3932 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
3933 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
3938 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
3939 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
3961 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
4005 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
4009 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
4013 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
4092 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
4101 address = address & huge_page_mask(h); in unmap_ref_private()
4134 address + huge_page_size(h), page); in unmap_ref_private()
4150 struct hstate *h = hstate_vma(vma); in hugetlb_cow() local
4154 unsigned long haddr = address & huge_page_mask(h); in hugetlb_cow()
4215 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_cow()
4225 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
4250 pages_per_huge_page(h)); in hugetlb_cow()
4254 haddr + huge_page_size(h)); in hugetlb_cow()
4262 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
4280 restore_reserve_on_error(h, vma, haddr, new_page); in hugetlb_cow()
4290 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page() argument
4297 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
4306 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
4314 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
4326 struct hstate *h = hstate_inode(inode); in huge_add_to_page_cache() local
4340 inode->i_blocks += blocks_per_huge_page(h); in huge_add_to_page_cache()
4350 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
4357 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
4377 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
4426 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
4436 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
4464 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
4476 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
4481 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
4484 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
4498 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
4524 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4563 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
4566 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
4568 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4581 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
4597 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4608 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
4641 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
4646 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
4649 pagecache_page = hugetlbfs_pagecache_page(h, in hugetlb_fault()
4653 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
4726 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mcopy_atomic_pte() local
4737 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mcopy_atomic_pte()
4750 pages_per_huge_page(h), false); in hugetlb_mcopy_atomic_pte()
4772 idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
4778 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4794 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); in hugetlb_mcopy_atomic_pte()
4806 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4831 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mcopy_atomic_pte()
4860 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
4885 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), in follow_hugetlb_page()
4886 huge_page_size(h)); in follow_hugetlb_page()
4888 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
4899 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
4964 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
4972 (vaddr + huge_page_size(h) < vma->vm_end) && in follow_hugetlb_page()
4973 (remainder >= pages_per_huge_page(h))) { in follow_hugetlb_page()
4974 vaddr += huge_page_size(h); in follow_hugetlb_page()
4975 remainder -= pages_per_huge_page(h); in follow_hugetlb_page()
4976 i += pages_per_huge_page(h); in follow_hugetlb_page()
5010 pfn_offset < pages_per_huge_page(h)) { in follow_hugetlb_page()
5045 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
5064 for (; address < end; address += huge_page_size(h)) { in hugetlb_change_protection()
5066 ptep = huge_pte_offset(mm, address, huge_page_size(h)); in hugetlb_change_protection()
5069 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
5090 newpte, huge_page_size(h)); in hugetlb_change_protection()
5127 return pages << h->order; in hugetlb_change_protection()
5136 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
5190 hstate_index(h), chg * pages_per_huge_page(h), &h_cg); in hugetlb_reserve_pages()
5201 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
5219 ret = hugetlb_acct_memory(h, gbl_reserve); in hugetlb_reserve_pages()
5236 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
5239 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
5257 hstate_index(h), in hugetlb_reserve_pages()
5258 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
5262 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
5278 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
5279 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
5295 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
5317 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
5325 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
5609 struct hstate *h = hstate_vma(vma); in follow_huge_pmd_pte() local
5621 ptep = huge_pte_offset(mm, address, huge_page_size(h)); in follow_huge_pmd_pte()
5625 ptl = huge_pte_lock(h, mm, ptep); in follow_huge_pmd_pte()
5629 ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); in follow_huge_pmd_pte()
5706 struct hstate *h = page_hstate(oldpage); in move_hugetlb_state() local
5729 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
5730 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
5731 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
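
Below is a minimal, self-contained user-space sketch of the per-node free-list bookkeeping that the enqueue_huge_page() and dequeue_huge_page_node_exact() references above revolve around. It is illustrative only: the hstate_sketch and hpage types, the list helpers, and the main() driver are invented for this example, and only the counter names (free_huge_pages, free_huge_pages_node[]) mirror the listing. The kernel's real code additionally runs under hugetlb_lock and moves pages onto an active list, and it tracks surplus and reserved pages, none of which is modeled here.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * Builds stand-alone with: cc -Wall sketch.c
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_NUMNODES 4

struct list_head {
	struct list_head *next, *prev;
};

/* Stand-in for the kernel's struct page; only what the sketch needs. */
struct hpage {
	struct list_head lru;
	int nid;			/* node this huge page lives on */
};

/* Stand-in for struct hstate, limited to the free-list counters above. */
struct hstate_sketch {
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned long free_huge_pages;
	unsigned long free_huge_pages_node[MAX_NUMNODES];
};

static void list_init(struct list_head *head)
{
	head->next = head->prev = head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Counter updates mirroring the enqueue_huge_page() lines (1071-1073). */
static void enqueue_sketch(struct hstate_sketch *h, struct hpage *page)
{
	list_add(&page->lru, &h->hugepage_freelists[page->nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[page->nid]++;
}

/* Counter updates mirroring dequeue_huge_page_node_exact() (1089-1093). */
static struct hpage *dequeue_sketch(struct hstate_sketch *h, int nid)
{
	struct list_head *first = h->hugepage_freelists[nid].next;

	if (first == &h->hugepage_freelists[nid])	/* free list empty */
		return NULL;
	list_del(first);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return (struct hpage *)((char *)first - offsetof(struct hpage, lru));
}

int main(void)
{
	struct hstate_sketch h = { .free_huge_pages = 0 };
	struct hpage page = { .nid = 1 };
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		list_init(&h.hugepage_freelists[i]);

	enqueue_sketch(&h, &page);
	printf("free: %lu (node 1: %lu)\n",
	       h.free_huge_pages, h.free_huge_pages_node[1]);
	dequeue_sketch(&h, 1);
	printf("free: %lu (node 1: %lu)\n",
	       h.free_huge_pages, h.free_huge_pages_node[1]);
	return 0;
}

Compiled stand-alone, the driver enqueues one page on node 1, then dequeues it again, printing the global and per-node counters after each step.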