Lines matching references to h (the struct hstate pointer) in mm/hugetlb.c

75 static int hugetlb_acct_memory(struct hstate *h, long delta);
94 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
106 spool->hstate = h; in hugepage_new_subpool()
109 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
582 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
584 hugetlb_acct_memory(h, 1); in hugetlb_fix_reserve_counts()
623 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
626 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
627 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
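
For orientation, the fragments at 623-627 reassemble into the small helper below; the second and third parameter names are assumptions inferred from the usage shown in the listing, not copied from it.

static pgoff_t vma_hugecache_offset(struct hstate *h,
                                    struct vm_area_struct *vma, unsigned long address)
{
        /* page-cache index of the huge page backing 'address' in this VMA */
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                (vma->vm_pgoff >> huge_page_order(h));
}
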
866 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
869 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
870 h->free_huge_pages++; in enqueue_huge_page()
871 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
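
Reassembled sketch of enqueue_huge_page() from lines 866-871; the nid derivation via page_to_nid() is an assumption, since that line does not reference h and therefore does not appear in the listing.

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);             /* assumed: node of the page */

        /* put the page back on its node's free list and bump the counters */
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}
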
874 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact() argument
878 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node_exact()
885 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node_exact()
887 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
889 h->free_huge_pages--; in dequeue_huge_page_node_exact()
890 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
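
A sketch of dequeue_huge_page_node_exact() pieced together from lines 874-890; the PageHWPoison() filter and the return path are assumptions filled in around the listed fragments, and other bookkeeping (e.g. re-establishing the page refcount) is omitted.

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
        struct page *page;

        /* pick the first usable page on this node's free list (assumed filter) */
        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!PageHWPoison(page))
                        break;

        /* list exhausted without a hit: the iterator points back at the head */
        if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;

        list_move(&page->lru, &h->hugepage_activelist);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;                             /* assumed */
}
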
894 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
920 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
931 static inline gfp_t htlb_alloc_mask(struct hstate *h) in htlb_alloc_mask() argument
933 if (hugepage_movable_supported(h)) in htlb_alloc_mask()
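
The two lines above are essentially the whole helper; the sketch below fills in the return values, which are an assumption based on the usual GFP policy for hugetlb (allocate from the movable zone only when hugepage migration is supported).

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;     /* assumed */
        else
                return GFP_HIGHUSER;             /* assumed */
}
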
939 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
956 h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
960 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
963 gfp_mask = htlb_alloc_mask(h); in dequeue_huge_page_vma()
965 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
968 h->resv_huge_pages--; in dequeue_huge_page_vma()
1006 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1013 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1014 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1025 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1031 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1032 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
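
Lines 1006-1014 and 1025-1032 show two symmetric round-robin node cursors; here is a sketch of the allocation-side one, with the return statement assumed.

static int hstate_next_node_to_alloc(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        /* start from the cached cursor, clamped into nodes_allowed ... */
        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        /* ... and advance the cursor for the next caller */
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;                              /* assumed */
}
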
1115 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1118 unsigned int order = huge_page_order(h); in alloc_gigantic_page()
1154 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1157 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1165 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1175 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1179 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in update_and_free_page()
1182 h->nr_huge_pages--; in update_and_free_page()
1183 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1184 for (i = 0; i < pages_per_huge_page(h); i++) { in update_and_free_page()
1193 if (hstate_is_gigantic(h)) { in update_and_free_page()
1194 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1195 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1197 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1203 struct hstate *h; in size_to_hstate() local
1205 for_each_hstate(h) { in size_to_hstate()
1206 if (huge_page_size(h) == size) in size_to_hstate()
1207 return h; in size_to_hstate()
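
Lines 1203-1207 give the body of size_to_hstate(); the parameter and the NULL fall-through in this sketch are assumptions.

struct hstate *size_to_hstate(unsigned long size)        /* parameter assumed */
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;                             /* no hstate of this size */
}
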
1265 struct hstate *h = page_hstate(page); in __free_huge_page() local
1300 hugetlb_cgroup_uncharge_page(hstate_index(h), in __free_huge_page()
1301 pages_per_huge_page(h), page); in __free_huge_page()
1303 h->resv_huge_pages++; in __free_huge_page()
1308 update_and_free_page(h, page); in __free_huge_page()
1309 } else if (h->surplus_huge_pages_node[nid]) { in __free_huge_page()
1312 update_and_free_page(h, page); in __free_huge_page()
1313 h->surplus_huge_pages--; in __free_huge_page()
1314 h->surplus_huge_pages_node[nid]--; in __free_huge_page()
1317 enqueue_huge_page(h, page); in __free_huge_page()
1370 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1376 h->nr_huge_pages++; in prep_new_huge_page()
1377 h->nr_huge_pages_node[nid]++; in prep_new_huge_page()
1455 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page() argument
1459 int order = huge_page_order(h); in alloc_buddy_huge_page()
1506 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page() argument
1512 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1513 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
1515 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
1520 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1521 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
1522 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1531 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
1536 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
1538 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
1539 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
1559 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1565 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1570 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in free_pool_huge_page()
1571 !list_empty(&h->hugepage_freelists[node])) { in free_pool_huge_page()
1573 list_entry(h->hugepage_freelists[node].next, in free_pool_huge_page()
1576 h->free_huge_pages--; in free_pool_huge_page()
1577 h->free_huge_pages_node[node]--; in free_pool_huge_page()
1579 h->surplus_huge_pages--; in free_pool_huge_page()
1580 h->surplus_huge_pages_node[node]--; in free_pool_huge_page()
1582 update_and_free_page(h, page); in free_pool_huge_page()
1617 struct hstate *h = page_hstate(head); in dissolve_free_huge_page() local
1619 if (h->free_huge_pages - h->resv_huge_pages == 0) in dissolve_free_huge_page()
1630 h->free_huge_pages--; in dissolve_free_huge_page()
1631 h->free_huge_pages_node[nid]--; in dissolve_free_huge_page()
1632 h->max_huge_pages--; in dissolve_free_huge_page()
1633 update_and_free_page(h, head); in dissolve_free_huge_page()
1671 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page() argument
1676 if (hstate_is_gigantic(h)) in alloc_surplus_huge_page()
1680 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_huge_page()
1684 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
1696 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_huge_page()
1702 h->surplus_huge_pages++; in alloc_surplus_huge_page()
1703 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
1712 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page() argument
1717 if (hstate_is_gigantic(h)) in alloc_migrate_huge_page()
1720 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
1737 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol() argument
1742 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_huge_page_with_mpol()
1747 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
1754 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node() argument
1756 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_node()
1763 if (h->free_huge_pages - h->resv_huge_pages > 0) in alloc_huge_page_node()
1764 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL); in alloc_huge_page_node()
1768 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL); in alloc_huge_page_node()
1774 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask() argument
1777 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_nodemask()
1780 if (h->free_huge_pages - h->resv_huge_pages > 0) { in alloc_huge_page_nodemask()
1783 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1791 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1795 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma() argument
1804 gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_vma()
1806 page = alloc_huge_page_nodemask(h, node, nodemask); in alloc_huge_page_vma()
1816 static int gather_surplus_pages(struct hstate *h, int delta) in gather_surplus_pages() argument
1824 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
1826 h->resv_huge_pages += delta; in gather_surplus_pages()
1837 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
1853 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
1854 (h->free_huge_pages + allocated); in gather_surplus_pages()
1874 h->resv_huge_pages += delta; in gather_surplus_pages()
1887 enqueue_huge_page(h, page); in gather_surplus_pages()
1914 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
1920 if (hstate_is_gigantic(h)) in return_unused_surplus_pages()
1927 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
1942 h->resv_huge_pages--; in return_unused_surplus_pages()
1944 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) in return_unused_surplus_pages()
1951 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
1985 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
1997 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2046 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2049 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2052 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2055 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2058 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2061 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2064 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2067 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2081 static void restore_reserve_on_error(struct hstate *h, in restore_reserve_on_error() argument
2086 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2102 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2110 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2118 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
2125 idx = hstate_index(h); in alloc_huge_page()
2131 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2145 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2161 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2171 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2174 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2179 h->resv_huge_pages--; in alloc_huge_page()
2182 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2185 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2190 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2204 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
2209 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
2213 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2217 int alloc_bootmem_huge_page(struct hstate *h)
2219 int __alloc_bootmem_huge_page(struct hstate *h) in __alloc_bootmem_huge_page() argument
2224 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
2228 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
2243 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in __alloc_bootmem_huge_page()
2247 m->hstate = h; in __alloc_bootmem_huge_page()
2267 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
2270 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
2272 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2281 if (hstate_is_gigantic(h)) in gather_bootmem_prealloc()
2282 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
2287 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
2292 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2310 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
2311 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2312 if (!alloc_bootmem_huge_page(h)) in hugetlb_hstate_alloc_pages()
2314 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
2320 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
2323 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
2325 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
2326 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
2334 struct hstate *h; in hugetlb_init_hstates() local
2336 for_each_hstate(h) { in hugetlb_init_hstates()
2337 if (minimum_order > huge_page_order(h)) in hugetlb_init_hstates()
2338 minimum_order = huge_page_order(h); in hugetlb_init_hstates()
2341 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
2342 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
2349 struct hstate *h; in report_hugepages() local
2351 for_each_hstate(h) { in report_hugepages()
2354 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
2356 buf, h->free_huge_pages); in report_hugepages()
2361 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2366 if (hstate_is_gigantic(h)) in try_to_free_low()
2371 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
2373 if (count >= h->nr_huge_pages) in try_to_free_low()
2378 update_and_free_page(h, page); in try_to_free_low()
2379 h->free_huge_pages--; in try_to_free_low()
2380 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
2385 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2396 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
2404 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2405 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
2409 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2410 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
2411 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
2418 h->surplus_huge_pages += delta; in adjust_pool_surplus()
2419 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
2423 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
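
The macro on line 2423 defines "persistent" pages as the pool total minus the surplus; the set_max_huge_pages() fragments that follow (2489-2544) use it as the grow/shrink target. A rough sketch of the grow path, simplified from those fragments, with locking omitted and the last argument to alloc_pool_huge_page() assumed:

        /* first convert existing surplus pages into persistent ones ... */
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, -1))
                        break;
        }
        /* ... then allocate fresh huge pages until the requested count is reached */
        while (count > persistent_huge_pages(h)) {
                if (!alloc_pool_huge_page(h, nodes_allowed, NULL))   /* last arg assumed */
                        break;
        }
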
2424 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
2451 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
2469 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
2470 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2489 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
2490 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
2494 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2505 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
2531 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
2533 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
2534 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
2535 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
2539 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
2540 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
2544 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
2581 struct hstate *h; in nr_hugepages_show_common() local
2585 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
2587 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
2589 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
2595 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
2601 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
2622 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
2631 struct hstate *h; in nr_hugepages_store_common() local
2640 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
2641 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
2681 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
2682 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
2690 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
2692 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
2700 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
2710 struct hstate *h; in free_hugepages_show() local
2714 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
2716 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
2718 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
2727 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
2728 return sprintf(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
2735 struct hstate *h; in surplus_hugepages_show() local
2739 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
2741 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
2743 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
2765 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
2770 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
2772 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
2785 struct hstate *h; in hugetlb_sysfs_init() local
2792 for_each_hstate(h) { in hugetlb_sysfs_init()
2793 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
2796 pr_err("Hugetlb: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
2858 struct hstate *h; in hugetlb_unregister_node() local
2864 for_each_hstate(h) { in hugetlb_unregister_node()
2865 int idx = hstate_index(h); in hugetlb_unregister_node()
2883 struct hstate *h; in hugetlb_register_node() local
2895 for_each_hstate(h) { in hugetlb_register_node()
2896 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
2901 h->name, node->dev.id); in hugetlb_register_node()
2999 struct hstate *h; in hugetlb_add_hstate() local
3008 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
3009 h->order = order; in hugetlb_add_hstate()
3010 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); in hugetlb_add_hstate()
3011 h->nr_huge_pages = 0; in hugetlb_add_hstate()
3012 h->free_huge_pages = 0; in hugetlb_add_hstate()
3014 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
3015 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
3016 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
3017 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
3018 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
3019 huge_page_size(h)/1024); in hugetlb_add_hstate()
3021 parsed_hstate = h; in hugetlb_add_hstate()
3089 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
3090 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
3103 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
3130 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
3137 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
3139 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
3150 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
3161 struct hstate *h; in hugetlb_report_meminfo() local
3167 for_each_hstate(h) { in hugetlb_report_meminfo()
3168 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
3170 total += (PAGE_SIZE << huge_page_order(h)) * count; in hugetlb_report_meminfo()
3172 if (h == &default_hstate) in hugetlb_report_meminfo()
3180 h->free_huge_pages, in hugetlb_report_meminfo()
3181 h->resv_huge_pages, in hugetlb_report_meminfo()
3182 h->surplus_huge_pages, in hugetlb_report_meminfo()
3183 (PAGE_SIZE << huge_page_order(h)) / 1024); in hugetlb_report_meminfo()
3191 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
3198 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3199 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3200 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
3205 struct hstate *h; in hugetlb_show_meminfo() local
3212 for_each_hstate(h) in hugetlb_show_meminfo()
3215 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo()
3216 h->free_huge_pages_node[nid], in hugetlb_show_meminfo()
3217 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo()
3218 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_show_meminfo()
3230 struct hstate *h; in hugetlb_total_pages() local
3233 for_each_hstate(h) in hugetlb_total_pages()
3234 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
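
Lines 3230-3234 reassemble into the following accounting helper; the accumulator initialisation and the return are assumptions.

unsigned long hugetlb_total_pages(void)
{
        struct hstate *h;
        unsigned long nr_total_pages = 0;        /* assumed initialisation */

        /* sum base pages across every huge page size in the system */
        for_each_hstate(h)
                nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
        return nr_total_pages;
}
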
3238 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
3261 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
3264 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { in hugetlb_acct_memory()
3265 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
3272 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
3297 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
3306 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
3307 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3319 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
3426 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range() local
3427 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
3464 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
3465 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
3507 hugetlb_count_add(pages_per_huge_page(h), dst); in copy_hugetlb_page_range()
3529 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
3530 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
3534 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
3535 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
3557 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
3603 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
3607 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
3611 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
3674 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
3683 address = address & huge_page_mask(h); in unmap_ref_private()
3716 address + huge_page_size(h), page); in unmap_ref_private()
3732 struct hstate *h = hstate_vma(vma); in hugetlb_cow() local
3736 unsigned long haddr = address & huge_page_mask(h); in hugetlb_cow()
3787 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
3812 pages_per_huge_page(h)); in hugetlb_cow()
3816 haddr + huge_page_size(h)); in hugetlb_cow()
3824 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
3842 restore_reserve_on_error(h, vma, haddr, new_page); in hugetlb_cow()
3852 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page() argument
3859 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3868 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
3876 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3888 struct hstate *h = hstate_inode(inode); in huge_add_to_page_cache() local
3902 inode->i_blocks += blocks_per_huge_page(h); in huge_add_to_page_cache()
3912 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
3919 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
3940 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
3967 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); in hugetlb_no_page()
3988 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
3998 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
4026 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
4038 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
4043 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
4046 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
4047 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
4064 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
4088 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4094 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, in hugetlb_fault_mutex_hash() argument
4112 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, in hugetlb_fault_mutex_hash() argument
4129 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
4132 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
4134 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4142 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
4144 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4150 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
4157 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); in hugetlb_fault()
4187 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
4192 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
4195 pagecache_page = hugetlbfs_pagecache_page(h, in hugetlb_fault()
4199 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
4271 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mcopy_atomic_pte() local
4285 pages_per_huge_page(h), false); in hugetlb_mcopy_atomic_pte()
4307 idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
4313 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4329 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); in hugetlb_mcopy_atomic_pte()
4341 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4366 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mcopy_atomic_pte()
4395 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
4420 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), in follow_hugetlb_page()
4421 huge_page_size(h)); in follow_hugetlb_page()
4423 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
4434 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
4496 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
4525 pfn_offset < pages_per_huge_page(h)) { in follow_hugetlb_page()
4560 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
4579 for (; address < end; address += huge_page_size(h)) { in hugetlb_change_protection()
4581 ptep = huge_pte_offset(mm, address, huge_page_size(h)); in hugetlb_change_protection()
4584 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
4605 newpte, huge_page_size(h)); in hugetlb_change_protection()
4642 return pages << h->order; in hugetlb_change_protection()
4651 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
4717 ret = hugetlb_acct_memory(h, gbl_reserve); in hugetlb_reserve_pages()
4750 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
4767 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
4789 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
4797 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
5152 struct hstate *h = page_hstate(oldpage); in move_hugetlb_state() local
5175 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
5176 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
5177 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()