/mm/

hugetlb.c
    84:  static int hugetlb_acct_memory(struct hstate *h, long delta);
   116:  struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,   (in hugepage_new_subpool(), argument)
   128:  spool->hstate = h;   (in hugepage_new_subpool())
   131:  if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {   (in hugepage_new_subpool())
   281:  struct hstate *h,   (in record_hugetlb_cgroup_uncharge_info(), argument)
   288:  &h_cg->rsvd_hugepage[hstate_index(h)];   (in record_hugetlb_cgroup_uncharge_info())
   302:  resv->pages_per_hpage = pages_per_huge_page(h);   (in record_hugetlb_cgroup_uncharge_info())
   306:  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));   (in record_hugetlb_cgroup_uncharge_info())
   364:  long to, struct hstate *h, struct hugetlb_cgroup *cg,   (in hugetlb_resv_map_add(), argument)
   371:  record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);   (in hugetlb_resv_map_add())
   [all …]

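The hugepage_new_subpool() hits outline a small constructor pattern: record the hstate in the new pool, treat a minimum of -1 as "no up-front reservation", and otherwise charge the minimum immediately, failing construction if the accounting call fails. Below is a minimal userspace sketch of that pattern; the pool type, field names, and the acct_memory() stub are invented for the example and only stand in for the kernel's hugepage_subpool and hugetlb_acct_memory().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for struct hstate / struct hugepage_subpool. */
struct hstate { long free_pages; };
struct subpool { struct hstate *hstate; long min_pages; };

/* Stand-in for hugetlb_acct_memory(): reserve pages, 0 on success. */
static int acct_memory(struct hstate *h, long delta)
{
        if (delta > h->free_pages)
                return -1;              /* not enough pages to reserve */
        h->free_pages -= delta;
        return 0;
}

/* -1 means "no minimum"; otherwise reserve min_pages up front. */
static struct subpool *new_subpool(struct hstate *h, long min_pages)
{
        struct subpool *spool = malloc(sizeof(*spool));

        if (!spool)
                return NULL;
        spool->hstate = h;
        spool->min_pages = min_pages;
        if (min_pages != -1 && acct_memory(h, min_pages)) {
                free(spool);            /* undo on accounting failure */
                return NULL;
        }
        return spool;
}

int main(void)
{
        struct hstate h = { .free_pages = 8 };

        printf("min=4:   %s\n", new_subpool(&h, 4)   ? "ok" : "failed");
        printf("min=100: %s\n", new_subpool(&h, 100) ? "ok" : "failed");
        printf("min=-1:  %s\n", new_subpool(&h, -1)  ? "ok" : "failed");
        return 0;
}
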
hugetlb_vmemmap.c
   209:  static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)   (in free_vmemmap_pages_size_per_hpage(), argument)
   211:  return (unsigned long)free_vmemmap_pages_per_hpage(h) << PAGE_SHIFT;   (in free_vmemmap_pages_size_per_hpage())
   218:  int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)   (in alloc_huge_page_vmemmap(), argument)
   228:  vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);   (in alloc_huge_page_vmemmap())
   246:  void free_huge_page_vmemmap(struct hstate *h, struct page *head)   (in free_huge_page_vmemmap(), argument)
   251:  if (!free_vmemmap_pages_per_hpage(h))   (in free_huge_page_vmemmap())
   255:  vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h);   (in free_huge_page_vmemmap())
   267:  void __init hugetlb_vmemmap_init(struct hstate *h)   (in hugetlb_vmemmap_init(), argument)
   269:  unsigned int nr_pages = pages_per_huge_page(h);   (in hugetlb_vmemmap_init())
   294:  h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;   (in hugetlb_vmemmap_init())
   [all …]

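Two relationships are visible in these hits: free_vmemmap_pages_size_per_hpage() is just the per-hugepage count of freeable vmemmap pages shifted by PAGE_SHIFT into bytes, the alloc/free paths derive vmemmap_end by adding that byte size to vmemmap_addr, and hugetlb_vmemmap_init() stores vmemmap_pages - RESERVE_VMEMMAP_NR in h->nr_free_vmemmap_pages. A small C sketch of that arithmetic; the page size, page counts, and base address below are illustrative values, not taken from any real configuration.

#include <stdio.h>

#define PAGE_SHIFT 12ULL    /* assumed 4 KiB base pages for the example */

/* Illustrative stand-in for free_vmemmap_pages_per_hpage(h). */
static unsigned long long free_vmemmap_pages_per_hpage(void)
{
        return 7;           /* e.g. 7 of the 8 vmemmap pages of a 2 MiB hugepage */
}

/* Mirrors free_vmemmap_pages_size_per_hpage(): pages -> bytes. */
static unsigned long long free_vmemmap_size_per_hpage(void)
{
        return free_vmemmap_pages_per_hpage() << PAGE_SHIFT;
}

int main(void)
{
        /* Made-up vmemmap base address, only for printing a range. */
        unsigned long long vmemmap_addr = 0xffffea0000000000ULL;
        unsigned long long vmemmap_end  = vmemmap_addr + free_vmemmap_size_per_hpage();

        printf("range: %#llx - %#llx (%llu bytes)\n",
               vmemmap_addr, vmemmap_end, vmemmap_end - vmemmap_addr);
        return 0;
}
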
hugetlb_cgroup.c
   200:  struct hstate *h;   (in hugetlb_cgroup_css_offline(), local)
   206:  for_each_hstate(h) {   (in hugetlb_cgroup_css_offline())
   208:  list_for_each_entry(page, &h->hugepage_activelist, lru)   (in hugetlb_cgroup_css_offline())
   622:  struct hstate *h = &hstates[idx];   (in __hugetlb_cgroup_file_dfl_init(), local)
   625:  mem_fmt(buf, sizeof(buf), huge_page_size(h));   (in __hugetlb_cgroup_file_dfl_init())
   628:  cft = &h->cgroup_files_dfl[0];   (in __hugetlb_cgroup_file_dfl_init())
   636:  cft = &h->cgroup_files_dfl[1];   (in __hugetlb_cgroup_file_dfl_init())
   644:  cft = &h->cgroup_files_dfl[2];   (in __hugetlb_cgroup_file_dfl_init())
   651:  cft = &h->cgroup_files_dfl[3];   (in __hugetlb_cgroup_file_dfl_init())
   658:  cft = &h->cgroup_files_dfl[4];   (in __hugetlb_cgroup_file_dfl_init())
   [all …]

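Line 625 formats huge_page_size(h) into a short human-readable string which then names the per-size control files filled into cgroup_files_dfl[0..4]. The sketch below only mirrors the spirit of mem_fmt() and assumes cgroup-v2 style hugetlb.<size>.* file names; both the helper and the names are illustrative, not lifted from the source.

#include <stdio.h>

/* Format a byte count as "<n>KB" / "<n>MB" / "<n>GB", assuming the size
 * is an exact multiple of one of those units (true for hugepage sizes). */
static void mem_fmt(char *buf, int size, unsigned long long bytes)
{
        if (bytes >= (1ULL << 30) && !(bytes % (1ULL << 30)))
                snprintf(buf, size, "%lluGB", bytes >> 30);
        else if (bytes >= (1ULL << 20) && !(bytes % (1ULL << 20)))
                snprintf(buf, size, "%lluMB", bytes >> 20);
        else
                snprintf(buf, size, "%lluKB", bytes >> 10);
}

int main(void)
{
        char buf[32], name[64];

        mem_fmt(buf, sizeof(buf), 2ULL << 20);            /* 2 MiB hugepage */
        snprintf(name, sizeof(name), "hugetlb.%s.max", buf);
        printf("%s\n", name);                             /* hugetlb.2MB.max */

        mem_fmt(buf, sizeof(buf), 1ULL << 30);            /* 1 GiB hugepage */
        snprintf(name, sizeof(name), "hugetlb.%s.current", buf);
        printf("%s\n", name);                             /* hugetlb.1GB.current */
        return 0;
}
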
hugetlb_vmemmap.h
    14:  int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
    15:  void free_huge_page_vmemmap(struct hstate *h, struct page *head);
    16:  void hugetlb_vmemmap_init(struct hstate *h);
    22:  static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)   (in free_vmemmap_pages_per_hpage(), argument)
    24:  return h->nr_free_vmemmap_pages;   (in free_vmemmap_pages_per_hpage())
    27:  static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)   (in alloc_huge_page_vmemmap(), argument)
    32:  static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)   (in free_huge_page_vmemmap(), argument)
    36:  static inline void hugetlb_vmemmap_init(struct hstate *h)   (in hugetlb_vmemmap_init(), argument)
    40:  static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)   (in free_vmemmap_pages_per_hpage(), argument)

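The header hits show the usual pattern for a config-gated kernel feature: real prototypes (lines 14-16) in the enabled branch and static inline no-op stubs (lines 27-40) otherwise, so call sites never need their own #ifdefs. A hedged, generic sketch of that pattern follows; the config symbol and function names are placeholders, and only the freeable-pages helper's behaviour (return h->nr_free_vmemmap_pages when enabled, 0 when disabled) is mirrored from the hits.

/* Build as-is to get the "compiled out" stubs; building with
 *   cc -DCONFIG_EXAMPLE_FEATURE ...
 * switches to the real prototypes, which would then need definitions. */
#include <stdio.h>

struct hstate { unsigned int nr_free_vmemmap_pages; };
struct page;                    /* only used through pointers here */

#ifdef CONFIG_EXAMPLE_FEATURE
/* Real prototypes, implemented elsewhere when the feature is enabled. */
int  alloc_feature(struct hstate *h, struct page *head);
void free_feature(struct hstate *h, struct page *head);
void feature_init(struct hstate *h);
static inline unsigned int feature_pages(struct hstate *h)
{
        return h->nr_free_vmemmap_pages;
}
#else
/* No-op stubs keep call sites unconditional when the feature is off. */
static inline int alloc_feature(struct hstate *h, struct page *head)
{
        (void)h; (void)head;
        return 0;
}
static inline void free_feature(struct hstate *h, struct page *head)
{
        (void)h; (void)head;
}
static inline void feature_init(struct hstate *h)
{
        (void)h;
}
static inline unsigned int feature_pages(struct hstate *h)
{
        (void)h;
        return 0;               /* feature disabled: nothing to free */
}
#endif

int main(void)
{
        struct hstate h = { .nr_free_vmemmap_pages = 7 };

        feature_init(&h);
        printf("freeable vmemmap pages per hugepage: %u\n", feature_pages(&h));
        printf("alloc returned %d\n", alloc_feature(&h, NULL));
        free_feature(&h, NULL);
        return 0;
}
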
pagewalk.c
   286:  static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,   (in hugetlb_entry_end(), argument)
   289:  unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);   (in hugetlb_entry_end())
   297:  struct hstate *h = hstate_vma(vma);   (in walk_hugetlb_range(), local)
   299:  unsigned long hmask = huge_page_mask(h);   (in walk_hugetlb_range())
   300:  unsigned long sz = huge_page_size(h);   (in walk_hugetlb_range())
   306:  next = hugetlb_entry_end(h, addr, end);   (in walk_hugetlb_range())

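Line 289 is the heart of hugetlb_entry_end(): mask the address down to the start of its huge page, then add the huge page size to get the next boundary; walk_hugetlb_range() then steps through the VMA one such entry at a time. The sketch below uses a fixed 2 MiB size instead of the hstate helpers, and the clamp of the boundary to end is assumed from how the helper is used, not shown in the hits.

#include <stdio.h>

/* Illustrative 2 MiB huge page; huge_page_size()/huge_page_mask() come
 * from the hstate in the kernel, here they are plain constants. */
#define HPAGE_SIZE (2ULL * 1024 * 1024)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

/* Next huge-page boundary after addr, clamped to end (clamp assumed). */
static unsigned long long hugetlb_entry_end(unsigned long long addr,
                                            unsigned long long end)
{
        unsigned long long boundary = (addr & HPAGE_MASK) + HPAGE_SIZE;

        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long long addr = 0x7f0000123456ULL;
        unsigned long long end  = 0x7f0000600000ULL;

        /* Walk a range one huge page at a time, like walk_hugetlb_range(). */
        while (addr < end) {
                unsigned long long next = hugetlb_entry_end(addr, end);

                printf("entry: %#llx .. %#llx\n", addr & HPAGE_MASK, next);
                addr = next;
        }
        return 0;
}
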
z3fold.c
   441:  unsigned long h = (unsigned long)zhdr;   (in __encode_handle(), local)
   449:  return h | (1 << PAGE_HEADLESS);   (in __encode_handle())
   453:  h += idx;   (in __encode_handle())
   455:  h |= (zhdr->last_chunks << BUDDY_SHIFT);   (in __encode_handle())
   458:  slots->slot[idx] = h;   (in __encode_handle())

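__encode_handle() packs metadata into a pointer-sized handle: a headless page is tagged with a flag bit, otherwise a small buddy index is added and the chunk count is or-ed in above it. The sketch below demonstrates that kind of bit packing with invented bit positions and field widths; it is not z3fold's actual layout.

#include <stdio.h>

/* Invented layout: the low bits of a well-aligned header pointer are free,
 * so small fields can ride along in them. */
#define HEADLESS_BIT   0          /* like (1 << PAGE_HEADLESS) in the hits */
#define BUDDY_SHIFT    2          /* chunk count stored above the index bits */

static unsigned long encode_handle(void *hdr, int headless,
                                   unsigned long idx, unsigned long chunks)
{
        unsigned long h = (unsigned long)hdr;  /* header assumed 64-byte aligned */

        if (headless)
                return h | (1UL << HEADLESS_BIT);

        h += idx;                              /* small per-buddy index */
        h |= (chunks << BUDDY_SHIFT);          /* chunk count, assumed small */
        return h;
}

int main(void)
{
        /* A fake, suitably aligned "header" address for the demo. */
        void *hdr = (void *)0x10200000UL;

        printf("headless:          %#lx\n", encode_handle(hdr, 1, 0, 0));
        printf("buddy 2, 3 chunks: %#lx\n", encode_handle(hdr, 0, 2, 3));
        return 0;
}
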
migrate.c
  1626:  struct hstate *h = page_hstate(compound_head(page));   (in alloc_migration_target(), local)
  1628:  gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);   (in alloc_migration_target())
  1629:  return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);   (in alloc_migration_target())

memory-failure.c
   668:  struct hstate *h = hstate_vma(walk->vma);   (in hwpoison_hugetlb_range(), local)
   670:  return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),   (in hwpoison_hugetlb_range())

slub.c
  2019:  static void rcu_free_slab(struct rcu_head *h)   (in rcu_free_slab(), argument)
  2021:  struct page *page = container_of(h, struct page, rcu_head);   (in rcu_free_slab())
  4276:  struct page *page, *h;   (in free_partial(), local)
  4280:  list_for_each_entry_safe(page, h, &n->partial, slab_list) {   (in free_partial())
  4291:  list_for_each_entry_safe(page, h, &discard, slab_list)   (in free_partial())

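Two reusable idioms appear in the slub.c hits: rcu_free_slab() recovers the enclosing page from its embedded rcu_head with container_of(), and free_partial() uses the *_safe list walk so the current entry can be unlinked or freed while iterating. The userspace sketch below reimplements container_of() via offsetof() and uses a toy singly linked list in place of the kernel's list_head; the types and names are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Same idea as the kernel's container_of(), minus the type checking. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_head { void (*func)(struct callback_head *); };

struct slab {
        int id;
        struct slab *next;
        struct callback_head rcu;   /* embedded, like page->rcu_head */
};

/* Like rcu_free_slab(): the callback only receives the embedded head and
 * must step back to the containing object before freeing it. */
static void free_slab_cb(struct callback_head *h)
{
        struct slab *s = container_of(h, struct slab, rcu);

        printf("freeing slab %d\n", s->id);
        free(s);
}

int main(void)
{
        struct slab *head = NULL, *s, *n;
        int i;

        for (i = 0; i < 3; i++) {
                s = malloc(sizeof(*s));
                if (!s)
                        return 1;
                s->id = i;
                s->rcu.func = free_slab_cb;
                s->next = head;
                head = s;
        }

        /* "Safe" walk: remember next before the callback frees the node,
         * the same reason free_partial() uses list_for_each_entry_safe(). */
        for (s = head; s; s = n) {
                n = s->next;
                s->rcu.func(&s->rcu);
        }
        return 0;
}
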
/mm/damon/

vaddr.c
   438:  struct hstate *h = hstate_vma(walk->vma);   (in damon_mkold_hugetlb_entry(), local)
   442:  ptl = huge_pte_lock(h, walk->mm, pte);   (in damon_mkold_hugetlb_entry())
   565:  struct hstate *h = hstate_vma(walk->vma);   (in damon_young_hugetlb_entry(), local)
   570:  ptl = huge_pte_lock(h, walk->mm, pte);   (in damon_young_hugetlb_entry())
   583:  *priv->page_sz = huge_page_size(h);   (in damon_young_hugetlb_entry())

/mm/kfence/

core.c
   129:  #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)   (argument)
   501:  static void rcu_guarded_free(struct rcu_head *h)   (in rcu_guarded_free(), argument)
   503:  struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);   (in rcu_guarded_free())

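ALLOC_COVERED_HNEXT(h) chains hash_32() to fold an allocation's stack trace into a small index into a coverage table, and rcu_guarded_free() is another container_of()-from-rcu_head callback like the slub one above. The sketch below reproduces a hash_32()-style multiplicative hash so it stands alone; the table order and the way the stack entries are folded are assumptions, not KFENCE's exact scheme.

#include <stdio.h>
#include <stdint.h>

/* Multiplicative hash in the style of the kernel's hash_32()
 * (include/linux/hash.h), reproduced here so the example stands alone. */
#define GOLDEN_RATIO_32 0x61C88647u

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
        /* Keep only the top `bits` bits, so the result is < 2^bits. */
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

/* Arbitrary example order: indexes a 2^10-entry coverage table. */
#define COVERED_ORDER 10

/* Fold a stack trace into one small index by chaining hash_32(), in the
 * spirit of ALLOC_COVERED_HNEXT(h); the exact fold is assumed here. */
static uint32_t coverage_index(const uint32_t *entries, int num)
{
        uint32_t h = (uint32_t)num;   /* seed with the number of entries */
        int i;

        for (i = 0; i < num; i++)
                h = hash_32(h ^ entries[i], COVERED_ORDER);
        return h;
}

int main(void)
{
        uint32_t trace_a[] = { 0xc0de1, 0xc0de2, 0xc0de3 };
        uint32_t trace_b[] = { 0xc0de1, 0xc0de2, 0xc0de4 };

        printf("trace A -> slot %u\n", coverage_index(trace_a, 3));
        printf("trace B -> slot %u\n", coverage_index(trace_b, 3));
        return 0;
}
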