
Searched refs: h  (results 1 – 12 of 12, sorted by relevance)

/mm/
hugetlb.c
     94  static int hugetlb_acct_memory(struct hstate *h, long delta);
    130  struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,   [in hugepage_new_subpool(), argument]
    142  spool->hstate = h;   [in hugepage_new_subpool()]
    145  if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {   [in hugepage_new_subpool()]
    466  struct hstate *h,   [in record_hugetlb_cgroup_uncharge_info(), argument]
    473  &h_cg->rsvd_hugepage[hstate_index(h)];   [in record_hugetlb_cgroup_uncharge_info()]
    487  resv->pages_per_hpage = pages_per_huge_page(h);   [in record_hugetlb_cgroup_uncharge_info()]
    491  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));   [in record_hugetlb_cgroup_uncharge_info()]
    548  long to, struct hstate *h, struct hugetlb_cgroup *cg,   [in hugetlb_resv_map_add(), argument]
    555  record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);   [in hugetlb_resv_map_add()]
    [all …]
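
These hits all thread a struct hstate *h through hugetlb's subpool, reservation and cgroup accounting. A minimal sketch of how such code typically obtains the hstate for a mapping and queries its geometry; the function name is made up, while hstate_vma(), huge_page_size(), pages_per_huge_page() and hstate_index() are the real accessors from <linux/hugetlb.h>:

    #include <linux/hugetlb.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    /* Illustrative only: query the hstate that backs a hugetlb VMA. */
    static void show_hstate_geometry(struct vm_area_struct *vma)
    {
            struct hstate *h = hstate_vma(vma);     /* per-size hugetlb state */

            pr_info("hstate %d: %lu-byte huge pages, %lu base pages each\n",
                    hstate_index(h), huge_page_size(h),
                    pages_per_huge_page(h));
    }
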
hugetlb_vmemmap.h
     14  int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
     15  void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
     23  static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)   [in hugetlb_vmemmap_size(), argument]
     25  return pages_per_huge_page(h) * sizeof(struct page);   [in hugetlb_vmemmap_size()]
     32  static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)   [in hugetlb_vmemmap_optimizable_size(), argument]
     34  int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;   [in hugetlb_vmemmap_optimizable_size()]
     41  static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)   [in hugetlb_vmemmap_restore(), argument]
     46  static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)   [in hugetlb_vmemmap_optimize(), argument]
     50  static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)   [in hugetlb_vmemmap_optimizable_size(), argument]
     56  static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)   [in hugetlb_vmemmap_optimizable(), argument]
    [all …]
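
As a worked example of the size calculation shown above, assuming x86-64 defaults (4 KiB base pages, a 64-byte struct page) and taking HUGETLB_VMEMMAP_RESERVE_SIZE as one base page:

    pages_per_huge_page(h)              = 2 MiB / 4 KiB             = 512
    hugetlb_vmemmap_size(h)             = 512 * sizeof(struct page) = 512 * 64 = 32 KiB
    hugetlb_vmemmap_optimizable_size(h) = 32 KiB - 4 KiB            = 28 KiB freeable per 2 MiB huge page
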
hugetlb_cgroup.c
     78  struct hstate *h;   [in hugetlb_cgroup_have_usage(), local]
     80  for_each_hstate(h) {   [in hugetlb_cgroup_have_usage()]
     82  hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))   [in hugetlb_cgroup_have_usage()]
    227  struct hstate *h;   [in hugetlb_cgroup_css_offline(), local]
    231  for_each_hstate(h) {   [in hugetlb_cgroup_css_offline()]
    233  list_for_each_entry(page, &h->hugepage_activelist, lru)   [in hugetlb_cgroup_css_offline()]
    234  hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);   [in hugetlb_cgroup_css_offline()]
    718  struct hstate *h = &hstates[idx];   [in __hugetlb_cgroup_file_dfl_init(), local]
    721  mem_fmt(buf, sizeof(buf), huge_page_size(h));   [in __hugetlb_cgroup_file_dfl_init()]
    724  cft = &h->cgroup_files_dfl[0];   [in __hugetlb_cgroup_file_dfl_init()]
    [all …]
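
Both cgroup routines above use the same iteration idiom: visit every registered huge page size with for_each_hstate() and key per-size state off hstate_index(h). A minimal sketch; the function itself is hypothetical:

    #include <linux/hugetlb.h>
    #include <linux/printk.h>

    /* Illustrative only: visit every configured huge page size. */
    static void walk_all_hstates(void)
    {
            struct hstate *h;

            for_each_hstate(h)      /* e.g. the 2 MiB and 1 GiB hstates on x86-64 */
                    pr_info("hstate %d: %lu-byte huge pages\n",
                            hstate_index(h), huge_page_size(h));
    }
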
hugetlb_vmemmap.c
    453  int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)   [in hugetlb_vmemmap_restore(), argument]
    462  vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);   [in hugetlb_vmemmap_restore()]
    483  static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)   [in vmemmap_should_optimize(), argument]
    488  if (!hugetlb_vmemmap_optimizable(h))   [in vmemmap_should_optimize()]
    548  void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)   [in hugetlb_vmemmap_optimize(), argument]
    553  if (!vmemmap_should_optimize(h, head))   [in hugetlb_vmemmap_optimize()]
    558  vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);   [in hugetlb_vmemmap_optimize()]
    586  const struct hstate *h;   [in hugetlb_vmemmap_init(), local]
    591  for_each_hstate(h) {   [in hugetlb_vmemmap_init()]
    592  if (hugetlb_vmemmap_optimizable(h)) {   [in hugetlb_vmemmap_init()]
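
Restore and optimize above both derive the same address range: the huge page's slice of the virtual memory map starts at its head struct page and spans hugetlb_vmemmap_size(h) bytes. A hypothetical helper, for illustration only, assuming CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y so that hugetlb_vmemmap_size() from the mm/-internal header above is available:

    #include <linux/hugetlb.h>
    #include "hugetlb_vmemmap.h"    /* mm/-internal header from the previous result */

    /* Illustrative only: end of the vmemmap span describing one huge page. */
    static unsigned long example_vmemmap_end(const struct hstate *h,
                                             const struct page *head)
    {
            unsigned long vmemmap_start = (unsigned long)head;

            return vmemmap_start + hugetlb_vmemmap_size(h);
    }
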
pagewalk.c
    302  static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,   [in hugetlb_entry_end(), argument]
    305  unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);   [in hugetlb_entry_end()]
    313  struct hstate *h = hstate_vma(vma);   [in walk_hugetlb_range(), local]
    315  unsigned long hmask = huge_page_mask(h);   [in walk_hugetlb_range()]
    316  unsigned long sz = huge_page_size(h);   [in walk_hugetlb_range()]
    323  next = hugetlb_entry_end(h, addr, end);   [in walk_hugetlb_range()]
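
hugetlb_entry_end() above steps the walk to the next huge page boundary, capped at the end of the range. A worked example for a 2 MiB hstate (the address is made up):

    huge_page_size(h) = 0x200000,  huge_page_mask(h) = ~0x1fffff
    addr     = 0x7f0000201000
    boundary = (addr & huge_page_mask(h)) + huge_page_size(h)
             = 0x7f0000200000 + 0x200000
             = 0x7f0000400000          (or 'end', whichever comes first)
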
mremap.c
    990  struct hstate *h __maybe_unused = hstate_vma(vma);   [in SYSCALL_DEFINE5()]
    992  old_len = ALIGN(old_len, huge_page_size(h));   [in SYSCALL_DEFINE5()]
    993  new_len = ALIGN(new_len, huge_page_size(h));   [in SYSCALL_DEFINE5()]
    996  if (addr & ~huge_page_mask(h))   [in SYSCALL_DEFINE5()]
    998  if (new_addr & ~huge_page_mask(h))   [in SYSCALL_DEFINE5()]
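
The mremap() hits boil down to two rules for hugetlb mappings: lengths are rounded up to the huge page size, and both the old and new addresses must be huge page aligned. A sketch of those checks in isolation; the wrapper is hypothetical, not the kernel's actual code path:

    #include <linux/align.h>
    #include <linux/errno.h>
    #include <linux/hugetlb.h>

    /* Illustrative only: the alignment rules mremap() enforces for hugetlb VMAs. */
    static int example_check_hugetlb_remap(struct vm_area_struct *vma,
                                           unsigned long addr, unsigned long new_addr,
                                           unsigned long *old_len, unsigned long *new_len)
    {
            struct hstate *h = hstate_vma(vma);

            *old_len = ALIGN(*old_len, huge_page_size(h));
            *new_len = ALIGN(*new_len, huge_page_size(h));

            if (addr & ~huge_page_mask(h))          /* old address misaligned */
                    return -EINVAL;
            if (new_addr & ~huge_page_mask(h))      /* new address misaligned */
                    return -EINVAL;
            return 0;
    }
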
z3fold.c
    374  unsigned long h = (unsigned long)zhdr;   [in __encode_handle(), local]
    382  return h | (1 << PAGE_HEADLESS);   [in __encode_handle()]
    386  h += idx;   [in __encode_handle()]
    388  h |= (zhdr->last_chunks << BUDDY_SHIFT);   [in __encode_handle()]
    391  slots->slot[idx] = h;   [in __encode_handle()]
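
In this file, h is not an hstate at all but a handle being assembled: __encode_handle() starts from the aligned z3fold header address and folds a buddy index and a chunk count into its low bits. A generic sketch of that bit-packing idiom; the constants and layout are illustrative, not z3fold's real encoding, and it assumes the header is at least 64-byte aligned:

    #include <linux/types.h>

    #define EX_BUDDY_BITS   2               /* bits 0-1: which buddy within the page */
    #define EX_CHUNK_SHIFT  EX_BUDDY_BITS   /* bits 2-5: size of that buddy, in chunks */
    #define EX_CHUNK_BITS   4

    /* Illustrative only: pack small metadata into the free low bits of an aligned pointer. */
    static unsigned long example_encode_handle(void *hdr, unsigned int buddy,
                                               unsigned int chunks)
    {
            unsigned long h = (unsigned long)hdr;   /* low 6 bits are zero by alignment */

            h |= buddy & ((1U << EX_BUDDY_BITS) - 1);
            h |= (unsigned long)(chunks & ((1U << EX_CHUNK_BITS) - 1)) << EX_CHUNK_SHIFT;
            return h;
    }
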
migrate.c
    267  struct hstate *h = hstate_vma(vma);   [in remove_migration_pte(), local]
    268  unsigned int shift = huge_page_shift(h);   [in remove_migration_pte()]
    269  unsigned long psize = huge_page_size(h);   [in remove_migration_pte()]
   2050  struct hstate *h = folio_hstate(src);   [in alloc_migration_target(), local]
   2052  gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);   [in alloc_migration_target()]
   2053  return alloc_hugetlb_folio_nodemask(h, nid,   [in alloc_migration_target()]
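
For hugetlb folios, alloc_migration_target() above looks up the hstate of the source folio and lets hugetlb constrain the allocation mask before allocating a same-size destination. A stripped-down sketch using only the helpers visible in these hits; the wrapper function is hypothetical:

    #include <linux/gfp.h>
    #include <linux/hugetlb.h>
    #include <linux/mm.h>

    /* Illustrative only: derive the gfp mask for migrating a hugetlb folio. */
    static gfp_t example_hugetlb_migration_gfp(struct folio *src, gfp_t gfp_mask)
    {
            struct hstate *h = folio_hstate(src);   /* hstate matching the source folio's size */

            return htlb_modify_alloc_mask(h, gfp_mask);
    }
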
memory-failure.c
    829  struct hstate *h = hstate_vma(walk->vma);   [in hwpoison_hugetlb_range(), local]
    831  return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),   [in hwpoison_hugetlb_range()]

slub.c
   2113  static void rcu_free_slab(struct rcu_head *h)   [in rcu_free_slab(), argument]
   2115  struct slab *slab = container_of(h, struct slab, rcu_head);   [in rcu_free_slab()]
   4627  struct slab *slab, *h;   [in free_partial(), local]
   4631  list_for_each_entry_safe(slab, h, &n->partial, slab_list) {   [in free_partial()]
   4642  list_for_each_entry_safe(slab, h, &discard, slab_list)   [in free_partial()]
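
Here h plays two unrelated roles: in rcu_free_slab() it is the rcu_head handed to the RCU callback, and in free_partial() it is merely the spare cursor of list_for_each_entry_safe(). The first is the classic deferred-free pattern, sketched below with made-up names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_obj {
            int payload;
            struct rcu_head rcu_head;       /* embedded for deferred freeing */
    };

    /* RCU callback: recover the containing object from its rcu_head. */
    static void example_rcu_free(struct rcu_head *h)
    {
            struct example_obj *obj = container_of(h, struct example_obj, rcu_head);

            kfree(obj);                     /* all pre-existing readers have finished */
    }

    static void example_release(struct example_obj *obj)
    {
            call_rcu(&obj->rcu_head, example_rcu_free);
    }
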
/mm/damon/
vaddr.c
    371  struct hstate *h = hstate_vma(walk->vma);   [in damon_mkold_hugetlb_entry(), local]
    375  ptl = huge_pte_lock(h, walk->mm, pte);   [in damon_mkold_hugetlb_entry()]
    506  struct hstate *h = hstate_vma(walk->vma);   [in damon_young_hugetlb_entry(), local]
    511  ptl = huge_pte_lock(h, walk->mm, pte);   [in damon_young_hugetlb_entry()]
    522  *priv->folio_sz = huge_page_size(h);   [in damon_young_hugetlb_entry()]
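
Both DAMON callbacks follow the same locking discipline: look up the VMA's hstate, take the per-huge-PTE lock with huge_pte_lock(), and drop it once the entry has been examined. A minimal sketch; the function name and body are illustrative:

    #include <linux/hugetlb.h>
    #include <linux/pagewalk.h>
    #include <linux/spinlock.h>

    /* Illustrative only: examine a huge PTE under its page-table lock. */
    static void example_touch_hugetlb_entry(pte_t *pte, struct mm_walk *walk)
    {
            struct hstate *h = hstate_vma(walk->vma);
            spinlock_t *ptl = huge_pte_lock(h, walk->mm, pte);

            /* ... read or update the huge PTE here ... */

            spin_unlock(ptl);
    }
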
/mm/kfence/
core.c
    155  #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)   [argument]
    563  static void rcu_guarded_free(struct rcu_head *h)   [in rcu_guarded_free(), argument]
    565  struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);   [in rcu_guarded_free()]
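
In ALLOC_COVERED_HNEXT(), h is a plain 32-bit hash being rehashed with hash_32() to derive successive table indices, while rcu_guarded_free() repeats the container_of-from-rcu_head pattern shown for slub.c. A generic sketch of the rehashing idiom, with made-up names and an illustrative table order:

    #include <linux/hash.h>
    #include <linux/types.h>

    #define EX_TABLE_ORDER  10      /* illustrative: a 2^10-entry table */

    /* Illustrative only: derive several table slots from one starting hash. */
    static void example_probe_slots(u32 hash, u32 *slots, unsigned int num)
    {
            unsigned int i;

            for (i = 0; i < num; i++) {
                    hash = hash_32(hash, EX_TABLE_ORDER);   /* next index in [0, 2^order) */
                    slots[i] = hash;
            }
    }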