Searched refs:h in /mm/ (Results 1 – 8 of 8) sorted by relevance

hugetlb.c
     75  static int hugetlb_acct_memory(struct hstate *h, long delta);
     94  struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,    in hugepage_new_subpool() argument
    106  spool->hstate = h;    in hugepage_new_subpool()
    109  if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {    in hugepage_new_subpool()
    582  struct hstate *h = hstate_inode(inode);    in hugetlb_fix_reserve_counts() local
    584  hugetlb_acct_memory(h, 1);    in hugetlb_fix_reserve_counts()
    623  static pgoff_t vma_hugecache_offset(struct hstate *h,    in vma_hugecache_offset() argument
    626  return ((address - vma->vm_start) >> huge_page_shift(h)) +    in vma_hugecache_offset()
    627  (vma->vm_pgoff >> huge_page_order(h));    in vma_hugecache_offset()
    866  static void enqueue_huge_page(struct hstate *h, struct page *page)    in enqueue_huge_page() argument
    [all …]

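The hits at lines 623–627 are the interesting ones: vma_hugecache_offset() converts a user virtual address into the file's page-cache index measured in huge pages, scaling the offset into the VMA by huge_page_shift() and the 4 KB-based vm_pgoff by huge_page_order(). A minimal userspace sketch of that arithmetic, assuming a 2 MB huge page; the helper name and the example mapping values are illustrative, not taken from the kernel:

#include <stdio.h>

/* Illustrative stand-ins for huge_page_shift()/huge_page_order()
 * with a 2 MB huge page: shift = 21, order = 21 - 12 = 9. */
#define HPAGE_SHIFT 21
#define HPAGE_ORDER (HPAGE_SHIFT - 12)

/* Mirrors vma_hugecache_offset(): index of 'address' within the
 * mapped file, counted in huge pages. vm_pgoff is kept in 4 KB
 * pages, so it is scaled by the order, not the full shift. */
static unsigned long hugecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff,
                                      unsigned long address)
{
        return ((address - vm_start) >> HPAGE_SHIFT) +
               (vm_pgoff >> HPAGE_ORDER);
}

int main(void)
{
        /* Hypothetical mapping at 0x40000000, file offset 0; fault
         * lands in the third huge page of the mapping. */
        unsigned long off = hugecache_offset(0x40000000UL, 0,
                                             0x40000000UL + 2 * (1UL << 21));
        printf("huge page index in file: %lu\n", off); /* prints 2 */
        return 0;
}
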
hugetlb_cgroup.c
    164  struct hstate *h;    in hugetlb_cgroup_css_offline() local
    169  for_each_hstate(h) {    in hugetlb_cgroup_css_offline()
    171  list_for_each_entry(page, &h->hugepage_activelist, lru)    in hugetlb_cgroup_css_offline()
    357  struct hstate *h = &hstates[idx];    in __hugetlb_cgroup_file_init() local
    360  mem_fmt(buf, 32, huge_page_size(h));    in __hugetlb_cgroup_file_init()
    363  cft = &h->cgroup_files[0];    in __hugetlb_cgroup_file_init()
    370  cft = &h->cgroup_files[1];    in __hugetlb_cgroup_file_init()
    376  cft = &h->cgroup_files[2];    in __hugetlb_cgroup_file_init()
    383  cft = &h->cgroup_files[3];    in __hugetlb_cgroup_file_init()
    390  cft = &h->cgroup_files[4];    in __hugetlb_cgroup_file_init()
    [all …]

pagewalk.c
    175  static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,    in hugetlb_entry_end() argument
    178  unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);    in hugetlb_entry_end()
    186  struct hstate *h = hstate_vma(vma);    in walk_hugetlb_range() local
    188  unsigned long hmask = huge_page_mask(h);    in walk_hugetlb_range()
    189  unsigned long sz = huge_page_size(h);    in walk_hugetlb_range()
    195  next = hugetlb_entry_end(h, addr, end);    in walk_hugetlb_range()

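hugetlb_entry_end() (line 178) computes the next huge-page boundary past addr and clamps it to end, which lets walk_hugetlb_range() visit exactly one huge PTE per iteration. A standalone sketch of that stride logic, assuming a 2 MB huge page; the driver loop and constants are illustrative:

#include <stdio.h>

#define HPAGE_SIZE (1UL << 21)          /* illustrative: 2 MB huge page */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))  /* mirrors huge_page_mask() */

/* Mirrors hugetlb_entry_end(): next huge-page boundary after addr,
 * clamped so the walk never runs past 'end'. */
static unsigned long hugetlb_entry_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr & HPAGE_MASK) + HPAGE_SIZE;

        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x40000000UL + 0x1000; /* just past a boundary */
        unsigned long end  = 0x40000000UL + 8 * HPAGE_SIZE;

        /* Walk in huge-page strides, as walk_hugetlb_range() does. */
        for (unsigned long next; addr != end; addr = next) {
                next = hugetlb_entry_end(addr, end);
                printf("entry spans [%#lx, %#lx)\n", addr & HPAGE_MASK, next);
        }
        return 0;
}
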
userfaultfd.c
    187  struct hstate *h;    in __mcopy_atomic_hugetlb() local
    259  h = hstate_vma(dst_vma);    in __mcopy_atomic_hugetlb()
    265  VM_BUG_ON(dst_addr & ~huge_page_mask(h));    in __mcopy_atomic_hugetlb()
    272  hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);    in __mcopy_atomic_hugetlb()
    276  dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));    in __mcopy_atomic_hugetlb()
    303  pages_per_huge_page(h), true);    in __mcopy_atomic_hugetlb()

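Line 272 shows the hugetlb fault-mutex pattern: __mcopy_atomic_hugetlb() hashes the (mapping, index) pair into a fixed table of mutexes, so concurrent operations on the same huge page serialize without one global lock. A userspace sketch of the idea; the table size, hash mix, and pthread scaffolding are all assumptions, not the kernel's implementation:

#include <pthread.h>
#include <stdint.h>

#define FAULT_MUTEX_TABLE_SIZE 256  /* illustrative; the kernel sizes this from CPU count */

static pthread_mutex_t fault_mutex_table[FAULT_MUTEX_TABLE_SIZE];

/* Stand-in for hugetlb_fault_mutex_hash(): mix the mapping identity
 * and the page index into one table slot. */
static unsigned int fault_mutex_hash(const void *mapping, unsigned long idx)
{
        uint64_t key = (uintptr_t)mapping ^ (idx * 0x9e3779b97f4a7c15ULL);

        return key % FAULT_MUTEX_TABLE_SIZE;
}

static void fault_on_page(const void *mapping, unsigned long idx)
{
        unsigned int hash = fault_mutex_hash(mapping, idx);

        /* Faults on the same (mapping, idx) serialize here; unrelated
         * pages almost always land on different mutexes. */
        pthread_mutex_lock(&fault_mutex_table[hash]);
        /* ... allocate, copy and map the huge page, as
         * __mcopy_atomic_hugetlb() does under this lock ... */
        pthread_mutex_unlock(&fault_mutex_table[hash]);
}

int main(void)
{
        for (int i = 0; i < FAULT_MUTEX_TABLE_SIZE; i++)
                pthread_mutex_init(&fault_mutex_table[i], NULL);
        fault_on_page((void *)0x1000, 42); /* hypothetical mapping/index */
        return 0;
}
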
z3fold.c
    376  unsigned long h = (unsigned long)zhdr;    in __encode_handle() local
    384  return h | (1 << PAGE_HEADLESS);    in __encode_handle()
    388  h += idx;    in __encode_handle()
    390  h |= (zhdr->last_chunks << BUDDY_SHIFT);    in __encode_handle()
    392  slots->slot[idx] = h;    in __encode_handle()
    402  static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)    in handle_to_z3fold_header() argument
    404  unsigned long addr = h;    in handle_to_z3fold_header()
    407  addr = *(unsigned long *)h;    in handle_to_z3fold_header()

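__encode_handle() is pointer tagging: the z3fold_header address doubles as the handle, with the buddy index folded into the alignment bits, last_chunks packed above BUDDY_SHIFT, and headless pages flagged via the PAGE_HEADLESS bit. A self-contained sketch of the same trick, with an illustrative bit layout rather than z3fold's exact one:

#include <assert.h>
#include <stdlib.h>

/* Illustrative layout: headers are 64-byte aligned, so the low
 * address bits are free to carry a small tag. */
#define BUDDY_BITS 2                   /* buddy index: 0..3 */
#define BUDDY_MASK ((1UL << BUDDY_BITS) - 1)

struct header { int payload; } __attribute__((aligned(64)));

/* Pack header address + buddy index into one word, as
 * __encode_handle() does (minus the last_chunks field). */
static unsigned long encode_handle(struct header *hdr, unsigned int buddy)
{
        unsigned long h = (unsigned long)hdr;

        assert((h & BUDDY_MASK) == 0); /* alignment leaves tag bits free */
        return h + buddy;
}

/* Inverse of the encoding: mask the tag off to recover the header,
 * as handle_to_z3fold_header() does for non-headless pages. */
static struct header *decode_header(unsigned long h)
{
        return (struct header *)(h & ~BUDDY_MASK);
}

int main(void)
{
        struct header *hdr = aligned_alloc(64, sizeof(*hdr));
        unsigned long h = encode_handle(hdr, 3);

        assert(decode_header(h) == hdr);
        assert((h & BUDDY_MASK) == 3);
        free(hdr);
        return 0;
}
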
gup.c
   1403  struct hstate *h = page_hstate(page);    in new_non_cma_page() local
   1408  return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);    in new_non_cma_page()

slub.c
   1734  static void rcu_free_slab(struct rcu_head *h)    in rcu_free_slab() argument
   1736  struct page *page = container_of(h, struct page, rcu_head);    in rcu_free_slab()
   3704  struct page *page, *h;    in free_partial() local
   3708  list_for_each_entry_safe(page, h, &n->partial, slab_list) {    in free_partial()
   3719  list_for_each_entry_safe(page, h, &discard, slab_list)    in free_partial()

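rcu_free_slab() (line 1734) is the canonical container_of() idiom: an RCU callback receives only the embedded rcu_head, and container_of() recovers the enclosing struct page by subtracting the member's offset. A userspace demonstration; the struct definitions are cut-down stand-ins:

#include <assert.h>
#include <stddef.h>

/* Same definition the kernel uses, minus type-checking sugar. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative stand-ins for struct rcu_head embedded in struct page. */
struct rcu_head { void (*func)(struct rcu_head *); };

struct page {
        unsigned long flags;
        struct rcu_head rcu_head;  /* the callback sees only this member */
};

/* Mirrors rcu_free_slab(): recover the page from its embedded head. */
static void rcu_free_slab(struct rcu_head *h)
{
        struct page *page = container_of(h, struct page, rcu_head);

        page->flags = 0; /* stand-in for freeing the slab */
}

int main(void)
{
        struct page pg = { .flags = 1 };

        /* An RCU core would invoke this after a grace period; here we
         * call it directly with the embedded head. */
        rcu_free_slab(&pg.rcu_head);
        assert(pg.flags == 0);
        return 0;
}
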
migrate.c
    572  struct hstate *h = page_hstate(src);    in copy_huge_page() local
    573  nr_pages = pages_per_huge_page(h);    in copy_huge_page()