Searched refs:sz (Results 1 – 9 of 9) sorted by relevance
/mm/damon/ |
D | core.c | 98 scheme->quota.sz = quota->sz; in damon_new_scheme() 356 unsigned long sz = 0; in damon_region_sz_limit() local 360 sz += r->ar.end - r->ar.start; in damon_region_sz_limit() 364 sz /= ctx->min_nr_regions; in damon_region_sz_limit() 365 if (sz < DAMON_MIN_REGION) in damon_region_sz_limit() 366 sz = DAMON_MIN_REGION; in damon_region_sz_limit() 368 return sz; in damon_region_sz_limit() 538 unsigned long sz; in __damos_valid_target() local 540 sz = r->ar.end - r->ar.start; in __damos_valid_target() 541 return s->min_sz_region <= sz && sz <= s->max_sz_region && in __damos_valid_target() [all …]
|
D | vaddr.c | 238 unsigned long sz = 0, nr_pieces; in __damon_va_init_regions() local 252 sz += regions[i].end - regions[i].start; in __damon_va_init_regions() 254 sz /= ctx->min_nr_regions; in __damon_va_init_regions() 255 if (sz < DAMON_MIN_REGION) in __damon_va_init_regions() 256 sz = DAMON_MIN_REGION; in __damon_va_init_regions() 267 nr_pieces = (regions[i].end - regions[i].start) / sz; in __damon_va_init_regions()
|
D | reclaim.c | 268 .sz = quota_sz, in damon_reclaim_new_scheme()
|
D | dbgfs.c | 113 s->quota.ms, s->quota.sz, in sprint_schemes() 208 &quota.sz, &quota.reset_interval, in str_to_schemes()
|
/mm/ |
D | z3fold.c | 680 size_t sz = 0; in compact_single_buddy() local 693 sz = zhdr->first_chunks << CHUNK_SHIFT; in compact_single_buddy() 698 sz = zhdr->middle_chunks << CHUNK_SHIFT; in compact_single_buddy() 703 sz = zhdr->last_chunks << CHUNK_SHIFT; in compact_single_buddy() 708 if (sz > 0) { in compact_single_buddy() 710 short chunks = size_to_chunks(sz); in compact_single_buddy() 713 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy() 741 memcpy(q, p, sz); in compact_single_buddy()
|
D | gup.c | 2333 unsigned long sz) in hugepte_addr_end() argument 2335 unsigned long __boundary = (addr + sz) & ~(sz-1); in hugepte_addr_end() 2339 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, in gup_hugepte() argument 2348 pte_end = (addr + sz) & ~(sz-1); in gup_hugepte() 2361 page = head + ((addr & (sz-1)) >> PAGE_SHIFT); in gup_hugepte() 2383 unsigned long sz = 1UL << hugepd_shift(hugepd); in gup_huge_pd() local 2388 next = hugepte_addr_end(addr, end, sz); in gup_huge_pd() 2389 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
|
D | hugetlb.c | 3823 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range() local 3845 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range() 3847 src_pte = huge_pte_offset(src, addr, sz); in copy_hugetlb_page_range() 3850 dst_pte = huge_pte_alloc(dst, vma, addr, sz); in copy_hugetlb_page_range() 3893 entry, sz); in copy_hugetlb_page_range() 3895 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range() 3937 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range() local 3949 tlb_change_page_size(tlb, sz); in __unmap_hugepage_range() 3960 for (; address < end; address += sz) { in __unmap_hugepage_range() 3961 ptep = huge_pte_offset(mm, address, sz); in __unmap_hugepage_range() [all …]
|
D | pagewalk.c | 252 unsigned long sz = huge_page_size(h); in walk_hugetlb_range() local 259 pte = huge_pte_offset(walk->mm, addr & hmask, sz); in walk_hugetlb_range()
|
D | mempolicy.c | 2714 unsigned long sz = vma_pages(vma); in mpol_set_shared_policy() local 2718 sz, npol ? npol->mode : -1, in mpol_set_shared_policy() 2723 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); in mpol_set_shared_policy() 2727 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); in mpol_set_shared_policy()
|