
Searched refs:sz (Results 1 – 10 of 10) sorted by relevance

/mm/damon/
core.c  98 scheme->quota.sz = quota->sz; in damon_new_scheme()
355 unsigned long sz = 0; in damon_region_sz_limit() local
359 sz += r->ar.end - r->ar.start; in damon_region_sz_limit()
363 sz /= ctx->min_nr_regions; in damon_region_sz_limit()
364 if (sz < DAMON_MIN_REGION) in damon_region_sz_limit()
365 sz = DAMON_MIN_REGION; in damon_region_sz_limit()
367 return sz; in damon_region_sz_limit()
546 unsigned long sz; in __damos_valid_target() local
548 sz = r->ar.end - r->ar.start; in __damos_valid_target()
549 return s->min_sz_region <= sz && sz <= s->max_sz_region && in __damos_valid_target()
[all …]
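
The core.c hits sketch DAMON's region-size floor: damon_region_sz_limit() sums every monitored region, divides by the minimum region count, and never lets the result drop below DAMON_MIN_REGION. A standalone model of that arithmetic follows; the region struct and the 4 KiB DAMON_MIN_REGION value are simplifications for illustration (the kernel ties the minimum to its page size):

#include <stdio.h>

/* Assumed stand-in; the kernel ties DAMON_MIN_REGION to its page size. */
#define DAMON_MIN_REGION 4096UL

struct region { unsigned long start, end; };

/* Mirrors damon_region_sz_limit(): average region size across the
 * monitored regions, never below DAMON_MIN_REGION. */
static unsigned long region_sz_limit(const struct region *r, int nr,
                                     unsigned long min_nr_regions)
{
        unsigned long sz = 0;
        int i;

        for (i = 0; i < nr; i++)
                sz += r[i].end - r[i].start;
        sz /= min_nr_regions;
        if (sz < DAMON_MIN_REGION)
                sz = DAMON_MIN_REGION;
        return sz;
}

int main(void)
{
        struct region regions[] = { { 0, 1UL << 20 }, { 1UL << 21, 1UL << 22 } };

        /* 3 MiB of regions, at least 10 of them -> ~300 KiB cap each. */
        printf("limit: %lu\n", region_sz_limit(regions, 2, 10));
        return 0;
}
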
vaddr.c  238 unsigned long sz = 0, nr_pieces; in __damon_va_init_regions() local
252 sz += regions[i].end - regions[i].start; in __damon_va_init_regions()
254 sz /= ctx->min_nr_regions; in __damon_va_init_regions()
255 if (sz < DAMON_MIN_REGION) in __damon_va_init_regions()
256 sz = DAMON_MIN_REGION; in __damon_va_init_regions()
267 nr_pieces = (regions[i].end - regions[i].start) / sz; in __damon_va_init_regions()
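
vaddr.c applies the same average when seeding monitoring: __damon_va_init_regions() splits each mapped area into span / sz pieces so the whole address space starts with roughly min_nr_regions regions. A rough illustration with made-up address ranges:

#include <stdio.h>

#define DAMON_MIN_REGION 4096UL /* assumed, as above */

struct range { unsigned long start, end; };

int main(void)
{
        /* Three mapped areas, as __damon_va_init_regions() would see them. */
        struct range regions[] = {
                { 0x100000, 0x180000 },
                { 0x400000, 0x500000 },
                { 0x800000, 0x900000 },
        };
        unsigned long sz = 0, min_nr_regions = 10;
        int i;

        for (i = 0; i < 3; i++)
                sz += regions[i].end - regions[i].start;
        sz /= min_nr_regions;
        if (sz < DAMON_MIN_REGION)
                sz = DAMON_MIN_REGION;

        for (i = 0; i < 3; i++) {
                unsigned long nr_pieces = (regions[i].end - regions[i].start) / sz;

                printf("area %d -> %lu piece(s) of ~%lu bytes\n",
                       i, nr_pieces, sz);
        }
        return 0;
}
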
reclaim.c  268 .sz = quota_sz, in damon_reclaim_new_scheme()
dbgfs.c  113 s->quota.ms, s->quota.sz, in sprint_schemes()
208 &quota.sz, &quota.reset_interval, in str_to_schemes()
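
dbgfs.c round-trips the quota through text: sprint_schemes() formats quota.ms and quota.sz into the debugfs file, and str_to_schemes() scans quota.sz and quota.reset_interval back out. A toy round trip over just these fields (the real scheme line carries many more columns):

#include <stdio.h>

struct quota { unsigned long ms, sz, reset_interval; };

int main(void)
{
        struct quota q = { .ms = 10, .sz = 1UL << 20, .reset_interval = 1000 };
        struct quota parsed = { 0 };
        char buf[64];

        /* sprint_schemes()-style formatting of the quota fields... */
        snprintf(buf, sizeof(buf), "%lu %lu %lu", q.ms, q.sz, q.reset_interval);
        /* ...and the str_to_schemes()-style reverse parse. */
        sscanf(buf, "%lu %lu %lu", &parsed.ms, &parsed.sz, &parsed.reset_interval);

        printf("sz round-tripped: %lu\n", parsed.sz);
        return 0;
}
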
/mm/
slab_common.c  763 #define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz, argument
765 #define KMALLOC_DMA_NAME(sz) argument
769 #define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz, argument
771 #define KMALLOC_CGROUP_NAME(sz) argument
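
The slab_common.c macros lean on preprocessor stringification: #sz turns the size argument into a string literal that the compiler concatenates onto the cache-name prefix, and the empty fallback definitions make the initializer vanish when the feature is configured out. A minimal demonstration of the same trick; the kernel versions expand into designated initializers rather than bare strings:

#include <stdio.h>

#define CONFIG_ZONE_DMA 1 /* pretend the DMA zone is configured in */

#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz) "dma-kmalloc-" #sz
#else
#define KMALLOC_DMA_NAME(sz) ""
#endif

int main(void)
{
        /* "dma-kmalloc-" #sz becomes two adjacent string literals,
         * which the compiler concatenates into one. */
        printf("%s\n", KMALLOC_DMA_NAME(96));   /* dma-kmalloc-96 */
        printf("%s\n", KMALLOC_DMA_NAME(1024)); /* dma-kmalloc-1024 */
        return 0;
}
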
z3fold.c  663 size_t sz = 0; in compact_single_buddy() local
676 sz = zhdr->first_chunks << CHUNK_SHIFT; in compact_single_buddy()
681 sz = zhdr->middle_chunks << CHUNK_SHIFT; in compact_single_buddy()
686 sz = zhdr->last_chunks << CHUNK_SHIFT; in compact_single_buddy()
691 if (sz > 0) { in compact_single_buddy()
693 short chunks = size_to_chunks(sz); in compact_single_buddy()
696 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy()
724 memcpy(q, p, sz); in compact_single_buddy()
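
z3fold measures buddies in fixed chunks: compact_single_buddy() recovers a byte size as chunks << CHUNK_SHIFT, and size_to_chunks() rounds a byte count back up to whole chunks before reallocating and memcpy()ing the data. A self-contained round trip; the CHUNK_SHIFT value here is assumed, as the kernel derives it from PAGE_SHIFT:

#include <stdio.h>

/* Assumed for illustration; z3fold derives CHUNK_SHIFT from PAGE_SHIFT. */
#define CHUNK_SHIFT 6
#define CHUNK_SIZE  (1UL << CHUNK_SHIFT)

/* Round a byte count up to whole chunks, as size_to_chunks() does. */
static short size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
        short first_chunks = 5;
        /* compact_single_buddy() recovers the byte size from the
         * per-buddy chunk count... */
        size_t sz = (size_t)first_chunks << CHUNK_SHIFT;
        /* ...and converts back to chunks when placing the copy. */
        short chunks = size_to_chunks(sz);

        printf("%d chunks -> %zu bytes -> %d chunks\n",
               first_chunks, sz, chunks);
        return 0;
}
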
gup.c  2529 unsigned long sz) in hugepte_addr_end() argument
2531 unsigned long __boundary = (addr + sz) & ~(sz-1); in hugepte_addr_end()
2535 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, in gup_hugepte() argument
2544 pte_end = (addr + sz) & ~(sz-1); in gup_hugepte()
2557 page = head + ((addr & (sz-1)) >> PAGE_SHIFT); in gup_hugepte()
2579 unsigned long sz = 1UL << hugepd_shift(hugepd); in gup_huge_pd() local
2584 next = hugepte_addr_end(addr, end, sz); in gup_huge_pd()
2585 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
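
gup.c relies on the classic power-of-two idiom: when sz is a power of two, (addr + sz) & ~(sz - 1) is the next sz-aligned boundary past addr, and hugepte_addr_end() clamps it to the overall end of the walk. A small check of the idiom with an assumed 2 MiB huge-page size:

#include <stdio.h>

/* Mirror of hugepte_addr_end(): next sz-aligned boundary past addr,
 * clamped to end. Requires sz to be a power of two. */
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long boundary = (addr + sz) & ~(sz - 1);

        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long sz = 1UL << 21; /* assume 2 MiB huge pages */

        /* 0x345678 lives in the 2 MiB block [0x200000, 0x400000). */
        printf("0x%lx\n", hugepte_addr_end(0x345678, 0x1000000, sz));
        /* Clamped when the range ends inside the block. */
        printf("0x%lx\n", hugepte_addr_end(0x345678, 0x380000, sz));
        return 0;
}
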
hugetlb.c  4300 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range() local
4321 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
4323 src_pte = huge_pte_offset(src, addr, sz); in copy_hugetlb_page_range()
4326 dst_pte = huge_pte_alloc(dst, vma, addr, sz); in copy_hugetlb_page_range()
4371 entry, sz); in copy_hugetlb_page_range()
4373 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz); in copy_hugetlb_page_range()
4462 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range() local
4474 tlb_change_page_size(tlb, sz); in __unmap_hugepage_range()
4485 for (; address < end; address += sz) { in __unmap_hugepage_range()
4486 ptep = huge_pte_offset(mm, address, sz); in __unmap_hugepage_range()
[all …]
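
Both hugetlb.c loops walk their range one huge page at a time: the address advances by huge_page_size(h), and each step looks up or populates the PTE for that huge page. A minimal model of the stride, with a hypothetical callback in place of the real per-page work:

#include <stdio.h>

/* Walk [start, end) in huge-page strides, mirroring the
 * "for (addr = vma->vm_start; addr < vma->vm_end; addr += sz)"
 * loop in copy_hugetlb_page_range(). */
static void for_each_huge_page(unsigned long start, unsigned long end,
                               unsigned long sz,
                               void (*fn)(unsigned long addr))
{
        unsigned long addr;

        for (addr = start; addr < end; addr += sz)
                fn(addr);
}

static void show(unsigned long addr)
{
        printf("huge page at 0x%lx\n", addr);
}

int main(void)
{
        /* Three 2 MiB pages in [0x40000000, 0x40600000). */
        for_each_huge_page(0x40000000UL, 0x40600000UL, 1UL << 21, show);
        return 0;
}
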
pagewalk.c  300 unsigned long sz = huge_page_size(h); in walk_hugetlb_range() local
307 pte = huge_pte_offset(walk->mm, addr & hmask, sz); in walk_hugetlb_range()
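
walk_hugetlb_range() masks in the other direction: addr & hmask rounds the address down to the start of its huge page before the PTE lookup. A quick illustration, again assuming 2 MiB huge pages:

#include <stdio.h>

int main(void)
{
        unsigned long sz = 1UL << 21;    /* assume 2 MiB huge pages */
        unsigned long hmask = ~(sz - 1); /* huge-page alignment mask */

        /* huge_pte_offset() gets the page-aligned address. */
        printf("0x%lx\n", 0x345678UL & hmask); /* 0x200000 */
        return 0;
}
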
mempolicy.c  2624 unsigned long sz = vma_pages(vma); in mpol_set_shared_policy() local
2628 sz, npol ? npol->mode : -1, in mpol_set_shared_policy()
2633 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); in mpol_set_shared_policy()
2637 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); in mpol_set_shared_policy()
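
mpol_set_shared_policy() works in page units: vma_pages() is the VMA length in pages, so the shared policy is replaced over the page-offset range [vm_pgoff, vm_pgoff + sz). A sketch of that conversion with a simplified vma struct and an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages */

struct vma { unsigned long vm_start, vm_end, vm_pgoff; };

/* vma_pages(): VMA length in pages. */
static unsigned long vma_pages(const struct vma *v)
{
        return (v->vm_end - v->vm_start) >> PAGE_SHIFT;
}

int main(void)
{
        struct vma v = { 0x10000000UL, 0x10200000UL, 16 };
        unsigned long sz = vma_pages(&v);

        /* shared_policy_replace() operates on this pgoff range. */
        printf("replace pgoff [%lu, %lu)\n", v.vm_pgoff, v.vm_pgoff + sz);
        return 0;
}
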