/mm/ (occurrences of the identifier "nr", by file and source line)

gup.c
  720: int write, struct page **pages, int *nr)  in gup_pte_range() [argument]
  756: pages[*nr] = page;  in gup_pte_range()
  757: (*nr)++;  in gup_pte_range()
  779: int write, struct page **pages, int *nr)  in gup_pte_range() [argument]
  786: unsigned long end, int write, struct page **pages, int *nr)  in gup_huge_pmd() [argument]
  800: pages[*nr] = page;  in gup_huge_pmd()
  801: (*nr)++;  in gup_huge_pmd()
  807: *nr -= refs;  in gup_huge_pmd()
  812: *nr -= refs;  in gup_huge_pmd()
  833: unsigned long end, int write, struct page **pages, int *nr)  in gup_huge_pud() [argument]
  [all …]

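The gup.c hits above all follow one pattern: the walker appends each pinned page at pages[*nr], advances *nr, and on a mid-walk failure subtracts the references it took (*nr -= refs) so the caller never sees a half-committed batch. A minimal userspace sketch of that gather-with-rollback shape, with all names hypothetical:

    #include <stdio.h>

    /* Append values from [start, end) into out[*nr], advancing *nr.
     * If a value equals fail_at (a stand-in for a failed page pin),
     * roll the caller's count back and report failure. */
    static int gather_range(int start, int end, int fail_at,
                            int *out, int *nr)
    {
        int refs = 0;

        for (int v = start; v < end; v++) {
            if (v == fail_at) {
                *nr -= refs;        /* undo this call's additions only */
                return 0;
            }
            out[(*nr)++] = v;
            refs++;
        }
        return 1;
    }

    int main(void)
    {
        int out[16], nr = 0;

        gather_range(0, 4, -1, out, &nr);   /* succeeds: nr == 4 */
        gather_range(4, 8, 6, out, &nr);    /* fails at 6: nr back to 4 */
        printf("nr = %d\n", nr);            /* prints: nr = 4 */
        return 0;
    }
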
filemap_xip.c
  77: unsigned long nr, left;  in do_xip_mapping_read() [local]
  83: nr = PAGE_CACHE_SIZE;  in do_xip_mapping_read()
  87: nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;  in do_xip_mapping_read()
  88: if (nr <= offset) {  in do_xip_mapping_read()
  92: nr = nr - offset;  in do_xip_mapping_read()
  93: if (nr > len - copied)  in do_xip_mapping_read()
  94: nr = len - copied;  in do_xip_mapping_read()
  123: left = __copy_to_user(buf+copied, xip_mem+offset, nr);  in do_xip_mapping_read()
  125: left = __clear_user(buf + copied, nr);  in do_xip_mapping_read()
  132: copied += (nr - left);  in do_xip_mapping_read()
  [all …]

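Line 87 is the classic end-of-file clamp: within the last page of a file, only ((isize - 1) & ~PAGE_CACHE_MASK) + 1 bytes are valid. A small standalone check of that arithmetic, using the kernel's PAGE_MASK convention of ~(PAGE_SIZE - 1):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))    /* kernel convention */

    int main(void)
    {
        unsigned long isize = 10000;        /* file size in bytes */
        /* bytes of the final page that lie below isize */
        unsigned long nr = ((isize - 1) & ~PAGE_MASK) + 1;

        printf("%lu\n", nr);                /* 1808 == 10000 - 2*4096 */
        return 0;
    }
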
vmscan.c
  164: int nr;  in zone_reclaimable_pages() [local]
  166: nr = zone_page_state(zone, NR_ACTIVE_FILE) +  in zone_reclaimable_pages()
  170: nr += zone_page_state(zone, NR_ACTIVE_ANON) +  in zone_reclaimable_pages()
  173: return nr;  in zone_reclaimable_pages()
  283: long nr;  in shrink_slab_node() [local]
  298: nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);  in shrink_slab_node()
  300: total_scan = nr;  in shrink_slab_node()
  335: trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,  in shrink_slab_node()
  382: trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);  in shrink_slab_node()
  1918: struct scan_control *sc, unsigned long *nr)  in get_scan_count() [argument]
  [all …]

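The shrink_slab_node() hit at line 298 is worth pausing on: atomic_long_xchg(&shrinker->nr_deferred[nid], 0) claims the whole deferred-scan backlog and zeroes it in a single atomic step, so two concurrent reclaimers cannot both pick up the same backlog. A C11 userspace sketch of that claim-and-reset move (names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long nr_deferred;     /* backlog left by earlier passes */

    /* Take ownership of all deferred work in one atomic exchange. */
    static long claim_deferred(void)
    {
        return atomic_exchange(&nr_deferred, 0);
    }

    int main(void)
    {
        atomic_store(&nr_deferred, 128);
        printf("claimed: %ld\n", claim_deferred());             /* 128 */
        printf("remaining: %ld\n", atomic_load(&nr_deferred));  /* 0 */
        return 0;
    }
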
vmalloc.c
  113: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pte_range() [argument]
  126: struct page *page = pages[*nr];  in vmap_pte_range()
  133: (*nr)++;  in vmap_pte_range()
  139: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pmd_range() [argument]
  149: if (vmap_pte_range(pmd, addr, next, prot, pages, nr))  in vmap_pmd_range()
  156: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pud_range() [argument]
  166: if (vmap_pmd_range(pud, addr, next, prot, pages, nr))  in vmap_pud_range()
  185: int nr = 0;  in vmap_page_range_noflush() [local]
  191: err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);  in vmap_page_range_noflush()
  196: return nr;  in vmap_page_range_noflush()
  [all …]

quicklist.c
  63: void quicklist_trim(int nr, void (*dtor)(void *),  in quicklist_trim() [argument]
  69: q = &get_cpu_var(quicklist)[nr];  in quicklist_trim()
  78: void *p = quicklist_alloc(nr, 0, NULL);  in quicklist_trim()

swap.c
  435: release_pages(pvec->pages, pvec->nr, pvec->cold);  in pagevec_lru_move_fn()
  898: void release_pages(struct page **pages, int nr, bool cold)  in release_pages() [argument]
  907: for (i = 0; i < nr; i++) {  in release_pages()
  1072: pvec->nr = find_get_entries(mapping, start, nr_pages,  in pagevec_lookup_entries()
  1095: pvec->nr = j;  in pagevec_remove_exceptionals()
  1117: pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);  in pagevec_lookup()
  1125: pvec->nr = find_get_pages_tag(mapping, index, tag,  in pagevec_lookup_tag()

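Most of these swap.c hits touch the same data structure: a pagevec is a small fixed-capacity array whose nr field records how many slots are filled, so lookups can return a batch and consumers iterate 0..nr-1. A stripped-down sketch of that shape (the struct and names are simplified stand-ins, not the kernel's exact definition):

    #include <stdio.h>

    #define PAGEVEC_SIZE 14      /* small batch, kept cheap to copy */

    struct item;                 /* stand-in for struct page */

    struct itemvec {
        unsigned nr;                          /* slots currently filled */
        struct item *items[PAGEVEC_SIZE];
    };

    /* Append one entry; report how much space remains afterwards. */
    static unsigned itemvec_add(struct itemvec *v, struct item *it)
    {
        v->items[v->nr++] = it;
        return PAGEVEC_SIZE - v->nr;          /* 0 means "drain me now" */
    }

    int main(void)
    {
        struct itemvec v = { .nr = 0 };

        while (itemvec_add(&v, NULL))         /* fill until full */
            ;
        printf("batched %u items\n", v.nr);   /* prints 14 */
        return 0;
    }
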
mm_init.c
  158: s32 nr = num_present_cpus();  in mm_compute_batch() [local]
  159: s32 batch = max_t(s32, nr*2, 32);  in mm_compute_batch()
  162: memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);  in mm_compute_batch()

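mm_compute_batch() derives two candidate batch sizes: a floor of max(2 * nr_cpus, 32), and a memory-scaled value of roughly 1/256th of RAM per CPU capped at 0x7fffffff. The listing does not show how the two are combined, so the sketch below just computes both, with illustrative inputs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t  nr = 8;                      /* present CPUs */
        uint64_t totalram_pages = 4UL << 20;  /* 16 GiB of 4 KiB pages */

        int32_t  batch = nr * 2 > 32 ? nr * 2 : 32;
        uint64_t memsized_batch = (totalram_pages / nr) / 256;

        if (memsized_batch > 0x7fffffff)      /* clamp to s32 range */
            memsized_batch = 0x7fffffff;

        printf("floor=%d memsized=%llu\n", batch,
               (unsigned long long)memsized_batch);
        return 0;
    }
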
shmem.c
  383: pvec.nr = find_get_entries(mapping, index,  in shmem_unlock_mapping()
  385: if (!pvec.nr)  in shmem_unlock_mapping()
  387: index = indices[pvec.nr - 1] + 1;  in shmem_unlock_mapping()
  389: check_move_unevictable_pages(pvec.pages, pvec.nr);  in shmem_unlock_mapping()
  420: pvec.nr = find_get_entries(mapping, index,  in shmem_undo_range()
  423: if (!pvec.nr)  in shmem_undo_range()
  488: pvec.nr = find_get_entries(mapping, index,  in shmem_undo_range()
  491: if (!pvec.nr) {  in shmem_undo_range()
  1548: unsigned long nr, ret;  in shmem_file_read_iter() [local]
  1555: nr = i_size & ~PAGE_CACHE_MASK;  in shmem_file_read_iter()
  [all …]

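The shmem_unlock_mapping() hits show the standard batched-scan idiom: fetch up to a pagevec's worth of entries, stop when a batch comes back empty, and resume the next lookup just past the last index returned (index = indices[pvec.nr - 1] + 1). A compact sketch of that loop over a plain array (the lookup function and names are made up for illustration):

    #include <stdio.h>

    #define BATCH 4

    /* Toy lookup: report up to 'max' indices in [start, 10) that are
     * multiples of 3, mimicking a sparse radix-tree scan. */
    static unsigned lookup(unsigned start, unsigned *indices, unsigned max)
    {
        unsigned nr = 0;

        for (unsigned i = start; i < 10 && nr < max; i++)
            if (i % 3 == 0)
                indices[nr++] = i;
        return nr;
    }

    int main(void)
    {
        unsigned indices[BATCH], index = 0, nr;

        while ((nr = lookup(index, indices, BATCH)) != 0) {
            for (unsigned i = 0; i < nr; i++)
                printf("%u ", indices[i]);    /* 0 3 6 9 */
            index = indices[nr - 1] + 1;      /* resume past the batch */
        }
        printf("\n");
        return 0;
    }
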
percpu.c
  770: int nr = page_end - page_start;  in pcpu_chunk_populated() [local]
  774: bitmap_set(chunk->populated, page_start, nr);  in pcpu_chunk_populated()
  775: chunk->nr_populated += nr;  in pcpu_chunk_populated()
  776: pcpu_nr_empty_pop_pages += nr;  in pcpu_chunk_populated()
  792: int nr = page_end - page_start;  in pcpu_chunk_depopulated() [local]
  796: bitmap_clear(chunk->populated, page_start, nr);  in pcpu_chunk_depopulated()
  797: chunk->nr_populated -= nr;  in pcpu_chunk_depopulated()
  798: pcpu_nr_empty_pop_pages -= nr;  in pcpu_chunk_depopulated()
  1209: int nr = min(re - rs, nr_to_pop);  in pcpu_balance_workfn() [local]
  1211: ret = pcpu_populate_chunk(chunk, rs, rs + nr);  in pcpu_balance_workfn()
  [all …]

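pcpu_chunk_populated() and its depopulate twin keep three views of the same fact in lockstep: a bitmap of populated pages, a per-chunk count, and a global count, all moved by the same nr = page_end - page_start. A sketch of that mirrored bookkeeping over a byte-per-page map (simplified; the kernel uses the bitmap_set()/bitmap_clear() helpers):

    #include <stdio.h>

    #define CHUNK_PAGES 64

    static unsigned char populated[CHUNK_PAGES]; /* one byte per page */
    static int chunk_nr_populated;
    static int global_empty_pop_pages;

    static void chunk_populated(int page_start, int page_end)
    {
        int nr = page_end - page_start;

        for (int i = page_start; i < page_end; i++)
            populated[i] = 1;                /* the "bitmap" view */
        chunk_nr_populated     += nr;        /* per-chunk view */
        global_empty_pop_pages += nr;        /* global view stays in sync */
    }

    int main(void)
    {
        chunk_populated(4, 12);
        printf("chunk=%d global=%d\n",
               chunk_nr_populated, global_empty_pop_pages); /* 8 8 */
        return 0;
    }
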
readahead.c
  240: unsigned long max_sane_readahead(unsigned long nr)  in max_sane_readahead() [argument]
  242: return min(nr, MAX_READAHEAD);  in max_sane_readahead()
  554: pgoff_t index, unsigned long nr)  in do_readahead() [argument]
  559: return force_page_cache_readahead(mapping, filp, index, nr);  in do_readahead()

mincore.c
  101: unsigned long nr = (end - addr) >> PAGE_SHIFT;  in mincore_unmapped_range() [local]
  108: for (i = 0; i < nr; i++, pgoff++)  in mincore_unmapped_range()
  111: for (i = 0; i < nr; i++)  in mincore_unmapped_range()

highmem.c
  335: unsigned long nr;  in kunmap_high() [local]
  344: nr = PKMAP_NR(vaddr);  in kunmap_high()
  351: switch (--pkmap_count[nr]) {  in kunmap_high()

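kunmap_high() recovers the pkmap slot index from the virtual address itself (PKMAP_NR() is essentially (vaddr - base) >> PAGE_SHIFT) and then decrements that slot's use count, with the post-decrement value deciding what happens next. A userspace sketch of the index recovery and the switch; the base address, table size, and case meanings here are illustrative:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PKMAP_BASE 0xfe000000UL
    #define PKMAP_NR(va) (((va) - PKMAP_BASE) >> PAGE_SHIFT)

    static int pkmap_count[64];

    static void kunmap_slot(unsigned long vaddr)
    {
        unsigned long nr = PKMAP_NR(vaddr);  /* slot from address alone */

        switch (--pkmap_count[nr]) {
        case 0:
            printf("slot %lu: underflow, would be a bug\n", nr);
            break;
        case 1:
            printf("slot %lu: now unused, may be reclaimed\n", nr);
            break;
        default:
            printf("slot %lu: still mapped elsewhere\n", nr);
            break;
        }
    }

    int main(void)
    {
        pkmap_count[3] = 2;                            /* one active user */
        kunmap_slot(PKMAP_BASE + (3UL << PAGE_SHIFT)); /* -> now unused */
        return 0;
    }
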
swap_state.c
  265: void free_pages_and_swap_cache(struct page **pages, int nr)  in free_pages_and_swap_cache() [argument]
  271: for (i = 0; i < nr; i++)  in free_pages_and_swap_cache()
  273: release_pages(pagep, nr, false);  in free_pages_and_swap_cache()

mlock.c
  335: int nr = pagevec_count(pvec);  in __munlock_pagevec() [local]
  336: int delta_munlocked = -nr;  in __munlock_pagevec()
  344: for (i = 0; i < nr; i++) {  in __munlock_pagevec()
  376: for (i = 0; i < nr; i++) {  in __munlock_pagevec()

filemap.c
  1486: unsigned long nr, ret;  in do_generic_file_read() [local]
  1536: nr = PAGE_CACHE_SIZE;  in do_generic_file_read()
  1538: nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;  in do_generic_file_read()
  1539: if (nr <= offset) {  in do_generic_file_read()
  1544: nr = nr - offset;  in do_generic_file_read()
  1566: ret = copy_page_to_iter(page, offset, nr, iter);  in do_generic_file_read()
  1576: if (ret < nr) {  in do_generic_file_read()

memory.c
  202: batch->nr = 0;  in tlb_next_batch()
  224: tlb->local.nr = 0;  in tlb_gather_mmu()
  253: free_pages_and_swap_cache(batch->pages, batch->nr);  in tlb_flush_mmu_free()
  254: batch->nr = 0;  in tlb_flush_mmu_free()
  298: batch->pages[batch->nr++] = page;  in __tlb_remove_page()
  299: if (batch->nr == batch->max) {  in __tlb_remove_page()
  304: VM_BUG_ON_PAGE(batch->nr > batch->max, page);  in __tlb_remove_page()
  306: return batch->max - batch->nr;  in __tlb_remove_page()
  342: for (i = 0; i < batch->nr; i++)  in tlb_remove_table_rcu()
  377: (*batch)->nr = 0;  in tlb_remove_table()
  [all …]

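__tlb_remove_page() shows the batching half of the mmu_gather machinery: pages are appended at batch->pages[batch->nr++], a full batch triggers a flush, and the function returns the space left so the caller knows when to stop queueing. A freestanding sketch of that append-then-report pattern, where the "flush" merely empties the array:

    #include <stdio.h>

    #define BATCH_MAX 8

    struct gather_batch {
        int   nr;                  /* pages queued so far */
        int   max;                 /* capacity of pages[] */
        void *pages[BATCH_MAX];
    };

    static void flush_batch(struct gather_batch *b)
    {
        /* real code would free the pages and drop swap-cache refs */
        b->nr = 0;
    }

    /* Queue one page; flush when full; return remaining capacity. */
    static int remove_page(struct gather_batch *b, void *page)
    {
        b->pages[b->nr++] = page;
        if (b->nr == b->max)
            flush_batch(b);
        return b->max - b->nr;
    }

    int main(void)
    {
        struct gather_batch b = { .nr = 0, .max = BATCH_MAX };

        for (int i = 0; i < 10; i++)
            printf("space left: %d\n", remove_page(&b, NULL));
        return 0;
    }
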
memcontrol.c
  943: unsigned long nr = 0;  in mem_cgroup_node_nr_lru_pages() [local]
  956: nr += mz->lru_size[lru];  in mem_cgroup_node_nr_lru_pages()
  959: return nr;  in mem_cgroup_node_nr_lru_pages()
  965: unsigned long nr = 0;  in mem_cgroup_nr_lru_pages() [local]
  969: nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);  in mem_cgroup_nr_lru_pages()
  970: return nr;  in mem_cgroup_nr_lru_pages()
  4347: unsigned long nr;  in memcg_numa_stat_show() [local]
  4351: nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);  in memcg_numa_stat_show()
  4352: seq_printf(m, "%s=%lu", stat->name, nr);  in memcg_numa_stat_show()
  4354: nr = mem_cgroup_node_nr_lru_pages(memcg, nid,  in memcg_numa_stat_show()
  [all …]

zsmalloc.c
  1208: int nr;  in init_zs_size_classes() [local]
  1210: nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;  in init_zs_size_classes()
  1212: nr += 1;  in init_zs_size_classes()
  1214: zs_size_classes = nr;  in init_zs_size_classes()

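init_zs_size_classes() counts how many size classes fit between the minimum and maximum allocation sizes at a fixed step; the + 1 covers the endpoint, and the later nr += 1 (its guard condition is not visible in this listing, so the remainder check below is an assumption) rounds up when the range is not an exact multiple of the step. The arithmetic with illustrative constants, not the kernel's:

    #include <stdio.h>

    #define ZS_MIN_ALLOC_SIZE    32
    #define ZS_MAX_ALLOC_SIZE    4096
    #define ZS_SIZE_CLASS_DELTA  16

    int main(void)
    {
        int nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE)
                     / ZS_SIZE_CLASS_DELTA + 1;

        /* round up if the range isn't a whole number of steps */
        if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
            nr += 1;

        printf("size classes: %d\n", nr);  /* (4096-32)/16 + 1 = 255 */
        return 0;
    }
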
memory-failure.c
  246: int nr;  in shake_page() [local]
  254: nr = shrink_slab(&shrink, 1000, 1000);  in shake_page()
  257: } while (nr > 10);  in shake_page()

slab.c
  815: int nr = min3(from->avail, max, to->limit - to->avail);  in transfer_objects() [local]
  817: if (!nr)  in transfer_objects()
  820: memcpy(to->entry + to->avail, from->entry + from->avail -nr,  in transfer_objects()
  821: sizeof(void *) *nr);  in transfer_objects()
  823: from->avail -= nr;  in transfer_objects()
  824: to->avail += nr;  in transfer_objects()
  825: return nr;  in transfer_objects()

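transfer_objects() moves a block of object pointers between two caching arrays in one memcpy: the count is clamped three ways (what the source holds, what the caller wants, what the destination can still take), and the block is taken from the tail of the source so both avail counters adjust with a plain +=/-=. A self-contained sketch of the same move:

    #include <stdio.h>
    #include <string.h>

    #define LIMIT 8

    struct array_cache {
        unsigned avail;            /* entries currently held */
        unsigned limit;            /* capacity of entry[] */
        void    *entry[LIMIT];
    };

    static unsigned min3u(unsigned a, unsigned b, unsigned c)
    {
        unsigned m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* Move up to 'max' pointers from the tail of 'from' into 'to'. */
    static unsigned transfer(struct array_cache *to,
                             struct array_cache *from, unsigned max)
    {
        unsigned nr = min3u(from->avail, max, to->limit - to->avail);

        if (!nr)
            return 0;
        memcpy(to->entry + to->avail,
               from->entry + from->avail - nr,
               sizeof(void *) * nr);
        from->avail -= nr;
        to->avail   += nr;
        return nr;
    }

    int main(void)
    {
        struct array_cache a = { .avail = 5, .limit = LIMIT };
        struct array_cache b = { .avail = 6, .limit = LIMIT };

        printf("moved %u\n", transfer(&b, &a, 4));  /* clamped to 2 */
        return 0;
    }
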
slub.c
  908: int nr = 0;  in on_freelist() [local]
  914: while (fp && nr <= page->objects) {  in on_freelist()
  933: nr++;  in on_freelist()
  946: if (page->inuse != page->objects - nr) {  in on_freelist()
  948: "counted were %d", page->inuse, page->objects - nr);  in on_freelist()
  949: page->inuse = page->objects - nr;  in on_freelist()

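on_freelist() is a consistency check: walk the free list counting entries, but bound the walk by page->objects so a corrupted (cyclic) list cannot loop forever, then compare and repair inuse = objects - free. A sketch of the bounded count over a plain singly linked list:

    #include <stdio.h>

    struct obj { struct obj *next; };

    /* Count free-list entries, refusing to walk past 'objects' nodes
     * so a corrupted, cyclic list still terminates the scan. */
    static int count_free(struct obj *fp, int objects)
    {
        int nr = 0;

        while (fp && nr <= objects) {
            fp = fp->next;
            nr++;
        }
        return nr;
    }

    int main(void)
    {
        struct obj o3 = { NULL }, o2 = { &o3 }, o1 = { &o2 };
        int objects = 8, inuse = 4;
        int nr = count_free(&o1, objects);    /* counts 3 free objects */

        if (inuse != objects - nr) {
            printf("inuse %d != %d, fixing up\n", inuse, objects - nr);
            inuse = objects - nr;             /* repair, as on_freelist() does */
        }
        return 0;
    }
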
page_alloc.c
  3346: unsigned long nr[MAX_ORDER], flags, total = 0;  in show_free_areas() [local]
  3359: nr[order] = area->nr_free;  in show_free_areas()
  3360: total += nr[order] << order;  in show_free_areas()
  3370: printk("%lu*%lukB ", nr[order], K(1UL) << order);  in show_free_areas()
  3371: if (nr[order])  in show_free_areas()

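show_free_areas() totals the buddy free lists by noting that one free block of a given order is 2^order base pages, hence total += nr[order] << order; the printout then renders each order as count*sizekB. A standalone rendition of that accounting, assuming 4 KiB pages:

    #include <stdio.h>

    #define MAX_ORDER 11
    #define K(pages) ((pages) << 2)       /* 4 KiB pages -> kilobytes */

    int main(void)
    {
        /* free-block counts per order, as a buddy allocator might report */
        unsigned long nr[MAX_ORDER] = { 10, 4, 2, 1 };
        unsigned long total = 0;

        for (int order = 0; order < MAX_ORDER; order++) {
            total += nr[order] << order;  /* 2^order pages per block */
            if (nr[order])
                printf("%lu*%lukB ", nr[order], K(1UL) << order);
        }
        printf("= %lukB total\n", K(total));  /* 34 pages = 136kB */
        return 0;
    }
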
hugetlb.c
  2279: unsigned int nr = 0;  in cpuset_mems_nr() [local]
  2282: nr += array[node];  in cpuset_mems_nr()
  2284: return nr;  in cpuset_mems_nr()