mm/: matches for the identifier nr, grouped by file (some per-file listings are truncated).

mm/gup.c
  1785: static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,  [undo_dev_pagemap() argument]
  1788: while ((*nr) - nr_start) {  [undo_dev_pagemap()]
  1789: struct page *page = pages[--(*nr)];  [undo_dev_pagemap()]
  1812: unsigned int flags, struct page **pages, int *nr)  [gup_pte_range() argument]
  1815: int nr_start = *nr, ret = 0;  [gup_pte_range()]
  1839: undo_dev_pagemap(nr, nr_start, pages);  [gup_pte_range()]
  1860: pages[*nr] = page;  [gup_pte_range()]
  1861: (*nr)++;  [gup_pte_range()]
  1885: unsigned int flags, struct page **pages, int *nr)  [gup_pte_range() argument]
  1893: unsigned long end, struct page **pages, int *nr)  [__gup_device_huge() argument]
  ... (further matches elided)

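Throughout the gup.c hits above, nr is an in/out cursor into the caller-supplied pages[] array: each newly pinned page is stored at pages[*nr] and the counter advanced, and on error undo_dev_pagemap() walks the cursor back to nr_start, dropping whatever this call had pinned. A minimal userspace sketch of that advance-and-roll-back pattern; struct page, try_pin() and release() here are hypothetical stand-ins, not kernel interfaces:

    #include <stdbool.h>
    #include <stdio.h>

    struct page { int id; };   /* stand-in for the kernel's struct page */

    static bool try_pin(struct page *p) { return p->id != 3; }  /* pretend page 3 cannot be pinned */
    static void release(struct page *p) { printf("release %d\n", p->id); }

    /* Same shape as undo_dev_pagemap(): unwind everything pinned since nr_start. */
    static void undo_pins(int *nr, int nr_start, struct page **pages)
    {
        while ((*nr) - nr_start) {
            struct page *page = pages[--(*nr)];
            release(page);
        }
    }

    /* Pin candidates into pages[], advancing *nr; roll the batch back on error. */
    static int pin_batch(struct page **cand, int count, struct page **pages, int *nr)
    {
        int nr_start = *nr;

        for (int i = 0; i < count; i++) {
            if (!try_pin(cand[i])) {
                undo_pins(nr, nr_start, pages);
                return -1;
            }
            pages[*nr] = cand[i];
            (*nr)++;
        }
        return 0;
    }

    int main(void)
    {
        struct page p[4] = { {1}, {2}, {3}, {4} };
        struct page *cand[4] = { &p[0], &p[1], &p[2], &p[3] };
        struct page *pages[4];
        int nr = 0;

        if (pin_batch(cand, 4, pages, &nr))
            printf("batch failed, nr back at %d\n", nr);
        return 0;
    }
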
mm/gup_benchmark.c
  27: int nr;  [__gup_benchmark_ioctl() local]
  40: nr = gup->nr_pages_per_call;  [__gup_benchmark_ioctl()]
  43: if (nr != gup->nr_pages_per_call)  [__gup_benchmark_ioctl()]
  46: next = addr + nr * PAGE_SIZE;  [__gup_benchmark_ioctl()]
  49: nr = (next - addr) / PAGE_SIZE;  [__gup_benchmark_ioctl()]
  54: nr = get_user_pages_fast(addr, nr, gup->flags & 1,  [__gup_benchmark_ioctl()]
  58: nr = get_user_pages(addr, nr,  [__gup_benchmark_ioctl()]
  63: nr = get_user_pages(addr, nr, gup->flags & 1, pages + i,  [__gup_benchmark_ioctl()]
  72: if (nr <= 0)  [__gup_benchmark_ioctl()]
  74: i += nr;  [__gup_benchmark_ioctl()]

mm/swap_state.c
  62: #define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)  [argument]
  73: unsigned int i, j, nr;  [total_swapcache_pages() local]
  88: nr = nr_swapper_spaces[i];  [total_swapcache_pages()]
  90: for (j = 0; j < nr; j++)  [total_swapcache_pages()]
  119: unsigned long i, nr = compound_nr(page);  [add_to_swap_cache() local]
  125: page_ref_add(page, nr);  [add_to_swap_cache()]
  133: for (i = 0; i < nr; i++) {  [add_to_swap_cache()]
  139: address_space->nrpages += nr;  [add_to_swap_cache()]
  140: __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);  [add_to_swap_cache()]
  141: ADD_CACHE_INFO(add_total, nr);  [add_to_swap_cache()]
  ... (further matches elided)

mm/mmu_gather.c
  35: batch->nr = 0;  [tlb_next_batch()]
  48: for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {  [tlb_batch_pages_flush()]
  49: free_pages_and_swap_cache(batch->pages, batch->nr);  [tlb_batch_pages_flush()]
  50: batch->nr = 0;  [tlb_batch_pages_flush()]
  81: batch->pages[batch->nr++] = page;  [__tlb_remove_page_size()]
  82: if (batch->nr == batch->max) {  [__tlb_remove_page_size()]
  87: VM_BUG_ON_PAGE(batch->nr > batch->max, page);  [__tlb_remove_page_size()]
  140: for (i = 0; i < batch->nr; i++)  [tlb_remove_table_rcu()]
  168: (*batch)->nr = 0;  [tlb_remove_table()]
  171: (*batch)->tables[(*batch)->nr++] = table;  [tlb_remove_table()]
  ... (further matches elided)

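The mmu_gather hits are the fill-and-flush batching idiom: pages are queued at batch->pages[batch->nr++], and when nr reaches the batch capacity the whole batch is freed and nr reset to zero. A small standalone sketch of the same bookkeeping; gather_batch, add_page() and flush_batch() are illustrative names, not the kernel's mmu_gather API:

    #include <stdio.h>

    #define BATCH_MAX 8

    struct gather_batch {
        int nr;                  /* pages currently queued */
        int max;                 /* capacity of pages[] */
        void *pages[BATCH_MAX];
    };

    /* Release everything queued so far and reset the fill cursor. */
    static void flush_batch(struct gather_batch *b)
    {
        for (int i = 0; i < b->nr; i++)
            printf("free %p\n", b->pages[i]);
        b->nr = 0;
    }

    /* Queue one page; flush when the batch fills, as __tlb_remove_page_size() does. */
    static void add_page(struct gather_batch *b, void *page)
    {
        b->pages[b->nr++] = page;
        if (b->nr == b->max)
            flush_batch(b);
    }

    int main(void)
    {
        struct gather_batch b = { .nr = 0, .max = BATCH_MAX };
        int dummy[20];

        for (int i = 0; i < 20; i++)
            add_page(&b, &dummy[i]);
        flush_batch(&b);         /* drain the partially filled tail */
        return 0;
    }
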
mm/vmscan.c
  130: } nr;  [member]
  335: unsigned long nr;  [zone_reclaimable_pages() local]
  337: nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +  [zone_reclaimable_pages()]
  340: nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +  [zone_reclaimable_pages()]
  343: return nr;  [zone_reclaimable_pages()]
  469: long nr;  [do_shrink_slab() local]
  488: nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);  [do_shrink_slab()]
  490: total_scan = nr;  [do_shrink_slab()]
  509: next_deferred = nr;  [do_shrink_slab()]
  536: trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,  [do_shrink_slab()]
  ... (further matches elided)

mm/swap_slots.c
  147: cache->nr = 0;  [alloc_swap_slot_cache()]
  179: swapcache_free_entries(cache->slots + cache->cur, cache->nr);  [drain_slots_cache_cpu()]
  181: cache->nr = 0;  [drain_slots_cache_cpu()]
  267: if (!use_swap_slot_cache || cache->nr)  [refill_swap_slots_cache()]
  272: cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,  [refill_swap_slots_cache()]
  275: return cache->nr;  [refill_swap_slots_cache()]
  338: if (cache->nr) {  [get_swap_page()]
  342: cache->nr--;  [get_swap_page()]

mm/mincore.c
  98: unsigned long nr = (end - addr) >> PAGE_SHIFT;  [__mincore_unmapped_range() local]
  105: for (i = 0; i < nr; i++, pgoff++)  [__mincore_unmapped_range()]
  108: for (i = 0; i < nr; i++)  [__mincore_unmapped_range()]
  111: return nr;  [__mincore_unmapped_range()]
  129: int nr = (end - addr) >> PAGE_SHIFT;  [mincore_pte_range() local]
  133: memset(vec, 1, nr);  [mincore_pte_range()]
  175: walk->private += nr;  [mincore_pte_range()]

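Both mincore.c hits derive nr from a byte range: with addr and end page aligned, (end - addr) >> PAGE_SHIFT is the number of pages the range covers. A trivial check of that arithmetic, assuming a 4 KiB page size:

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long addr = 0x10000;          /* page-aligned start */
        unsigned long end  = addr + 5 * PAGE_SIZE;
        unsigned long nr   = (end - addr) >> PAGE_SHIFT;

        printf("%lu pages\n", nr);             /* prints 5 */
        return 0;
    }
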
mm/rmap.c
  1116: int nr = compound ? hpage_nr_pages(page) : 1;  [do_page_add_anon_rmap() local]
  1125: __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);  [do_page_add_anon_rmap()]
  1154: int nr = compound ? hpage_nr_pages(page) : 1;  [page_add_new_anon_rmap() local]
  1169: __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);  [page_add_new_anon_rmap()]
  1182: int i, nr = 1;  [page_add_file_rmap() local]
  1187: for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {  [page_add_file_rmap()]
  1189: nr++;  [page_add_file_rmap()]
  1208: __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);  [page_add_file_rmap()]
  1215: int i, nr = 1;  [page_remove_file_rmap() local]
  1229: for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {  [page_remove_file_rmap()]
  ... (further matches elided)

mm/shmem.c
  612: unsigned long nr = compound_nr(page);  [shmem_add_to_page_cache() local]
  615: VM_BUG_ON_PAGE(index != round_down(index, nr), page);  [shmem_add_to_page_cache()]
  620: page_ref_add(page, nr);  [shmem_add_to_page_cache()]
  635: if (++i < nr) {  [shmem_add_to_page_cache()]
  643: mapping->nrpages += nr;  [shmem_add_to_page_cache()]
  644: __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);  [shmem_add_to_page_cache()]
  645: __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);  [shmem_add_to_page_cache()]
  652: page_ref_sub(page, nr);  [shmem_add_to_page_cache()]
  779: pvec.nr = find_get_entries(mapping, index,  [shmem_unlock_mapping()]
  781: if (!pvec.nr)  [shmem_unlock_mapping()]
  ... (further matches elided)

mm/vmalloc.c
  138: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  [vmap_pte_range() argument]
  151: struct page *page = pages[*nr];  [vmap_pte_range()]
  158: (*nr)++;  [vmap_pte_range()]
  164: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  [vmap_pmd_range() argument]
  174: if (vmap_pte_range(pmd, addr, next, prot, pages, nr))  [vmap_pmd_range()]
  181: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  [vmap_pud_range() argument]
  191: if (vmap_pmd_range(pud, addr, next, prot, pages, nr))  [vmap_pud_range()]
  198: unsigned long end, pgprot_t prot, struct page **pages, int *nr)  [vmap_p4d_range() argument]
  208: if (vmap_pud_range(p4d, addr, next, prot, pages, nr))  [vmap_p4d_range()]
  227: int nr = 0;  [vmap_page_range_noflush() local]
  ... (further matches elided)

mm/swap.c
  215: release_pages(pvec->pages, pvec->nr);  [pagevec_lru_move_fn()]
  760: void release_pages(struct page **pages, int nr)  [release_pages() argument]
  769: for (i = 0; i < nr; i++) {  [release_pages()]
  995: pvec->nr = find_get_entries(mapping, start, nr_entries,  [pagevec_lookup_entries()]
  1018: pvec->nr = j;  [pagevec_remove_exceptionals()]
  1044: pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,  [pagevec_lookup_range()]
  1054: pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,  [pagevec_lookup_range_tag()]
  1064: pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,  [pagevec_lookup_range_nr_tag()]

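Most of the swap.c hits simply store a lookup result count into pvec->nr; the pagevec_remove_exceptionals() hit is the tail of an in-place compaction, where surviving entries are copied down and nr rewritten to the new count. A sketch of that compaction over a plain array; struct vec and keep_entry() are made-up stand-ins for the pagevec and its filter:

    #include <stdbool.h>
    #include <stdio.h>

    struct vec {
        unsigned int nr;
        int entries[16];
    };

    static bool keep_entry(int e)
    {
        return e >= 0;           /* pretend negative entries are "exceptional" */
    }

    /* Compact in place: copy survivors down, then shrink nr, like
     * pagevec_remove_exceptionals() does with pvec->pages and pvec->nr. */
    static void remove_exceptionals(struct vec *v)
    {
        unsigned int i, j;

        for (i = 0, j = 0; i < v->nr; i++) {
            int e = v->entries[i];

            if (keep_entry(e))
                v->entries[j++] = e;
        }
        v->nr = j;
    }

    int main(void)
    {
        struct vec v = { .nr = 6, .entries = { 1, -1, 2, -2, 3, 4 } };

        remove_exceptionals(&v);
        printf("nr=%u\n", v.nr);  /* prints 4 */
        return 0;
    }
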
mm/percpu.c
  571: static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)  [pcpu_update_empty_pages() argument]
  573: chunk->nr_empty_pop_pages += nr;  [pcpu_update_empty_pages()]
  575: pcpu_nr_empty_pop_pages += nr;  [pcpu_update_empty_pages()]
  1134: unsigned long nr,  [pcpu_find_zero_area() argument]
  1147: end = index + nr;  [pcpu_find_zero_area()]
  1475: int nr = page_end - page_start;  [pcpu_chunk_populated() local]
  1479: bitmap_set(chunk->populated, page_start, nr);  [pcpu_chunk_populated()]
  1480: chunk->nr_populated += nr;  [pcpu_chunk_populated()]
  1481: pcpu_nr_populated += nr;  [pcpu_chunk_populated()]
  1483: pcpu_update_empty_pages(chunk, nr);  [pcpu_chunk_populated()]
  ... (further matches elided)

mm/filemap.c
  123: unsigned int nr = 1;  [page_cache_delete() local]
  130: nr = compound_nr(page);  [page_cache_delete()]
  135: VM_BUG_ON_PAGE(nr != 1 && shadow, page);  [page_cache_delete()]
  144: mapping->nrexceptional += nr;  [page_cache_delete()]
  153: mapping->nrpages -= nr;  [page_cache_delete()]
  159: int nr;  [unaccount_page_cache_page() local]
  200: nr = hpage_nr_pages(page);  [unaccount_page_cache_page()]
  202: __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);  [unaccount_page_cache_page()]
  204: __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);  [unaccount_page_cache_page()]
  1287: static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)  [clear_bit_unlock_is_negative_byte() argument]
  ... (further matches elided)

mm/mm_init.c
  146: s32 nr = num_present_cpus();  [mm_compute_batch() local]
  147: s32 batch = max_t(s32, nr*2, 32);  [mm_compute_batch()]
  150: memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);  [mm_compute_batch()]

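The mm_compute_batch() lines compute two candidate batch sizes: a CPU-scaled floor of max(nr*2, 32) and a memory-scaled value of (totalram_pages / nr) / 256 clamped to 0x7fffffff. A throwaway sketch of that arithmetic with assumed inputs (8 CPUs, 4 GiB of 4 KiB pages), which yields 32 and 512:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        long nr = 8;                                        /* assumed CPU count */
        unsigned long long total_pages = 4ULL << 30 >> 12;  /* 4 GiB of 4 KiB pages */

        long batch = nr * 2 > 32 ? nr * 2 : 32;             /* max(nr*2, 32) */
        unsigned long long memsized = total_pages / nr / 256;
        if (memsized > INT_MAX)
            memsized = INT_MAX;                             /* clamp like min_t(..., 0x7fffffff) */

        printf("batch=%ld memsized_batch=%llu\n", batch, memsized);  /* 32 and 512 */
        return 0;
    }
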
mm/sparse.c
  234: unsigned long nr, start_sec = pfn_to_section_nr(pfn);  [subsection_map_init() local]
  239: for (nr = start_sec; nr <= end_sec; nr++) {  [subsection_map_init()]
  245: ms = __nr_to_section(nr);  [subsection_map_init()]
  248: pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,  [subsection_map_init()]

mm/highmem.c
  331: unsigned long nr;  [kunmap_high() local]
  340: nr = PKMAP_NR(vaddr);  [kunmap_high()]
  347: switch (--pkmap_count[nr]) {  [kunmap_high()]

mm/memory_hotplug.c
  291: unsigned long nr, start_sec, end_sec;  [__add_pages() local]
  312: for (nr = start_sec; nr <= end_sec; nr++) {  [__add_pages()]
  522: unsigned long nr, start_sec, end_sec;  [__remove_pages() local]
  531: for (nr = start_sec; nr <= end_sec; nr++) {  [__remove_pages()]

mm/mlock.c
  293: int nr = pagevec_count(pvec);  [__munlock_pagevec() local]
  294: int delta_munlocked = -nr;  [__munlock_pagevec()]
  302: for (i = 0; i < nr; i++) {  [__munlock_pagevec()]
  334: for (i = 0; i < nr; i++) {  [__munlock_pagevec()]

mm/truncate.c
  104: pvec->nr = j;  [truncate_exceptional_pvec_entries()]
  180: pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;  [truncate_cleanup_page() local]
  181: unmap_mapping_pages(mapping, page->index, nr, false);  [truncate_cleanup_page()]

mm/memcontrol.c
  3735: unsigned long nr = 0;  [mem_cgroup_node_nr_lru_pages() local]
  3743: nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);  [mem_cgroup_node_nr_lru_pages()]
  3745: return nr;  [mem_cgroup_node_nr_lru_pages()]
  3751: unsigned long nr = 0;  [mem_cgroup_nr_lru_pages() local]
  3757: nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);  [mem_cgroup_nr_lru_pages()]
  3759: return nr;  [mem_cgroup_nr_lru_pages()]
  3777: unsigned long nr;  [memcg_numa_stat_show() local]
  3781: nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);  [memcg_numa_stat_show()]
  3782: seq_printf(m, "%s=%lu", stat->name, nr);  [memcg_numa_stat_show()]
  3784: nr = mem_cgroup_node_nr_lru_pages(memcg, nid,  [memcg_numa_stat_show()]
  ... (further matches elided)

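Both mem_cgroup*_nr_lru_pages() helpers accumulate nr by walking the LRU list types and adding only the counters whose bit is set in the caller's lru_mask. The mask-and-sum pattern, reduced to a standalone sketch with invented counter values:

    #include <stdio.h>

    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
                    LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, LRU_UNEVICTABLE, NR_LRU_LISTS };

    #define BIT(x) (1U << (x))

    /* Sum the counters whose LRU bit is set in mask, the way
     * mem_cgroup_nr_lru_pages() sums memcg_page_state_local() values. */
    static unsigned long nr_lru_pages(const unsigned long *state, unsigned int mask)
    {
        unsigned long nr = 0;

        for (int lru = 0; lru < NR_LRU_LISTS; lru++) {
            if (!(mask & BIT(lru)))
                continue;
            nr += state[lru];
        }
        return nr;
    }

    int main(void)
    {
        unsigned long state[NR_LRU_LISTS] = { 10, 20, 30, 40, 5 };
        unsigned int file_mask = BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE);

        printf("file pages: %lu\n", nr_lru_pages(state, file_mask));  /* prints 70 */
        return 0;
    }
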
mm/mempool.c
  28: const int nr = pool->curr_nr;  [poison_error() local]
  35: pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");  [poison_error()]

mm/slab_common.c
  104: void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)  [__kmem_cache_free_bulk() argument]
  108: for (i = 0; i < nr; i++) {  [__kmem_cache_free_bulk()]
  116: int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,  [__kmem_cache_alloc_bulk() argument]
  121: for (i = 0; i < nr; i++) {  [__kmem_cache_alloc_bulk()]

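The slab_common.c hits are the generic bulk fallbacks: a bulk free is just nr single-object frees, and a bulk allocation that fails part-way frees what it already obtained and reports failure. A userspace sketch of that contract using malloc/free in place of the slab allocator:

    #include <stdlib.h>

    /* Free the first nr objects in p, like __kmem_cache_free_bulk(). */
    static void free_bulk(size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++)
            free(p[i]);
    }

    /* Allocate nr objects into p; on failure, undo the partial batch and
     * report 0, mirroring the generic __kmem_cache_alloc_bulk() fallback. */
    static int alloc_bulk(size_t size, size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++) {
            p[i] = malloc(size);
            if (!p[i]) {
                free_bulk(i, p);
                return 0;
            }
        }
        return (int)nr;
    }

    int main(void)
    {
        void *objs[8];

        if (alloc_bulk(64, 8, objs))
            free_bulk(8, objs);
        return 0;
    }
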
mm/slab.c
  578: int nr = min3(from->avail, max, to->limit - to->avail);  [transfer_objects() local]
  580: if (!nr)  [transfer_objects()]
  583: memcpy(to->entry + to->avail, from->entry + from->avail -nr,  [transfer_objects()]
  584: sizeof(void *) *nr);  [transfer_objects()]
  586: from->avail -= nr;  [transfer_objects()]
  587: to->avail += nr;  [transfer_objects()]
  588: return nr;  [transfer_objects()]

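transfer_objects() moves as many object pointers as both caches allow: nr = min3(source objects available, requested max, free room in the destination), followed by a single memcpy from the source's tail. The same bookkeeping as a standalone sketch; array_cache here is a reduced stand-in for the kernel structure:

    #include <stdio.h>
    #include <string.h>

    struct array_cache {
        unsigned int avail;      /* objects currently stored */
        unsigned int limit;      /* capacity of entry[] */
        void *entry[32];
    };

    static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
    {
        unsigned int m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* Move up to max objects from the tail of 'from' to the tail of 'to'. */
    static unsigned int transfer(struct array_cache *to, struct array_cache *from,
                                 unsigned int max)
    {
        unsigned int nr = min3u(from->avail, max, to->limit - to->avail);

        if (!nr)
            return 0;

        memcpy(to->entry + to->avail, from->entry + from->avail - nr,
               sizeof(void *) * nr);
        from->avail -= nr;
        to->avail += nr;
        return nr;
    }

    int main(void)
    {
        static int obj[5];
        struct array_cache from = { .avail = 5, .limit = 32,
                                    .entry = { &obj[0], &obj[1], &obj[2], &obj[3], &obj[4] } };
        struct array_cache to = { .avail = 0, .limit = 4 };

        printf("moved %u\n", transfer(&to, &from, 16));  /* prints 4 */
        return 0;
    }
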
mm/ksm.c
  1362: int nr = 0;  [stable_node_dup() local]
  1389: nr += 1;  [stable_node_dup()]
  1415: if (prune_stale_stable_nodes && nr == 1) {  [stable_node_dup()]

mm/swapfile.c
  733: unsigned char usage, int nr,  [scan_swap_map_slots() argument]
  743: if (nr > SWAP_BATCH)  [scan_swap_map_slots()]
  744: nr = SWAP_BATCH;  [scan_swap_map_slots()]
  855: if ((n_ret == nr) || (offset >= si->highest_bit))  [scan_swap_map_slots()]