
Searched refs:nr (Results 1 – 25 of 35) sorted by relevance


/mm/
gup_benchmark.c
72 int nr; in __gup_benchmark_ioctl() local
92 nr = gup->nr_pages_per_call; in __gup_benchmark_ioctl()
95 if (nr != gup->nr_pages_per_call) in __gup_benchmark_ioctl()
98 next = addr + nr * PAGE_SIZE; in __gup_benchmark_ioctl()
101 nr = (next - addr) / PAGE_SIZE; in __gup_benchmark_ioctl()
109 nr = get_user_pages_fast(addr, nr, gup->flags, in __gup_benchmark_ioctl()
113 nr = get_user_pages(addr, nr, gup->flags, pages + i, in __gup_benchmark_ioctl()
117 nr = pin_user_pages_fast(addr, nr, gup->flags, in __gup_benchmark_ioctl()
121 nr = pin_user_pages(addr, nr, gup->flags, pages + i, in __gup_benchmark_ioctl()
125 nr = pin_user_pages(addr, nr, in __gup_benchmark_ioctl()
[all …]
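
The gup_benchmark.c hits above are one loop: __gup_benchmark_ioctl() walks the user range in nr_pages_per_call chunks, clamps the final chunk so nr never runs past the end, and hands each chunk to a get_user_pages_fast()/pin_user_pages() variant. A minimal userspace sketch of just that chunking arithmetic (hypothetical names, not the kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static void process_range(unsigned long addr, unsigned long size,
                              unsigned long nr_pages_per_call)
    {
        unsigned long end = addr + size;

        while (addr < end) {
            unsigned long nr = nr_pages_per_call;
            unsigned long next = addr + nr * PAGE_SIZE;

            if (next > end) {                  /* clamp the tail chunk */
                next = end;
                nr = (next - addr) / PAGE_SIZE;
            }
            printf("pin %lu pages at %#lx\n", nr, addr);  /* gup call stands here */
            addr = next;
        }
    }

    int main(void)
    {
        process_range(0x10000, 10 * PAGE_SIZE, 4);  /* chunks of 4, 4, 2 pages */
        return 0;
    }
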
gup.c
2105 static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, in undo_dev_pagemap() argument
2109 while ((*nr) - nr_start) { in undo_dev_pagemap()
2110 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2142 struct page **pages, int *nr) in gup_pte_range() argument
2145 int nr_start = *nr, ret = 0; in gup_pte_range()
2169 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2204 pages[*nr] = page; in gup_pte_range()
2205 (*nr)++; in gup_pte_range()
2230 struct page **pages, int *nr) in gup_pte_range() argument
2239 struct page **pages, int *nr) in __gup_device_huge() argument
[all …]
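
The gup.c hits show the fast-GUP rollback idiom: gup_pte_range() stores each pinned page at pages[*nr] and bumps the shared counter, while undo_dev_pagemap() walks *nr back down to nr_start and releases everything this call gained. A hypothetical userspace stand-in for that collect-with-rollback shape (malloc/free replace page pinning):

    #include <stdio.h>
    #include <stdlib.h>

    static void put_item(int *item) { free(item); }

    static void undo_collected(int *nr, int nr_start, int **items)
    {
        while (*nr - nr_start)              /* drop everything gained here */
            put_item(items[--(*nr)]);
    }

    static int collect_range(int want, int **items, int *nr)
    {
        int nr_start = *nr;

        for (int i = 0; i < want; i++) {
            int *item = malloc(sizeof(*item));

            if (!item) {                    /* failure: roll back this call */
                undo_collected(nr, nr_start, items);
                return -1;
            }
            *item = i;
            items[(*nr)++] = item;          /* mirrors pages[*nr] = page; (*nr)++ */
        }
        return 0;
    }

    int main(void)
    {
        int *items[8];
        int nr = 0;

        if (collect_range(4, items, &nr) == 0)
            printf("collected %d items\n", nr);
        while (nr)
            put_item(items[--nr]);
        return 0;
    }
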
mmu_gather.c
35 batch->nr = 0; in tlb_next_batch()
48 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_batch_pages_flush()
49 free_pages_and_swap_cache(batch->pages, batch->nr); in tlb_batch_pages_flush()
50 batch->nr = 0; in tlb_batch_pages_flush()
81 batch->pages[batch->nr++] = page; in __tlb_remove_page_size()
82 if (batch->nr == batch->max) { in __tlb_remove_page_size()
87 VM_BUG_ON_PAGE(batch->nr > batch->max, page); in __tlb_remove_page_size()
100 for (i = 0; i < batch->nr; i++) in __tlb_remove_table_free()
216 (*batch)->nr = 0; in tlb_remove_table()
219 (*batch)->tables[(*batch)->nr++] = table; in tlb_remove_table()
[all …]
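
mmu_gather.c uses nr as the fill level of a fixed-size page batch: __tlb_remove_page_size() appends until batch->nr hits batch->max, and tlb_batch_pages_flush() releases each batch and resets nr to 0. A small standalone sketch of that accumulate-then-flush pattern (hypothetical types; the real code also chains batches via batch->next):

    #include <stdio.h>

    #define BATCH_MAX 4

    struct batch {
        int nr;                  /* fill level, like batch->nr */
        int max;
        int items[BATCH_MAX];
    };

    static void flush_batch(struct batch *b)
    {
        for (int i = 0; i < b->nr; i++)   /* free_pages_and_swap_cache() analogue */
            printf("releasing item %d\n", b->items[i]);
        b->nr = 0;
    }

    static void batch_add(struct batch *b, int item)
    {
        b->items[b->nr++] = item;
        if (b->nr == b->max)              /* batch full: flush now */
            flush_batch(b);
    }

    int main(void)
    {
        struct batch b = { .nr = 0, .max = BATCH_MAX };

        for (int i = 0; i < 10; i++)
            batch_add(&b, i);
        flush_batch(&b);                  /* drain the partial tail */
        return 0;
    }
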
swap_state.c
62 #define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr)) argument
73 unsigned int i, j, nr; in total_swapcache_pages() local
88 nr = nr_swapper_spaces[i]; in total_swapcache_pages()
90 for (j = 0; j < nr; j++) in total_swapcache_pages()
135 unsigned long i, nr = thp_nr_pages(page); in add_to_swap_cache() local
142 page_ref_add(page, nr); in add_to_swap_cache()
152 for (i = 0; i < nr; i++) { in add_to_swap_cache()
165 address_space->nrpages += nr; in add_to_swap_cache()
166 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); in add_to_swap_cache()
167 ADD_CACHE_INFO(add_total, nr); in add_to_swap_cache()
[all …]
vmscan.c
159 } nr; member
325 unsigned long nr; in zone_reclaimable_pages() local
327 nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + in zone_reclaimable_pages()
330 nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + in zone_reclaimable_pages()
333 return nr; in zone_reclaimable_pages()
448 long nr; in do_shrink_slab() local
467 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); in do_shrink_slab()
469 total_scan = nr; in do_shrink_slab()
488 next_deferred = nr; in do_shrink_slab()
515 trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, in do_shrink_slab()
[all …]
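
In do_shrink_slab(), nr is the work deferred from earlier passes: atomic_long_xchg() claims it, it is added to this pass's scan target, and whatever cannot be done now is parked again as next_deferred. A hypothetical stand-in using C11 atomics (the kernel also weighs freeable object counts and batch sizes, elided here):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long deferred;                   /* carried between passes */

    static void shrink_pass(long fresh_work, long budget)
    {
        long nr = atomic_exchange(&deferred, 0);   /* claim deferred work */
        long total_scan = nr + fresh_work;
        long done = total_scan < budget ? total_scan : budget;
        long next_deferred = total_scan - done;

        if (next_deferred > 0)                     /* park the remainder */
            atomic_fetch_add(&deferred, next_deferred);
        printf("scanned %ld, deferred %ld\n", done, next_deferred);
    }

    int main(void)
    {
        shrink_pass(100, 64);   /* scans 64, defers 36 */
        shrink_pass(10, 64);    /* picks the 36 back up: scans 46 */
        return 0;
    }
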
swap_slots.c
171 cache->nr = 0; in alloc_swap_slot_cache()
203 swapcache_free_entries(cache->slots + cache->cur, cache->nr); in drain_slots_cache_cpu()
205 cache->nr = 0; in drain_slots_cache_cpu()
288 if (!use_swap_slot_cache || cache->nr) in refill_swap_slots_cache()
293 cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, in refill_swap_slots_cache()
296 return cache->nr; in refill_swap_slots_cache()
369 if (cache->nr) { in get_swap_page()
372 cache->nr--; in get_swap_page()
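
swap_slots.c treats nr as the remaining depth of a small per-cpu cache: get_swap_page() consumes from it while cache->nr is nonzero, and refill_swap_slots_cache() bulk-refills only once it is empty. A single-threaded userspace sketch of that consume/refill shape (hypothetical names; the kernel caches swap entries per cpu):

    #include <stdio.h>

    #define CACHE_SIZE 4

    struct slot_cache {
        int slots[CACHE_SIZE];
        int cur;    /* next slot to hand out, like cache->cur */
        int nr;     /* slots remaining, like cache->nr */
    };

    static int next_slot = 100;    /* stands in for the global allocator */

    static void refill_cache(struct slot_cache *c)
    {
        if (c->nr)                         /* only refill when empty */
            return;
        for (int i = 0; i < CACHE_SIZE; i++)
            c->slots[i] = next_slot++;     /* bulk get_swap_pages() analogue */
        c->cur = 0;
        c->nr = CACHE_SIZE;
    }

    static int get_slot(struct slot_cache *c)
    {
        refill_cache(c);
        c->nr--;
        return c->slots[c->cur++];
    }

    int main(void)
    {
        struct slot_cache c = { .cur = 0, .nr = 0 };

        for (int i = 0; i < 6; i++)        /* triggers one mid-run refill */
            printf("slot %d\n", get_slot(&c));
        return 0;
    }
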
mincore.c
74 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range() local
81 for (i = 0; i < nr; i++, pgoff++) in __mincore_unmapped_range()
84 for (i = 0; i < nr; i++) in __mincore_unmapped_range()
87 return nr; in __mincore_unmapped_range()
106 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range() local
110 memset(vec, 1, nr); in mincore_pte_range()
152 walk->private += nr; in mincore_pte_range()
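
The mincore.c hits are the standard page-count arithmetic: for a page-aligned range, nr = (end - addr) >> PAGE_SHIFT, and one result byte is written per page. A tiny self-contained check of that arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long addr = 0x10000, end = addr + 5 * PAGE_SIZE;
        unsigned long nr = (end - addr) >> PAGE_SHIFT;

        assert(nr == 5);    /* one vec byte per page, as in mincore_pte_range() */
        printf("range covers %lu pages\n", nr);
        return 0;
    }
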
rmap.c
1160 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap() local
1169 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1199 int nr = compound ? thp_nr_pages(page) : 1; in __page_add_new_anon_rmap() local
1216 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in __page_add_new_anon_rmap()
1229 int i, nr = 1; in page_add_file_rmap() local
1236 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_add_file_rmap()
1241 nr++; in page_add_file_rmap()
1244 nr++; in page_add_file_rmap()
1271 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1278 int i, nr = 1; in page_remove_file_rmap() local
[all …]
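
The rmap.c hits show the THP accounting shape: nr is 1 for a base page or the subpage count for a compound page, page_add_file_rmap() recounts it by inspecting each subpage, and the statistic (NR_ANON_MAPPED / NR_FILE_MAPPED) is then adjusted by nr in one batched update. A heavily simplified, hypothetical stand-in for that counting loop (the real code tests per-subpage mapcount transitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define SUBPAGES 8

    static long nr_file_mapped;    /* NR_FILE_MAPPED analogue */

    static void add_file_rmap(const bool *subpage_mapped, bool compound)
    {
        int nr = 1;                /* base page: one mapping */

        if (compound) {
            nr = 0;
            for (int i = 0; i < SUBPAGES; i++)   /* count mapped subpages */
                if (subpage_mapped[i])
                    nr++;
        }
        nr_file_mapped += nr;      /* single batched stat update */
    }

    int main(void)
    {
        bool mapped[SUBPAGES] = { true, true, false, true };

        add_file_rmap(mapped, true);
        printf("NR_FILE_MAPPED += %ld\n", nr_file_mapped);
        return 0;
    }
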
mapping_dirty_helpers.c
275 pgoff_t first_index, pgoff_t nr) in wp_shared_mapping_range() argument
280 WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops, in wp_shared_mapping_range()
324 pgoff_t first_index, pgoff_t nr, in clean_record_shared_mapping_range() argument
335 .start = none_set ? nr : *start, in clean_record_shared_mapping_range()
340 WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops, in clean_record_shared_mapping_range()
mm_init.c
151 s32 nr = num_present_cpus(); in mm_compute_batch() local
152 s32 batch = max_t(s32, nr*2, 32); in mm_compute_batch()
162 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); in mm_compute_batch()
164 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); in mm_compute_batch()
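
mm_compute_batch() sizes a per-cpu counter batch from two bounds: a CPU bound of at least 32 and at least 2*nr (nr being the present CPU count), and a memory bound of ram_pages/nr/256 (or /4 under a stricter overcommit policy) capped at INT_MAX. How the bounds are combined is elided in the hits above, so this standalone sketch only evaluates them:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t nr = 8;                  /* num_present_cpus() analogue */
        uint64_t ram_pages = 1048576;    /* 4 GiB of 4 KiB pages */

        int32_t batch = nr * 2 > 32 ? nr * 2 : 32;
        uint64_t memsized = ram_pages / nr / 256;

        if (memsized > INT_MAX)          /* min_t(u64, ..., INT_MAX) */
            memsized = INT_MAX;
        printf("cpu bound = %d, memory bound = %llu\n",
               batch, (unsigned long long)memsized);
        return 0;
    }
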
shmem.c
686 unsigned long nr = compound_nr(page); in shmem_add_to_page_cache() local
690 VM_BUG_ON_PAGE(index != round_down(index, nr), page); in shmem_add_to_page_cache()
695 page_ref_add(page, nr); in shmem_add_to_page_cache()
722 if (++i < nr) { in shmem_add_to_page_cache()
730 mapping->nrpages += nr; in shmem_add_to_page_cache()
731 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr); in shmem_add_to_page_cache()
732 __mod_lruvec_page_state(page, NR_SHMEM, nr); in shmem_add_to_page_cache()
745 page_ref_sub(page, nr); in shmem_add_to_page_cache()
869 pvec.nr = find_get_entries(mapping, index, in shmem_unlock_mapping()
871 if (!pvec.nr) in shmem_unlock_mapping()
[all …]
vmalloc.c
194 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pte_range() argument
208 struct page *page = pages[*nr]; in vmap_pte_range()
215 (*nr)++; in vmap_pte_range()
222 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pmd_range() argument
233 if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask)) in vmap_pmd_range()
240 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pud_range() argument
251 if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask)) in vmap_pud_range()
258 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_p4d_range() argument
269 if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask)) in vmap_p4d_range()
301 int nr = 0; in map_kernel_range_noflush() local
[all …]
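
The vmap_*_range() hits all share one shape: each page-table level splits its address range and recurses into the next level, while a single int *nr threads through every level to index the shared pages[] array. A sketch with two hypothetical levels standing in for the pte/pmd/pud/p4d chain:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define BLOCK_SIZE (8 * PAGE_SIZE)   /* one "pmd" covers 8 pages */

    static int map_pte_range(unsigned long addr, unsigned long end,
                             const char **pages, int *nr)
    {
        for (; addr < end; addr += PAGE_SIZE) {
            printf("map %s at %#lx\n", pages[*nr], addr);
            (*nr)++;                     /* same counter at every level */
        }
        return 0;
    }

    static int map_pmd_range(unsigned long addr, unsigned long end,
                             const char **pages, int *nr)
    {
        while (addr < end) {
            /* round up to the next block boundary, clamp to end */
            unsigned long next = (addr + BLOCK_SIZE) & ~(BLOCK_SIZE - 1);

            if (next > end)
                next = end;
            if (map_pte_range(addr, next, pages, nr))
                return -1;
            addr = next;
        }
        return 0;
    }

    int main(void)
    {
        const char *pages[] = { "pg0", "pg1", "pg2" };
        int nr = 0;

        map_pmd_range(0, 3 * PAGE_SIZE, pages, &nr);
        printf("mapped %d pages\n", nr);
        return 0;
    }
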
percpu.c
554 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) in pcpu_update_empty_pages() argument
556 chunk->nr_empty_pop_pages += nr; in pcpu_update_empty_pages()
558 pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr; in pcpu_update_empty_pages()
1115 unsigned long nr, in pcpu_find_zero_area() argument
1128 end = index + nr; in pcpu_find_zero_area()
1484 int nr = page_end - page_start; in pcpu_chunk_populated() local
1488 bitmap_set(chunk->populated, page_start, nr); in pcpu_chunk_populated()
1489 chunk->nr_populated += nr; in pcpu_chunk_populated()
1490 pcpu_nr_populated += nr; in pcpu_chunk_populated()
1492 pcpu_update_empty_pages(chunk, nr); in pcpu_chunk_populated()
[all …]
swap.c
233 release_pages(pvec->pages, pvec->nr); in pagevec_lru_move_fn()
1000 void release_pages(struct page **pages, int nr) in release_pages() argument
1009 for (i = 0; i < nr; i++) { in release_pages()
1232 pvec->nr = find_get_entries(mapping, start, nr_entries, in pagevec_lookup_entries()
1255 pvec->nr = j; in pagevec_remove_exceptionals()
1281 pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, in pagevec_lookup_range()
1291 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_tag()
1301 pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, in pagevec_lookup_range_nr_tag()
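
In swap.c, pvec->nr is the fill level of a pagevec: lookups store how many pages they found, and pagevec_remove_exceptionals() filters in place, compacting survivors and truncating with pvec->nr = j. A hypothetical stand-in for that fixed-array-plus-count idiom:

    #include <stdio.h>

    #define PVEC_SIZE 8

    struct vec {
        int nr;                  /* fill level, like pvec->nr */
        int items[PVEC_SIZE];
    };

    static void remove_odd(struct vec *v)
    {
        int j = 0;

        for (int i = 0; i < v->nr; i++)
            if ((v->items[i] & 1) == 0)   /* keep only "normal" entries */
                v->items[j++] = v->items[i];
        v->nr = j;                        /* truncate: mirrors pvec->nr = j */
    }

    int main(void)
    {
        struct vec v = { .nr = 5, .items = { 1, 2, 3, 4, 5 } };

        remove_odd(&v);
        for (int i = 0; i < v.nr; i++)
            printf("%d\n", v.items[i]);   /* prints 2, 4 */
        return 0;
    }
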
filemap.c
129 unsigned int nr = 1; in page_cache_delete() local
136 nr = compound_nr(page); in page_cache_delete()
141 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
150 mapping->nrexceptional += nr; in page_cache_delete()
159 mapping->nrpages -= nr; in page_cache_delete()
165 int nr; in unaccount_page_cache_page() local
206 nr = thp_nr_pages(page); in unaccount_page_cache_page()
208 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
210 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
1444 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) in clear_bit_unlock_is_negative_byte() argument
[all …]
migrate.c
389 int nr = thp_nr_pages(page); in migrate_page_move_mapping() local
425 page_ref_add(newpage, nr); /* add cache reference */ in migrate_page_move_mapping()
447 for (i = 1; i < nr; i++) { in migrate_page_move_mapping()
458 page_ref_unfreeze(page, expected_count - nr); in migrate_page_move_mapping()
481 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); in migrate_page_move_mapping()
482 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); in migrate_page_move_mapping()
484 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); in migrate_page_move_mapping()
485 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); in migrate_page_move_mapping()
488 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); in migrate_page_move_mapping()
489 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); in migrate_page_move_mapping()
[all …]
sparse.c
223 unsigned long nr, start_sec = pfn_to_section_nr(pfn); in subsection_map_init() local
228 for (nr = start_sec; nr <= end_sec; nr++) { in subsection_map_init()
234 ms = __nr_to_section(nr); in subsection_map_init()
237 pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr, in subsection_map_init()
highmem.c
331 unsigned long nr; in kunmap_high() local
340 nr = PKMAP_NR(vaddr); in kunmap_high()
347 switch (--pkmap_count[nr]) { in kunmap_high()
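
kunmap_high() turns the vaddr into a slot index with PKMAP_NR() and switches on the decremented use count for that slot. A hypothetical stand-in for the release idiom (note the kernel biases pkmap_count by one, so 1 means "cached but unused"; this sketch uses plain counts):

    #include <stdio.h>

    #define NSLOTS 4

    static int use_count[NSLOTS];

    static void release_slot(int nr)
    {
        switch (--use_count[nr]) {
        case 0:
            printf("slot %d now free, wake waiters\n", nr);
            break;
        case -1:
            printf("bug: slot %d released twice\n", nr);
            break;
        default:
            break;    /* still in use elsewhere */
        }
    }

    int main(void)
    {
        use_count[2] = 2;
        release_slot(2);    /* still in use */
        release_slot(2);    /* now free */
        return 0;
    }
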
mlock.c
299 int nr = pagevec_count(pvec); in __munlock_pagevec() local
300 int delta_munlocked = -nr; in __munlock_pagevec()
308 for (i = 0; i < nr; i++) { in __munlock_pagevec()
340 for (i = 0; i < nr; i++) { in __munlock_pagevec()
huge_memory.c
2382 static void remap_page(struct page *page, unsigned int nr) in remap_page() argument
2388 for (i = 0; i < nr; i++) in remap_page()
2466 unsigned int nr = thp_nr_pages(head); in __split_huge_page() local
2472 split_page_memcg(head, nr); in __split_huge_page()
2482 for (i = nr - 1; i >= 1; i--) { in __split_huge_page()
2502 split_page_owner(head, nr); in __split_huge_page()
2521 remap_page(head, nr); in __split_huge_page()
2529 for (i = 0; i < nr; i++) { in __split_huge_page()
2548 int i, compound, nr, ret; in total_mapcount() local
2556 nr = compound_nr(page); in total_mapcount()
[all …]
pagewalk.c
520 pgoff_t nr, const struct mm_walk_ops *ops, in walk_page_mapping() argument
534 first_index + nr - 1) { in walk_page_mapping()
540 cea = first_index + nr; in walk_page_mapping()
slab_common.c
108 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) in __kmem_cache_free_bulk() argument
112 for (i = 0; i < nr; i++) { in __kmem_cache_free_bulk()
120 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, in __kmem_cache_alloc_bulk() argument
125 for (i = 0; i < nr; i++) { in __kmem_cache_alloc_bulk()
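
The slab_common.c hits are the generic bulk fallbacks: __kmem_cache_free_bulk() frees nr objects one by one, and __kmem_cache_alloc_bulk() allocates one by one, undoing all partial progress on failure so the call is all-or-nothing. A userspace sketch of the same contract with malloc/free standing in for the slab allocator:

    #include <stdlib.h>

    static void free_bulk(size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++)
            free(p[i]);
    }

    static int alloc_bulk(size_t size, size_t nr, void **p)
    {
        for (size_t i = 0; i < nr; i++) {
            p[i] = malloc(size);
            if (!p[i]) {
                free_bulk(i, p);   /* roll back partial progress */
                return 0;          /* 0 objects, like the kernel API on failure */
            }
        }
        return (int)nr;
    }

    int main(void)
    {
        void *objs[16];

        if (alloc_bulk(64, 16, objs) == 16)
            free_bulk(16, objs);
        return 0;
    }
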
memcontrol.c
3316 void split_page_memcg(struct page *head, unsigned int nr) in split_page_memcg() argument
3325 for (i = 1; i < nr; i++) { in split_page_memcg()
3330 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
4004 unsigned long nr = 0; in mem_cgroup_node_nr_lru_pages() local
4013 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru); in mem_cgroup_node_nr_lru_pages()
4015 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); in mem_cgroup_node_nr_lru_pages()
4017 return nr; in mem_cgroup_node_nr_lru_pages()
4024 unsigned long nr = 0; in mem_cgroup_nr_lru_pages() local
4031 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
4033 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
[all …]
mempool.c
28 const int nr = pool->curr_nr; in poison_error() local
35 pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : ""); in poison_error()
slab.h
348 int idx, int nr) in mod_objcg_state() argument
356 mod_memcg_lruvec_state(lruvec, idx, nr); in mod_objcg_state()
