/mm/
D | gup.c |
    2105  static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,  in undo_dev_pagemap()
    2142  struct page **pages, int *nr)  in gup_pte_range()
    2230  struct page **pages, int *nr)  in gup_pte_range()
    2239  struct page **pages, int *nr)  in __gup_device_huge()
    2269  struct page **pages, int *nr)  in __gup_device_huge_pmd()
    2287  struct page **pages, int *nr)  in __gup_device_huge_pud()
    2305  struct page **pages, int *nr)  in __gup_device_huge_pmd()
    2313  struct page **pages, int *nr)  in __gup_device_huge_pud()
    2323  int nr;  in record_subpages()  local
    2341  struct page **pages, int *nr)  in gup_hugepte()
    [all …]

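The gup.c hits above are the lockless fast-GUP walkers: each one fills a caller-supplied `pages[]` array and bumps `*nr` for every page it manages to pin. Below is a minimal sketch of the public interface built on top of them; `pin_user_pages_fast()` and `unpin_user_pages()` are the real exported calls, while the helper name, the user address and the batch size are illustrative only.

```c
#include <linux/mm.h>

/*
 * Sketch: pin a short run of user pages for temporary kernel access,
 * then drop every pin.  uaddr is an illustrative user-space address.
 */
static int pin_user_range_demo(unsigned long uaddr)
{
	struct page *pages[4];
	int nr;					/* pages actually pinned */

	nr = pin_user_pages_fast(uaddr, ARRAY_SIZE(pages), FOLL_WRITE, pages);
	if (nr <= 0)
		return nr ? nr : -EFAULT;

	/* ... access pages[0..nr-1], e.g. via kmap_local_page() ... */

	unpin_user_pages(pages, nr);		/* one unpin per pinned page */
	return 0;
}
```
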
D | swap_state.c |
    62   #define ADD_CACHE_INFO(x, nr) data_race(swap_cache_info.x += (nr))  argument
    73   unsigned int i, j, nr;  in total_swapcache_pages()  local
    135  unsigned long i, nr = thp_nr_pages(page);  in add_to_swap_cache()  local
    188  int i, nr = thp_nr_pages(page);  in __delete_from_swap_cache()  local
    348  void free_pages_and_swap_cache(struct page **pages, int nr)  in free_pages_and_swap_cache()
    715  unsigned int i, nr;  in init_swap_address_space()  local

D | mincore.c |
    74   unsigned long nr = (end - addr) >> PAGE_SHIFT;  in __mincore_unmapped_range()  local
    106  int nr = (end - addr) >> PAGE_SHIFT;  in mincore_pte_range()  local

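Both mincore.c hits compute the same quantity: how many pages a byte range covers, via `nr = (end - addr) >> PAGE_SHIFT`. A tiny worked example of that arithmetic, assuming 4 KiB pages (`PAGE_SHIFT == 12`) and a page-aligned range:

```c
#include <stdio.h>

int main(void)
{
	/* mincore-style page count for a page-aligned [addr, end) range */
	unsigned long addr = 0x1000;	/* start of range         */
	unsigned long end  = 0x5000;	/* one past the last byte */
	unsigned long nr   = (end - addr) >> 12;	/* 0x4000 >> 12 */

	printf("%lu pages\n", nr);	/* prints "4 pages" */
	return 0;
}
```
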
D | rmap.c |
    1160  int nr = compound ? thp_nr_pages(page) : 1;  in do_page_add_anon_rmap()  local
    1199  int nr = compound ? thp_nr_pages(page) : 1;  in __page_add_new_anon_rmap()  local
    1229  int i, nr = 1;  in page_add_file_rmap()  local
    1278  int i, nr = 1;  in page_remove_file_rmap()  local
    1335  int i, nr;  in page_remove_anon_compound_rmap()  local

D | mapping_dirty_helpers.c |
    275  pgoff_t first_index, pgoff_t nr)  in wp_shared_mapping_range()
    324  pgoff_t first_index, pgoff_t nr,  in clean_record_shared_mapping_range()

D | vmalloc.c |
    194   unsigned long end, pgprot_t prot, struct page **pages, int *nr,  in vmap_pte_range()
    222   unsigned long end, pgprot_t prot, struct page **pages, int *nr,  in vmap_pmd_range()
    240   unsigned long end, pgprot_t prot, struct page **pages, int *nr,  in vmap_pud_range()
    258   unsigned long end, pgprot_t prot, struct page **pages, int *nr,  in vmap_p4d_range()
    301   int nr = 0;  in map_kernel_range_noflush()  local
    1362  unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;  in __purge_vmap_area_lazy()  local
    3477  unsigned int nr, *counters = m->private;  in show_numa_info()  local

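The vmap_pte_range()/vmap_pmd_range()/vmap_pud_range()/vmap_p4d_range() hits are the page-table walkers that install a `pages[]` array into kernel page tables, advancing `*nr` per page mapped; map_kernel_range_noflush() drives them and vmap() sits on top. Below is a minimal sketch of that exported interface, assuming a sleepable kernel context; vmap()/vunmap() and alloc_page()/__free_page() are the real APIs, the helper name and the two-page count are illustrative.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: map two physically unrelated pages virtually contiguously. */
static void *vmap_two_pages_demo(struct page *pages_out[2])
{
	void *virt;
	int i;

	for (i = 0; i < 2; i++) {
		pages_out[i] = alloc_page(GFP_KERNEL);
		if (!pages_out[i])
			goto free;
	}

	/* vmap() ends up in the same vmap_*_range() walkers listed above. */
	virt = vmap(pages_out, 2, VM_MAP, PAGE_KERNEL);
	if (!virt)
		goto free;
	return virt;	/* caller later does vunmap(virt) and __free_page() on both pages */

free:
	while (i--)
		__free_page(pages_out[i]);
	return NULL;
}
```
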
D | mm_init.c |
    151  s32 nr = num_present_cpus();  in mm_compute_batch()  local

D | percpu.c |
    554   static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)  in pcpu_update_empty_pages()
    1115  unsigned long nr,  in pcpu_find_zero_area()
    1484  int nr = page_end - page_start;  in pcpu_chunk_populated()  local
    1508  int nr = page_end - page_start;  in pcpu_chunk_depopulated()  local
    2026  int nr = min_t(int, re - rs, nr_to_pop);  in __pcpu_balance_workfn()  local

D | gup_benchmark.c |
    72  int nr;  in __gup_benchmark_ioctl()  local

D | vmscan.c |
    159   } nr;  member
    325   unsigned long nr;  in zone_reclaimable_pages()  local
    448   long nr;  in do_shrink_slab()  local
    2329  unsigned long *nr)  in get_scan_count()
    2533  unsigned long nr[NR_LRU_LISTS];  in shrink_lruvec()  local

D | slab_common.c |
    108  void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)  in __kmem_cache_free_bulk()
    120  int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,  in __kmem_cache_alloc_bulk()

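The two slab_common.c hits are the generic fallbacks behind the bulk slab interface, where `nr` is the number of objects handed over per call. A minimal sketch of the public wrappers follows, assuming `demo_cache` was created earlier with kmem_cache_create(); kmem_cache_alloc_bulk()/kmem_cache_free_bulk() are the real exported calls, the cache argument and batch size are illustrative.

```c
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Sketch: allocate and release 16 objects with one call each. */
static int bulk_demo(struct kmem_cache *demo_cache)
{
	void *objs[16];
	size_t nr = ARRAY_SIZE(objs);

	/* Returns the number of objects allocated: 0 on failure, nr on success. */
	if (kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, nr, objs) != nr)
		return -ENOMEM;

	/* ... use objs[0..nr-1] ... */

	kmem_cache_free_bulk(demo_cache, nr, objs);
	return 0;
}
```
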
D | filemap.c |
    129   unsigned int nr = 1;  in page_cache_delete()  local
    165   int nr;  in unaccount_page_cache_page()  local
    1444  static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)  in clear_bit_unlock_is_negative_byte()
    2237  unsigned long nr, ret;  in generic_file_buffered_read()  local

D | highmem.c |
    331  unsigned long nr;  in kunmap_high()  local

D | migrate.c |
    389   int nr = thp_nr_pages(page);  in migrate_page_move_mapping()  local
    996   int i, nr = compound_nr(newpage);  in move_to_new_page()  local
    1573  static int store_status(int __user *status, int start, int value, int nr)  in store_status()

D | mempool.c |
    28  const int nr = pool->curr_nr;  in poison_error()  local

D | pagewalk.c |
    520  pgoff_t nr, const struct mm_walk_ops *ops,  in walk_page_mapping()

D | huge_memory.c |
    2382  static void remap_page(struct page *page, unsigned int nr)  in remap_page()
    2466  unsigned int nr = thp_nr_pages(head);  in __split_huge_page()  local
    2548  int i, compound, nr, ret;  in total_mapcount()  local

D | mlock.c |
    299  int nr = pagevec_count(pvec);  in __munlock_pagevec()  local

D | page_owner.c |
    241  void __split_page_owner(struct page *page, unsigned int nr)  in __split_page_owner()

D | memcontrol.c |
    3316  void split_page_memcg(struct page *head, unsigned int nr)  in split_page_memcg()
    4004  unsigned long nr = 0;  in mem_cgroup_node_nr_lru_pages()  local
    4024  unsigned long nr = 0;  in mem_cgroup_nr_lru_pages()  local
    4126  unsigned long nr;  in memcg_stat_show()  local
    4160  unsigned long nr;  in memcg_stat_show()  local

D | swap.c |
    1000  void release_pages(struct page **pages, int nr)  in release_pages()

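swap.c's single hit, release_pages(), is the batched counterpart of put_page(): it drops one reference on each of `nr` pages, which is why batch teardown paths such as free_pages_and_swap_cache() (listed above under swap_state.c) hand it whole arrays instead of looping. A minimal sketch follows, assuming each page in the array already holds a reference, taken here with find_get_page(); release_pages() and find_get_page() are the real APIs, the mapping, index range and array size are illustrative.

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch: look up a run of page-cache pages, then drop all the refs at once. */
static void release_batch_demo(struct address_space *mapping, pgoff_t start)
{
	struct page *pages[8];
	int nr = 0;
	pgoff_t idx;

	for (idx = start; idx < start + ARRAY_SIZE(pages); idx++) {
		struct page *page = find_get_page(mapping, idx);

		if (page)
			pages[nr++] = page;	/* each hit holds one reference */
	}

	/* ... inspect pages[0..nr-1] ... */

	release_pages(pages, nr);		/* one call instead of nr put_page()s */
}
```
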
D | shmem.c |
    686   unsigned long nr = compound_nr(page);  in shmem_add_to_page_cache()  local
    1594  int nr;  in shmem_alloc_and_acct_page()  local
    2567  unsigned long nr, ret;  in shmem_file_read_iter()  local

D | sparse.c |
    223  unsigned long nr, start_sec = pfn_to_section_nr(pfn);  in subsection_map_init()  local

D | slab.h |
    348  int idx, int nr)  in mod_objcg_state()

D | ksm.c |
    1364  int nr = 0;  in stable_node_dup()  local