/mm/
D | gup.c
      54  void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,  in put_user_pages_dirty_lock()
     108  void put_user_pages(struct page **pages, unsigned long npages)  in put_user_pages()
     781  unsigned int gup_flags, struct page **pages,  in __get_user_pages()
    1002  struct page **pages,  in __get_user_pages_locked()
    1151  unsigned int gup_flags, struct page **pages,  in get_user_pages_remote()
    1318  unsigned long nr_pages, struct page **pages,  in __get_user_pages_locked()
    1437  struct page **pages,  in check_and_migrate_cma_pages()
    1524  struct page **pages,  in check_and_migrate_cma_pages()
    1540  struct page **pages,  in __gup_longterm_locked()
    1591  struct page **pages,  in __gup_longterm_locked()
    [all …]

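The gup.c hits above all belong to the get_user_pages ("GUP") family, which pins user-space pages so the kernel can access their contents directly. A minimal sketch of the usual pin/use/release pattern, assuming a kernel-module context on the kernel version this listing reflects (later kernels rename put_user_pages() to unpin_user_pages()); the buffer address and page count are placeholders, not taken from gup.c:

#include <linux/mm.h>
#include <linux/slab.h>

/* Pin 'nr' user pages starting at 'uaddr' for writing, touch them, release them. */
static int pin_user_buffer_demo(unsigned long uaddr, int nr)
{
        struct page **pages;
        int pinned, i;

        pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /* FOLL_WRITE: we intend to write into the pinned pages. */
        pinned = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        for (i = 0; i < pinned; i++) {
                /* ... kmap()/kunmap() the page and read or write it ... */
        }

        /* Drop the references taken above (gup.c line 108). */
        put_user_pages(pages, pinned);
        kfree(pages);
        return 0;
}
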
D | percpu-vm.c
      33  static struct page **pages;  in pcpu_get_pages() local
      54  struct page **pages, int page_start, int page_end)  in pcpu_free_pages()
      82  struct page **pages, int page_start, int page_end,  in pcpu_alloc_pages()
     153  struct page **pages, int page_start, int page_end)  in pcpu_unmap_pages()
     192  static int __pcpu_map_pages(unsigned long addr, struct page **pages,  in __pcpu_map_pages()
     214  struct page **pages, int page_start, int page_end)  in pcpu_map_pages()
     278  struct page **pages;  in pcpu_populate_chunk() local
     311  struct page **pages;  in pcpu_depopulate_chunk() local

D | frame_vector.c
     126  struct page **pages;  in put_vaddr_frames() local
     158  struct page **pages;  in frame_vector_to_pages() local
     184  struct page **pages;  in frame_vector_to_pfns() local

D | mprotect.c
      44  unsigned long pages = 0;  in change_pte_range() local
     170  unsigned long pages = 0;  in change_pmd_range() local
     233  unsigned long pages = 0;  in change_pud_range() local
     253  unsigned long pages = 0;  in change_p4d_range() local
     275  unsigned long pages = 0;  in change_protection_range() local
     301  unsigned long pages;  in change_protection() local

D | mincore.c
     207  static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)  in do_mincore()
     218  unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);  in do_mincore() local
     256  unsigned long pages;  in SYSCALL_DEFINE3() local

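mincore.c implements the mincore(2) system call via do_mincore() and the SYSCALL_DEFINE3() entry listed above: for each page of a range it reports whether the page is currently resident. A small user-space sketch (the buffer size and the number of pages touched are arbitrary demo values):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        size_t len = 8 * page_size;
        unsigned char *vec;
        char *buf;
        size_t i;

        buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }

        memset(buf, 0, 4 * page_size);          /* fault in the first four pages only */

        vec = malloc(len / page_size);          /* one status byte per page */
        if (!vec || mincore(buf, len, vec) != 0) { perror("mincore"); return 1; }

        for (i = 0; i < len / page_size; i++)
                printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

        free(vec);
        munmap(buf, len);
        return 0;
}
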
D | readahead.c
      65  struct list_head *pages)  in read_cache_pages_invalidate_pages()
      88  int read_cache_pages(struct address_space *mapping, struct list_head *pages,  in read_cache_pages()
     117  struct list_head *pages, unsigned int nr_pages, gfp_t gfp)  in read_pages()

D | balloon_compaction.c
      41  struct list_head *pages)  in balloon_page_list_enqueue()
      77  struct list_head *pages, size_t n_req_pages)  in balloon_page_list_dequeue()

D | swap.c
     126  void put_pages_list(struct list_head *pages)  in put_pages_list()
     152  struct page **pages)  in get_kernel_pages()
     179  int get_kernel_page(unsigned long start, int write, struct page **pages)  in get_kernel_page()
     760  void release_pages(struct page **pages, int nr)  in release_pages()

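release_pages() at line 760 drops one reference on each page in an array, batching work that a loop of individual put_page() calls would repeat. A hedged kernel-side sketch of that pattern (the function name and the eight-page batch are invented for illustration):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

static void drop_page_batch_demo(void)
{
        struct page *pages[8];
        int i, n = 0;

        for (i = 0; i < 8; i++) {
                pages[n] = alloc_page(GFP_KERNEL);
                if (pages[n])
                        n++;
        }

        /* Put every page in the batch with one call (swap.c line 760). */
        release_pages(pages, n);
}
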
D | cma_debug.c
     124  int pages = val;  in cma_free_write() local
     156  int pages = val;  in cma_alloc_write() local

D | vmalloc.c
     138  unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pte_range()
     164  unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pmd_range()
     181  unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_pud_range()
     198  unsigned long end, pgprot_t prot, struct page **pages, int *nr)  in vmap_p4d_range()
     221  pgprot_t prot, struct page **pages)  in vmap_page_range_noflush()
     242  pgprot_t prot, struct page **pages)  in vmap_page_range()
    1777  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)  in vm_map_ram()
    1962  pgprot_t prot, struct page **pages)  in map_kernel_range_noflush()
    2005  int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)  in map_vm_area()
    2369  void *vmap(struct page **pages, unsigned int count,  in vmap()
    [all …]

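The vmalloc.c hits include vmap() and vm_map_ram(), which take an array of struct page pointers and map them into one virtually contiguous kernel address range. A minimal vmap()/vunmap() sketch, assuming a kernel-module context (the function name and the four-page batch are illustrative only):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void vmap_demo(void)
{
        struct page *pages[4] = { NULL };
        void *va;
        int i;

        for (i = 0; i < 4; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out;
        }

        /* VM_MAP + PAGE_KERNEL: an ordinary read/write kernel mapping. */
        va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
        if (va) {
                memset(va, 0, 4 * PAGE_SIZE);   /* one contiguous view over four scattered pages */
                vunmap(va);                     /* tears down the mapping, not the pages */
        }
out:
        for (i = 0; i < 4; i++)
                if (pages[i])
                        __free_page(pages[i]);
}
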
D | migrate.c
    1596  const void __user * __user *pages,  in do_pages_move()
    1693  const void __user **pages, int *status)  in do_pages_stat_array()
    1732  const void __user * __user *pages,  in do_pages_stat()
    1766  const void __user * __user *pages,  in kernel_move_pages()
    1827  SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,  in SYSCALL_DEFINE6() argument
    1842  const void __user * __user *pages;  in COMPAT_SYSCALL_DEFINE6() local

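The migrate.c entries are the kernel side of the move_pages(2) system call, which migrates individual pages of a process to chosen NUMA nodes. A user-space sketch using the libnuma wrapper (link with -lnuma; the single-page buffer and target node 0 are arbitrary demo choices, and the call needs a kernel built with NUMA/migration support):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        void *buf;
        void *pages[1];
        int nodes[1] = { 0 };           /* ask for NUMA node 0 */
        int status[1];

        if (posix_memalign(&buf, page_size, page_size))
                return 1;
        memset(buf, 0, page_size);      /* fault the page in first */

        pages[0] = buf;
        /* pid 0 = the calling process; MPOL_MF_MOVE moves only pages
         * mapped exclusively by this process. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) != 0) {
                perror("move_pages");
                return 1;
        }
        printf("page now on node %d (negative values are errnos)\n", status[0]);

        free(buf);
        return 0;
}
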
D | cma.c
      80  unsigned long pages)  in cma_bitmap_pages_to_bits()
     517  bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)  in cma_release()

D | util.c
     424  int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,  in __account_locked_vm()
     467  int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)  in account_locked_vm()
     787  int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)  in __vm_enough_memory()

D | swap_state.c
     288  void free_pages_and_swap_cache(struct page **pages, int nr)  in free_pages_and_swap_cache()
     467  unsigned int pages, last_ra;  in __swapin_nr_pages() local
     504  unsigned int hits, pages, max_pages;  in swapin_nr_pages() local

D | percpu-km.c
      51  struct page *pages;  in pcpu_create_chunk() local

D | process_vm_access.c
      30  static int process_vm_rw_pages(struct page **pages,  in process_vm_rw_pages()
      99  int pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec() local

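process_vm_access.c backs the process_vm_readv(2) and process_vm_writev(2) system calls, which copy data directly between two processes' address spaces through iovec descriptions. A user-space sketch; to stay self-contained it reads from the calling process itself, whereas a real caller would pass another pid it is permitted to ptrace:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char src[] = "hello from the 'remote' side";
        char dst[sizeof(src)];
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };
        ssize_t n;

        n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
                perror("process_vm_readv");
                return 1;
        }
        printf("copied %zd bytes: \"%s\"\n", n, dst);
        return 0;
}
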
D | nommu.c
     330  void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)  in vmap()
     343  void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)  in vm_map_ram()
     389  int vm_map_pages(struct vm_area_struct *vma, struct page **pages,  in vm_map_pages()
     396  int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,  in vm_map_pages_zero()

D | gup_benchmark.c
      28  struct page **pages;  in __gup_benchmark_ioctl() local

D | swap_slots.c
      95  long pages;  in check_cache_active() local

D | zpool.c
     315  int zpool_shrink(struct zpool *zpool, unsigned int pages,  in zpool_shrink()

D | workingset.c
     398  unsigned long pages;  in count_shadow_nodes() local

D | zsmalloc.c
    1028  struct page *pages[])  in create_page_chain()
    1068  struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];  in alloc_zspage() local
    1303  struct page *pages[2];  in zs_map_object() local
    1373  struct page *pages[2];  in zs_unmap_object() local
    1896  struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };  in replace_sub_page() local

D | khugepaged.c
     184  unsigned long pages;  in pages_to_scan_store() local
    1902  static unsigned int khugepaged_scan_mm_slot(unsigned int pages,  in khugepaged_scan_mm_slot()
    2046  unsigned int pages = khugepaged_pages_to_scan;  in khugepaged_do_scan() local

D | shmem.c
     200  static inline int shmem_acct_block(unsigned long flags, long pages)  in shmem_acct_block()
     209  static inline void shmem_unacct_blocks(unsigned long flags, long pages)  in shmem_unacct_blocks()
     215  static inline bool shmem_inode_acct_block(struct inode *inode, long pages)  in shmem_inode_acct_block()
     237  static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)  in shmem_inode_unacct_blocks()
     314  bool shmem_charge(struct inode *inode, long pages)  in shmem_charge()
     334  void shmem_uncharge(struct inode *inode, long pages)  in shmem_uncharge()

D | highmem.c
     116  unsigned int pages = 0;  in nr_free_highpages() local