
Searched refs:pages (Results 1 – 25 of 51) sorted by relevance


/mm/
percpu-vm.c
33 static struct page **pages; in pcpu_get_pages() local
34 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
38 if (!pages) in pcpu_get_pages()
39 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
40 return pages; in pcpu_get_pages()
54 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
61 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
82 struct page **pages, int page_start, int page_end, in pcpu_alloc_pages() argument
92 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
103 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
gup.c
298 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, in unpin_user_pages_dirty_lock() argument
310 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
315 struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
352 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
369 unpin_user_page(pages[index]); in unpin_user_pages()
1063 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1075 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1096 pages ? &pages[i] : NULL); in __get_user_pages()
1112 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1166 if (pages) { in __get_user_pages()
[all …]
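
The gup.c hits above are the pin/unpin side of get_user_pages (GUP). A minimal sketch, not kernel code, of how a driver of roughly this kernel vintage might use the API, assuming pin_user_pages_fast() and unpin_user_pages_dirty_lock() as declared in <linux/mm.h>; the demo_ name and error handling are illustrative:

#include <linux/mm.h>

/* Hedged sketch: pin a user buffer, use it, then dirty and unpin it.
 * pin_user_pages_fast() implies FOLL_PIN and, unlike pin_user_pages(),
 * needs no mmap_lock held by the caller. */
static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int pinned;

	pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kvfree(pages);
		return pinned;
	}

	/* ... hand pages[0..pinned-1] to the device here ... */

	/* Mark dirty and release in one call, as the gup.c hits above do. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	kvfree(pages);
	return 0;
}
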
gup_benchmark.c
24 static void put_back_pages(unsigned int cmd, struct page **pages, in put_back_pages() argument
33 put_page(pages[i]); in put_back_pages()
39 unpin_user_pages(pages, nr_pages); in put_back_pages()
44 static void verify_dma_pinned(unsigned int cmd, struct page **pages, in verify_dma_pinned() argument
55 page = pages[i]; in verify_dma_pinned()
73 struct page **pages; in __gup_benchmark_ioctl() local
82 pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL); in __gup_benchmark_ioctl()
83 if (!pages) in __gup_benchmark_ioctl()
110 pages + i); in __gup_benchmark_ioctl()
113 nr = get_user_pages(addr, nr, gup->flags, pages + i, in __gup_benchmark_ioctl()
[all …]
frame_vector.c
108 struct page **pages; in put_vaddr_frames() local
112 pages = frame_vector_pages(vec); in put_vaddr_frames()
118 if (WARN_ON(IS_ERR(pages))) in put_vaddr_frames()
121 unpin_user_pages(pages, vec->nr_frames); in put_vaddr_frames()
140 struct page **pages; in frame_vector_to_pages() local
148 pages = (struct page **)nums; in frame_vector_to_pages()
150 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
166 struct page **pages; in frame_vector_to_pfns() local
170 pages = (struct page **)(vec->ptrs); in frame_vector_to_pfns()
171 nums = (unsigned long *)pages; in frame_vector_to_pfns()
[all …]
mincore.c
184 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) in do_mincore() argument
193 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
195 unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE); in do_mincore() local
196 memset(vec, 1, pages); in do_mincore()
197 return pages; in do_mincore()
233 unsigned long pages; in SYSCALL_DEFINE3() local
247 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
248 pages += (offset_in_page(len)) != 0; in SYSCALL_DEFINE3()
250 if (!access_ok(vec, pages)) in SYSCALL_DEFINE3()
258 while (pages) { in SYSCALL_DEFINE3()
[all …]
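
The SYSCALL_DEFINE3 lines above show the classic round-up of a byte length to pages (len >> PAGE_SHIFT, plus one if a partial page remains). A hedged userspace sketch of the same rounding feeding mincore(2); the buffer size is arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	size_t len = 3 * psize + 123;                    /* deliberately not page-aligned */
	size_t pages = len / psize + (len % psize != 0); /* round up, as the syscall does */
	unsigned char *vec = malloc(pages);
	void *buf = mmap(NULL, pages * psize, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (!vec || buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);                             /* fault every page in */
	if (mincore(buf, len, vec) != 0) {
		perror("mincore");
		return 1;
	}
	for (size_t i = 0; i < pages; i++)
		printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "absent");
	return 0;
}
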
mprotect.c
44 unsigned long pages = 0; in change_pte_range() local
141 pages++; in change_pte_range()
177 pages++; in change_pte_range()
184 return pages; in change_pte_range()
218 unsigned long pages = 0; in change_pmd_range() local
259 pages += HPAGE_PMD_NR; in change_pmd_range()
271 pages += this_pages; in change_pmd_range()
281 return pages; in change_pmd_range()
290 unsigned long pages = 0; in change_pud_range() local
297 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
[all …]
balloon_compaction.c
41 struct list_head *pages) in balloon_page_list_enqueue() argument
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
77 struct list_head *pages, size_t n_req_pages) in balloon_page_list_dequeue() argument
84 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_list_dequeue()
104 list_add(&page->lru, pages); in balloon_page_list_dequeue()
180 LIST_HEAD(pages); in balloon_page_dequeue()
183 n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1); in balloon_page_dequeue()
194 if (unlikely(list_empty(&b_dev_info->pages) && in balloon_page_dequeue()
200 return list_first_entry(&pages, struct page, lru); in balloon_page_dequeue()
226 list_add(&page->lru, &b_dev_info->pages); in balloon_page_putback()
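
balloon_page_list_enqueue()/balloon_page_list_dequeue() above are the batch interface a balloon driver such as virtio_balloon drives. A minimal sketch, assuming the declarations in <linux/balloon_compaction.h> and an initialized balloon_dev_info; the demo_ wrappers are hypothetical:

#include <linux/balloon_compaction.h>

/* Hedged sketch: batch-move pages into and out of a balloon. */
static size_t demo_inflate(struct balloon_dev_info *b_dev_info,
			   struct list_head *newly_allocated)
{
	/* Moves every page on the caller's list onto b_dev_info->pages. */
	return balloon_page_list_enqueue(b_dev_info, newly_allocated);
}

static size_t demo_deflate(struct balloon_dev_info *b_dev_info,
			   struct list_head *out, size_t n_req_pages)
{
	/* Detaches up to n_req_pages balloon pages onto the out list. */
	return balloon_page_list_dequeue(b_dev_info, out, n_req_pages);
}
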
swap.c
144 void put_pages_list(struct list_head *pages) in put_pages_list() argument
146 while (!list_empty(pages)) { in put_pages_list()
149 victim = lru_to_page(pages); in put_pages_list()
170 struct page **pages) in get_kernel_pages() argument
178 pages[seg] = kmap_to_page(kiov[seg].iov_base); in get_kernel_pages()
179 get_page(pages[seg]); in get_kernel_pages()
197 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page() argument
204 return get_kernel_pages(&kiov, 1, write, pages); in get_kernel_page()
218 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
233 release_pages(pvec->pages, pvec->nr); in pagevec_lru_move_fn()
[all …]
percpu-km.c
52 struct page *pages; in pcpu_create_chunk() local
60 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
61 if (!pages) { in pcpu_create_chunk()
67 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
69 chunk->data = pages; in pcpu_create_chunk()
70 chunk->base_addr = page_address(pages); in pcpu_create_chunk()
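
pcpu_create_chunk() above grabs one physically contiguous high-order block and addresses it through the linear map. A hedged sketch of that allocation pattern, using alloc_pages() and order_base_2() from <linux/log2.h>; the demo_ name is illustrative:

#include <linux/gfp.h>
#include <linux/log2.h>
#include <linux/mm.h>

/* Hedged sketch: one 2^order allocation, addressed via the linear map. */
static void *demo_alloc_contig(unsigned int nr_pages, struct page **pagep)
{
	unsigned int order = order_base_2(nr_pages); /* round up to a power of two */
	struct page *pages = alloc_pages(GFP_KERNEL, order);

	if (!pages)
		return NULL;
	*pagep = pages;
	return page_address(pages);  /* no vmalloc mapping needed */
}
/* Pair with __free_pages(*pagep, order_base_2(nr_pages)) on teardown. */
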
Kconfig
223 with the reduced number of transparent huge pages that could be used
225 pages enlisted as being part of memory balloon devices avoids the
252 free pages from the buddy allocator for the purpose of reporting
253 those pages to another entity, such as a hypervisor, so that the
264 Allows the migration of the physical location of pages of processes
266 two situations. The first is on NUMA systems to put pages nearer
268 pages as migration can relocate pages to satisfy a huge page
313 mergeable. When it finds pages of identical content, it replaces
327 from userspace allocation. Keeping a user from writing to low pages
356 tristate "HWPoison pages injector"
[all …]
swap_state.c
348 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache() argument
350 struct page **pagep = pages; in free_pages_and_swap_cache()
577 unsigned int pages, last_ra; in __swapin_nr_pages() local
584 pages = hits + 2; in __swapin_nr_pages()
585 if (pages == 2) { in __swapin_nr_pages()
592 pages = 1; in __swapin_nr_pages()
595 while (roundup < pages) in __swapin_nr_pages()
597 pages = roundup; in __swapin_nr_pages()
600 if (pages > max_pages) in __swapin_nr_pages()
601 pages = max_pages; in __swapin_nr_pages()
[all …]
readahead.c
67 struct list_head *pages) in read_cache_pages_invalidate_pages() argument
71 while (!list_empty(pages)) { in read_cache_pages_invalidate_pages()
72 victim = lru_to_page(pages); in read_cache_pages_invalidate_pages()
90 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
96 while (!list_empty(pages)) { in read_cache_pages()
97 page = lru_to_page(pages); in read_cache_pages()
108 read_cache_pages_invalidate_pages(mapping, pages); in read_cache_pages()
127 static void read_pages(struct readahead_control *rac, struct list_head *pages, in read_pages() argument
147 aops->readpages(rac->file, rac->mapping, pages, in read_pages()
150 put_pages_list(pages); in read_pages()
[all …]
util.c
472 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, in __account_locked_vm() argument
484 if (locked_vm + pages > limit) in __account_locked_vm()
488 mm->locked_vm = locked_vm + pages; in __account_locked_vm()
490 WARN_ON_ONCE(pages > locked_vm); in __account_locked_vm()
491 mm->locked_vm = locked_vm - pages; in __account_locked_vm()
495 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, in __account_locked_vm()
515 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) in account_locked_vm() argument
519 if (pages == 0 || !mm) in account_locked_vm()
523 ret = __account_locked_vm(mm, pages, inc, current, in account_locked_vm()
975 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
[all …]
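
__account_locked_vm()/account_locked_vm() above charge pinned pages against RLIMIT_MEMLOCK. A minimal sketch of the charge/uncharge pairing, assuming the account_locked_vm() declaration in <linux/mm.h>; the demo_ function is hypothetical:

#include <linux/mm.h>

/* Hedged sketch: charge, pin, later uncharge. */
static int demo_charge_locked(struct mm_struct *mm, unsigned long nr_pages)
{
	int ret;

	ret = account_locked_vm(mm, nr_pages, true);  /* inc: charge the pages */
	if (ret)
		return ret;   /* -ENOMEM once over the RLIMIT_MEMLOCK limit */

	/* ... pin the pages here; on failure, uncharge before returning ... */

	account_locked_vm(mm, nr_pages, false);       /* dec: uncharge the pages */
	return 0;
}
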
workingset.c
463 unsigned long pages; in count_shadow_nodes() local
495 for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) in count_shadow_nodes()
496 pages += lruvec_page_state_local(lruvec, in count_shadow_nodes()
498 pages += lruvec_page_state_local( in count_shadow_nodes()
500 pages += lruvec_page_state_local( in count_shadow_nodes()
504 pages = node_present_pages(sc->nid); in count_shadow_nodes()
506 max_nodes = pages >> (XA_CHUNK_SHIFT - 3); in count_shadow_nodes()
Kconfig.debug
17 Unmap pages from the kernel linear mapping after free_pages().
22 pages are being allocated and freed, as unexpected state changes
29 fill the pages with poison patterns after free_pages() and verify
33 pages are not saved to the suspend image.
36 allowing the kernel mapping to be backed by large pages on some
82 bool "Poison pages after freeing"
84 Fill the pages with poison patterns after free_pages() and verify
93 If you are only interested in sanitization of freed pages without
133 <arch>/mm: Checked W+X mappings: passed, no W+X pages found.
137 <arch>/mm: Checked W+X mappings: failed, <N> W+X pages found.
vmalloc.c
194 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pte_range() argument
208 struct page *page = pages[*nr]; in vmap_pte_range()
222 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pmd_range() argument
233 if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask)) in vmap_pmd_range()
240 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_pud_range() argument
251 if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask)) in vmap_pud_range()
258 unsigned long end, pgprot_t prot, struct page **pages, int *nr, in vmap_p4d_range() argument
269 if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask)) in vmap_p4d_range()
294 pgprot_t prot, struct page **pages) in map_kernel_range_noflush() argument
310 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); in map_kernel_range_noflush()
[all …]
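
The vmap_pte/pmd/pud/p4d_range helpers above walk the page tables behind vmap(), the public API that maps an array of struct page into one contiguous kernel virtual range. A hedged sketch; the demo_ wrapper is hypothetical:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hedged sketch: map count pages contiguously; returns NULL on failure. */
static void *demo_map_pages(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}
/* Undo with vunmap(addr); the backing pages themselves are not freed. */
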
zsmalloc.c
1024 struct page *pages[]) in create_page_chain() argument
1040 page = pages[i]; in create_page_chain()
1064 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; in alloc_zspage() local
1080 dec_zone_page_state(pages[i], NR_ZSPAGES); in alloc_zspage()
1081 __free_page(pages[i]); in alloc_zspage()
1088 pages[i] = page; in alloc_zspage()
1091 create_page_chain(class, zspage, pages); in alloc_zspage()
1133 struct page *pages[2], int off, int size) in __zs_map_object()
1150 addr = kmap_atomic(pages[0]); in __zs_map_object()
1153 addr = kmap_atomic(pages[1]); in __zs_map_object()
[all …]
truncate.c
71 if (xa_is_value(pvec->pages[j])) in truncate_exceptional_pvec_entries()
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
338 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
362 truncate_cleanup_page(locked_pvec.pages[i]); in truncate_inode_pages_range()
365 unlock_page(locked_pvec.pages[i]); in truncate_inode_pages_range()
430 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
543 struct page *page = pvec.pages[i]; in __invalidate_mapping_pages()
723 struct page *page = pvec.pages[i]; in invalidate_inode_pages2_range()
cma.c
88 unsigned long pages) in cma_bitmap_pages_to_bits() argument
90 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
601 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
605 if (!cma || !pages) in cma_release()
608 pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count); in cma_release()
610 pfn = page_to_pfn(pages); in cma_release()
619 trace_cma_release(cma->name, pfn, pages, count); in cma_release()
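
cma_release() above is the free half of the CMA API; the matching allocator is cma_alloc(). A minimal sketch, assuming the declarations in <linux/cma.h> and a struct cma region set up elsewhere (e.g. via cma_declare_contiguous()); the demo_ names are illustrative:

#include <linux/cma.h>

/* Hedged sketch of paired CMA allocation and release, both in pages. */
static struct page *demo_cma_get(struct cma *cma, size_t count)
{
	/* align is a page order; 0 requests no extra alignment. */
	return cma_alloc(cma, count, 0, false /* no_warn */);
}

static void demo_cma_put(struct cma *cma, struct page *pages, size_t count)
{
	/* cma_release() returns false if the pages are not from this region. */
	cma_release(cma, pages, count);
}
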
frontswap.c
389 unsigned long pages = 0, pages_to_unuse = 0; in __frontswap_unuse_pages() local
395 pages = pages_to_unuse = total_pages_to_unuse; in __frontswap_unuse_pages()
397 pages = si_frontswap_pages; in __frontswap_unuse_pages()
401 if (security_vm_enough_memory_mm(current->mm, pages)) { in __frontswap_unuse_pages()
405 vm_unacct_memory(pages); in __frontswap_unuse_pages()
cma_debug.c
124 int pages = val; in cma_free_write() local
127 return cma_free_mem(cma, pages); in cma_free_write()
156 int pages = val; in cma_alloc_write() local
159 return cma_alloc_mem(cma, pages); in cma_alloc_write()
process_vm_access.c
28 static int process_vm_rw_pages(struct page **pages, in process_vm_rw_pages() argument
36 struct page *page = *pages++; in process_vm_rw_pages()
84 / sizeof(struct pages *); in process_vm_rw_single_vec()
189 sizeof(struct pages *)*nr_pages), in process_vm_rw_core()
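
process_vm_rw_pages() above is the kernel side of process_vm_readv(2)/process_vm_writev(2). A hedged userspace sketch that reads from our own address space, which keeps it self-contained; a real caller would pass another process's pid:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello from the 'remote' side";
	char dst[sizeof(src)] = { 0 };
	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* pid = getpid(): copying within one process still takes the same path. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}
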
swap_slots.c
103 long pages; in check_cache_active() local
108 pages = get_nr_swap_pages(); in check_cache_active()
110 if (pages > num_online_cpus() * in check_cache_active()
117 if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE) in check_cache_active()
shmem.c
208 static inline int shmem_acct_block(unsigned long flags, long pages) in shmem_acct_block() argument
214 pages * VM_ACCT(PAGE_SIZE)); in shmem_acct_block()
217 static inline void shmem_unacct_blocks(unsigned long flags, long pages) in shmem_unacct_blocks() argument
220 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); in shmem_unacct_blocks()
223 static inline bool shmem_inode_acct_block(struct inode *inode, long pages) in shmem_inode_acct_block() argument
228 if (shmem_acct_block(info->flags, pages)) in shmem_inode_acct_block()
233 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
235 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
241 shmem_unacct_blocks(info->flags, pages); in shmem_inode_acct_block()
245 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages) in shmem_inode_unacct_blocks() argument
[all …]
migrate.c
1707 const void __user * __user *pages, in do_pages_move() argument
1724 if (get_user(p, pages + i)) in do_pages_move()
1793 const void __user **pages, int *status) in do_pages_stat_array() argument
1800 unsigned long addr = (unsigned long)(*pages); in do_pages_stat_array()
1820 pages++; in do_pages_stat_array()
1832 const void __user * __user *pages, in do_pages_stat() argument
1846 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) in do_pages_stat()
1854 pages += chunk_nr; in do_pages_stat()
1913 const void __user * __user *pages, in kernel_move_pages() argument
1933 err = do_pages_move(mm, task_nodes, nr_pages, pages, in kernel_move_pages()
[all …]
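
do_pages_move()/do_pages_stat() above implement move_pages(2). A hedged userspace sketch (link with -lnuma; node 0 is assumed to exist):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	void *buf;

	if (posix_memalign(&buf, psize, psize))
		return 1;
	memset(buf, 0, psize);          /* fault the page in so it has a node */

	void *pages[1] = { buf };
	int nodes[1] = { 0 };           /* request node 0 */
	int status[1];

	/* pid 0 means the calling process; nodes == NULL would only query status. */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
		printf("page is now on node %d\n", status[0]);
	else
		perror("move_pages");
	return 0;
}
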
