
Searched refs:pages (Results 1 – 25 of 50) sorted by relevance


/mm/

percpu-vm.c
33 static struct page **pages; in pcpu_get_pages() local
34 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
38 if (!pages) in pcpu_get_pages()
39 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
40 return pages; in pcpu_get_pages()
54 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
61 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
82 struct page **pages, int page_start, int page_end, in pcpu_alloc_pages() argument
92 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
103 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]

gup.c
54 void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, in put_user_pages_dirty_lock() argument
66 put_user_pages(pages, npages); in put_user_pages_dirty_lock()
71 struct page *page = compound_head(pages[index]); in put_user_pages_dirty_lock()
108 void put_user_pages(struct page **pages, unsigned long npages) in put_user_pages() argument
118 put_user_page(pages[index]); in put_user_pages()
781 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
793 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); in __get_user_pages()
814 pages ? &pages[i] : NULL); in __get_user_pages()
826 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
871 if (pages) { in __get_user_pages()
[all …]
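
The gup.c hits above are the get_user_pages()/put_user_pages() pin-and-release pair. A minimal sketch of how a caller might drive them around this kernel version; the helper names (pin_user_buffer, unpin_user_buffer) are illustrative, not taken from the source:

#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative helper: pin nr_pages of a user buffer for write access. */
static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                            struct page ***pagesp)
{
        struct page **pages;
        long pinned;

        pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages, NULL);
        if (pinned < 0) {
                kvfree(pages);
                return pinned;
        }
        *pagesp = pages;
        return pinned;          /* may be fewer than nr_pages */
}

/* Release the pin; make_dirty dirties each head page under its page lock. */
static void unpin_user_buffer(struct page **pages, long pinned, bool make_dirty)
{
        put_user_pages_dirty_lock(pages, pinned, make_dirty);
        kvfree(pages);
}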

mprotect.c
44 unsigned long pages = 0; in change_pte_range() local
124 pages++; in change_pte_range()
140 pages++; in change_pte_range()
154 pages++; in change_pte_range()
161 return pages; in change_pte_range()
170 unsigned long pages = 0; in change_pmd_range() local
202 pages += HPAGE_PMD_NR; in change_pmd_range()
214 pages += this_pages; in change_pmd_range()
224 return pages; in change_pmd_range()
233 unsigned long pages = 0; in change_pud_range() local
[all …]
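
The change_pte_range()/change_pmd_range()/change_pud_range() hits above accumulate a count of PTEs actually rewritten and return it to the mprotect and NUMA-hinting paths. The userspace entry point that exercises this walk is mprotect(2); a minimal sketch for illustration:

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);

        /* Map four writable pages, then drop write permission on all of them;
         * the kernel walks change_pud/pmd/pte_range to update each entry. */
        void *buf = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }
        if (mprotect(buf, 4 * psz, PROT_READ)) { perror("mprotect"); return 1; }
        munmap(buf, 4 * psz);
        return 0;
}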

frame_vector.c
126 struct page **pages; in put_vaddr_frames() local
130 pages = frame_vector_pages(vec); in put_vaddr_frames()
136 if (WARN_ON(IS_ERR(pages))) in put_vaddr_frames()
139 put_page(pages[i]); in put_vaddr_frames()
158 struct page **pages; in frame_vector_to_pages() local
166 pages = (struct page **)nums; in frame_vector_to_pages()
168 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
184 struct page **pages; in frame_vector_to_pfns() local
188 pages = (struct page **)(vec->ptrs); in frame_vector_to_pfns()
189 nums = (unsigned long *)pages; in frame_vector_to_pfns()
[all …]
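
The frame_vector.c hits above show the same backing store being viewed either as struct page pointers or as raw PFNs. A rough sketch of how the surrounding API is typically driven (frame_vector_create/get_vaddr_frames/put_vaddr_frames as declared in linux/mm.h around this version; pin_user_range is a made-up caller):

#include <linux/err.h>
#include <linux/mm.h>

static int pin_user_range(unsigned long start, unsigned int nr_frames)
{
        struct frame_vector *vec;
        struct page **pages;
        unsigned int i;
        int ret;

        vec = frame_vector_create(nr_frames);
        if (!vec)
                return -ENOMEM;

        ret = get_vaddr_frames(start, nr_frames, FOLL_WRITE, vec);
        if (ret < 0)
                goto out_destroy;

        pages = frame_vector_pages(vec);        /* ERR_PTR for VM_IO/PFNMAP vmas */
        if (!IS_ERR_OR_NULL(pages))
                for (i = 0; i < frame_vector_count(vec); i++)
                        ;       /* use pages[i] */

        put_vaddr_frames(vec);                  /* drops the page references */
out_destroy:
        frame_vector_destroy(vec);
        return ret < 0 ? ret : 0;
}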

gup_benchmark.c
28 struct page **pages; in __gup_benchmark_ioctl() local
35 pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL); in __gup_benchmark_ioctl()
36 if (!pages) in __gup_benchmark_ioctl()
55 pages + i); in __gup_benchmark_ioctl()
60 pages + i, NULL); in __gup_benchmark_ioctl()
63 nr = get_user_pages(addr, nr, gup->flags & 1, pages + i, in __gup_benchmark_ioctl()
67 kvfree(pages); in __gup_benchmark_ioctl()
83 if (!pages[i]) in __gup_benchmark_ioctl()
85 put_page(pages[i]); in __gup_benchmark_ioctl()
90 kvfree(pages); in __gup_benchmark_ioctl()

mincore.c
207 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) in do_mincore() argument
216 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
218 unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE); in do_mincore() local
219 memset(vec, 1, pages); in do_mincore()
220 return pages; in do_mincore()
256 unsigned long pages; in SYSCALL_DEFINE3() local
270 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
271 pages += (offset_in_page(len)) != 0; in SYSCALL_DEFINE3()
273 if (!access_ok(vec, pages)) in SYSCALL_DEFINE3()
281 while (pages) { in SYSCALL_DEFINE3()
[all …]
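
do_mincore() above writes one residency byte per page of the queried range; from userspace this is the mincore(2) syscall. A minimal sketch for illustration:

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        size_t len = 8 * psz;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned char *vec = malloc(len / psz); /* one status byte per page */

        if (buf == MAP_FAILED || !vec)
                return 1;
        ((char *)buf)[0] = 1;                   /* fault in the first page */
        if (mincore(buf, len, vec)) { perror("mincore"); return 1; }
        for (size_t i = 0; i < len / psz; i++)
                printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "absent");
        return 0;
}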

balloon_compaction.c
41 struct list_head *pages) in balloon_page_list_enqueue() argument
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
77 struct list_head *pages, size_t n_req_pages) in balloon_page_list_dequeue() argument
84 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_list_dequeue()
104 list_add(&page->lru, pages); in balloon_page_list_dequeue()
180 LIST_HEAD(pages); in balloon_page_dequeue()
183 n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1); in balloon_page_dequeue()
194 if (unlikely(list_empty(&b_dev_info->pages) && in balloon_page_dequeue()
200 return list_first_entry(&pages, struct page, lru); in balloon_page_dequeue()
226 list_add(&page->lru, &b_dev_info->pages); in balloon_page_putback()
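
balloon_page_list_enqueue()/balloon_page_list_dequeue() above move whole batches of pages between a driver-local list and b_dev_info->pages. A rough sketch of a batched inflate step built on those helpers (example_inflate is a made-up driver function; the balloon_* calls are from linux/balloon_compaction.h as of roughly this version):

#include <linux/balloon_compaction.h>
#include <linux/list.h>

/* Hypothetical inflate step: allocate up to n pages and hand them to the balloon. */
static size_t example_inflate(struct balloon_dev_info *b_dev_info, size_t n)
{
        LIST_HEAD(pages);
        struct page *page;
        size_t i;

        for (i = 0; i < n; i++) {
                page = balloon_page_alloc();
                if (!page)
                        break;
                balloon_page_push(&pages, page);        /* collect locally first */
        }
        /* ... then enqueue the whole batch under b_dev_info's lock. */
        return balloon_page_list_enqueue(b_dev_info, &pages);
}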

process_vm_access.c
30 static int process_vm_rw_pages(struct page **pages, in process_vm_rw_pages() argument
38 struct page *page = *pages++; in process_vm_rw_pages()
87 / sizeof(struct pages *); in process_vm_rw_single_vec()
99 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec() local
109 pages = get_user_pages_remote(task, mm, pa, pages, flags, in process_vm_rw_single_vec()
113 if (pages <= 0) in process_vm_rw_single_vec()
116 bytes = pages * PAGE_SIZE - start_offset; in process_vm_rw_single_vec()
125 nr_pages -= pages; in process_vm_rw_single_vec()
126 pa += pages * PAGE_SIZE; in process_vm_rw_single_vec()
127 while (pages) in process_vm_rw_single_vec()
[all …]
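
process_vm_rw_pages()/process_vm_rw_single_vec() above implement process_vm_readv(2)/process_vm_writev(2) by pinning the remote task's pages with get_user_pages_remote() and copying through them in PAGE_SIZE chunks. A minimal userspace sketch of the read side (pid and remote address are placeholders taken from argv):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        if (argc < 3)
                return 1;
        pid_t pid = (pid_t)atoi(argv[1]);                /* target process */
        unsigned long raddr = strtoul(argv[2], NULL, 0); /* remote address */
        char buf[4096];
        struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct iovec remote = { .iov_base = (void *)raddr, .iov_len = sizeof(buf) };

        ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
        if (n < 0) { perror("process_vm_readv"); return 1; }
        printf("read %zd bytes from pid %d\n", n, (int)pid);
        return 0;
}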

swap.c
126 void put_pages_list(struct list_head *pages) in put_pages_list() argument
128 while (!list_empty(pages)) { in put_pages_list()
131 victim = lru_to_page(pages); in put_pages_list()
152 struct page **pages) in get_kernel_pages() argument
160 pages[seg] = kmap_to_page(kiov[seg].iov_base); in get_kernel_pages()
161 get_page(pages[seg]); in get_kernel_pages()
179 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page() argument
186 return get_kernel_pages(&kiov, 1, write, pages); in get_kernel_page()
200 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
215 release_pages(pvec->pages, pvec->nr); in pagevec_lru_move_fn()
[all …]
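
get_kernel_pages()/get_kernel_page() above translate kernel virtual addresses described by a kvec into struct page pointers via kmap_to_page() and take a reference on each. A small sketch of a caller (example_get_kernel_page is illustrative):

#include <linux/mm.h>

/* Take a reference on the page backing one kernel buffer, then drop it. */
static int example_get_kernel_page(void *kbuf)
{
        struct page *page;

        if (get_kernel_page((unsigned long)kbuf, 0 /* read-only */, &page) != 1)
                return -EFAULT;
        /* ... use page ... */
        put_page(page);         /* balance the reference taken above */
        return 0;
}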

swap_state.c
288 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache() argument
290 struct page **pagep = pages; in free_pages_and_swap_cache()
467 unsigned int pages, last_ra; in __swapin_nr_pages() local
474 pages = hits + 2; in __swapin_nr_pages()
475 if (pages == 2) { in __swapin_nr_pages()
482 pages = 1; in __swapin_nr_pages()
485 while (roundup < pages) in __swapin_nr_pages()
487 pages = roundup; in __swapin_nr_pages()
490 if (pages > max_pages) in __swapin_nr_pages()
491 pages = max_pages; in __swapin_nr_pages()
[all …]
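
__swapin_nr_pages() above sizes the swap readahead window: recent hits grow it, the result is rounded up to a power of two, and it is clamped to max_pages. A simplified standalone model of that heuristic (the real function also consults the previous offset before shrinking to a single page):

/* Simplified model of the window sizing in __swapin_nr_pages(). */
static unsigned int swapin_window(unsigned int hits, unsigned int max_pages)
{
        unsigned int pages = hits + 2;
        unsigned int roundup = 4;

        if (pages == 2)                 /* no recent hits: fall back to one page */
                return 1;
        while (roundup < pages)         /* round up to the next power of two */
                roundup <<= 1;
        pages = roundup;
        if (pages > max_pages)
                pages = max_pages;
        return pages;
}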

percpu-km.c
51 struct page *pages; in pcpu_create_chunk() local
59 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
60 if (!pages) { in pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
68 chunk->data = pages; in pcpu_create_chunk()
69 chunk->base_addr = page_address(pages); in pcpu_create_chunk()
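
pcpu_create_chunk() above grabs one physically contiguous block with alloc_pages() and uses page_address() of the first page as the chunk base. The same allocate/address/free pattern in isolation (example_* names are illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 2^order contiguous pages and return their linear mapping address. */
static void *example_alloc_block(unsigned int order, struct page **pagep)
{
        struct page *pages = alloc_pages(GFP_KERNEL, order);

        if (!pages)
                return NULL;
        *pagep = pages;
        return page_address(pages);     /* valid: GFP_KERNEL allocates lowmem */
}

static void example_free_block(struct page *pages, unsigned int order)
{
        __free_pages(pages, order);
}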

readahead.c
65 struct list_head *pages) in read_cache_pages_invalidate_pages() argument
69 while (!list_empty(pages)) { in read_cache_pages_invalidate_pages()
70 victim = lru_to_page(pages); in read_cache_pages_invalidate_pages()
88 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
94 while (!list_empty(pages)) { in read_cache_pages()
95 page = lru_to_page(pages); in read_cache_pages()
106 read_cache_pages_invalidate_pages(mapping, pages); in read_cache_pages()
117 struct list_head *pages, unsigned int nr_pages, gfp_t gfp) in read_pages() argument
126 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
128 put_pages_list(pages); in read_pages()
[all …]
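
read_cache_pages() above walks a caller-supplied list of not-yet-inserted pages, adds each one to the page cache and passes it to a filler callback, invalidating the remainder on error. A sketch of a filesystem-style caller (examplefs_* names are made up; the filler just fakes a successful read):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical per-page filler: a real filesystem would issue I/O here. */
static int examplefs_filler(void *data, struct page *page)
{
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

static int examplefs_readpages(struct file *file, struct address_space *mapping,
                               struct list_head *pages, unsigned int nr_pages)
{
        /* adds each listed page to the cache, then hands it to the filler */
        return read_cache_pages(mapping, pages, examplefs_filler, NULL);
}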

Kconfig
217 with the reduced number of transparent huge pages that could be used
219 pages enlisted as being part of memory balloon devices avoids the
247 Allows the migration of the physical location of pages of processes
249 two situations. The first is on NUMA systems to put pages nearer
251 pages as migration can relocate pages to satisfy a huge page
295 mergeable. When it finds pages of identical content, it replaces
309 from userspace allocation. Keeping a user from writing to low pages
338 tristate "HWPoison pages injector"
362 excess pages there must be before trimming should occur, or zero if
366 of 1 says that all excess pages should be trimmed.
[all …]

util.c
424 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, in __account_locked_vm() argument
436 if (locked_vm + pages > limit) in __account_locked_vm()
440 mm->locked_vm = locked_vm + pages; in __account_locked_vm()
442 WARN_ON_ONCE(pages > locked_vm); in __account_locked_vm()
443 mm->locked_vm = locked_vm - pages; in __account_locked_vm()
447 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, in __account_locked_vm()
467 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) in account_locked_vm() argument
471 if (pages == 0 || !mm) in account_locked_vm()
475 ret = __account_locked_vm(mm, pages, inc, current, in account_locked_vm()
787 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
[all …]
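
account_locked_vm()/__account_locked_vm() above charge or uncharge pinned pages against RLIMIT_MEMLOCK via mm->locked_vm. A sketch of the usual driver pattern around a long-term pin (example_* names are illustrative):

#include <linux/mm.h>

/* Charge nr_pages against RLIMIT_MEMLOCK before pinning user memory. */
static int example_charge_pin(struct mm_struct *mm, unsigned long nr_pages)
{
        return account_locked_vm(mm, nr_pages, true);   /* inc: charge */
}

/* Uncharge on teardown, after the pages have been released. */
static void example_uncharge_pin(struct mm_struct *mm, unsigned long nr_pages)
{
        account_locked_vm(mm, nr_pages, false);         /* dec: uncharge */
}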

zsmalloc.c
1028 struct page *pages[]) in create_page_chain() argument
1044 page = pages[i]; in create_page_chain()
1068 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; in alloc_zspage() local
1084 dec_zone_page_state(pages[i], NR_ZSPAGES); in alloc_zspage()
1085 __free_page(pages[i]); in alloc_zspage()
1092 pages[i] = page; in alloc_zspage()
1095 create_page_chain(class, zspage, pages); in alloc_zspage()
1139 struct page *pages[2], int off, int size) in __zs_map_object()
1141 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
1147 struct page *pages[2], int off, int size) in __zs_unmap_object()
[all …]

Kconfig.debug
17 Unmap pages from the kernel linear mapping after free_pages().
22 pages are being allocated and freed, as unexpected state changes
29 fill the pages with poison patterns after free_pages() and verify
33 pages are not saved to the suspend image.
36 allowing the kernel mapping to be backed by large pages on some
66 bool "Poison pages after freeing"
69 Fill the pages with poison patterns after free_pages() and verify
84 Skip the sanity checking on alloc, only fill the pages with
95 Instead of using the existing poison value, fill the pages with

vmalloc.c
138 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pte_range() argument
151 struct page *page = pages[*nr]; in vmap_pte_range()
164 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pmd_range() argument
174 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
181 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pud_range() argument
191 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
198 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_p4d_range() argument
208 if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) in vmap_p4d_range()
221 pgprot_t prot, struct page **pages) in vmap_page_range_noflush() argument
233 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
[all …]
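
vmap_pte_range() through vmap_page_range_noflush() above install an array of struct page pointers into a contiguous kernel virtual range, one page-table level at a time. The public entry point built on that walk is vmap(); a minimal sketch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map nr individually allocated pages into one virtually contiguous range. */
static void *example_vmap(struct page **pages, unsigned int nr)
{
        return vmap(pages, nr, VM_MAP, PAGE_KERNEL);
}

static void example_vunmap(void *addr)
{
        vunmap(addr);   /* tears down the mapping; does not free the pages */
}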

workingset.c
398 unsigned long pages; in count_shadow_nodes() local
430 for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) in count_shadow_nodes()
431 pages += lruvec_page_state_local(lruvec, in count_shadow_nodes()
433 pages += lruvec_page_state_local(lruvec, NR_SLAB_RECLAIMABLE); in count_shadow_nodes()
434 pages += lruvec_page_state_local(lruvec, NR_SLAB_UNRECLAIMABLE); in count_shadow_nodes()
437 pages = node_present_pages(sc->nid); in count_shadow_nodes()
439 max_nodes = pages >> (XA_CHUNK_SHIFT - 3); in count_shadow_nodes()

truncate.c
71 if (xa_is_value(pvec->pages[j])) in truncate_exceptional_pvec_entries()
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
341 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
365 truncate_cleanup_page(mapping, locked_pvec.pages[i]); in truncate_inode_pages_range()
368 unlock_page(locked_pvec.pages[i]); in truncate_inode_pages_range()
433 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
561 struct page *page = pvec.pages[i]; in invalidate_mapping_pages()
704 struct page *page = pvec.pages[i]; in invalidate_inode_pages2_range()

cma.c
80 unsigned long pages) in cma_bitmap_pages_to_bits() argument
82 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
517 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
521 if (!cma || !pages) in cma_release()
524 pr_debug("%s(page %p)\n", __func__, (void *)pages); in cma_release()
526 pfn = page_to_pfn(pages); in cma_release()
535 trace_cma_release(pfn, pages, count); in cma_release()
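
cma_bitmap_pages_to_bits() and cma_release() above manage a CMA area's bitmap in units of 1 << order_per_bit pages. A sketch of the matching allocate/release pair as it looked around this version (the cma handle would come from the platform; example_cma_roundtrip is illustrative):

#include <linux/cma.h>

/* Allocate count contiguous pages from a CMA area and give them back. */
static int example_cma_roundtrip(struct cma *cma, size_t count)
{
        struct page *pages;

        pages = cma_alloc(cma, count, 0 /* align order */, false /* no_warn */);
        if (!pages)
                return -ENOMEM;
        /* ... use the physically contiguous range ... */
        if (!cma_release(cma, pages, count))
                return -EINVAL; /* pages were not part of this CMA area */
        return 0;
}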

huge_memory.c
1216 struct page **pages; in do_huge_pmd_wp_page_fallback() local
1219 pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *), in do_huge_pmd_wp_page_fallback()
1221 if (unlikely(!pages)) { in do_huge_pmd_wp_page_fallback()
1227 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, in do_huge_pmd_wp_page_fallback()
1229 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1230 mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, in do_huge_pmd_wp_page_fallback()
1232 if (pages[i]) in do_huge_pmd_wp_page_fallback()
1233 put_page(pages[i]); in do_huge_pmd_wp_page_fallback()
1235 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
1236 set_page_private(pages[i], 0); in do_huge_pmd_wp_page_fallback()
[all …]
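
do_huge_pmd_wp_page_fallback() above builds a HPAGE_PMD_NR-entry array of freshly allocated base pages and unwinds it page by page when an allocation or memcg charge fails. The allocate-array-then-unwind pattern in isolation (example_* names are illustrative; the memcg charging is omitted):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Allocate nr individual pages, freeing everything on partial failure. */
static struct page **example_alloc_page_array(unsigned int nr)
{
        struct page **pages;
        unsigned int i;

        pages = kmalloc_array(nr, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < nr; i++) {
                pages[i] = alloc_page(GFP_HIGHUSER_MOVABLE);
                if (!pages[i])
                        goto unwind;
        }
        return pages;

unwind:
        while (i--)
                put_page(pages[i]);
        kfree(pages);
        return NULL;
}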

frontswap.c
389 unsigned long pages = 0, pages_to_unuse = 0; in __frontswap_unuse_pages() local
395 pages = pages_to_unuse = total_pages_to_unuse; in __frontswap_unuse_pages()
397 pages = si_frontswap_pages; in __frontswap_unuse_pages()
401 if (security_vm_enough_memory_mm(current->mm, pages)) { in __frontswap_unuse_pages()
405 vm_unacct_memory(pages); in __frontswap_unuse_pages()

cma_debug.c
124 int pages = val; in cma_free_write() local
127 return cma_free_mem(cma, pages); in cma_free_write()
156 int pages = val; in cma_alloc_write() local
159 return cma_alloc_mem(cma, pages); in cma_alloc_write()

swap_slots.c
95 long pages; in check_cache_active() local
100 pages = get_nr_swap_pages(); in check_cache_active()
102 if (pages > num_online_cpus() * in check_cache_active()
109 if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE) in check_cache_active()

shmem.c
200 static inline int shmem_acct_block(unsigned long flags, long pages) in shmem_acct_block() argument
206 pages * VM_ACCT(PAGE_SIZE)); in shmem_acct_block()
209 static inline void shmem_unacct_blocks(unsigned long flags, long pages) in shmem_unacct_blocks() argument
212 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); in shmem_unacct_blocks()
215 static inline bool shmem_inode_acct_block(struct inode *inode, long pages) in shmem_inode_acct_block() argument
220 if (shmem_acct_block(info->flags, pages)) in shmem_inode_acct_block()
225 sbinfo->max_blocks - pages) > 0) in shmem_inode_acct_block()
227 percpu_counter_add(&sbinfo->used_blocks, pages); in shmem_inode_acct_block()
233 shmem_unacct_blocks(info->flags, pages); in shmem_inode_acct_block()
237 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages) in shmem_inode_unacct_blocks() argument
[all …]
