
Searched refs:pages (Results 1 – 25 of 46) sorted by relevance


/mm/
percpu-vm.c
35 static struct page **pages; in pcpu_get_pages() local
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
40 if (!pages) in pcpu_get_pages()
41 pages = pcpu_mem_zalloc(pages_size); in pcpu_get_pages()
42 return pages; in pcpu_get_pages()
56 struct page **pages, int page_start, int page_end) in pcpu_free_pages() argument
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
83 struct page **pages, int page_start, int page_end) in pcpu_alloc_pages() argument
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
[all …]
kmemcheck.c
11 int pages; in kmemcheck_alloc_shadow() local
14 pages = 1 << order; in kmemcheck_alloc_shadow()
28 for(i = 0; i < pages; ++i) in kmemcheck_alloc_shadow()
36 kmemcheck_hide_pages(page, pages); in kmemcheck_alloc_shadow()
42 int pages; in kmemcheck_free_shadow() local
48 pages = 1 << order; in kmemcheck_free_shadow()
50 kmemcheck_show_pages(page, pages); in kmemcheck_free_shadow()
54 for(i = 0; i < pages; ++i) in kmemcheck_free_shadow()
103 int pages; in kmemcheck_pagealloc_alloc() local
108 pages = 1 << order; in kmemcheck_pagealloc_alloc()
[all …]
gup.c
423 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
433 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); in __get_user_pages()
455 pages ? &pages[i] : NULL); in __get_user_pages()
465 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
500 if (pages) { in __get_user_pages()
501 pages[i] = page; in __get_user_pages()
635 int force, struct page **pages, struct vm_area_struct **vmas) in get_user_pages() argument
639 if (pages) in get_user_pages()
646 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
720 int write, struct page **pages, int *nr) in gup_pte_range() argument
[all …]
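
The gup.c hits above show the get_user_pages() interface of this kernel generation: task and mm passed explicitly, write/force as plain ints, and an optional pages array that receives one referenced struct page per pinned page. The kernel-side sketch below is illustrative only and not part of the search results; pin_user_buffer, uaddr and len are hypothetical names.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Hypothetical helper: pin a user buffer, use it, then drop the refs. */
static long pin_user_buffer(unsigned long uaddr, size_t len)
{
	int nr = DIV_ROUND_UP(offset_in_page(uaddr) + len, PAGE_SIZE);
	struct page **pages;
	long pinned;
	long i;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	/* write = 1, force = 0; each returned page carries a reference */
	pinned = get_user_pages(current, current->mm, uaddr, nr,
				1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* ... kmap()/DMA-map the pinned pages here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);
	return pinned;
}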
mprotect.c
67 unsigned long pages = 0; in change_pte_range() local
107 pages++; in change_pte_range()
123 pages++; in change_pte_range()
130 return pages; in change_pte_range()
140 unsigned long pages = 0; in change_pmd_range() local
167 pages += HPAGE_PMD_NR; in change_pmd_range()
179 pages += this_pages; in change_pmd_range()
187 return pages; in change_pmd_range()
196 unsigned long pages = 0; in change_pud_range() local
203 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
[all …]
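
The mprotect.c hits count how many PTEs change_pte_range()/change_pmd_range() actually rewrite; the page walk they implement is what services mprotect(2). The short userspace program below is illustrative and not part of the search results: it exercises that path by dropping write permission on an anonymous mapping.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	buf[0] = 'x';				/* still writable here */

	if (mprotect(buf, len, PROT_READ)) {	/* rewrite the PTEs read-only */
		perror("mprotect");
		return 1;
	}
	printf("%zu bytes at %p are now read-only\n", len, (void *)buf);
	munmap(buf, len);
	return 0;
}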
swap_state.c
265 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache() argument
267 struct page **pagep = pages; in free_pages_and_swap_cache()
397 unsigned int pages, max_pages, last_ra; in swapin_nr_pages() local
409 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2; in swapin_nr_pages()
410 if (pages == 2) { in swapin_nr_pages()
417 pages = 1; in swapin_nr_pages()
421 while (roundup < pages) in swapin_nr_pages()
423 pages = roundup; in swapin_nr_pages()
426 if (pages > max_pages) in swapin_nr_pages()
427 pages = max_pages; in swapin_nr_pages()
[all …]
process_vm_access.c
33 static int process_vm_rw_pages(struct page **pages, in process_vm_rw_pages() argument
41 struct page *page = *pages++; in process_vm_rw_pages()
90 / sizeof(struct pages *); in process_vm_rw_single_vec()
98 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec() local
103 pages = get_user_pages(task, mm, pa, pages, in process_vm_rw_single_vec()
107 if (pages <= 0) in process_vm_rw_single_vec()
110 bytes = pages * PAGE_SIZE - start_offset; in process_vm_rw_single_vec()
119 nr_pages -= pages; in process_vm_rw_single_vec()
120 pa += pages * PAGE_SIZE; in process_vm_rw_single_vec()
121 while (pages) in process_vm_rw_single_vec()
[all …]
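
process_vm_rw_pages() and process_vm_rw_single_vec() above implement the process_vm_readv(2)/process_vm_writev(2) syscalls, pinning the remote task's pages with get_user_pages() and copying page-sized chunks. An illustrative userspace use, not part of the search results, that reads from the calling process itself:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "hello from the 'remote' address space";
	char dst[sizeof(src)];
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}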
swap.c
346 void put_pages_list(struct list_head *pages) in put_pages_list() argument
348 while (!list_empty(pages)) { in put_pages_list()
351 victim = list_entry(pages->prev, struct page, lru); in put_pages_list()
372 struct page **pages) in get_kernel_pages() argument
380 pages[seg] = kmap_to_page(kiov[seg].iov_base); in get_kernel_pages()
381 page_cache_get(pages[seg]); in get_kernel_pages()
399 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page() argument
406 return get_kernel_pages(&kiov, 1, write, pages); in get_kernel_page()
420 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
435 release_pages(pvec->pages, pvec->nr, pvec->cold); in pagevec_lru_move_fn()
[all …]
percpu-km.c
52 struct page *pages; in pcpu_create_chunk() local
59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages)); in pcpu_create_chunk()
60 if (!pages) { in pcpu_create_chunk()
66 pcpu_set_page_chunk(nth_page(pages, i), chunk); in pcpu_create_chunk()
68 chunk->data = pages; in pcpu_create_chunk()
69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0]; in pcpu_create_chunk()
readahead.c
62 struct list_head *pages) in read_cache_pages_invalidate_pages() argument
66 while (!list_empty(pages)) { in read_cache_pages_invalidate_pages()
67 victim = list_to_page(pages); in read_cache_pages_invalidate_pages()
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument
89 while (!list_empty(pages)) { in read_cache_pages()
90 page = list_to_page(pages); in read_cache_pages()
101 read_cache_pages_invalidate_pages(mapping, pages); in read_cache_pages()
112 struct list_head *pages, unsigned nr_pages) in read_pages() argument
121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
123 put_pages_list(pages); in read_pages()
[all …]
mincore.c
225 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) in do_mincore() argument
234 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); in do_mincore()
272 unsigned long pages; in SYSCALL_DEFINE3() local
284 pages = len >> PAGE_SHIFT; in SYSCALL_DEFINE3()
285 pages += (len & ~PAGE_MASK) != 0; in SYSCALL_DEFINE3()
287 if (!access_ok(VERIFY_WRITE, vec, pages)) in SYSCALL_DEFINE3()
295 while (pages) { in SYSCALL_DEFINE3()
301 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
310 pages -= retval; in SYSCALL_DEFINE3()
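
The mincore.c hits show the syscall rounding its length up to whole pages (pages = len >> PAGE_SHIFT, plus one if a partial page remains) and filling one result byte per page. An illustrative userspace caller, not part of the search results, that mirrors the same rounding:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t psize = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 8 * psize;
	size_t npages = (len + psize - 1) / psize;	/* round up like the kernel */
	unsigned char *vec = malloc(npages);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	size_t i;

	if (!vec || buf == MAP_FAILED) {
		perror("setup");
		return 1;
	}
	memset(buf, 0, len / 2);	/* fault in only the first half */

	if (mincore(buf, len, vec)) {
		perror("mincore");
		return 1;
	}
	for (i = 0; i < npages; i++)
		printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

	free(vec);
	munmap(buf, len);
	return 0;
}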
Kconfig
161 such as direct mapping pages cannot be migrated. So the corresponding
248 with the reduced number of transparent huge pages that could be used
250 pages enlisted as being part of memory balloon devices avoids the
261 Allows the compaction of memory for the allocation of huge pages.
271 Allows the migration of the physical location of pages of processes
273 two situations. The first is on NUMA systems to put pages nearer
275 pages as migration can relocate pages to satisfy a huge page
306 # a major rework effort. Instead, use the bounce buffer to snapshot pages
335 mergeable. When it finds pages of identical content, it replaces
349 from userspace allocation. Keeping a user from writing to low pages
[all …]
huge_memory.c
484 unsigned long pages; in pages_to_scan_store() local
486 err = kstrtoul(buf, 10, &pages); in pages_to_scan_store()
487 if (err || !pages || pages > UINT_MAX) in pages_to_scan_store()
490 khugepaged_pages_to_scan = pages; in pages_to_scan_store()
990 struct page **pages; in do_huge_pmd_wp_page_fallback() local
994 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, in do_huge_pmd_wp_page_fallback()
996 if (unlikely(!pages)) { in do_huge_pmd_wp_page_fallback()
1002 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | in do_huge_pmd_wp_page_fallback()
1005 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1006 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
[all …]
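
pages_to_scan_store() above parses a decimal value with kstrtoul() and stores it in khugepaged_pages_to_scan; it is the sysfs store hook for khugepaged's pages_to_scan tunable. The sketch below is illustrative and assumes the usual /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan location rather than quoting it from the results.

#include <stdio.h>

int main(void)
{
	/* Assumed sysfs location of the tunable stored by pages_to_scan_store(). */
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "4096\n");	/* parsed by kstrtoul() in the store hook */
	fclose(f);
	return 0;
}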
bootmem.c
57 static unsigned long __init bootmap_bytes(unsigned long pages) in bootmap_bytes() argument
59 unsigned long bytes = DIV_ROUND_UP(pages, 8); in bootmap_bytes()
68 unsigned long __init bootmem_bootmap_pages(unsigned long pages) in bootmem_bootmap_pages() argument
70 unsigned long bytes = bootmap_bytes(pages); in bootmem_bootmap_pages()
141 unsigned long __init init_bootmem(unsigned long start, unsigned long pages) in init_bootmem() argument
143 max_low_pfn = pages; in init_bootmem()
145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); in init_bootmem()
175 unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
234 pages = bdata->node_low_pfn - bdata->node_min_pfn; in free_all_bootmem_core()
235 pages = bootmem_bootmap_pages(pages); in free_all_bootmem_core()
[all …]
vmalloc.c
113 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pte_range() argument
126 struct page *page = pages[*nr]; in vmap_pte_range()
139 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pmd_range() argument
149 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) in vmap_pmd_range()
156 unsigned long end, pgprot_t prot, struct page **pages, int *nr) in vmap_pud_range() argument
166 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) in vmap_pud_range()
179 pgprot_t prot, struct page **pages) in vmap_page_range_noflush() argument
191 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr); in vmap_page_range_noflush()
200 pgprot_t prot, struct page **pages) in vmap_page_range() argument
204 ret = vmap_page_range_noflush(start, end, prot, pages); in vmap_page_range()
[all …]
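
vmap_pte_range()/vmap_pmd_range()/vmap_pud_range() above walk the page tables that back vmap(), which installs a caller-supplied page array into one contiguous kernel virtual range (the vmap() prototype itself appears in the nommu.c results further down). An illustrative kernel-side sketch, not part of the search results:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: map four discontiguous pages contiguously and back. */
static int vmap_demo(void)
{
	struct page *pages[4];
	void *vaddr = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pages); i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out;
	}

	vaddr = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (vaddr) {
		memset(vaddr, 0, ARRAY_SIZE(pages) * PAGE_SIZE);
		vunmap(vaddr);
	}
out:
	while (i--)
		__free_page(pages[i]);
	return vaddr ? 0 : -ENOMEM;
}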
zsmalloc.c
337 static int zs_zpool_shrink(void *pool, unsigned int pages, in zs_zpool_shrink() argument
1048 struct page *pages[2], int off, int size) in __zs_map_object()
1050 BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages)); in __zs_map_object()
1056 struct page *pages[2], int off, int size) in __zs_unmap_object()
1086 struct page *pages[2], int off, int size) in __zs_map_object()
1103 addr = kmap_atomic(pages[0]); in __zs_map_object()
1106 addr = kmap_atomic(pages[1]); in __zs_map_object()
1114 struct page *pages[2], int off, int size) in __zs_unmap_object()
1135 addr = kmap_atomic(pages[0]); in __zs_unmap_object()
1138 addr = kmap_atomic(pages[1]); in __zs_unmap_object()
[all …]
nommu.c
151 unsigned int foll_flags, struct page **pages, in __get_user_pages() argument
176 if (pages) { in __get_user_pages()
177 pages[i] = virt_to_page(start); in __get_user_pages()
178 if (pages[i]) in __get_user_pages()
179 page_cache_get(pages[i]); in __get_user_pages()
201 int write, int force, struct page **pages, in get_user_pages() argument
211 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
432 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) in vmap() argument
445 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) in vm_map_ram() argument
1152 struct page *pages; in do_mmap_private() local
[all …]
iov_iter.c
447 struct page **pages, size_t maxsize, unsigned maxpages, in get_pages_iovec() argument
468 res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages); in get_pages_iovec()
475 struct page ***pages, size_t maxsize, in get_pages_alloc_iovec() argument
507 *pages = p; in get_pages_alloc_iovec()
777 struct page **pages, size_t maxsize, unsigned maxpages, in get_pages_bvec() argument
789 get_page(*pages = bvec->bv_page); in get_pages_bvec()
795 struct page ***pages, size_t maxsize, in get_pages_alloc_bvec() argument
806 *pages = kmalloc(sizeof(struct page *), GFP_KERNEL); in get_pages_alloc_bvec()
807 if (!*pages) in get_pages_alloc_bvec()
810 get_page(**pages = bvec->bv_page); in get_pages_alloc_bvec()
[all …]
cma.c
85 unsigned long pages) in cma_bitmap_pages_to_bits() argument
87 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; in cma_bitmap_pages_to_bits()
434 bool cma_release(struct cma *cma, const struct page *pages, unsigned int count) in cma_release() argument
438 if (!cma || !pages) in cma_release()
441 pr_debug("%s(page %p)\n", __func__, (void *)pages); in cma_release()
443 pfn = page_to_pfn(pages); in cma_release()
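
cma_release(cma, pages, count) above drops a contiguous allocation back into its CMA area. A minimal kernel-side round trip is sketched below; it is illustrative only, the cma_alloc(cma, count, align) form is an assumption about this kernel generation's allocator, and my_cma stands in for an area set up elsewhere.

#include <linux/cma.h>
#include <linux/mm.h>

/* Hypothetical round trip against a CMA area created elsewhere. */
static int cma_roundtrip(struct cma *my_cma)
{
	struct page *pages;

	/* 16 physically contiguous pages, page-aligned (align order 0) */
	pages = cma_alloc(my_cma, 16, 0);
	if (!pages)
		return -ENOMEM;

	/* ... hand the contiguous range to a device ... */

	if (!cma_release(my_cma, pages, 16))
		return -EINVAL;
	return 0;
}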
frontswap.c
348 unsigned long pages = 0, pages_to_unuse = 0; in __frontswap_unuse_pages() local
354 pages = pages_to_unuse = total_pages_to_unuse; in __frontswap_unuse_pages()
356 pages = si_frontswap_pages; in __frontswap_unuse_pages()
360 if (security_vm_enough_memory_mm(current->mm, pages)) { in __frontswap_unuse_pages()
364 vm_unacct_memory(pages); in __frontswap_unuse_pages()
Kconfig.debug
9 Unmap pages from the kernel linear mapping after free_pages().
14 fill the pages with poison patterns after free_pages() and verify
18 a resume because free pages are not saved to the suspend image.
nobootmem.c
176 unsigned long pages; in free_all_bootmem() local
185 pages = free_low_memory_core_early(); in free_all_bootmem()
186 totalram_pages += pages; in free_all_bootmem()
188 return pages; in free_all_bootmem()
balloon_compaction.c
65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue()
97 if (unlikely(list_empty(&b_dev_info->pages) && in balloon_page_dequeue()
128 list_add(&page->lru, &b_dev_info->pages); in __putback_balloon_page()
highmem.c
116 unsigned int pages = 0; in nr_free_highpages() local
119 pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], in nr_free_highpages()
122 pages += zone_page_state( in nr_free_highpages()
127 return pages; in nr_free_highpages()
page-writeback.c
1219 int pages; /* target nr_dirtied_pause */ in bdi_min_pause() local
1252 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); in bdi_min_pause()
1262 if (pages < DIRTY_POLL_THRESH) { in bdi_min_pause()
1264 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); in bdi_min_pause()
1265 if (pages > DIRTY_POLL_THRESH) { in bdi_min_pause()
1266 pages = DIRTY_POLL_THRESH; in bdi_min_pause()
1271 pause = HZ * pages / (task_ratelimit + 1); in bdi_min_pause()
1274 pages = task_ratelimit * t / roundup_pow_of_two(HZ); in bdi_min_pause()
1277 *nr_dirtied_pause = pages; in bdi_min_pause()
1281 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t; in bdi_min_pause()
[all …]
migrate.c
1306 const void __user * __user *pages, in do_pages_move() argument
1342 if (get_user(p, pages + j + chunk_start)) in do_pages_move()
1391 const void __user **pages, int *status) in do_pages_stat_array() argument
1398 unsigned long addr = (unsigned long)(*pages); in do_pages_stat_array()
1422 pages++; in do_pages_stat_array()
1434 const void __user * __user *pages, in do_pages_stat() argument
1448 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) in do_pages_stat()
1456 pages += chunk_nr; in do_pages_stat()
1468 const void __user * __user *, pages, in SYSCALL_DEFINE6()
1516 err = do_pages_move(mm, task_nodes, nr_pages, pages, in SYSCALL_DEFINE6()
[all …]
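
do_pages_move() and do_pages_stat() above service the move_pages(2) syscall, reading the user-supplied array of page addresses in chunks. The illustrative userspace program below is not part of the search results (build with -lnuma); it uses the query-only form, where a NULL nodes array just reports the NUMA node of each page.

#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t psize = (size_t)sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, psize, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *pages[1];
	int status[1] = { -1 };

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0, psize);		/* fault the page in first */
	pages[0] = buf;

	/* pid 0 = calling process; nodes == NULL only queries placement */
	if (move_pages(0, 1, pages, NULL, status, 0)) {
		perror("move_pages");
		return 1;
	}
	printf("page at %p is on NUMA node %d\n", (void *)buf, status[0]);
	munmap(buf, psize);
	return 0;
}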
