/mm/
memory_hotplug.c
  247   unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
  252   nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
  255   for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
  261   nr_pages = zone->wait_table_hash_nr_entries  in register_page_bootmem_info_node()
  263   nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
  266   for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
  446   int nr_pages = PAGES_PER_SECTION;  in __add_zone() local
  453   ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);  in __add_zone()
  458   grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);  in __add_zone()
  460   phys_start_pfn + nr_pages);  in __add_zone()
  [all …]
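The hits at lines 252 and 263 use the standard kernel idiom for turning a byte count into whole pages: round up with PAGE_ALIGN(), then shift right by PAGE_SHIFT. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages (the kernel's PAGE_SHIFT/PAGE_SIZE are per-architecture):

    #include <stdio.h>

    /* Illustrative values; the kernel's PAGE_SHIFT/PAGE_SIZE are per-arch. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long size = 5000;   /* e.g. a sizeof(struct pglist_data)-like byte count */
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        printf("%lu bytes need %lu page(s)\n", size, nr_pages);   /* 5000 -> 2 */
        return 0;
    }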
percpu-km.c
  50    const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_create_chunk() local
  59    pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));  in pcpu_create_chunk()
  65    for (i = 0; i < nr_pages; i++)  in pcpu_create_chunk()
  72    pcpu_chunk_populated(chunk, 0, nr_pages);  in pcpu_create_chunk()
  80    const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_destroy_chunk() local
  83    __free_pages(chunk->data, order_base_2(nr_pages));  in pcpu_destroy_chunk()
  94    size_t nr_pages, alloc_pages;  in pcpu_verify_alloc_info() local
  102   nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;  in pcpu_verify_alloc_info()
  103   alloc_pages = roundup_pow_of_two(nr_pages);  in pcpu_verify_alloc_info()
  105   if (alloc_pages > nr_pages)  in pcpu_verify_alloc_info()
  [all …]
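percpu-km backs a whole chunk with a single contiguous alloc_pages() call, so the page count is rounded up to a power of two (order_base_2() at lines 59 and 83), and the check at line 105 flags the pages that rounding wastes. A sketch of that rounding, with roundup_pow_of_two() re-implemented locally for illustration:

    #include <stdio.h>

    /* Local stand-in for the kernel helper, for illustration only. */
    static unsigned long roundup_pow_of_two(unsigned long n)
    {
        unsigned long p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned long nr_pages = 48;   /* pages the chunk actually needs */
        unsigned long alloc_pages = roundup_pow_of_two(nr_pages);

        if (alloc_pages > nr_pages)    /* mirrors the line-105 check */
            printf("warning: %lu of %lu pages wasted\n",
                   alloc_pages - nr_pages, alloc_pages);   /* 16 of 64 */
        return 0;
    }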
page_isolation.c
  26    arg.nr_pages = pageblock_nr_pages;  in set_migratetype_isolate()
  59    unsigned long nr_pages;  in set_migratetype_isolate() local
  64    nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);  in set_migratetype_isolate()
  66    __mod_zone_freepage_state(zone, -nr_pages, migratetype);  in set_migratetype_isolate()
  78    unsigned long flags, nr_pages;  in unset_migratetype_isolate() local
  119   nr_pages = move_freepages_block(zone, page, migratetype);  in unset_migratetype_isolate()
  120   __mod_zone_freepage_state(zone, nr_pages, migratetype);  in unset_migratetype_isolate()
  131   __first_valid_page(unsigned long pfn, unsigned long nr_pages)  in __first_valid_page() argument
  134   for (i = 0; i < nr_pages; i++)  in __first_valid_page()
  137   if (unlikely(i == nr_pages))  in __first_valid_page()
process_vm_access.c
  87    unsigned long nr_pages;  in process_vm_rw_single_vec() local
  95    nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  in process_vm_rw_single_vec()
  97    while (!rc && nr_pages && iov_iter_count(iter)) {  in process_vm_rw_single_vec()
  98    int pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()
  119   nr_pages -= pages;  in process_vm_rw_single_vec()
  155   unsigned long nr_pages = 0;  in process_vm_rw_core() local
  171   nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
  175   if (nr_pages == 0)  in process_vm_rw_core()
  178   if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {  in process_vm_rw_core()
  182   sizeof(struct pages *)*nr_pages),  in process_vm_rw_core()
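Line 95 counts how many pages an arbitrary (addr, len) byte range touches: the page index of the last byte, minus the page index of the first byte, plus one. A sketch assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumed for illustration */

    int main(void)
    {
        unsigned long addr = 4094, len = 4;   /* range straddles a page boundary */
        unsigned long nr_pages =
            (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
        printf("range spans %lu page(s)\n", nr_pages);   /* prints 2 */
        return 0;
    }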
page_cgroup.c
  49    unsigned long nr_pages;  in alloc_node_page_cgroup() local
  51    nr_pages = NODE_DATA(nid)->node_spanned_pages;  in alloc_node_page_cgroup()
  52    if (!nr_pages)  in alloc_node_page_cgroup()
  55    table_size = sizeof(struct page_cgroup) * nr_pages;  in alloc_node_page_cgroup()
  193   unsigned long nr_pages,  in online_page_cgroup() argument
  200   end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in online_page_cgroup()
  228   unsigned long nr_pages, int nid)  in offline_page_cgroup() argument
  233   end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in offline_page_cgroup()
  249   mn->nr_pages, mn->status_change_nid);  in page_cgroup_callback()
  253   mn->nr_pages, mn->status_change_nid);  in page_cgroup_callback()
  [all …]
migrate.c
  476   int nr_pages)  in __copy_gigantic_page() argument
  482   for (i = 0; i < nr_pages; ) {  in __copy_gigantic_page()
  495   int nr_pages;  in copy_huge_page() local
  500   nr_pages = pages_per_huge_page(h);  in copy_huge_page()
  502   if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {  in copy_huge_page()
  503   __copy_gigantic_page(dst, src, nr_pages);  in copy_huge_page()
  509   nr_pages = hpage_nr_pages(src);  in copy_huge_page()
  512   for (i = 0; i < nr_pages; i++) {  in copy_huge_page()
  1305  unsigned long nr_pages,  in do_pages_move() argument
  1329  chunk_start < nr_pages;  in do_pages_move()
  [all …]
gup.c
  422   unsigned long start, unsigned long nr_pages,  in __get_user_pages() argument
  430   if (!nr_pages)  in __get_user_pages()
  466   &start, &nr_pages, i,  in __get_user_pages()
  512   if (page_increm > nr_pages)  in __get_user_pages()
  513   page_increm = nr_pages;  in __get_user_pages()
  516   nr_pages -= page_increm;  in __get_user_pages()
  517   } while (nr_pages);  in __get_user_pages()
  634   unsigned long start, unsigned long nr_pages, int write,  in get_user_pages() argument
  646   return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,  in get_user_pages()
  936   int __get_user_pages_fast(unsigned long start, int nr_pages, int write,  in __get_user_pages_fast() argument
  [all …]
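Lines 512-517 are the consume loop of __get_user_pages(): one faulted (possibly compound) page may satisfy several requested pages at once, so the advance is clamped to what is still outstanding. A runnable sketch of the loop shape; pages_covered() is a hypothetical stand-in, not a kernel function:

    #include <stdio.h>

    /* Hypothetical stand-in: pages populated by a single fault (a compound
     * page can cover several of the pages the caller asked for). */
    static unsigned long pages_covered(void)
    {
        return 4;
    }

    int main(void)
    {
        unsigned long nr_pages = 7;   /* pages the caller requested */

        do {
            unsigned long page_increm = pages_covered();
            if (page_increm > nr_pages)   /* never overshoot the request */
                page_increm = nr_pages;
            nr_pages -= page_increm;
            printf("consumed %lu, %lu left\n", page_increm, nr_pages);
        } while (nr_pages);
        return 0;
    }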
memcontrol.c
  903   int nr_pages)  in mem_cgroup_charge_statistics() argument
  911   nr_pages);  in mem_cgroup_charge_statistics()
  914   nr_pages);  in mem_cgroup_charge_statistics()
  918   nr_pages);  in mem_cgroup_charge_statistics()
  921   if (nr_pages > 0)  in mem_cgroup_charge_statistics()
  925   nr_pages = -nr_pages; /* for event */  in mem_cgroup_charge_statistics()
  928   __this_cpu_add(memcg->stat->nr_page_events, nr_pages);  in mem_cgroup_charge_statistics()
  1403  int nr_pages)  in mem_cgroup_update_lru_size() argument
  1413  *lru_size += nr_pages;  in mem_cgroup_update_lru_size()
  2297  unsigned int nr_pages;  member
  [all …]
mlock.c
  175   unsigned int nr_pages;  in munlock_vma_page() local
  188   nr_pages = hpage_nr_pages(page);  in munlock_vma_page()
  192   __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);  in munlock_vma_page()
  205   return nr_pages - 1;  in munlock_vma_page()
  231   unsigned long nr_pages = (end - start) / PAGE_SIZE;  in __mlock_vma_pages_range() local
  260   return __get_user_pages(current, mm, start, nr_pages, gup_flags,  in __mlock_vma_pages_range()
  560   int nr_pages;  in mlock_fixup() local
  593   nr_pages = (end - start) >> PAGE_SHIFT;  in mlock_fixup()
  595   nr_pages = -nr_pages;  in mlock_fixup()
  596   mm->locked_vm += nr_pages;  in mlock_fixup()
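In mlock_fixup() (lines 593-596) one accounting path handles both mlock and munlock: the page count of the range is computed once and negated when pages are being unlocked, then added to mm->locked_vm. A sketch, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed; 4 KiB pages */

    int main(void)
    {
        unsigned long start = 0x10000, end = 0x1a000;
        long locked_vm = 100;   /* pages already locked in this mm */
        int lock = 0;           /* 0 = the munlock path */

        long nr_pages = (end - start) >> PAGE_SHIFT;
        if (!lock)
            nr_pages = -nr_pages;   /* one statement serves both paths */
        locked_vm += nr_pages;
        printf("locked_vm is now %ld pages\n", locked_vm);   /* 100 - 10 = 90 */
        return 0;
    }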
hugetlb_cgroup.c
  165   int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup() argument
  171   unsigned long csize = nr_pages * PAGE_SIZE;  in hugetlb_cgroup_charge_cgroup()
  198   void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  in hugetlb_cgroup_commit_charge() argument
  212   void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,  in hugetlb_cgroup_uncharge_page() argument
  216   unsigned long csize = nr_pages * PAGE_SIZE;  in hugetlb_cgroup_uncharge_page()
  229   void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_uncharge_cgroup() argument
  232   unsigned long csize = nr_pages * PAGE_SIZE;  in hugetlb_cgroup_uncharge_cgroup()
memory-failure.c
  1041  int nr_pages = 1 << compound_order(hpage);  in set_page_hwpoison_huge_page() local
  1042  for (i = 0; i < nr_pages; i++)  in set_page_hwpoison_huge_page()
  1049  int nr_pages = 1 << compound_order(hpage);  in clear_page_hwpoison_huge_page() local
  1050  for (i = 0; i < nr_pages; i++)  in clear_page_hwpoison_huge_page()
  1078  unsigned int nr_pages;  in memory_failure() local
  1106  nr_pages = 1 << compound_order(hpage);  in memory_failure()
  1108  nr_pages = 1;  in memory_failure()
  1109  atomic_long_add(nr_pages, &num_poisoned_pages);  in memory_failure()
  1138  atomic_long_sub(nr_pages, &num_poisoned_pages);  in memory_failure()
  1209  atomic_long_sub(nr_pages, &num_poisoned_pages);  in memory_failure()
  [all …]
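The hwpoison helpers at lines 1041 and 1049 derive a huge page's base-page count from its compound order: an order-n compound page spans 2^n base pages. A sketch, assuming x86-64's 2 MiB huge page:

    #include <stdio.h>

    int main(void)
    {
        /* An x86-64 2 MiB huge page is a compound page of order 9. */
        unsigned int order = 9;              /* stand-in for compound_order(hpage) */
        unsigned int nr_pages = 1U << order; /* 512 base pages to mark/unmark */
        printf("order-%u huge page = %u base pages\n", order, nr_pages);
        return 0;
    }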
sparse.c
  198   unsigned long nr_pages = 0;  in node_memmap_size_bytes() local
  206   nr_pages += PAGES_PER_SECTION;  in node_memmap_size_bytes()
  209   return nr_pages * sizeof(struct page);  in node_memmap_size_bytes()
  659   unsigned long magic, nr_pages;  in free_map_bootmem() local
  662   nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))  in free_map_bootmem()
  665   for (i = 0; i < nr_pages; i++, page++) {  in free_map_bootmem()
  744   static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  in clear_hwpoisoned_pages() argument
  759   static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  in clear_hwpoisoned_pages() argument
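node_memmap_size_bytes() (lines 198-209) sizes a node's memmap by adding PAGES_PER_SECTION for each present section, then multiplying by sizeof(struct page). A sketch with assumed x86-64-like constants (both are configuration-dependent):

    #include <stdio.h>

    /* Assumed values, for illustration only. */
    #define PAGES_PER_SECTION (1UL << 15)   /* 128 MiB section / 4 KiB pages */
    #define STRUCT_PAGE_SIZE  64UL          /* typical sizeof(struct page) */

    int main(void)
    {
        unsigned long nr_pages = 0, present_sections = 4;
        nr_pages += present_sections * PAGES_PER_SECTION;
        printf("memmap needs %lu bytes for %lu pages\n",
               nr_pages * STRUCT_PAGE_SIZE, nr_pages);   /* 8 MiB */
        return 0;
    }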
page-writeback.c
  185   unsigned long nr_pages;  in zone_dirtyable_memory() local
  187   nr_pages = zone_page_state(zone, NR_FREE_PAGES);  in zone_dirtyable_memory()
  188   nr_pages -= min(nr_pages, zone->dirty_balance_reserve);  in zone_dirtyable_memory()
  190   nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);  in zone_dirtyable_memory()
  191   nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);  in zone_dirtyable_memory()
  193   return nr_pages;  in zone_dirtyable_memory()
  1668  int nr_pages = global_page_state(NR_FILE_DIRTY) +  in laptop_mode_timer_fn() local
  1676  bdi_start_writeback(&q->backing_dev_info, nr_pages,  in laptop_mode_timer_fn()
  1841  int nr_pages;  in write_cache_pages() local
  1877  nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,  in write_cache_pages()
  [all …]
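zone_dirtyable_memory() (lines 185-193) is free pages minus the dirty-balance reserve, plus the file-backed LRU pages; the min() on line 188 clamps the subtraction so an oversized reserve cannot underflow the unsigned count. A sketch with made-up page counts:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        /* Illustrative page counts, not real zone state. */
        unsigned long nr_free = 1000, reserve = 1500;
        unsigned long inactive_file = 4000, active_file = 2500;

        unsigned long nr_pages = nr_free;
        nr_pages -= min_ul(nr_pages, reserve);   /* clamp instead of underflowing */
        nr_pages += inactive_file + active_file;
        printf("dirtyable: %lu pages\n", nr_pages);   /* 6500 */
        return 0;
    }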
quicklist.c
  55    pages_to_free = q->nr_pages - max_pages(min_pages);  in min_pages_to_free()
  70    if (q->nr_pages > min_pages) {  in quicklist_trim()
  98    count += q->nr_pages;  in quicklist_total_size()
percpu-vm.c
  133   static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  in __pcpu_unmap_pages() argument
  135   unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);  in __pcpu_unmap_pages()
  192   int nr_pages)  in __pcpu_map_pages() argument
  194   return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,  in __pcpu_map_pages()
swapfile.c
  134   nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);  in discard_swap()
  145   nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);  in discard_swap()
  162   pgoff_t start_page, pgoff_t nr_pages)  in discard_swap_cluster() argument
  167   while (nr_pages) {  in discard_swap_cluster()
  171   start_page < se->start_page + se->nr_pages) {  in discard_swap_cluster()
  174   sector_t nr_blocks = se->nr_pages - offset;  in discard_swap_cluster()
  176   if (nr_blocks > nr_pages)  in discard_swap_cluster()
  177   nr_blocks = nr_pages;  in discard_swap_cluster()
  179   nr_pages -= nr_blocks;  in discard_swap_cluster()
  1639  offset < (se->start_page + se->nr_pages)) {  in map_swap_entry()
  [all …]
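Block discard works in 512-byte sectors, so discard_swap() converts page counts with a shift of PAGE_SHIFT - 9 (lines 134 and 145): with 4 KiB pages, one page is 8 sectors. A sketch:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed; 4 KiB pages */

    int main(void)
    {
        unsigned long long nr_pages = 16;
        unsigned long long nr_blocks = nr_pages << (PAGE_SHIFT - 9);
        printf("%llu pages = %llu 512-byte sectors\n",
               nr_pages, nr_blocks);   /* 16 pages = 128 sectors */
        return 0;
    }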
vmscan.c
  1321  int nr_pages;  in isolate_lru_pages() local
  1330  nr_pages = hpage_nr_pages(page);  in isolate_lru_pages()
  1331  mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);  in isolate_lru_pages()
  1333  nr_taken += nr_pages;  in isolate_lru_pages()
  1681  int nr_pages;  in move_active_pages_to_lru() local
  1690  nr_pages = hpage_nr_pages(page);  in move_active_pages_to_lru()
  1691  mem_cgroup_update_lru_size(lruvec, lru, nr_pages);  in move_active_pages_to_lru()
  1693  pgmoved += nr_pages;  in move_active_pages_to_lru()
  2801  unsigned long nr_pages,  in try_to_free_mem_cgroup_pages() argument
  2809  .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),  in try_to_free_mem_cgroup_pages()
  [all …]
hugetlb.c
  687   int nr_pages = 1 << order;  in destroy_compound_gigantic_page() local
  690   for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {  in destroy_compound_gigantic_page()
  706   unsigned long nr_pages)  in __alloc_gigantic_page() argument
  708   unsigned long end_pfn = start_pfn + nr_pages;  in __alloc_gigantic_page()
  713   unsigned long nr_pages)  in pfn_range_valid_gigantic() argument
  715   unsigned long i, end_pfn = start_pfn + nr_pages;  in pfn_range_valid_gigantic()
  738   unsigned long start_pfn, unsigned long nr_pages)  in zone_spans_last_pfn() argument
  740   unsigned long last_pfn = start_pfn + nr_pages - 1;  in zone_spans_last_pfn()
  746   unsigned long nr_pages = 1 << order;  in alloc_gigantic_page() local
  754   pfn = ALIGN(z->zone_start_pfn, nr_pages);  in alloc_gigantic_page()
  [all …]
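alloc_gigantic_page() (line 754) scans the zone in naturally aligned strides: ALIGN(pfn, nr_pages) rounds the candidate start pfn up to a multiple of the gigantic page's own page count, which works because nr_pages = 1 << order is a power of two. A sketch, assuming a 1 GiB page over 4 KiB base pages:

    #include <stdio.h>

    /* Power-of-two alignment, as in the kernel's ALIGN(). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long zone_start_pfn = 1000;
        unsigned long nr_pages = 1UL << 18;   /* 1 GiB / 4 KiB = 262144 pages */

        /* First naturally aligned candidate range at or above zone start. */
        unsigned long pfn = ALIGN(zone_start_pfn, nr_pages);
        printf("scan starts at pfn %lu\n", pfn);   /* 262144 */
        return 0;
    }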
swap.c
  1069  pgoff_t start, unsigned nr_pages,  in pagevec_lookup_entries() argument
  1072  pvec->nr = find_get_entries(mapping, start, nr_pages,  in pagevec_lookup_entries()
  1115  pgoff_t start, unsigned nr_pages)  in pagevec_lookup() argument
  1117  pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);  in pagevec_lookup()
  1123  pgoff_t *index, int tag, unsigned nr_pages)  in pagevec_lookup_tag() argument
  1126  nr_pages, pvec->pages);  in pagevec_lookup_tag()
filemap.c
  337   int nr_pages;  in filemap_fdatawait_range() local
  345   (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,  in filemap_fdatawait_range()
  350   for (i = 0; i < nr_pages; i++) {  in filemap_fdatawait_range()
  1221  unsigned int nr_pages, struct page **pages)  in find_get_pages() argument
  1227  if (unlikely(!nr_pages))  in find_get_pages()
  1267  if (++ret == nr_pages)  in find_get_pages()
  1288  unsigned int nr_pages, struct page **pages)  in find_get_pages_contig() argument
  1294  if (unlikely(!nr_pages))  in find_get_pages_contig()
  1344  if (++ret == nr_pages)  in find_get_pages_contig()
  1364  int tag, unsigned int nr_pages, struct page **pages)  in find_get_pages_tag() argument
  [all …]
internal.h
  259   int nr_pages = hpage_nr_pages(page);  in mlock_migrate_page() local
  262   __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in mlock_migrate_page()
  264   __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);  in mlock_migrate_page()
util.c
  209   int nr_pages, int write, struct page **pages)  in __get_user_pages_fast() argument
  240   int nr_pages, int write, struct page **pages)  in get_user_pages_fast() argument
  246   ret = get_user_pages(current, mm, start, nr_pages,  in get_user_pages_fast()
vmalloc.c
  1456  for (i = 0; i < area->nr_pages; i++) {  in __vunmap()
  1564  unsigned int nr_pages, array_size, i;  in __vmalloc_area_node() local
  1568  nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;  in __vmalloc_area_node()
  1569  array_size = (nr_pages * sizeof(struct page *));  in __vmalloc_area_node()
  1571  area->nr_pages = nr_pages;  in __vmalloc_area_node()
  1587  for (i = 0; i < area->nr_pages; i++) {  in __vmalloc_area_node()
  1597  area->nr_pages = i;  in __vmalloc_area_node()
  1612  (area->nr_pages*PAGE_SIZE), area->size);  in __vmalloc_area_node()
  2585  for (nr = 0; nr < v->nr_pages; nr++)  in show_numa_info()
  2614  if (v->nr_pages)  in s_show()
  [all …]
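__vmalloc_area_node() (lines 1568-1571) keeps one struct page pointer per backing page, so its bookkeeping array is nr_pages * sizeof(struct page *) bytes; line 1597 records how many pages were actually allocated when a partial failure forces cleanup. A sketch of the sizing, assuming 4 KiB pages:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12   /* assumed; 4 KiB pages */

    int main(void)
    {
        unsigned long area_size = 5UL << PAGE_SHIFT;   /* a 5-page vmalloc area */
        unsigned int nr_pages = area_size >> PAGE_SHIFT;

        /* One pointer per backing page, as in __vmalloc_area_node(). */
        size_t array_size = nr_pages * sizeof(void *);
        void **pages = calloc(nr_pages, sizeof(void *));

        printf("%u pages tracked via a %zu-byte pointer array\n",
               nr_pages, array_size);
        free(pages);
        return 0;
    }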
readahead.c
  112   struct list_head *pages, unsigned nr_pages)  in read_pages() argument
  121   ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);  in read_pages()
  127   for (page_idx = 0; page_idx < nr_pages; page_idx++) {  in read_pages()
shmem.c
  1635  unsigned int loff, nr_pages, req_pages;  in shmem_file_splice_read() local
  1665  nr_pages = min(req_pages, spd.nr_pages_max);  in shmem_file_splice_read()
  1667  spd.nr_pages = find_get_pages_contig(mapping, index,  in shmem_file_splice_read()
  1668  nr_pages, spd.pages);  in shmem_file_splice_read()
  1669  index += spd.nr_pages;  in shmem_file_splice_read()
  1672  while (spd.nr_pages < nr_pages) {  in shmem_file_splice_read()
  1677  spd.pages[spd.nr_pages++] = page;  in shmem_file_splice_read()
  1682  nr_pages = spd.nr_pages;  in shmem_file_splice_read()
  1683  spd.nr_pages = 0;  in shmem_file_splice_read()
  1685  for (page_nr = 0; page_nr < nr_pages; page_nr++) {  in shmem_file_splice_read()
  [all …]