/mm/
D | memory_hotplug.c |
    233  unsigned long i, pfn, end_pfn, nr_pages;  in register_page_bootmem_info_node() local
    237  nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;  in register_page_bootmem_info_node()
    240  for (i = 0; i < nr_pages; i++, page++)  in register_page_bootmem_info_node()
    260  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,  in check_pfn_span() argument
    279  || !IS_ALIGNED(nr_pages, min_align)) {  in check_pfn_span()
    281  reason, pfn, pfn + nr_pages - 1);  in check_pfn_span()
    288  unsigned long nr_pages)  in check_hotplug_memory_addressable() argument
    290  const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;  in check_hotplug_memory_addressable()
    309  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,  in __add_pages() argument
    312  const unsigned long end_pfn = pfn + nr_pages;  in __add_pages()
    [all …]
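check_pfn_span() above rejects hot-added ranges that do not cover whole subsections. Below is a minimal userspace model of that alignment test, assuming x86-64 defaults (4 KiB pages, 2 MiB subsections, hence a 512-page minimum alignment); in the kernel, min_align can be larger when a vmem_altmap is involved:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGES_PER_SUBSECTION 512UL   /* 2 MiB / 4 KiB, assumed */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    static bool check_pfn_span(unsigned long pfn, unsigned long nr_pages)
    {
            unsigned long min_align = PAGES_PER_SUBSECTION;

            if (!IS_ALIGNED(pfn, min_align) || !IS_ALIGNED(nr_pages, min_align)) {
                    fprintf(stderr, "misaligned range [%lx-%lx]\n",
                            pfn, pfn + nr_pages - 1);
                    return false;
            }
            return true;
    }

    int main(void)
    {
            /* an aligned 2 MiB multiple passes; an odd range is rejected */
            printf("%d %d\n", check_pfn_span(512, 1024), check_pfn_span(100, 50));
            return 0;
    }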
D | page_counter.c |
     50  void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)  in page_counter_cancel() argument
     54  new = atomic_long_sub_return(nr_pages, &counter->usage);  in page_counter_cancel()
     67  void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_charge() argument
     74  new = atomic_long_add_return(nr_pages, &c->usage);  in page_counter_charge()
     95  unsigned long nr_pages,  in page_counter_try_charge() argument
    116  new = atomic_long_add_return(nr_pages, &c->usage);  in page_counter_try_charge()
    118  atomic_long_sub(nr_pages, &c->usage);  in page_counter_try_charge()
    141  page_counter_cancel(c, nr_pages);  in page_counter_try_charge()
    151  void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)  in page_counter_uncharge()
    156  page_counter_cancel(c, nr_pages);  in page_counter_uncharge()
    [all …]
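Lines 116 and 118 are the heart of page_counter_try_charge(): charge optimistically with an atomic add, then subtract again if the new usage went over the limit. A self-contained userspace model of that lock-free charge-with-rollback using C11 atomics (names mirror the kernel, but this is a sketch, not the kernel code, and it ignores the hierarchy walk):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct page_counter {
            atomic_long usage;
            long max;               /* limit, in pages */
    };

    static bool try_charge(struct page_counter *c, long nr_pages)
    {
            long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

            if (new > c->max) {
                    /* over the limit: undo the speculative charge */
                    atomic_fetch_sub(&c->usage, nr_pages);
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct page_counter c = { .usage = 0, .max = 100 };

            /* first charge fits (80 <= 100); the second (80 + 40) rolls back */
            return !(try_charge(&c, 80) && !try_charge(&c, 40));
    }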
D | sparse.c |
    212  unsigned long nr_pages)  in subsection_mask_set() argument
    215  int end = subsection_map_index(pfn + nr_pages - 1);  in subsection_mask_set()
    220  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  in subsection_map_init() argument
    222  int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);  in subsection_map_init()
    225  if (!nr_pages)  in subsection_map_init()
    232  pfns = min(nr_pages, PAGES_PER_SECTION  in subsection_map_init()
    242  nr_pages -= pfns;  in subsection_map_init()
    246  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  in subsection_map_init() argument
    446  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)  in __populate_section_memmap() argument
    652  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)  in populate_section_memmap() argument
    [all …]
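subsection_map_index(), used at line 215, maps a pfn to a bit in the per-section subsection bitmap. A userspace model of that index arithmetic, with constants assumed from x86-64 defaults (128 MiB sections, 2 MiB subsections, 4 KiB pages):

    #include <stdio.h>

    #define PAGES_PER_SECTION    (1UL << 15)   /* 128 MiB / 4 KiB, assumed */
    #define PAGES_PER_SUBSECTION (1UL << 9)    /*   2 MiB / 4 KiB, assumed */

    static unsigned long subsection_map_index(unsigned long pfn)
    {
            /* offset within the section, in units of subsections */
            return (pfn % PAGES_PER_SECTION) / PAGES_PER_SUBSECTION;
    }

    int main(void)
    {
            unsigned long pfn = 0x12345;

            printf("pfn %#lx -> subsection bit %lu\n",
                   pfn, subsection_map_index(pfn));
            return 0;
    }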
D | hugetlb_cgroup.c |
    164  unsigned int nr_pages;  in hugetlb_cgroup_move_parent() local
    178  nr_pages = compound_nr(page);  in hugetlb_cgroup_move_parent()
    182  page_counter_charge(&parent->hugepage[idx], nr_pages);  in hugetlb_cgroup_move_parent()
    186  page_counter_cancel(counter, nr_pages);  in hugetlb_cgroup_move_parent()
    231  static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in __hugetlb_cgroup_charge_cgroup() argument
    258  nr_pages, &counter)) {  in __hugetlb_cgroup_charge_cgroup()
    274  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup() argument
    277  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);  in hugetlb_cgroup_charge_cgroup()
    280  int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,  in hugetlb_cgroup_charge_cgroup_rsvd() argument
    283  return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);  in hugetlb_cgroup_charge_cgroup_rsvd()
    [all …]
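The hugetlb_cgroup_move_parent() excerpts (lines 178-186) show the reparenting order: charge the parent first, then cancel the child, so the pages stay accounted somewhere at every instant. Condensed from the lines above (page_counter_charge() cannot fail, which is what makes this ordering safe):

    nr_pages = compound_nr(page);           /* base pages in this hugepage */
    page_counter_charge(&parent->hugepage[idx], nr_pages);
    page_counter_cancel(counter, nr_pages); /* counter == child's hugepage[idx] */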
D | percpu-km.c |
     50  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_create_chunk() local
     60  pages = alloc_pages(gfp, order_base_2(nr_pages));  in pcpu_create_chunk()
     66  for (i = 0; i < nr_pages; i++)  in pcpu_create_chunk()
     73  pcpu_chunk_populated(chunk, 0, nr_pages);  in pcpu_create_chunk()
     84  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  in pcpu_destroy_chunk() local
     93  __free_pages(chunk->data, order_base_2(nr_pages));  in pcpu_destroy_chunk()
    104  size_t nr_pages, alloc_pages;  in pcpu_verify_alloc_info() local
    112  nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;  in pcpu_verify_alloc_info()
    113  alloc_pages = roundup_pow_of_two(nr_pages);  in pcpu_verify_alloc_info()
    115  if (alloc_pages > nr_pages)  in pcpu_verify_alloc_info()
    [all …]
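percpu-km backs each chunk with one physically contiguous high-order allocation, so nr_pages is rounded up to a power of two (line 113) and the difference is simply wasted. A userspace model of that waste check:

    #include <stdio.h>

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned long nr_pages = 12;   /* illustrative group size */
            unsigned long alloc_pages = roundup_pow_of_two(nr_pages);

            if (alloc_pages > nr_pages)
                    printf("wasting %lu pages per chunk\n",
                           alloc_pages - nr_pages);
            return 0;
    }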
D | gup.c |
    1062  unsigned long start, unsigned long nr_pages,  in __get_user_pages() argument
    1070  if (!nr_pages)  in __get_user_pages()
    1113  &start, &nr_pages, i,  in __get_user_pages()
    1178  if (page_increm > nr_pages)  in __get_user_pages()
    1179  page_increm = nr_pages;  in __get_user_pages()
    1182  nr_pages -= page_increm;  in __get_user_pages()
    1183  } while (nr_pages);  in __get_user_pages()
    1293  unsigned long nr_pages,  in __get_user_pages_locked() argument
    1327  ret = __get_user_pages(mm, start, nr_pages, flags, pages,  in __get_user_pages_locked()
    1336  BUG_ON(ret >= nr_pages);  in __get_user_pages_locked()
    [all …]
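__get_user_pages() above is the engine behind the get/pin_user_pages() family. A hedged kernel-style sketch of the usual driver-side pinning pattern (pin_user_buffer is a hypothetical helper; this assumes the v5.10-era pin_user_pages() signature with a trailing vmas argument, and must run in the process context that owns the mapping):

    static long pin_user_buffer(unsigned long start, unsigned long nr_pages)
    {
            struct page **pages;
            long pinned;

            pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages(start, nr_pages,
                                    FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
            if (pinned > 0) {
                    /* ... set up DMA against the pinned pages ... */
                    unpin_user_pages(pages, pinned);
            }
            kvfree(pages);
            return pinned;
    }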
D | mlock.c |
     62  int nr_pages;  in clear_page_mlock() local
     67  nr_pages = thp_nr_pages(page);  in clear_page_mlock()
     68  mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);  in clear_page_mlock()
     69  count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);  in clear_page_mlock()
     83  count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);  in clear_page_mlock()
    100  int nr_pages = thp_nr_pages(page);  in mlock_vma_page() local
    102  mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);  in mlock_vma_page()
    103  count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);  in mlock_vma_page()
    162  int nr_pages = thp_nr_pages(page);  in __munlock_isolation_failed() local
    165  __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);  in __munlock_isolation_failed()
    [all …]
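The NR_MLOCK accounting above can be observed from userspace: locking a buffer moves its pages to the unevictable LRU and bumps the Mlocked counter in /proc/meminfo. A small runnable demonstration (may require CAP_IPC_LOCK or a raised ulimit -l):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4 << 20;           /* 4 MiB */
            void *buf = malloc(len);

            if (!buf || mlock(buf, len)) {
                    perror("mlock");
                    return 1;
            }
            printf("locked %zu bytes; see Mlocked: in /proc/meminfo\n", len);
            munlock(buf, len);
            free(buf);
            return 0;
    }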
D | gup_benchmark.c |
     25  unsigned long nr_pages)  in put_back_pages() argument
     32  for (i = 0; i < nr_pages; i++)  in put_back_pages()
     39  unpin_user_pages(pages, nr_pages);  in put_back_pages()
     45  unsigned long nr_pages)  in verify_dma_pinned() argument
     54  for (i = 0; i < nr_pages; i++) {  in verify_dma_pinned()
     71  unsigned long i, nr_pages, addr, next;  in __gup_benchmark_ioctl() local
     81  nr_pages = gup->size / PAGE_SIZE;  in __gup_benchmark_ioctl()
     82  pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);  in __gup_benchmark_ioctl()
    141  nr_pages = i;  in __gup_benchmark_ioctl()
    150  verify_dma_pinned(cmd, pages, nr_pages);  in __gup_benchmark_ioctl()
    [all …]
D | memcontrol.c |
    262  unsigned int nr_pages;  in obj_cgroup_release() local
    287  nr_pages = nr_bytes >> PAGE_SHIFT;  in obj_cgroup_release()
    291  if (nr_pages)  in obj_cgroup_release()
    292  __memcg_kmem_uncharge(memcg, nr_pages);  in obj_cgroup_release()
    671  unsigned long nr_pages = page_counter_read(&memcg->memory);  in soft_limit_excess() local
    675  if (nr_pages > soft_limit)  in soft_limit_excess()
    676  excess = nr_pages - soft_limit;  in soft_limit_excess()
    946  int nr_pages)  in mem_cgroup_charge_statistics() argument
    949  if (nr_pages > 0)  in mem_cgroup_charge_statistics()
    953  nr_pages = -nr_pages; /* for event */  in mem_cgroup_charge_statistics()
    [all …]
D | process_vm_access.c |
     81  unsigned long nr_pages;  in process_vm_rw_single_vec() local
     90  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;  in process_vm_rw_single_vec()
     95  while (!rc && nr_pages && iov_iter_count(iter)) {  in process_vm_rw_single_vec()
     96  int pinned_pages = min(nr_pages, max_pages_per_loop);  in process_vm_rw_single_vec()
    123  nr_pages -= pinned_pages;  in process_vm_rw_single_vec()
    162  unsigned long nr_pages = 0;  in process_vm_rw_core() local
    178  nr_pages = max(nr_pages, nr_pages_iov);  in process_vm_rw_core()
    182  if (nr_pages == 0)  in process_vm_rw_core()
    185  if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {  in process_vm_rw_core()
    189  sizeof(struct pages *)*nr_pages),  in process_vm_rw_core()
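Line 90 is the standard page-span formula: the number of pages touched by [addr, addr+len) is the last page index minus the first, plus one. The syscall this file implements is easy to exercise; a runnable example that reads its own address space (pid = getpid(), so no ptrace permission is needed):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            char src[] = "hello from the same process";
            char dst[sizeof(src)];
            struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
            struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };
            ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);

            if (n < 0) {
                    perror("process_vm_readv");
                    return 1;
            }
            printf("copied %zd bytes: %s\n", n, dst);
            return 0;
    }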
D | page_ext.c |
    201  unsigned long nr_pages;  in alloc_node_page_ext() local
    203  nr_pages = NODE_DATA(nid)->node_spanned_pages;  in alloc_node_page_ext()
    204  if (!nr_pages)  in alloc_node_page_ext()
    214  nr_pages += MAX_ORDER_NR_PAGES;  in alloc_node_page_ext()
    216  table_size = page_ext_size * nr_pages;  in alloc_node_page_ext()
    378  unsigned long nr_pages,  in online_page_ext() argument
    385  end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in online_page_ext()
    410  unsigned long nr_pages, int nid)  in offline_page_ext() argument
    415  end = SECTION_ALIGN_UP(start_pfn + nr_pages);  in offline_page_ext()
    446  mn->nr_pages, mn->status_change_nid);  in page_ext_callback()
    [all …]
D | swap.c |
    295  void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)  in lru_note_cost() argument
    302  lruvec->file_cost += nr_pages;  in lru_note_cost()
    304  lruvec->anon_cost += nr_pages;  in lru_note_cost()
    337  int nr_pages = thp_nr_pages(page);  in __activate_page() local
    345  __count_vm_events(PGACTIVATE, nr_pages);  in __activate_page()
    347  nr_pages);  in __activate_page()
    510  int nr_pages = thp_nr_pages(page);  in __lru_cache_add_inactive_or_unevictable() local
    516  __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);  in __lru_cache_add_inactive_or_unevictable()
    517  count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);  in __lru_cache_add_inactive_or_unevictable()
    548  int nr_pages = thp_nr_pages(page);  in lru_deactivate_file_fn() local
    [all …]
D | page_isolation.c |
     42  unsigned long nr_pages;  in set_migratetype_isolate() local
     47  nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,  in set_migratetype_isolate()
     50  __mod_zone_freepage_state(zone, -nr_pages, mt);  in set_migratetype_isolate()
     70  unsigned long flags, nr_pages;  in unset_migratetype_isolate() local
    115  nr_pages = move_freepages_block(zone, page, migratetype, NULL);  in unset_migratetype_isolate()
    116  __mod_zone_freepage_state(zone, nr_pages, migratetype);  in unset_migratetype_isolate()
    127  __first_valid_page(unsigned long pfn, unsigned long nr_pages)  in __first_valid_page() argument
    131  for (i = 0; i < nr_pages; i++) {  in __first_valid_page()
D | migrate.c |
     542  int nr_pages)  in __copy_gigantic_page() argument
     548  for (i = 0; i < nr_pages; ) {  in __copy_gigantic_page()
     561  int nr_pages;  in copy_huge_page() local
     566  nr_pages = pages_per_huge_page(h);  in copy_huge_page()
     568  if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {  in copy_huge_page()
     569  __copy_gigantic_page(dst, src, nr_pages);  in copy_huge_page()
     575  nr_pages = thp_nr_pages(src);  in copy_huge_page()
     578  for (i = 0; i < nr_pages; i++) {  in copy_huge_page()
    1677  int start, int i, unsigned long nr_pages)  in move_pages_and_store_status() argument
    1695  err += nr_pages - i - 1;  in move_pages_and_store_status()
    [all …]
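copy_huge_page() (line 568) falls back to __copy_gigantic_page() when the page exceeds MAX_ORDER_NR_PAGES. A kernel-style sketch of that chunked copy; the real loop walks struct pages with mem_map_offset()/mem_map_next() because a gigantic page's struct pages need not be virtually contiguous, which this sketch glosses over:

    static void copy_gigantic_sketch(struct page *dst, struct page *src,
                                     int nr_pages)
    {
            int i;

            for (i = 0; i < nr_pages; i++) {
                    cond_resched();         /* keep scheduling latency bounded */
                    copy_highpage(dst + i, src + i);
            }
    }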
D | cma.h |
     46  void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
     47  void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
     50  unsigned long nr_pages) {};  in cma_sysfs_account_success_pages() argument
     52  unsigned long nr_pages) {};  in cma_sysfs_account_fail_pages() argument
D | vmscan.c |
    1142  unsigned int nr_pages;  in shrink_page_list() local
    1154  nr_pages = compound_nr(page);  in shrink_page_list()
    1157  sc->nr_scanned += nr_pages;  in shrink_page_list()
    1278  stat->nr_ref_keep += nr_pages;  in shrink_page_list()
    1342  if ((nr_pages > 1) && !PageTransHuge(page)) {  in shrink_page_list()
    1343  sc->nr_scanned -= (nr_pages - 1);  in shrink_page_list()
    1344  nr_pages = 1;  in shrink_page_list()
    1360  stat->nr_unmap_fail += nr_pages;  in shrink_page_list()
    1362  stat->nr_lazyfree_fail += nr_pages;  in shrink_page_list()
    1497  nr_reclaimed += nr_pages;  in shrink_page_list()
    [all …]
D | page-writeback.c |
    282  unsigned long nr_pages = 0;  in node_dirtyable_memory() local
    291  nr_pages += zone_page_state(zone, NR_FREE_PAGES);  in node_dirtyable_memory()
    299  nr_pages -= min(nr_pages, pgdat->totalreserve_pages);  in node_dirtyable_memory()
    301  nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);  in node_dirtyable_memory()
    302  nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);  in node_dirtyable_memory()
    304  return nr_pages;  in node_dirtyable_memory()
    317  unsigned long nr_pages;  in highmem_dirtyable_memory() local
    326  nr_pages = zone_page_state(z, NR_FREE_PAGES);  in highmem_dirtyable_memory()
    328  nr_pages -= min(nr_pages, high_wmark_pages(z));  in highmem_dirtyable_memory()
    329  nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);  in highmem_dirtyable_memory()
    [all …]
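node_dirtyable_memory() (lines 282-304) estimates how much memory could hold dirty page cache: free pages plus the file LRUs, minus the kernel's reserves, with the subtraction clamped so it cannot underflow. The same arithmetic as a plain function:

    /* Userspace restatement of node_dirtyable_memory()'s arithmetic. */
    static unsigned long dirtyable(unsigned long free_pages,
                                   unsigned long reserve,
                                   unsigned long inactive_file,
                                   unsigned long active_file)
    {
            unsigned long nr_pages = free_pages;

            nr_pages -= reserve < nr_pages ? reserve : nr_pages;  /* clamp */
            return nr_pages + inactive_file + active_file;
    }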
D | cma_sysfs.c |
     20  void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages)  in cma_sysfs_account_success_pages() argument
     22  atomic64_add(nr_pages, &cma->nr_pages_succeeded);  in cma_sysfs_account_success_pages()
     25  void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages)  in cma_sysfs_account_fail_pages() argument
     27  atomic64_add(nr_pages, &cma->nr_pages_failed);  in cma_sysfs_account_fail_pages()
D | percpu-vm.c |
    134  static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  in __pcpu_unmap_pages() argument
    136  unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);  in __pcpu_unmap_pages()
    193  int nr_pages)  in __pcpu_map_pages() argument
    195  return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,  in __pcpu_map_pages()
D | percpu-internal.h |
     80  int nr_pages; /* # of pages served by this chunk */  member
    104  return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;  in pcpu_chunk_nr_blocks()
    128  return pcpu_nr_pages_to_map_bits(chunk->nr_pages);  in pcpu_chunk_map_bits()
D | page_alloc.c |
     723  int nr_pages = 1 << order;  in prep_compound_page() local
     726  for (i = 1; i < nr_pages; i++) {  in prep_compound_page()
    1630  unsigned int nr_pages = 1 << order;  in __free_pages_core() local
    1640  for (loop = 0; loop < (nr_pages - 1); loop++, p++) {  in __free_pages_core()
    1648  atomic_long_add(nr_pages, &page_zone(page)->managed_pages);  in __free_pages_core()
    1783  unsigned long nr_pages)  in deferred_free_range() argument
    1788  if (!nr_pages)  in deferred_free_range()
    1794  if (nr_pages == pageblock_nr_pages &&  in deferred_free_range()
    1801  for (i = 0; i < nr_pages; i++, page++, pfn++) {  in deferred_free_range()
    1873  unsigned long nr_pages = 0;  in deferred_init_pages() local
    [all …]
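deferred_free_range() (lines 1788-1801) has two paths: a run that is exactly one aligned pageblock is freed as a single high-order page, everything else one page at a time. A condensed kernel-style sketch of that branch (simplified from the excerpts; the real loop also re-marks migratetypes at each pageblock boundary):

    if (nr_pages == pageblock_nr_pages &&
        !(pfn & (pageblock_nr_pages - 1))) {
            set_pageblock_migratetype(page, MIGRATE_MOVABLE);
            __free_pages_core(page, pageblock_order);
            return;
    }

    for (i = 0; i < nr_pages; i++, page++, pfn++)
            __free_pages_core(page, 0);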
D | vmalloc.c |
    2196  for (i = 0; i < area->nr_pages; i++)  in set_area_direct_map()
    2229  for (i = 0; i < area->nr_pages; i++) {  in vm_remove_mappings()
    2276  for (i = 0; i < area->nr_pages; i++) {  in __vunmap()
    2282  atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);  in __vunmap()
    2420  area->nr_pages = count;  in vmap()
    2479  unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;  in __vmalloc_area_node() local
    2480  unsigned int array_size = nr_pages * sizeof(struct page *), i;  in __vmalloc_area_node()
    2502  area->nr_pages = nr_pages;  in __vmalloc_area_node()
    2504  for (i = 0; i < area->nr_pages; i++) {  in __vmalloc_area_node()
    2514  area->nr_pages = i;  in __vmalloc_area_node()
    [all …]
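__vmalloc_area_node() (lines 2479-2514) sizes a struct page array of nr_pages entries and fills it one order-0 allocation at a time; __vunmap() later walks and frees the same array. From a caller's point of view all of that machinery reduces to the usual pair (nr_entries is illustrative):

    u32 *table = vmalloc(nr_entries * sizeof(*table));

    if (!table)
            return -ENOMEM;
    /* ... large, virtually (not physically) contiguous scratch space ... */
    vfree(table);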
D | sparse-vmemmap.c |
    252  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)  in __populate_section_memmap() argument
    255  unsigned long end = start + nr_pages * sizeof(struct page);  in __populate_section_memmap()
    258  !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))  in __populate_section_memmap()
D | swapfile.c |
     182  nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);  in discard_swap()
     193  nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);  in discard_swap()
     216  else if (offset >= se->start_page + se->nr_pages)  in offset_to_swap_extent()
     243  pgoff_t start_page, pgoff_t nr_pages)  in discard_swap_cluster() argument
     247  while (nr_pages) {  in discard_swap_cluster()
     250  sector_t nr_blocks = se->nr_pages - offset;  in discard_swap_cluster()
     252  if (nr_blocks > nr_pages)  in discard_swap_cluster()
     253  nr_blocks = nr_pages;  in discard_swap_cluster()
     255  nr_pages -= nr_blocks;  in discard_swap_cluster()
    2388  unsigned long nr_pages, sector_t start_block)  in add_swap_extent() argument
    [all …]
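discard_swap() converts swap extents from pages to 512-byte block-layer sectors, so one page is 1 << (PAGE_SHIFT - 9) sectors. A runnable restatement of that conversion, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

    int main(void)
    {
            unsigned long nr_pages = 1024;
            unsigned long long nr_blocks =
                    (unsigned long long)nr_pages << (PAGE_SHIFT - 9);

            /* 8 sectors per page: 1024 pages -> 8192 sectors */
            printf("%lu pages = %llu sectors\n", nr_pages, nr_blocks);
            return 0;
    }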
D | filemap.c |
     515  int nr_pages;  in __filemap_fdatawait_range() local
     524  nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,  in __filemap_fdatawait_range()
     526  if (!nr_pages)  in __filemap_fdatawait_range()
     529  for (i = 0; i < nr_pages; i++) {  in __filemap_fdatawait_range()
    1981  pgoff_t end, unsigned int nr_pages,  in find_get_pages_range() argument
    1988  if (unlikely(!nr_pages))  in find_get_pages_range()
    2007  if (++ret == nr_pages) {  in find_get_pages_range()
    2047  unsigned int nr_pages, struct page **pages)  in find_get_pages_contig() argument
    2053  if (unlikely(!nr_pages))  in find_get_pages_contig()
    2075  if (++ret == nr_pages)  in find_get_pages_contig()
    [all …]
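__filemap_fdatawait_range() (lines 515-529) shows the canonical tagged pagevec walk: look up at most a pagevec's worth of PAGECACHE_TAG_WRITEBACK pages, process them, release the batch, and repeat until the lookup runs dry. A kernel-style sketch of that loop (v5.10-era pagevec API; error handling trimmed):

    static void wait_writeback_sketch(struct address_space *mapping,
                                      pgoff_t index, pgoff_t end)
    {
            struct pagevec pvec;

            pagevec_init(&pvec);
            while (index <= end) {
                    int i, nr_pages;

                    nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                            end, PAGECACHE_TAG_WRITEBACK);
                    if (!nr_pages)
                            break;
                    for (i = 0; i < nr_pages; i++)
                            wait_on_page_writeback(pvec.pages[i]);
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }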