
Searched refs:nr_pages (Results 1 – 25 of 41) sorted by relevance


/mm/
memory_hotplug.c
224 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, in check_pfn_span() argument
243 || !IS_ALIGNED(nr_pages, min_align)) { in check_pfn_span()
245 reason, pfn, pfn + nr_pages - 1); in check_pfn_span()
305 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
308 const unsigned long end_pfn = pfn + nr_pages; in __add_pages()
316 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); in __add_pages()
323 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
330 err = check_pfn_span(pfn, nr_pages, "add"); in __add_pages()
463 unsigned long nr_pages) in remove_pfn_range_from_zone() argument
465 const unsigned long end_pfn = start_pfn + nr_pages; in remove_pfn_range_from_zone()
[all …]
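
A minimal userspace sketch of the rule check_pfn_span() enforces above: both the start pfn and nr_pages must be aligned to a minimum granularity before a range may be added. SUBSECTION_PAGES and the sample values are illustrative, not the kernel's constants.

#include <stdbool.h>
#include <stdio.h>

#define SUBSECTION_PAGES 2048UL         /* assumed minimum alignment, in pages */

static bool span_ok(unsigned long pfn, unsigned long nr_pages)
{
        if (pfn % SUBSECTION_PAGES || nr_pages % SUBSECTION_PAGES) {
                fprintf(stderr, "misaligned span: pfn %lu..%lu\n",
                        pfn, pfn + nr_pages - 1);
                return false;
        }
        return true;
}

int main(void)
{
        printf("%d\n", span_ok(2048, 4096));    /* 1: both aligned     */
        printf("%d\n", span_ok(100, 4096));     /* 0: misaligned start */
        return 0;
}
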
page_counter.c
51 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) in page_counter_cancel() argument
55 new = atomic_long_sub_return(nr_pages, &counter->usage); in page_counter_cancel()
58 new, nr_pages)) { in page_counter_cancel()
72 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) in page_counter_charge() argument
79 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_charge()
101 unsigned long nr_pages, in page_counter_try_charge() argument
122 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_try_charge()
124 atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
148 page_counter_cancel(c, nr_pages); in page_counter_try_charge()
158 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) in page_counter_uncharge() argument
[all …]
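
The page_counter.c matches above show the charge / try-charge / cancel trio. The sketch below is a single-level userspace model of the try-charge-with-rollback pattern, built on C11 atomics rather than the kernel's atomic_long_t; the struct layout is invented for illustration and the walk over parent counters is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page_counter {
        atomic_long usage;              /* pages currently charged */
        long max;                       /* hard limit              */
};

/* Speculatively add nr_pages, then roll back if the limit is exceeded,
 * mirroring the shape of page_counter_try_charge(). */
static bool try_charge(struct page_counter *c, long nr_pages)
{
        long new = atomic_fetch_add(&c->usage, nr_pages) + nr_pages;

        if (new > c->max) {
                atomic_fetch_sub(&c->usage, nr_pages);  /* undo, like page_counter_cancel() */
                return false;
        }
        return true;
}

static void uncharge(struct page_counter *c, long nr_pages)
{
        atomic_fetch_sub(&c->usage, nr_pages);
}

int main(void)
{
        struct page_counter c = { .usage = 0, .max = 100 };

        printf("charge 60: %d\n", try_charge(&c, 60));  /* succeeds */
        printf("charge 60: %d\n", try_charge(&c, 60));  /* fails    */
        uncharge(&c, 60);
        return 0;
}
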
sparse.c
186 unsigned long nr_pages) in subsection_mask_set() argument
189 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
194 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
196 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
199 if (!nr_pages) in subsection_map_init()
206 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
216 nr_pages -= pfns; in subsection_map_init()
220 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
430 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
632 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in populate_section_memmap() argument
[all …]
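
subsection_map_init() above walks a pfn range one memory section at a time, clamping the first chunk to the section boundary and decrementing nr_pages as it goes. A standalone sketch of that chunking loop; PAGES_PER_SECTION is assumed to be 32768 (128 MiB sections with 4 KiB pages) and the printf stands in for the real subsection bitmap update.

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL       /* assumed; 128 MiB / 4 KiB */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static void walk_sections(unsigned long pfn, unsigned long nr_pages)
{
        while (nr_pages) {
                /* clamp to the end of the current section */
                unsigned long pfns = min_ul(nr_pages,
                        PAGES_PER_SECTION - (pfn & (PAGES_PER_SECTION - 1)));

                printf("section %lu: pfn %lu, %lu pages\n",
                       pfn / PAGES_PER_SECTION, pfn, pfns);
                pfn += pfns;
                nr_pages -= pfns;
        }
}

int main(void)
{
        walk_sections(30000, 40000);    /* straddles two section boundaries */
        return 0;
}
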
hugetlb_cgroup.c
164 unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
178 nr_pages = compound_nr(page); in hugetlb_cgroup_move_parent()
182 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
186 page_counter_cancel(counter, nr_pages); in hugetlb_cgroup_move_parent()
231 static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in __hugetlb_cgroup_charge_cgroup() argument
258 nr_pages, &counter)) { in __hugetlb_cgroup_charge_cgroup()
274 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
277 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false); in hugetlb_cgroup_charge_cgroup()
280 int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup_rsvd() argument
283 return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true); in hugetlb_cgroup_charge_cgroup_rsvd()
[all …]
gup_test.c
10 unsigned long nr_pages, unsigned int gup_test_flags) in put_back_pages() argument
17 for (i = 0; i < nr_pages; i++) in put_back_pages()
24 unpin_user_pages(pages, nr_pages); in put_back_pages()
28 unpin_user_pages(pages, nr_pages); in put_back_pages()
30 for (i = 0; i < nr_pages; i++) in put_back_pages()
39 unsigned long nr_pages) in verify_dma_pinned() argument
48 for (i = 0; i < nr_pages; i++) { in verify_dma_pinned()
68 unsigned long nr_pages) in dump_pages_test() argument
78 if (gup->which_pages[i] > nr_pages) { in dump_pages_test()
102 unsigned long i, nr_pages, addr, next; in __gup_test_ioctl() local
[all …]
percpu-km.c
55 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
65 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
71 for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
78 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
89 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
98 __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
109 size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
117 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
118 alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
120 if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
[all …]
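
percpu-km backs each chunk with a single high-order allocation, which is why pcpu_create_chunk() uses order_base_2(nr_pages) and pcpu_verify_alloc_info() compares roundup_pow_of_two(nr_pages) against nr_pages to spot waste. A userspace sketch of that rounding arithmetic; round_up_pow2() is a stand-in for the kernel helper, not the kernel code.

#include <stdio.h>

/* smallest power of two >= n, for n > 0 */
static unsigned long round_up_pow2(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long nr_pages = 12;                    /* pages the chunk needs */
        unsigned long alloc_pages = round_up_pow2(nr_pages);

        printf("need %lu pages, allocate %lu, waste %lu\n",
               nr_pages, alloc_pages, alloc_pages - nr_pages);
        return 0;
}
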
gup.c
1119 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
1127 if (!nr_pages) in __get_user_pages()
1170 &start, &nr_pages, i, in __get_user_pages()
1234 if (page_increm > nr_pages) in __get_user_pages()
1235 page_increm = nr_pages; in __get_user_pages()
1238 nr_pages -= page_increm; in __get_user_pages()
1239 } while (nr_pages); in __get_user_pages()
1348 unsigned long nr_pages, in __get_user_pages_locked() argument
1382 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1391 BUG_ON(ret >= nr_pages); in __get_user_pages_locked()
[all …]
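
__get_user_pages() above drains nr_pages in per-iteration increments (page_increm), clamping the final step and looping until the request is satisfied. A sketch of that consumption loop; the fixed 16-page chunk and the printf "pin" are invented for illustration, the real pinning logic lives in the kernel.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void gup_loop(unsigned long start, unsigned long nr_pages)
{
        unsigned long addr = start;

        do {
                /* pretend each fault covers a 16-page chunk (illustrative) */
                unsigned long page_increm = 16;

                if (page_increm > nr_pages)
                        page_increm = nr_pages;         /* clamp the last step */

                printf("pinned %lu pages at 0x%lx\n", page_increm, addr);
                addr += page_increm * PAGE_SIZE;
                nr_pages -= page_increm;
        } while (nr_pages);
}

int main(void)
{
        gup_loop(0x7f0000000000UL, 40);                 /* 16 + 16 + 8 */
        return 0;
}
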
memcontrol.c
269 unsigned int nr_pages);
275 unsigned int nr_pages; in obj_cgroup_release() local
300 nr_pages = nr_bytes >> PAGE_SHIFT; in obj_cgroup_release()
302 if (nr_pages) in obj_cgroup_release()
303 obj_cgroup_uncharge_pages(objcg, nr_pages); in obj_cgroup_release()
540 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess() local
544 if (nr_pages > soft_limit) in soft_limit_excess()
545 excess = nr_pages - soft_limit; in soft_limit_excess()
873 int nr_pages) in mem_cgroup_charge_statistics() argument
876 if (nr_pages > 0) in mem_cgroup_charge_statistics()
[all …]
mlock.c
62 int nr_pages; in clear_page_mlock() local
67 nr_pages = thp_nr_pages(page); in clear_page_mlock()
68 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in clear_page_mlock()
69 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); in clear_page_mlock()
83 count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); in clear_page_mlock()
100 int nr_pages = thp_nr_pages(page); in mlock_vma_page() local
102 mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in mlock_vma_page()
103 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); in mlock_vma_page()
142 int nr_pages = thp_nr_pages(page); in __munlock_isolation_failed() local
145 __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); in __munlock_isolation_failed()
[all …]
swap.c
252 void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages) in lru_note_cost() argument
267 lruvec->file_cost += nr_pages; in lru_note_cost()
269 lruvec->anon_cost += nr_pages; in lru_note_cost()
301 int nr_pages = thp_nr_pages(page); in __activate_page() local
308 __count_vm_events(PGACTIVATE, nr_pages); in __activate_page()
310 nr_pages); in __activate_page()
520 int nr_pages = thp_nr_pages(page); in lru_cache_add_inactive_or_unevictable() local
526 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in lru_cache_add_inactive_or_unevictable()
527 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); in lru_cache_add_inactive_or_unevictable()
556 int nr_pages = thp_nr_pages(page); in lru_deactivate_file_fn() local
[all …]
process_vm_access.c
80 unsigned long nr_pages; in process_vm_rw_single_vec() local
89 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
94 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
95 int pinned_pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec()
122 nr_pages -= pinned_pages; in process_vm_rw_single_vec()
161 unsigned long nr_pages = 0; in process_vm_rw_core() local
177 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
181 if (nr_pages == 0) in process_vm_rw_core()
184 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
188 sizeof(struct pages *)*nr_pages), in process_vm_rw_core()
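
The page-span arithmetic in process_vm_rw_single_vec() above computes how many pages the byte range [addr, addr + len) touches: the page index of the last byte minus the page index of the first byte, plus one. A small demo of the formula (len must be non-zero for it to hold):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long span_pages(unsigned long addr, unsigned long len)
{
        return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
}

int main(void)
{
        printf("%lu\n", span_pages(0, 1));              /* 1: one byte, one page */
        printf("%lu\n", span_pages(4095, 2));           /* 2: straddles a page   */
        printf("%lu\n", span_pages(8192, 4096));        /* 1: exactly one page   */
        return 0;
}
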
page_ext.c
208 unsigned long nr_pages; in alloc_node_page_ext() local
210 nr_pages = NODE_DATA(nid)->node_spanned_pages; in alloc_node_page_ext()
211 if (!nr_pages) in alloc_node_page_ext()
221 nr_pages += MAX_ORDER_NR_PAGES; in alloc_node_page_ext()
223 table_size = page_ext_size * nr_pages; in alloc_node_page_ext()
384 unsigned long nr_pages, in online_page_ext() argument
391 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
416 unsigned long nr_pages, int nid) in offline_page_ext() argument
421 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
452 mn->nr_pages, mn->status_change_nid); in page_ext_callback()
[all …]
page_isolation.c
43 unsigned long nr_pages; in set_migratetype_isolate() local
48 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
51 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
71 unsigned long flags, nr_pages; in unset_migratetype_isolate() local
115 nr_pages = move_freepages_block(zone, page, migratetype, NULL); in unset_migratetype_isolate()
116 __mod_zone_freepage_state(zone, nr_pages, migratetype); in unset_migratetype_isolate()
127 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
131 for (i = 0; i < nr_pages; i++) { in __first_valid_page()
cma.h
44 void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
45 void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
48 unsigned long nr_pages) {}; in cma_sysfs_account_success_pages() argument
50 unsigned long nr_pages) {}; in cma_sysfs_account_fail_pages() argument
migrate.c
1769 int start, int i, unsigned long nr_pages) in move_pages_and_store_status() argument
1787 err += nr_pages - i - 1; in move_pages_and_store_status()
1798 unsigned long nr_pages, in do_pages_move() argument
1811 for (i = start = 0; i < nr_pages; i++) { in do_pages_move()
1847 &pagelist, status, start, i, nr_pages); in do_pages_move()
1875 status, start, i, nr_pages); in do_pages_move()
1883 status, start, i, nr_pages); in do_pages_move()
1894 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, in do_pages_stat_array() argument
1901 for (i = 0; i < nr_pages; i++) { in do_pages_stat_array()
1950 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, in do_pages_stat() argument
[all …]
page-writeback.c
273 unsigned long nr_pages = 0; in node_dirtyable_memory() local
282 nr_pages += zone_page_state(zone, NR_FREE_PAGES); in node_dirtyable_memory()
290 nr_pages -= min(nr_pages, pgdat->totalreserve_pages); in node_dirtyable_memory()
292 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE); in node_dirtyable_memory()
293 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE); in node_dirtyable_memory()
295 return nr_pages; in node_dirtyable_memory()
308 unsigned long nr_pages; in highmem_dirtyable_memory() local
317 nr_pages = zone_page_state(z, NR_FREE_PAGES); in highmem_dirtyable_memory()
319 nr_pages -= min(nr_pages, high_wmark_pages(z)); in highmem_dirtyable_memory()
320 nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE); in highmem_dirtyable_memory()
[all …]
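
node_dirtyable_memory() above sums free pages and the file LRU, subtracting the reserve with a min() clamp because the counters are unsigned and sampled racily, so a plain subtraction could wrap. A sketch of that underflow-safe accounting with made-up counter values:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static unsigned long dirtyable(unsigned long free, unsigned long inactive_file,
                               unsigned long active_file, unsigned long reserve)
{
        unsigned long nr_pages = free;

        nr_pages -= min_ul(nr_pages, reserve);  /* clamp: never wraps below zero */
        nr_pages += inactive_file + active_file;
        return nr_pages;
}

int main(void)
{
        printf("%lu\n", dirtyable(1000, 500, 300, 200)); /* 1600 */
        printf("%lu\n", dirtyable(100, 500, 300, 200));  /* 800: reserve clamped */
        return 0;
}
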
cma_sysfs.c
17 void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages) in cma_sysfs_account_success_pages() argument
19 atomic64_add(nr_pages, &cma->nr_pages_succeeded); in cma_sysfs_account_success_pages()
22 void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages) in cma_sysfs_account_fail_pages() argument
24 atomic64_add(nr_pages, &cma->nr_pages_failed); in cma_sysfs_account_fail_pages()
percpu-vm.c
135 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) in __pcpu_unmap_pages() argument
137 vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); in __pcpu_unmap_pages()
194 int nr_pages) in __pcpu_map_pages() argument
196 return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), in __pcpu_map_pages()
409 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4)); in pcpu_should_reclaim_chunk()
vmscan.c
1432 unsigned int nr_pages; in shrink_page_list() local
1444 nr_pages = compound_nr(page); in shrink_page_list()
1447 sc->nr_scanned += nr_pages; in shrink_page_list()
1573 stat->nr_ref_keep += nr_pages; in shrink_page_list()
1648 if ((nr_pages > 1) && !PageTransHuge(page)) { in shrink_page_list()
1649 sc->nr_scanned -= (nr_pages - 1); in shrink_page_list()
1650 nr_pages = 1; in shrink_page_list()
1667 stat->nr_unmap_fail += nr_pages; in shrink_page_list()
1669 stat->nr_lazyfree_fail += nr_pages; in shrink_page_list()
1808 nr_reclaimed += nr_pages; in shrink_page_list()
[all …]
percpu-internal.h
63 int nr_pages; /* # of pages served by this chunk */ member
89 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE; in pcpu_chunk_nr_blocks()
113 return pcpu_nr_pages_to_map_bits(chunk->nr_pages); in pcpu_chunk_map_bits()
bootmem_info.c
105 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
109 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; in register_page_bootmem_info_node()
112 for (i = 0; i < nr_pages; i++, page++) in register_page_bootmem_info_node()
vmalloc.c
2559 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2593 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_remove_mappings()
2648 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in __vunmap()
2655 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2794 area->nr_pages = count; in vmap()
2851 unsigned int order, unsigned int nr_pages, struct page **pages) in vm_area_alloc_pages() argument
2864 while (nr_allocated < nr_pages) { in vm_area_alloc_pages()
2873 nr_pages_request = min(100U, nr_pages - nr_allocated); in vm_area_alloc_pages()
2897 while (nr_allocated < nr_pages) { in vm_area_alloc_pages()
2955 area->nr_pages = vm_area_alloc_pages(gfp_mask, node, in __vmalloc_area_node()
[all …]
page_alloc.c
833 int nr_pages = 1 << order; in prep_compound_page() local
836 for (i = 1; i < nr_pages; i++) { in prep_compound_page()
1792 unsigned int nr_pages = 1 << order; in __free_pages_core() local
1802 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1810 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1951 unsigned long nr_pages) in deferred_free_range() argument
1956 if (!nr_pages) in deferred_free_range()
1962 if (nr_pages == pageblock_nr_pages && in deferred_free_range()
1969 for (i = 0; i < nr_pages; i++, page++, pfn++) { in deferred_free_range()
2039 unsigned long nr_pages = 0; in deferred_init_pages() local
[all …]
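
Several of the page_alloc.c matches rely on the same identity: an order-N block covers 1 << N pages, and per-page setup in prep_compound_page() starts at tail page 1. A trivial demo of that arithmetic; the loop body is only a placeholder for the real per-tail-page initialisation.

#include <stdio.h>

int main(void)
{
        unsigned int order = 4;
        unsigned int nr_pages = 1U << order;    /* 16 pages for order 4 */
        unsigned int i, tails = 0;

        for (i = 1; i < nr_pages; i++)          /* tail pages only */
                tails++;                        /* per-tail setup would go here */

        printf("order %u -> %u pages, %u tail pages\n", order, nr_pages, tails);
        return 0;
}
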
sparse-vmemmap.c
333 unsigned long nr_pages = (end - start) >> PAGE_SHIFT; in alloc_vmemmap_page_list() local
337 while (nr_pages--) { in alloc_vmemmap_page_list()
606 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
609 unsigned long end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()
612 !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION))) in __populate_section_memmap()
hugetlb_vmemmap.c
269 unsigned int nr_pages = pages_per_huge_page(h); in hugetlb_vmemmap_init() local
283 vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT; in hugetlb_vmemmap_init()
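
hugetlb_vmemmap_init() and __populate_section_memmap() above both size the memmap as nr_pages * sizeof(struct page). A quick worked example of that overhead; sizeof(struct page) is taken as 64 bytes here, which is typical on 64-bit configurations but not guaranteed.

#include <stdio.h>

#define PAGE_SHIFT       12
#define STRUCT_PAGE_SIZE 64UL   /* assumed; depends on kernel config */

int main(void)
{
        unsigned long nr_pages = 512;   /* a 2 MiB hugetlb page on x86-64 */
        unsigned long vmemmap_bytes = nr_pages * STRUCT_PAGE_SIZE;
        unsigned long vmemmap_pages = vmemmap_bytes >> PAGE_SHIFT;

        printf("%lu base pages need %lu bytes (%lu pages) of vmemmap\n",
               nr_pages, vmemmap_bytes, vmemmap_pages);  /* 32768 bytes, 8 pages */
        return 0;
}
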
