Searched refs:nr_pages (Results 1 – 25 of 34) sorted by relevance


/mm/

memory_hotplug.c
227 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
231 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; in register_page_bootmem_info_node()
234 for (i = 0; i < nr_pages; i++, page++) in register_page_bootmem_info_node()
254 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, in check_pfn_span() argument
273 || !IS_ALIGNED(nr_pages, min_align)) { in check_pfn_span()
275 reason, pfn, pfn + nr_pages - 1); in check_pfn_span()
287 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
299 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
306 err = check_pfn_span(pfn, nr_pages, "add"); in __add_pages()
311 end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in __add_pages()
[all …]
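
The register_page_bootmem_info_node() hit above computes how many pages a kernel structure spans by rounding its size up to a page boundary and shifting right by PAGE_SHIFT. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages and a made-up size standing in for sizeof(struct pglist_data):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)                 /* 4 KiB, an assumption */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        size_t obj_size = 6200;    /* hypothetical stand-in for sizeof(struct pglist_data) */
        unsigned long nr_pages = PAGE_ALIGN(obj_size) >> PAGE_SHIFT;

        /* 6200 bytes round up to 8192, i.e. the object spans 2 pages */
        printf("nr_pages = %lu\n", nr_pages);
        return 0;
}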

page_counter.c
55 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) in page_counter_cancel() argument
59 new = atomic_long_sub_return(nr_pages, &counter->usage); in page_counter_cancel()
72 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) in page_counter_charge() argument
79 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_charge()
100 unsigned long nr_pages, in page_counter_try_charge() argument
121 new = atomic_long_add_return(nr_pages, &c->usage); in page_counter_try_charge()
123 atomic_long_sub(nr_pages, &c->usage); in page_counter_try_charge()
145 page_counter_cancel(c, nr_pages); in page_counter_try_charge()
155 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) in page_counter_uncharge() argument
160 page_counter_cancel(c, nr_pages); in page_counter_uncharge()
[all …]
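
The page_counter.c hits show the charge/uncharge primitives that memcg and the hugetlb controller build on: page_counter_try_charge() walks up the counter hierarchy and backs out (via page_counter_cancel()) if any ancestor is over its limit. A hedged sketch of how a caller pairs the two; the my_* names are hypothetical, only the page_counter calls come from the file above:

#include <linux/page_counter.h>
#include <linux/errno.h>

/* Hypothetical caller: charge nr_pages, fail cleanly if any level is over limit. */
static int my_try_account(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *over;    /* set to the counter that hit its limit */

        if (!page_counter_try_charge(counter, nr_pages, &over))
                return -ENOMEM;
        return 0;
}

/* Hypothetical caller: release exactly what my_try_account() charged. */
static void my_unaccount(struct page_counter *counter, unsigned long nr_pages)
{
        page_counter_uncharge(counter, nr_pages);
}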

sparse.c
223 unsigned long nr_pages) in subsection_mask_set() argument
226 int end = subsection_map_index(pfn + nr_pages - 1); in subsection_mask_set()
231 void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) in subsection_map_init() argument
233 int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); in subsection_map_init()
236 if (!nr_pages) in subsection_map_init()
243 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
253 nr_pages -= pfns; in subsection_map_init()
452 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
651 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in populate_section_memmap() argument
653 return __populate_section_memmap(pfn, nr_pages, nid, altmap); in populate_section_memmap()
[all …]
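
subsection_map_init() walks [pfn, pfn + nr_pages) one memory section at a time, clamping the last chunk so it never crosses a section boundary. A userspace sketch of that chunking loop, assuming the common 32768-page (128 MiB) section size; PAGES_PER_SECTION is architecture-dependent:

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL   /* assumed; varies by architecture */

int main(void)
{
        unsigned long pfn = 1000, nr_pages = 100000;

        while (nr_pages) {
                unsigned long room = PAGES_PER_SECTION - (pfn % PAGES_PER_SECTION);
                unsigned long pfns = nr_pages < room ? nr_pages : room;

                printf("section %lu: pfn %lu, %lu pages\n",
                       pfn / PAGES_PER_SECTION, pfn, pfns);
                pfn += pfns;
                nr_pages -= pfns;
        }
        return 0;
}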

percpu-km.c
49 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_create_chunk() local
59 pages = alloc_pages(gfp, order_base_2(nr_pages)); in pcpu_create_chunk()
65 for (i = 0; i < nr_pages; i++) in pcpu_create_chunk()
72 pcpu_chunk_populated(chunk, 0, nr_pages); in pcpu_create_chunk()
83 const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT; in pcpu_destroy_chunk() local
92 __free_pages(chunk->data, order_base_2(nr_pages)); in pcpu_destroy_chunk()
103 size_t nr_pages, alloc_pages; in pcpu_verify_alloc_info() local
111 nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT; in pcpu_verify_alloc_info()
112 alloc_pages = roundup_pow_of_two(nr_pages); in pcpu_verify_alloc_info()
114 if (alloc_pages > nr_pages) in pcpu_verify_alloc_info()
[all …]
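
percpu-km backs each chunk with a single high-order allocation, so pcpu_verify_alloc_info() warns when the group size is not a power-of-two number of pages: order_base_2() rounds up and the difference is simply wasted. A userspace sketch of that check, with a made-up group size and a stand-in for the kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long x)
{
        unsigned long r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned long nr_pages = 12;   /* hypothetical per-group size in pages */
        unsigned long alloc_pages = roundup_pow_of_two_ul(nr_pages);

        if (alloc_pages > nr_pages)
                printf("each chunk wastes %lu of %lu allocated pages\n",
                       alloc_pages - nr_pages, alloc_pages);
        return 0;
}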

memcontrol.c
586 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess() local
590 if (nr_pages > soft_limit) in soft_limit_excess()
591 excess = nr_pages - soft_limit; in soft_limit_excess()
834 bool compound, int nr_pages) in mem_cgroup_charge_statistics() argument
841 __mod_memcg_state(memcg, MEMCG_RSS, nr_pages); in mem_cgroup_charge_statistics()
843 __mod_memcg_state(memcg, MEMCG_CACHE, nr_pages); in mem_cgroup_charge_statistics()
845 __mod_memcg_state(memcg, NR_SHMEM, nr_pages); in mem_cgroup_charge_statistics()
850 __mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages); in mem_cgroup_charge_statistics()
854 if (nr_pages > 0) in mem_cgroup_charge_statistics()
858 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
[all …]

gup.c
780 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
788 if (!nr_pages) in __get_user_pages()
827 &start, &nr_pages, i, in __get_user_pages()
883 if (page_increm > nr_pages) in __get_user_pages()
884 page_increm = nr_pages; in __get_user_pages()
887 nr_pages -= page_increm; in __get_user_pages()
888 } while (nr_pages); in __get_user_pages()
1001 unsigned long nr_pages, in __get_user_pages_locked() argument
1023 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1032 BUG_ON(ret >= nr_pages); in __get_user_pages_locked()
[all …]
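
The __get_user_pages() hits show the core loop that consumes nr_pages in page_increm steps. Drivers normally reach it through the public get_user_pages() wrapper; below is a hedged sketch of such a caller, assuming the prototype and mmap_sem locking rules of roughly this kernel generation (both have changed across releases). pin_user_range() is a hypothetical name, not a kernel function:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: pin the pages backing [uaddr, uaddr + len) for writing.
 * The nr_pages arithmetic mirrors what several callers in this directory do. */
static long pin_user_range(unsigned long uaddr, size_t len, struct page **pages)
{
        unsigned long first = uaddr >> PAGE_SHIFT;
        unsigned long last  = (uaddr + len - 1) >> PAGE_SHIFT;
        unsigned long nr_pages = last - first + 1;
        long pinned;

        down_read(&current->mm->mmap_sem);          /* pre-5.8 naming; an assumption */
        pinned = get_user_pages(uaddr & PAGE_MASK, nr_pages, FOLL_WRITE,
                                pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (pinned < 0)
                return pinned;
        if (pinned != nr_pages) {
                /* partial pin: release what we got and report failure */
                while (pinned--)
                        put_page(pages[pinned]);
                return -EFAULT;
        }
        return nr_pages;
}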

hugetlb_cgroup.c
128 unsigned int nr_pages; in hugetlb_cgroup_move_parent() local
142 nr_pages = compound_nr(page); in hugetlb_cgroup_move_parent()
146 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
150 page_counter_cancel(counter, nr_pages); in hugetlb_cgroup_move_parent()
181 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_charge_cgroup() argument
205 if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter)) in hugetlb_cgroup_charge_cgroup()
214 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, in hugetlb_cgroup_commit_charge() argument
228 void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_page() argument
240 page_counter_uncharge(&h_cg->hugepage[idx], nr_pages); in hugetlb_cgroup_uncharge_page()
244 void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, in hugetlb_cgroup_uncharge_cgroup() argument
[all …]

page_ext.c
139 unsigned long nr_pages; in alloc_node_page_ext() local
141 nr_pages = NODE_DATA(nid)->node_spanned_pages; in alloc_node_page_ext()
142 if (!nr_pages) in alloc_node_page_ext()
152 nr_pages += MAX_ORDER_NR_PAGES; in alloc_node_page_ext()
154 table_size = page_ext_size * nr_pages; in alloc_node_page_ext()
287 unsigned long nr_pages, in online_page_ext() argument
294 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
322 unsigned long nr_pages, int nid) in offline_page_ext() argument
327 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
344 mn->nr_pages, mn->status_change_nid); in page_ext_callback()
[all …]

process_vm_access.c
84 unsigned long nr_pages; in process_vm_rw_single_vec() local
93 nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; in process_vm_rw_single_vec()
98 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec()
99 int pages = min(nr_pages, max_pages_per_loop); in process_vm_rw_single_vec()
125 nr_pages -= pages; in process_vm_rw_single_vec()
162 unsigned long nr_pages = 0; in process_vm_rw_core() local
178 nr_pages = max(nr_pages, nr_pages_iov); in process_vm_rw_core()
182 if (nr_pages == 0) in process_vm_rw_core()
185 if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { in process_vm_rw_core()
189 sizeof(struct pages *)*nr_pages), in process_vm_rw_core()
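
process_vm_rw_single_vec() computes nr_pages as the number of distinct pages the byte range [addr, addr + len) touches, which can be one more than len / PAGE_SIZE when the range straddles a page boundary. The same expression in a runnable userspace form, with a 4 KiB page size assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL   /* assumed page size */

int main(void)
{
        unsigned long addr = 0x1ffc;   /* hypothetical remote address */
        unsigned long len  = 8;        /* only 8 bytes ... */

        unsigned long nr_pages = (addr + len - 1) / PAGE_SIZE
                                 - addr / PAGE_SIZE + 1;

        /* ... but they straddle a page boundary, so two pages are touched */
        printf("nr_pages = %lu\n", nr_pages);
        return 0;
}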

page_isolation.c
40 arg.nr_pages = pageblock_nr_pages; in set_migratetype_isolate()
73 unsigned long nr_pages; in set_migratetype_isolate() local
78 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, in set_migratetype_isolate()
81 __mod_zone_freepage_state(zone, -nr_pages, mt); in set_migratetype_isolate()
93 unsigned long flags, nr_pages; in unset_migratetype_isolate() local
133 nr_pages = move_freepages_block(zone, page, migratetype, NULL); in unset_migratetype_isolate()
134 __mod_zone_freepage_state(zone, nr_pages, migratetype); in unset_migratetype_isolate()
147 __first_valid_page(unsigned long pfn, unsigned long nr_pages) in __first_valid_page() argument
151 for (i = 0; i < nr_pages; i++) { in __first_valid_page()

readahead.c
117 struct list_head *pages, unsigned int nr_pages, gfp_t gfp) in read_pages() argument
126 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); in read_pages()
132 for (page_idx = 0; page_idx < nr_pages; page_idx++) { in read_pages()
164 unsigned int nr_pages = 0; in __do_page_cache_readahead() local
189 if (nr_pages) in __do_page_cache_readahead()
190 read_pages(mapping, filp, &page_pool, nr_pages, in __do_page_cache_readahead()
192 nr_pages = 0; in __do_page_cache_readahead()
203 nr_pages++; in __do_page_cache_readahead()
211 if (nr_pages) in __do_page_cache_readahead()
212 read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask); in __do_page_cache_readahead()
[all …]

mlock.c
184 int nr_pages; in munlock_vma_page() local
201 nr_pages = 1; in munlock_vma_page()
205 nr_pages = hpage_nr_pages(page); in munlock_vma_page()
206 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in munlock_vma_page()
219 return nr_pages - 1; in munlock_vma_page()
524 int nr_pages; in mlock_fixup() local
560 nr_pages = (end - start) >> PAGE_SHIFT; in mlock_fixup()
562 nr_pages = -nr_pages; in mlock_fixup()
564 nr_pages = 0; in mlock_fixup()
565 mm->locked_vm += nr_pages; in mlock_fixup()

migrate.c
549 int nr_pages) in __copy_gigantic_page() argument
555 for (i = 0; i < nr_pages; ) { in __copy_gigantic_page()
568 int nr_pages; in copy_huge_page() local
573 nr_pages = pages_per_huge_page(h); in copy_huge_page()
575 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { in copy_huge_page()
576 __copy_gigantic_page(dst, src, nr_pages); in copy_huge_page()
582 nr_pages = hpage_nr_pages(src); in copy_huge_page()
585 for (i = 0; i < nr_pages; i++) { in copy_huge_page()
1595 unsigned long nr_pages, in do_pages_move() argument
1607 for (i = start = 0; i < nr_pages; i++) { in do_pages_move()
[all …]

page-writeback.c
279 unsigned long nr_pages = 0; in node_dirtyable_memory() local
288 nr_pages += zone_page_state(zone, NR_FREE_PAGES); in node_dirtyable_memory()
296 nr_pages -= min(nr_pages, pgdat->totalreserve_pages); in node_dirtyable_memory()
298 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE); in node_dirtyable_memory()
299 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE); in node_dirtyable_memory()
301 return nr_pages; in node_dirtyable_memory()
314 unsigned long nr_pages; in highmem_dirtyable_memory() local
323 nr_pages = zone_page_state(z, NR_FREE_PAGES); in highmem_dirtyable_memory()
325 nr_pages -= min(nr_pages, high_wmark_pages(z)); in highmem_dirtyable_memory()
326 nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE); in highmem_dirtyable_memory()
[all …]

vmscan.c
1140 unsigned int nr_pages; in shrink_page_list() local
1152 nr_pages = compound_nr(page); in shrink_page_list()
1155 sc->nr_scanned += nr_pages; in shrink_page_list()
1276 stat->nr_ref_keep += nr_pages; in shrink_page_list()
1338 if ((nr_pages > 1) && !PageTransHuge(page)) { in shrink_page_list()
1339 sc->nr_scanned -= (nr_pages - 1); in shrink_page_list()
1340 nr_pages = 1; in shrink_page_list()
1353 stat->nr_unmap_fail += nr_pages; in shrink_page_list()
1484 nr_reclaimed += nr_pages; in shrink_page_list()
1501 if (nr_pages > 1) { in shrink_page_list()
[all …]

gup_benchmark.c
26 unsigned long i, nr_pages, addr, next; in __gup_benchmark_ioctl() local
34 nr_pages = gup->size / PAGE_SIZE; in __gup_benchmark_ioctl()
35 pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL); in __gup_benchmark_ioctl()
82 for (i = 0; i < nr_pages; i++) { in __gup_benchmark_ioctl()
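
__gup_benchmark_ioctl() sizes its page-pointer array from the requested byte count and allocates it with kvcalloc(), which falls back to vmalloc when the array is too large for kmalloc. A hedged kernel-style sketch of that sizing pattern; alloc_page_array() is a hypothetical name:

#include <linux/mm.h>

/* Hypothetical helper: allocate a zeroed page-pointer array covering `size` bytes.
 * Pair with kvfree(pages) when done. */
static struct page **alloc_page_array(unsigned long size, unsigned long *nr_pages)
{
        *nr_pages = size / PAGE_SIZE;
        return kvcalloc(*nr_pages, sizeof(struct page *), GFP_KERNEL);
}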

sparse-vmemmap.c
249 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
259 end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION); in __populate_section_memmap()
261 nr_pages = end - pfn; in __populate_section_memmap()
264 end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()

percpu-vm.c
134 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) in __pcpu_unmap_pages() argument
136 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); in __pcpu_unmap_pages()
193 int nr_pages) in __pcpu_map_pages() argument
195 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, in __pcpu_map_pages()

percpu-internal.h
58 int nr_pages; /* # of pages served by this chunk */ member
82 return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE; in pcpu_chunk_nr_blocks()
106 return pcpu_nr_pages_to_map_bits(chunk->nr_pages); in pcpu_chunk_map_bits()

vmalloc.c
2175 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2208 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2253 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2259 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2401 unsigned int nr_pages, array_size, i; in __vmalloc_area_node() local
2408 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
2409 array_size = (nr_pages * sizeof(struct page *)); in __vmalloc_area_node()
2426 area->nr_pages = nr_pages; in __vmalloc_area_node()
2428 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2438 area->nr_pages = i; in __vmalloc_area_node()
[all …]
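
__vmalloc_area_node() derives nr_pages from the area size and then sizes a per-page metadata array (one struct page pointer per page) from it; when that array itself exceeds a page, the kernel allocates it with vmalloc recursively. A small userspace sketch of the sizing arithmetic, with an assumed 4 KiB page size and a made-up area size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* assumed */

int main(void)
{
        unsigned long area_size  = 10UL * 1024 * 1024;         /* hypothetical 10 MiB area */
        unsigned long nr_pages   = area_size >> PAGE_SHIFT;     /* 2560 pages */
        unsigned long array_size = nr_pages * sizeof(void *);   /* one pointer per page */

        printf("%lu pages, %lu-byte page array (%s one page)\n",
               nr_pages, array_size,
               array_size > PAGE_SIZE ? "larger than" : "fits in");
        return 0;
}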

hugetlb.c
1054 int nr_pages = 1 << order; in destroy_compound_gigantic_page() local
1058 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in destroy_compound_gigantic_page()
1074 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_gigantic_page() argument
1076 unsigned long end_pfn = start_pfn + nr_pages; in __alloc_gigantic_page()
1082 unsigned long start_pfn, unsigned long nr_pages) in pfn_range_valid_gigantic() argument
1084 unsigned long i, end_pfn = start_pfn + nr_pages; in pfn_range_valid_gigantic()
1109 unsigned long start_pfn, unsigned long nr_pages) in zone_spans_last_pfn() argument
1111 unsigned long last_pfn = start_pfn + nr_pages - 1; in zone_spans_last_pfn()
1119 unsigned long nr_pages = 1 << order; in alloc_gigantic_page() local
1129 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_gigantic_page()
[all …]
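
alloc_gigantic_page() scans each zone in steps of nr_pages = 1 << order, starting from the zone's first pfn rounded up to a gigantic-page boundary with ALIGN(). The rounding in a runnable form, using the x86-64 1 GiB case (order 18 with 4 KiB pages) as an assumed example:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned int order = 18;                    /* 1 GiB with 4 KiB pages; assumed */
        unsigned long nr_pages = 1UL << order;      /* 262144 pfns per gigantic page */
        unsigned long zone_start_pfn = 0x10123;     /* hypothetical unaligned zone start */

        unsigned long pfn = ALIGN(zone_start_pfn, nr_pages);
        printf("first candidate pfn: %#lx\n", pfn); /* 0x40000 */
        return 0;
}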

filemap.c
509 int nr_pages; in __filemap_fdatawait_range() local
518 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
520 if (!nr_pages) in __filemap_fdatawait_range()
523 for (i = 0; i < nr_pages; i++) { in __filemap_fdatawait_range()
1792 pgoff_t end, unsigned int nr_pages, in find_get_pages_range() argument
1799 if (unlikely(!nr_pages)) in find_get_pages_range()
1818 if (++ret == nr_pages) { in find_get_pages_range()
1858 unsigned int nr_pages, struct page **pages) in find_get_pages_contig() argument
1864 if (unlikely(!nr_pages)) in find_get_pages_contig()
1886 if (++ret == nr_pages) in find_get_pages_contig()
[all …]

internal.h
330 int nr_pages = hpage_nr_pages(page); in mlock_migrate_page() local
333 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in mlock_migrate_page()
335 __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages); in mlock_migrate_page()

swapfile.c
181 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); in discard_swap()
192 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); in discard_swap()
215 else if (offset >= se->start_page + se->nr_pages) in offset_to_swap_extent()
229 pgoff_t start_page, pgoff_t nr_pages) in discard_swap_cluster() argument
233 while (nr_pages) { in discard_swap_cluster()
236 sector_t nr_blocks = se->nr_pages - offset; in discard_swap_cluster()
238 if (nr_blocks > nr_pages) in discard_swap_cluster()
239 nr_blocks = nr_pages; in discard_swap_cluster()
241 nr_pages -= nr_blocks; in discard_swap_cluster()
2312 unsigned long nr_pages, sector_t start_block) in add_swap_extent() argument
[all …]
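
discard_swap() and discard_swap_cluster() convert page counts to 512-byte sector counts by shifting left by PAGE_SHIFT - 9, i.e. 8 sectors per page with 4 KiB pages. The conversion in isolation, with the usual values assumed:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, assumed */

int main(void)
{
        unsigned long nr_pages = 1024;   /* hypothetical extent length in pages */
        unsigned long long nr_blocks = (unsigned long long)nr_pages
                                       << (PAGE_SHIFT - 9);

        printf("%lu pages = %llu 512-byte sectors\n", nr_pages, nr_blocks);
        return 0;
}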

page_alloc.c
692 int nr_pages = 1 << order; in prep_compound_page() local
697 for (i = 1; i < nr_pages; i++) { in prep_compound_page()
1444 unsigned int nr_pages = 1 << order; in __free_pages_core() local
1449 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1457 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1583 unsigned long nr_pages) in deferred_free_range() argument
1588 if (!nr_pages) in deferred_free_range()
1594 if (nr_pages == pageblock_nr_pages && in deferred_free_range()
1601 for (i = 0; i < nr_pages; i++, page++, pfn++) { in deferred_free_range()
1674 unsigned long nr_pages = 0; in deferred_init_pages() local
[all …]
