/mm/
D | memory_hotplug.c
    185  static void register_page_bootmem_info_section(unsigned long start_pfn)  [argument]
    191      section_nr = pfn_to_section_nr(start_pfn);  [in register_page_bootmem_info_section()]
    219  static void register_page_bootmem_info_section(unsigned long start_pfn)  [argument]
    225      if (!pfn_valid(start_pfn))  [in register_page_bootmem_info_section()]
    228      section_nr = pfn_to_section_nr(start_pfn);  [in register_page_bootmem_info_section()]
    288  static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,  [argument]
    296      if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)  [in grow_zone_span()]
    297          zone->zone_start_pfn = start_pfn;  [in grow_zone_span()]
    305  static void resize_zone(struct zone *zone, unsigned long start_pfn,  [argument]
    310      if (end_pfn - start_pfn) {  [in resize_zone()]
    [all …]

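The pfn_to_section_nr() calls above are plain shift arithmetic: a section number is the pfn divided by the pages per memory section. A minimal userspace sketch, assuming the common x86_64 values SECTION_SIZE_BITS = 27 and PAGE_SHIFT = 12 (these constants and the sample pfn are illustrative, not taken from this file):

    #include <stdio.h>

    /* Assumed layout: 128 MiB sections (SECTION_SIZE_BITS = 27) and
     * 4 KiB pages (PAGE_SHIFT = 12), as on default x86_64 configs. */
    #define SECTION_SIZE_BITS  27
    #define PAGE_SHIFT         12
    #define PFN_SECTION_SHIFT  (SECTION_SIZE_BITS - PAGE_SHIFT)

    static unsigned long pfn_to_section_nr(unsigned long pfn)
    {
        return pfn >> PFN_SECTION_SHIFT;
    }

    int main(void)
    {
        unsigned long start_pfn = 0x48000;  /* frame of byte 1.125 GiB with 4 KiB pages */

        printf("pfn %#lx -> section %lu\n", start_pfn,
               pfn_to_section_nr(start_pfn));  /* prints section 9 */
        return 0;
    }
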
D | page_isolation.c
     25      arg.start_pfn = pfn;  [in set_migratetype_isolate()]
    156  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,  [argument]
    163      BUG_ON((start_pfn) & (pageblock_nr_pages - 1));  [in start_isolate_page_range()]
    166      for (pfn = start_pfn;  [in start_isolate_page_range()]
    178      for (pfn = start_pfn;  [in start_isolate_page_range()]
    189  int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,  [argument]
    194      BUG_ON((start_pfn) & (pageblock_nr_pages - 1));  [in undo_isolate_page_range()]
    196      for (pfn = start_pfn;  [in undo_isolate_page_range()]
    261  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,  [argument]
    274      for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  [in test_pages_isolated()]
    [all …]

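start_isolate_page_range() and undo_isolate_page_range() both BUG_ON() a start pfn that is not pageblock-aligned, so callers must widen their range first. A sketch of that rounding, assuming pageblock_nr_pages = 512 (2 MiB blocks with 4 KiB pages); align_to_pageblocks() is a hypothetical helper, not a kernel API:

    #include <assert.h>
    #include <stdio.h>

    #define pageblock_nr_pages 512UL  /* assumed: 2 MiB blocks, 4 KiB pages */

    /* Hypothetical helper: widen [*start_pfn, *end_pfn) to pageblock
     * boundaries so the isolate entry points' alignment BUG_ON()s
     * cannot fire. */
    static void align_to_pageblocks(unsigned long *start_pfn,
                                    unsigned long *end_pfn)
    {
        *start_pfn &= ~(pageblock_nr_pages - 1);           /* round down */
        *end_pfn = (*end_pfn + pageblock_nr_pages - 1) &
                   ~(pageblock_nr_pages - 1);              /* round up */
    }

    int main(void)
    {
        unsigned long start = 1000, end = 5000;

        align_to_pageblocks(&start, &end);
        assert(!(start & (pageblock_nr_pages - 1)));  /* mirrors the BUG_ON() */
        printf("aligned range: [%lu, %lu)\n", start, end);  /* [512, 5120) */
        return 0;
    }
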
D | page_cgroup.c
    192  static int __meminit online_page_cgroup(unsigned long start_pfn,  [argument]
    199      start = SECTION_ALIGN_DOWN(start_pfn);  [in online_page_cgroup()]
    200      end = SECTION_ALIGN_UP(start_pfn + nr_pages);  [in online_page_cgroup()]
    208      nid = pfn_to_nid(start_pfn);  [in online_page_cgroup()]
    227  static int __meminit offline_page_cgroup(unsigned long start_pfn,  [argument]
    232      start = SECTION_ALIGN_DOWN(start_pfn);  [in offline_page_cgroup()]
    233      end = SECTION_ALIGN_UP(start_pfn + nr_pages);  [in offline_page_cgroup()]
    248      ret = online_page_cgroup(mn->start_pfn,  [in page_cgroup_callback()]
    252      offline_page_cgroup(mn->start_pfn,  [in page_cgroup_callback()]
    256      offline_page_cgroup(mn->start_pfn,  [in page_cgroup_callback()]
    [all …]

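online_page_cgroup() and offline_page_cgroup() widen the hotplugged range to whole sections with SECTION_ALIGN_DOWN()/SECTION_ALIGN_UP(), so page_cgroup data is always handled section by section. A standalone sketch of that rounding, assuming 128 MiB sections and 4 KiB pages (PAGES_PER_SECTION = 32768); the sample range is made up:

    #include <stdio.h>

    #define PFN_SECTION_SHIFT 15  /* assumed: 128 MiB sections, 4 KiB pages */
    #define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

    /* Section-granular rounding in the style of include/linux/mmzone.h. */
    #define SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(PAGES_PER_SECTION - 1))
    #define SECTION_ALIGN_UP(pfn) \
            (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

    int main(void)
    {
        unsigned long start_pfn = 0x9000, nr_pages = 0x2000;  /* sample only */

        printf("sections cover [%#lx, %#lx)\n",
               SECTION_ALIGN_DOWN(start_pfn),
               SECTION_ALIGN_UP(start_pfn + nr_pages));  /* [0x8000, 0x10000) */
        return 0;
    }
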
D | page_alloc.c
     267  unsigned long sp, start_pfn;  [local in page_outside_zone_boundaries()]
     271      start_pfn = zone->zone_start_pfn;  [in page_outside_zone_boundaries()]
     280          start_pfn, start_pfn + sp);  [in page_outside_zone_boundaries()]
    1073  unsigned long start_pfn, end_pfn;  [local in move_freepages_block()]
    1076      start_pfn = page_to_pfn(page);  [in move_freepages_block()]
    1077      start_pfn = start_pfn & ~(pageblock_nr_pages-1);  [in move_freepages_block()]
    1078      start_page = pfn_to_page(start_pfn);  [in move_freepages_block()]
    1080      end_pfn = start_pfn + pageblock_nr_pages - 1;  [in move_freepages_block()]
    1083      if (!zone_spans_pfn(zone, start_pfn))  [in move_freepages_block()]
    4017  static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)  [argument]
    [all …]

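page_outside_zone_boundaries() checks a pfn against the half-open span [zone_start_pfn, zone_start_pfn + spanned_pages). A sketch of that containment test; struct zone_bounds is a stripped-down stand-in for the two struct zone fields involved, and the sample numbers are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the fields page_outside_zone_boundaries() reads. */
    struct zone_bounds {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    /* True when pfn falls outside [zone_start_pfn,
     * zone_start_pfn + spanned_pages). */
    static bool pfn_outside_zone(const struct zone_bounds *z, unsigned long pfn)
    {
        unsigned long start_pfn = z->zone_start_pfn;
        unsigned long sp = z->spanned_pages;

        return pfn < start_pfn || pfn >= start_pfn + sp;
    }

    int main(void)
    {
        struct zone_bounds z = { .zone_start_pfn = 0x1000,
                                 .spanned_pages = 0x4000 };

        printf("0x0fff outside: %d\n", pfn_outside_zone(&z, 0x0fff)); /* 1 */
        printf("0x3000 outside: %d\n", pfn_outside_zone(&z, 0x3000)); /* 0 */
        return 0;
    }
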
D | compaction.c
     87  static struct page *pageblock_pfn_to_page(unsigned long start_pfn,  [argument]
     96      if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))  [in pageblock_pfn_to_page()]
     99      start_page = pfn_to_page(start_pfn);  [in pageblock_pfn_to_page()]
    131  unsigned long start_pfn = zone->zone_start_pfn;  [local in __reset_isolation_suitable()]
    135      zone->compact_cached_migrate_pfn[0] = start_pfn;  [in __reset_isolation_suitable()]
    136      zone->compact_cached_migrate_pfn[1] = start_pfn;  [in __reset_isolation_suitable()]
    141      for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  [in __reset_isolation_suitable()]
    340      unsigned long *start_pfn,  [argument in isolate_freepages_block()]
    349      unsigned long blockpfn = *start_pfn;  [in isolate_freepages_block()]
    460      *start_pfn = blockpfn;  [in isolate_freepages_block()]
    [all …]

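__reset_isolation_suitable() walks the zone's pfn span one pageblock at a time by striding pfn += pageblock_nr_pages, the same pattern seen in test_pages_isolated() and pagetypeinfo_showblockcount_print() elsewhere in this listing. A sketch of that iteration; the 512-page block size and the callback shape are assumptions for illustration:

    #include <stdio.h>

    #define pageblock_nr_pages 512UL  /* assumed: 2 MiB blocks, 4 KiB pages */

    /* Visit the first pfn of each pageblock in [start_pfn, end_pfn). */
    static void for_each_pageblock(unsigned long start_pfn,
                                   unsigned long end_pfn,
                                   void (*fn)(unsigned long))
    {
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages)
            fn(pfn);
    }

    static void show(unsigned long pfn)
    {
        printf("pageblock at pfn %#lx\n", pfn);
    }

    int main(void)
    {
        for_each_pageblock(0x0, 0x800, show);  /* four 512-page blocks */
        return 0;
    }
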
D | sparse.c
    144  void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,  [argument]
    153      if (*start_pfn > max_sparsemem_pfn) {  [in mminit_validate_memmodel_limits()]
    156          *start_pfn, *end_pfn, max_sparsemem_pfn);  [in mminit_validate_memmodel_limits()]
    158      *start_pfn = max_sparsemem_pfn;  [in mminit_validate_memmodel_limits()]
    163          *start_pfn, *end_pfn, max_sparsemem_pfn);  [in mminit_validate_memmodel_limits()]
    194  unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,  [argument]
    200      mminit_validate_memmodel_limits(&start_pfn, &end_pfn);  [in node_memmap_size_bytes()]
    201      for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {  [in node_memmap_size_bytes()]
    693  int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)  [argument]
    695      unsigned long section_nr = pfn_to_section_nr(start_pfn);  [in sparse_add_one_section()]
    [all …]

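mminit_validate_memmodel_limits() takes its pfns by pointer so it can clamp, rather than reject, a range that runs past what SPARSEMEM can address. A sketch of that clamping, assuming a 64-bit unsigned long and an illustrative 46-bit physical address limit (the kernel derives the real limit from MAX_PHYSMEM_BITS and PAGE_SHIFT):

    #include <stdio.h>

    /* Assumed: 46-bit physical address space, 4 KiB pages, 64-bit longs. */
    static const unsigned long max_sparsemem_pfn = 1UL << (46 - 12);

    /* Trim [*start_pfn, *end_pfn) to what SPARSEMEM can address, in the
     * spirit of mminit_validate_memmodel_limits(). */
    static void validate_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn)
    {
        if (*start_pfn > max_sparsemem_pfn)
            *start_pfn = max_sparsemem_pfn;
        if (*end_pfn > max_sparsemem_pfn)
            *end_pfn = max_sparsemem_pfn;
    }

    int main(void)
    {
        unsigned long start = max_sparsemem_pfn - 4;
        unsigned long end = max_sparsemem_pfn + 4;

        validate_limits(&start, &end);  /* end is pulled back to the limit */
        printf("clamped range: [%#lx, %#lx)\n", start, end);
        return 0;
    }
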
D | nobootmem.c
    104  unsigned long start_pfn = PFN_UP(start);  [local in __free_memory_core()]
    108      if (start_pfn > end_pfn)  [in __free_memory_core()]
    111      __free_pages_memory(start_pfn, end_pfn);  [in __free_memory_core()]
    113      return end_pfn - start_pfn;  [in __free_memory_core()]

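__free_memory_core() converts a byte range to whole page frames conservatively: PFN_UP() on the start so no partial leading page is freed, PFN_DOWN() on the end for the trailing one, bailing out if the range collapses. A sketch using the include/linux/pfn.h-style macros, assuming 4 KiB pages; the byte addresses are made up:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Round a byte address up/down to a page frame number, as in
     * include/linux/pfn.h. */
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long start = 0x1234, end = 0x5678;  /* byte addresses */
        unsigned long start_pfn = PFN_UP(start);     /* 2: skip partial head */
        unsigned long end_pfn = PFN_DOWN(end);       /* 5: skip partial tail */

        if (start_pfn > end_pfn)
            return 0;                                /* range too small */
        printf("%lu whole pages in [%#lx, %#lx)\n",
               end_pfn - start_pfn, start, end);     /* 3 whole pages */
        return 0;
    }
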
D | internal.h
    181      unsigned long start_pfn, unsigned long end_pfn);
    369  extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
    372  static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,  [argument]

D | memblock.c
    1326  unsigned long start_pfn, end_pfn;  [local in memblock_mem_size()]
    1329      start_pfn = memblock_region_memory_base_pfn(r);  [in memblock_mem_size()]
    1331      start_pfn = min_t(unsigned long, start_pfn, limit_pfn);  [in memblock_mem_size()]
    1333      pages += end_pfn - start_pfn;  [in memblock_mem_size()]
    1406      unsigned long *start_pfn, unsigned long *end_pfn)  [argument in memblock_search_pfn_nid()]
    1414      *start_pfn = PFN_DOWN(type->regions[mid].base);  [in memblock_search_pfn_nid()]

D | hugetlb.c
     705  static int __alloc_gigantic_page(unsigned long start_pfn,  [argument]
     708      unsigned long end_pfn = start_pfn + nr_pages;  [in __alloc_gigantic_page()]
     709      return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);  [in __alloc_gigantic_page()]
     712  static bool pfn_range_valid_gigantic(unsigned long start_pfn,  [argument]
     715      unsigned long i, end_pfn = start_pfn + nr_pages;  [in pfn_range_valid_gigantic()]
     718      for (i = start_pfn; i < end_pfn; i++) {  [in pfn_range_valid_gigantic()]
     738      unsigned long start_pfn, unsigned long nr_pages)  [argument in zone_spans_last_pfn()]
     740      unsigned long last_pfn = start_pfn + nr_pages - 1;  [in zone_spans_last_pfn()]
    1111  void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)  [argument]
    1124      VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));  [in dissolve_free_huge_pages()]
    [all …]

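__alloc_gigantic_page() hands a pfn range to alloc_contig_range() only after zone_spans_last_pfn() confirms that both the first and the last frame sit inside one zone. A sketch of that fit test; struct zone_bounds stands in for the relevant struct zone fields and all numbers are illustrative (the 262144-frame count matches a 1 GiB page with 4 KiB frames):

    #include <stdbool.h>
    #include <stdio.h>

    struct zone_bounds {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    static bool zone_spans_pfn(const struct zone_bounds *z, unsigned long pfn)
    {
        return pfn >= z->zone_start_pfn &&
               pfn < z->zone_start_pfn + z->spanned_pages;
    }

    /* A gigantic-page candidate fits only if its last pfn is still inside
     * the zone, mirroring the zone_spans_last_pfn() logic above. */
    static bool range_fits_zone(const struct zone_bounds *z,
                                unsigned long start_pfn,
                                unsigned long nr_pages)
    {
        unsigned long last_pfn = start_pfn + nr_pages - 1;

        return zone_spans_pfn(z, start_pfn) && zone_spans_pfn(z, last_pfn);
    }

    int main(void)
    {
        struct zone_bounds z = { .zone_start_pfn = 0x100000,
                                 .spanned_pages = 0x80000 };
        unsigned long nr = 1UL << 18;  /* 262144 frames, 1 GiB page */

        printf("fits: %d\n", range_fits_zone(&z, 0x100000, nr));  /* 1 */
        printf("fits: %d\n", range_fits_zone(&z, 0x140001, nr));  /* 0 */
        return 0;
    }
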
D | ksm.c
    1992  static void ksm_check_stable_tree(unsigned long start_pfn,  [argument]
    2004      if (stable_node->kpfn >= start_pfn &&  [in ksm_check_stable_tree()]
    2019      if (stable_node->kpfn >= start_pfn &&  [in ksm_check_stable_tree()]
    2053      ksm_check_stable_tree(mn->start_pfn,  [in ksm_memory_callback()]
    2054          mn->start_pfn + mn->nr_pages);  [in ksm_memory_callback()]

D | vmstat.c
    978  unsigned long start_pfn = zone->zone_start_pfn;  [local in pagetypeinfo_showblockcount_print()]
    982      for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {  [in pagetypeinfo_showblockcount_print()]

D | kmemleak.c
    1342  unsigned long start_pfn = node_start_pfn(i);  [local in kmemleak_scan()]
    1346      for (pfn = start_pfn; pfn < end_pfn; pfn++) {  [in kmemleak_scan()]