/mm/
memory_hotplug.c
    172 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
    179 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
    207 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
    214 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
    355 unsigned long start_pfn, in find_smallest_section_pfn() argument
    358 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) { in find_smallest_section_pfn()
    359 if (unlikely(!pfn_to_online_page(start_pfn))) in find_smallest_section_pfn()
    362 if (unlikely(pfn_to_nid(start_pfn) != nid)) in find_smallest_section_pfn()
    365 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
    368 return start_pfn; in find_smallest_section_pfn()
    [all …]
page_isolation.c
    183 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in start_isolate_page_range() argument
    191 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); in start_isolate_page_range()
    194 for (pfn = start_pfn; in start_isolate_page_range()
    209 for (pfn = start_pfn; in start_isolate_page_range()
    224 void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in undo_isolate_page_range() argument
    230 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); in undo_isolate_page_range()
    233 for (pfn = start_pfn; in undo_isolate_page_range()
    287 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, in test_pages_isolated() argument
    299 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in test_pages_isolated()
    304 page = __first_valid_page(start_pfn, end_pfn - start_pfn); in test_pages_isolated()
    [all …]
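The isolation entry points listed above (start_isolate_page_range(), undo_isolate_page_range(), test_pages_isolated()) all expect a pageblock-aligned start_pfn and walk the range one pageblock at a time. The standalone sketch below shows only that walk; PAGEBLOCK_NR_PAGES is a hypothetical stand-in for the kernel's pageblock_nr_pages (which depends on architecture and configuration), and walk_pageblocks() is an illustrative helper, not a kernel function.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's pageblock_nr_pages. */
#define PAGEBLOCK_NR_PAGES 512UL

/* Walk [start_pfn, end_pfn) one pageblock at a time, as the isolation
 * helpers listed above do; the real code inspects and marks each
 * pageblock's migratetype at every step. */
static void walk_pageblocks(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	/* Mirrors the BUG_ON() alignment check in the listing. */
	assert(start_pfn % PAGEBLOCK_NR_PAGES == 0);

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
		printf("pageblock starting at pfn %lu\n", pfn);
}

int main(void)
{
	walk_pageblocks(0x40000, 0x40000 + 4 * PAGEBLOCK_NR_PAGES);
	return 0;
}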
page_ext.c
    377 static int __meminit online_page_ext(unsigned long start_pfn, in online_page_ext() argument
    384 start = SECTION_ALIGN_DOWN(start_pfn); in online_page_ext()
    385 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
    393 nid = pfn_to_nid(start_pfn); in online_page_ext()
    409 static int __meminit offline_page_ext(unsigned long start_pfn, in offline_page_ext() argument
    414 start = SECTION_ALIGN_DOWN(start_pfn); in offline_page_ext()
    415 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
    445 ret = online_page_ext(mn->start_pfn, in page_ext_callback()
    449 offline_page_ext(mn->start_pfn, in page_ext_callback()
    453 offline_page_ext(mn->start_pfn, in page_ext_callback()
    [all …]
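online_page_ext() and offline_page_ext() first widen the hot-plugged pfn range to whole memory sections before touching per-section page_ext storage. The sketch below reproduces only that alignment arithmetic; the PAGES_PER_SECTION value and the two macros are simplified stand-ins for the kernel's sparsemem definitions of SECTION_ALIGN_DOWN()/SECTION_ALIGN_UP().

#include <stdio.h>

/* Hypothetical stand-in: 128 MiB sections of 4 KiB pages on many configs. */
#define PAGES_PER_SECTION 32768UL

/* Simplified versions of the kernel's section alignment macros. */
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(PAGES_PER_SECTION - 1))
#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long start_pfn = 100000, nr_pages = 50000;
	unsigned long start = SECTION_ALIGN_DOWN(start_pfn);
	unsigned long end = SECTION_ALIGN_UP(start_pfn + nr_pages);
	unsigned long pfn;

	/* Walk the widened range one section at a time, as online_page_ext() does. */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		printf("section covering pfn %lu\n", pfn);

	return 0;
}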
page_alloc.c
    614 unsigned long sp, start_pfn; in page_outside_zone_boundaries() local
    618 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
    627 start_pfn, start_pfn + sp); in page_outside_zone_boundaries()
    1583 unsigned long start_pfn = PFN_DOWN(start); in reserve_bootmem_region() local
    1586 for (; start_pfn < end_pfn; start_pfn++) { in reserve_bootmem_region()
    1587 if (pfn_valid(start_pfn)) { in reserve_bootmem_region()
    1588 struct page *page = pfn_to_page(start_pfn); in reserve_bootmem_region()
    1590 init_reserved_page(start_pfn); in reserve_bootmem_region()
    1669 unsigned long start_pfn, end_pfn; in __early_pfn_to_nid() local
    1675 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); in __early_pfn_to_nid()
    [all …]
sparse.c
    154 void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument
    163 if (*start_pfn > max_sparsemem_pfn) { in mminit_validate_memmodel_limits()
    166 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits()
    168 *start_pfn = max_sparsemem_pfn; in mminit_validate_memmodel_limits()
    173 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits()
    610 void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) in online_mem_sections() argument
    614 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in online_mem_sections()
    629 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) in offline_mem_sections() argument
    633 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in offline_mem_sections()
    906 int __meminit sparse_add_section(int nid, unsigned long start_pfn, in sparse_add_section() argument
    [all …]
compaction.c
    559 unsigned long *start_pfn, in isolate_freepages_block() argument
    569 unsigned long blockpfn = *start_pfn; in isolate_freepages_block()
    670 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, in isolate_freepages_block()
    674 *start_pfn = blockpfn; in isolate_freepages_block()
    706 unsigned long start_pfn, unsigned long end_pfn) in isolate_freepages_range() argument
    711 pfn = start_pfn; in isolate_freepages_range()
    839 unsigned long start_pfn = low_pfn; in isolate_migratepages_block() local
    1119 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, in isolate_migratepages_block()
    1141 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, in isolate_migratepages_range() argument
    1147 pfn = start_pfn; in isolate_migratepages_range()
    [all …]
internal.h
    199 extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
    202 static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, in pageblock_pfn_to_page() argument
    206 return pfn_to_page(start_pfn); in pageblock_pfn_to_page()
    208 return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); in pageblock_pfn_to_page()
    278 unsigned long start_pfn, unsigned long end_pfn);
    547 extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
    550 static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument
shuffle.c
    84 unsigned long start_pfn = z->zone_start_pfn; in __shuffle_zone() local
    90 start_pfn = ALIGN(start_pfn, order_pages); in __shuffle_zone()
    91 for (i = start_pfn; i < end_pfn; i += order_pages) { in __shuffle_zone()
ksm.c
    2712 unsigned long start_pfn, in stable_node_dup_remove_range() argument
    2715 if (stable_node->kpfn >= start_pfn && in stable_node_dup_remove_range()
    2728 unsigned long start_pfn, in stable_node_chain_remove_range() argument
    2737 return stable_node_dup_remove_range(stable_node, start_pfn, in stable_node_chain_remove_range()
    2744 stable_node_dup_remove_range(dup, start_pfn, end_pfn); in stable_node_chain_remove_range()
    2753 static void ksm_check_stable_tree(unsigned long start_pfn, in ksm_check_stable_tree() argument
    2765 start_pfn, end_pfn, in ksm_check_stable_tree()
    2775 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree()
    2809 ksm_check_stable_tree(mn->start_pfn, in ksm_memory_callback()
    2810 mn->start_pfn + mn->nr_pages); in ksm_memory_callback()
memblock.c
    1763 unsigned long *start_pfn, unsigned long *end_pfn) in memblock_search_pfn_nid() argument
    1771 *start_pfn = PFN_DOWN(type->regions[mid].base); in memblock_search_pfn_nid()
    1938 unsigned long start_pfn = PFN_UP(start); in __free_memory_core() local
    1942 if (start_pfn >= end_pfn) in __free_memory_core()
    1945 __free_pages_memory(start_pfn, end_pfn); in __free_memory_core()
    1947 return end_pfn - start_pfn; in __free_memory_core()
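__free_memory_core() turns a byte range into a pfn range with PFN_UP() on the start and a rounded-down end, so only pages that lie entirely inside the range are released to the page allocator; if the rounded range is empty, nothing is freed. A small sketch of that rounding follows, assuming a 4 KiB page size (PAGE_SHIFT of 12) and using simplified local versions of the two helpers.

#include <stdio.h>

/* Assumed page size for the sketch; the kernel value is architecture dependent. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified counterparts of the kernel's PFN_UP()/PFN_DOWN() helpers. */
#define PFN_UP(addr)   (((addr) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(addr) ((addr) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x1000800;		/* not page aligned */
	unsigned long end = 0x1005400;			/* not page aligned */
	unsigned long start_pfn = PFN_UP(start);	/* round up: skip the partial first page */
	unsigned long end_pfn = PFN_DOWN(end);		/* round down: skip the partial last page */

	if (start_pfn >= end_pfn)
		printf("range too small to contain a whole page\n");
	else
		printf("freeing pfns [%lu, %lu), %lu pages\n",
		       start_pfn, end_pfn, end_pfn - start_pfn);
	return 0;
}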
kmemleak.c
    1452 unsigned long start_pfn = zone->zone_start_pfn; in kmemleak_scan() local
    1456 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in kmemleak_scan()
vmstat.c
    1501 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print() local
    1505 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in pagetypeinfo_showblockcount_print()
hugetlb.c
    1866 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) in dissolve_free_huge_pages() argument
    1875 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { in dissolve_free_huge_pages()
/mm/kasan/
shadow.c
    182 start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); in kasan_mem_notifier()
    206 pfn_to_nid(mem_data->start_pfn), in kasan_mem_notifier()