
Searched refs:start_pfn (Results 1 – 13 of 13) sorted by relevance

/mm/
memory_hotplug.c
186 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
192 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
220 static void register_page_bootmem_info_section(unsigned long start_pfn) in register_page_bootmem_info_section() argument
226 if (!pfn_valid(start_pfn)) in register_page_bootmem_info_section()
229 section_nr = pfn_to_section_nr(start_pfn); in register_page_bootmem_info_section()
289 static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn, in grow_zone_span() argument
297 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in grow_zone_span()
298 zone->zone_start_pfn = start_pfn; in grow_zone_span()
306 static void resize_zone(struct zone *zone, unsigned long start_pfn, in resize_zone() argument
311 if (end_pfn - start_pfn) { in resize_zone()
[all …]
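
The memory_hotplug.c hits above center on widening a zone's span when a new pfn range appears (grow_zone_span()/resize_zone()). The standalone sketch below mirrors only the shape of that logic; the zone_t type, helper names, and sample numbers are invented for illustration and are not the kernel's definitions.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct {
        unsigned long zone_start_pfn;   /* first pfn spanned by the zone */
        unsigned long spanned_pages;    /* number of pfns spanned        */
    } zone_t;

    static bool zone_is_empty(const zone_t *zone)
    {
        return zone->spanned_pages == 0;
    }

    static unsigned long zone_end_pfn(const zone_t *zone)
    {
        return zone->zone_start_pfn + zone->spanned_pages;
    }

    /* Widen the zone so it also covers [start_pfn, end_pfn). */
    static void grow_span(zone_t *zone, unsigned long start_pfn,
                          unsigned long end_pfn)
    {
        unsigned long old_end = zone_is_empty(zone) ? end_pfn : zone_end_pfn(zone);

        /* Pull the lower bound down if the new range starts earlier. */
        if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
            zone->zone_start_pfn = start_pfn;

        /* Push the upper bound up if the new range ends later. */
        if (end_pfn > old_end)
            old_end = end_pfn;
        zone->spanned_pages = old_end - zone->zone_start_pfn;
    }

    int main(void)
    {
        zone_t zone = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

        grow_span(&zone, 0xf00, 0x1900);
        printf("zone now spans pfns [0x%lx, 0x%lx)\n",
               zone.zone_start_pfn, zone_end_pfn(&zone));
        return 0;
    }
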
page_isolation.c
26 arg.start_pfn = pfn; in set_migratetype_isolate()
158 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in start_isolate_page_range() argument
165 BUG_ON((start_pfn) & (pageblock_nr_pages - 1)); in start_isolate_page_range()
168 for (pfn = start_pfn; in start_isolate_page_range()
180 for (pfn = start_pfn; in start_isolate_page_range()
191 int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, in undo_isolate_page_range() argument
196 BUG_ON((start_pfn) & (pageblock_nr_pages - 1)); in undo_isolate_page_range()
198 for (pfn = start_pfn; in undo_isolate_page_range()
245 int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, in test_pages_isolated() argument
258 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in test_pages_isolated()
[all …]
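
The page_isolation.c hits show the recurring pattern of asserting pageblock alignment and then walking a pfn range one pageblock at a time. A minimal userspace analogue follows; PAGEBLOCK_NR_PAGES stands in for the kernel's pageblock_nr_pages, and 512 is only a sample value (it happens to match x86_64 with 2 MiB pageblocks).

    #include <assert.h>
    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL

    static void walk_pageblocks(unsigned long start_pfn, unsigned long end_pfn)
    {
        unsigned long pfn;

        /* Both ends must be pageblock aligned, as the BUG_ON()s above insist. */
        assert((start_pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0);
        assert((end_pfn & (PAGEBLOCK_NR_PAGES - 1)) == 0);

        /* Visit the first pfn of every pageblock in [start_pfn, end_pfn). */
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES)
            printf("pageblock starting at pfn %lu\n", pfn);
    }

    int main(void)
    {
        walk_pageblocks(0, 4 * PAGEBLOCK_NR_PAGES);
        return 0;
    }
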
page_ext.c
276 static int __meminit online_page_ext(unsigned long start_pfn, in online_page_ext() argument
283 start = SECTION_ALIGN_DOWN(start_pfn); in online_page_ext()
284 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
292 nid = pfn_to_nid(start_pfn); in online_page_ext()
311 static int __meminit offline_page_ext(unsigned long start_pfn, in offline_page_ext() argument
316 start = SECTION_ALIGN_DOWN(start_pfn); in offline_page_ext()
317 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
333 ret = online_page_ext(mn->start_pfn, in page_ext_callback()
337 offline_page_ext(mn->start_pfn, in page_ext_callback()
341 offline_page_ext(mn->start_pfn, in page_ext_callback()
[all …]
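
online_page_ext()/offline_page_ext() round the affected range outward to whole memory sections with SECTION_ALIGN_DOWN()/SECTION_ALIGN_UP(). A short sketch of that rounding, assuming a power-of-two PAGES_PER_SECTION (32768 is only a sample value, matching 128 MiB sections with 4 KiB pages):

    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL
    #define SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(PAGES_PER_SECTION - 1))
    #define SECTION_ALIGN_UP(pfn) \
        (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

    int main(void)
    {
        unsigned long start_pfn = 40000, nr_pages = 1000;

        /* Round outward so the whole range is covered by full sections. */
        unsigned long start = SECTION_ALIGN_DOWN(start_pfn);
        unsigned long end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        printf("pfn range [%lu, %lu) widens to section-aligned [%lu, %lu)\n",
               start_pfn, start_pfn + nr_pages, start, end);
        return 0;
    }
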
compaction.c
91 static struct page *pageblock_pfn_to_page(unsigned long start_pfn, in pageblock_pfn_to_page() argument
100 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) in pageblock_pfn_to_page()
103 start_page = pfn_to_page(start_pfn); in pageblock_pfn_to_page()
214 unsigned long start_pfn = zone->zone_start_pfn; in __reset_isolation_suitable() local
221 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in __reset_isolation_suitable()
396 unsigned long *start_pfn, in isolate_freepages_block() argument
405 unsigned long blockpfn = *start_pfn; in isolate_freepages_block()
515 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, in isolate_freepages_block()
519 *start_pfn = blockpfn; in isolate_freepages_block()
554 unsigned long start_pfn, unsigned long end_pfn) in isolate_freepages_range() argument
[all …]
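
pageblock_pfn_to_page() in the compaction.c hits guards both ends of a block with pfn_valid() before converting a pfn into a struct page. The sketch below imitates that guard against a tiny fabricated memmap; the struct, helpers, and bounds are stand-ins, not the kernel's.

    #include <stdio.h>
    #include <stdbool.h>

    struct page { unsigned long flags; };

    #define MAX_PFN 1024UL
    static struct page memmap[MAX_PFN];

    static bool pfn_valid(unsigned long pfn) { return pfn < MAX_PFN; }
    static struct page *pfn_to_page(unsigned long pfn) { return &memmap[pfn]; }

    /* Return the first page of the block, or NULL if either end is bogus. */
    static struct page *block_pfn_to_page(unsigned long start_pfn,
                                          unsigned long end_pfn)
    {
        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
            return NULL;
        return pfn_to_page(start_pfn);
    }

    int main(void)
    {
        printf("block at pfn 0:  %p\n", (void *)block_pfn_to_page(0, 511));
        printf("block past end: %p\n", (void *)block_pfn_to_page(1020, 1531));
        return 0;
    }
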
page_alloc.c
396 unsigned long sp, start_pfn; in page_outside_zone_boundaries() local
400 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
409 start_pfn, start_pfn + sp); in page_outside_zone_boundaries()
1008 unsigned long start_pfn = PFN_DOWN(start); in reserve_bootmem_region() local
1011 for (; start_pfn < end_pfn; start_pfn++) { in reserve_bootmem_region()
1012 if (pfn_valid(start_pfn)) { in reserve_bootmem_region()
1013 struct page *page = pfn_to_page(start_pfn); in reserve_bootmem_region()
1015 init_reserved_page(start_pfn); in reserve_bootmem_region()
1587 unsigned long start_pfn, end_pfn; in move_freepages_block() local
1590 start_pfn = page_to_pfn(page); in move_freepages_block()
[all …]
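
Among the page_alloc.c hits, move_freepages_block() turns a single page into the pageblock that contains it by rounding the page's pfn to the pageblock boundary. A minimal sketch of that rounding (PAGEBLOCK_NR_PAGES and the sample pfn are assumptions):

    #include <stdio.h>

    #define PAGEBLOCK_NR_PAGES 512UL   /* must be a power of two */

    int main(void)
    {
        unsigned long pfn = 12345;   /* pretend this came from page_to_pfn() */

        /* Round down to the pageblock start; the exclusive end follows. */
        unsigned long start_pfn = pfn & ~(PAGEBLOCK_NR_PAGES - 1);
        unsigned long end_pfn = start_pfn + PAGEBLOCK_NR_PAGES;

        printf("pfn %lu lives in pageblock [%lu, %lu)\n", pfn, start_pfn, end_pfn);
        return 0;
    }
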
sparse.c
144 void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument
153 if (*start_pfn > max_sparsemem_pfn) { in mminit_validate_memmodel_limits()
156 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits()
158 *start_pfn = max_sparsemem_pfn; in mminit_validate_memmodel_limits()
163 *start_pfn, *end_pfn, max_sparsemem_pfn); in mminit_validate_memmodel_limits()
194 unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn, in node_memmap_size_bytes() argument
200 mminit_validate_memmodel_limits(&start_pfn, &end_pfn); in node_memmap_size_bytes()
201 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { in node_memmap_size_bytes()
693 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) in sparse_add_one_section() argument
695 unsigned long section_nr = pfn_to_section_nr(start_pfn); in sparse_add_one_section()
[all …]
nobootmem.c
113 unsigned long start_pfn = PFN_UP(start); in __free_memory_core() local
117 if (start_pfn > end_pfn) in __free_memory_core()
120 __free_pages_memory(start_pfn, end_pfn); in __free_memory_core()
122 return end_pfn - start_pfn; in __free_memory_core()
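
__free_memory_core() rounds inward with PFN_UP(start)/PFN_DOWN(end) so that only pages lying wholly inside the region are freed, whereas reserve_bootmem_region() in page_alloc.c above starts from PFN_DOWN(start) so every page the region touches gets reserved. A sketch of the two roundings, assuming 4 KiB pages (PAGE_SHIFT of 12):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_DOWN(addr) ((addr) >> PAGE_SHIFT)
    #define PFN_UP(addr)   (((addr) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long start = 0x1100, end = 0x5100;   /* arbitrary byte range */

        /* Reserving: cover every page the range overlaps. */
        printf("reserve pfns [%lu, %lu)\n", PFN_DOWN(start), PFN_UP(end));

        /* Freeing: only pages completely contained in the range. */
        unsigned long free_start = PFN_UP(start), free_end = PFN_DOWN(end);
        if (free_start < free_end)
            printf("free pfns [%lu, %lu) -> %lu pages\n",
                   free_start, free_end, free_end - free_start);
        return 0;
    }
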
internal.h
246 unsigned long start_pfn, unsigned long end_pfn);
429 extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
432 static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, in mminit_validate_memmodel_limits() argument
memblock.c
1426 unsigned long start_pfn, end_pfn; in memblock_mem_size() local
1429 start_pfn = memblock_region_memory_base_pfn(r); in memblock_mem_size()
1431 start_pfn = min_t(unsigned long, start_pfn, limit_pfn); in memblock_mem_size()
1433 pages += end_pfn - start_pfn; in memblock_mem_size()
1515 unsigned long *start_pfn, unsigned long *end_pfn) in memblock_search_pfn_nid() argument
1523 *start_pfn = PFN_DOWN(type->regions[mid].base); in memblock_search_pfn_nid()
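
memblock_mem_size() clamps each region's pfn range to a limit before summing page counts, much as mminit_validate_memmodel_limits() in sparse.c clamps a range against max_sparsemem_pfn. A self-contained sketch with invented sample regions:

    #include <stdio.h>
    #include <stddef.h>

    struct region { unsigned long base_pfn, end_pfn; };   /* [base, end) */

    int main(void)
    {
        struct region regions[] = { { 0, 1000 }, { 2000, 5000 }, { 8000, 9000 } };
        unsigned long limit_pfn = 4000, pages = 0;
        size_t i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
            unsigned long start_pfn = regions[i].base_pfn;
            unsigned long end_pfn = regions[i].end_pfn;

            /* Clamp both ends so nothing above limit_pfn is counted. */
            if (start_pfn > limit_pfn)
                start_pfn = limit_pfn;
            if (end_pfn > limit_pfn)
                end_pfn = limit_pfn;

            pages += end_pfn - start_pfn;
        }
        printf("%lu pages below pfn %lu\n", pages, limit_pfn);
        return 0;
    }
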
ksm.c
2042 static void ksm_check_stable_tree(unsigned long start_pfn, in ksm_check_stable_tree() argument
2054 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree()
2069 if (stable_node->kpfn >= start_pfn && in ksm_check_stable_tree()
2103 ksm_check_stable_tree(mn->start_pfn, in ksm_memory_callback()
2104 mn->start_pfn + mn->nr_pages); in ksm_memory_callback()
hugetlb.c
1044 static int __alloc_gigantic_page(unsigned long start_pfn, in __alloc_gigantic_page() argument
1047 unsigned long end_pfn = start_pfn + nr_pages; in __alloc_gigantic_page()
1048 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE); in __alloc_gigantic_page()
1051 static bool pfn_range_valid_gigantic(unsigned long start_pfn, in pfn_range_valid_gigantic() argument
1054 unsigned long i, end_pfn = start_pfn + nr_pages; in pfn_range_valid_gigantic()
1057 for (i = start_pfn; i < end_pfn; i++) { in pfn_range_valid_gigantic()
1077 unsigned long start_pfn, unsigned long nr_pages) in zone_spans_last_pfn() argument
1079 unsigned long last_pfn = start_pfn + nr_pages - 1; in zone_spans_last_pfn()
1487 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) in dissolve_free_huge_pages() argument
1494 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) in dissolve_free_huge_pages()
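
The hugetlb.c hits contrast an exclusive end (end_pfn = start_pfn + nr_pages in __alloc_gigantic_page()) with an inclusive last pfn (last_pfn = start_pfn + nr_pages - 1 checked by zone_spans_last_pfn()). A sketch of that off-by-one distinction with made-up zone bounds:

    #include <stdio.h>
    #include <stdbool.h>

    /* True if pfn falls inside the zone; zone_end is exclusive. */
    static bool zone_spans_pfn(unsigned long zone_start, unsigned long zone_end,
                               unsigned long pfn)
    {
        return pfn >= zone_start && pfn < zone_end;
    }

    int main(void)
    {
        unsigned long zone_start = 1000, zone_end = 2000;   /* [1000, 2000) */
        unsigned long start_pfn = 1800, nr_pages = 200;

        unsigned long end_pfn = start_pfn + nr_pages;        /* exclusive */
        unsigned long last_pfn = start_pfn + nr_pages - 1;   /* inclusive */

        /* The range fits only if its last page is still inside the zone. */
        printf("range [%lu, %lu) %s in the zone\n", start_pfn, end_pfn,
               zone_spans_pfn(zone_start, zone_end, last_pfn) ? "fits" : "does not fit");
        return 0;
    }
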
vmstat.c
1006 unsigned long start_pfn = zone->zone_start_pfn; in pagetypeinfo_showblockcount_print() local
1010 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { in pagetypeinfo_showblockcount_print()
kmemleak.c
1382 unsigned long start_pfn = node_start_pfn(i); in kmemleak_scan() local
1386 for (pfn = start_pfn; pfn < end_pfn; pfn++) { in kmemleak_scan()