Lines matching +full:fine +full:- +full:granular (from mm_init.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * mm_init.c - Memory initialisation verification and debugging
18 #include <linux/page-isolation.h>
60 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
61 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
68 zone->name); in mminit_verify_zonelist()
72 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); in mminit_verify_zonelist()
84 width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH in mminit_verify_pageflags_layout()
85 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; in mminit_verify_pageflags_layout()
111 "Node/Zone ID: %lu -> %lu\n", in mminit_verify_pageflags_layout()
115 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", in mminit_verify_pageflags_layout()
127 shift -= SECTIONS_WIDTH; in mminit_verify_pageflags_layout()
131 shift -= NODES_WIDTH; in mminit_verify_pageflags_layout()
135 shift -= ZONES_WIDTH; in mminit_verify_pageflags_layout()
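The fragment above is mminit_verify_pageflags_layout() checking at boot that the section, node, and zone identifiers still fit inside page->flags. A minimal userspace sketch of the same accounting, with invented field widths (the real ones are derived from the kernel configuration):

    #include <stdio.h>

    /* Invented example widths; the kernel computes these from config. */
    #define BITS_PER_LONG  64
    #define SECTIONS_WIDTH  0  /* e.g. SPARSEMEM_VMEMMAP needs no section bits */
    #define NODES_WIDTH    10
    #define ZONES_WIDTH     3

    int main(void)
    {
        int shift = BITS_PER_LONG;
        int width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH;

        printf("bits left for flags proper: %d\n", width);

        /* the verifier peels each field off the top of page->flags: */
        shift -= SECTIONS_WIDTH;  /* section id occupies the top bits */
        shift -= NODES_WIDTH;     /* then the node id */
        shift -= ZONES_WIDTH;     /* then the zone id */
        printf("zone field starts at bit %d\n", shift);
        return 0;
    }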
212 return -ENOMEM; in mm_sysfs_init()
242 return -EINVAL; in cmdline_parse_core()
304 unsigned long pages = end_pfn - start_pfn; in early_calculate_totalpages()
321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { in find_usable_zone_for_movable()
330 VM_BUG_ON(zone_index == -1); in find_usable_zone_for_movable()
365 usable_startpfn = PFN_DOWN(r->base); in find_zone_movable_pfns_for_nodes()
437 * Round-up so that ZONE_MOVABLE is at least as large as what in find_zone_movable_pfns_for_nodes()
443 corepages = totalpages - required_movablecore; in find_zone_movable_pfns_for_nodes()
491 - start_pfn; in find_zone_movable_pfns_for_nodes()
493 kernelcore_remaining -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
495 required_kernelcore -= min(kernel_pages, in find_zone_movable_pfns_for_nodes()
515 * start_pfn->end_pfn. Calculate size_pages as the in find_zone_movable_pfns_for_nodes()
518 size_pages = end_pfn - start_pfn; in find_zone_movable_pfns_for_nodes()
528 required_kernelcore -= min(required_kernelcore, in find_zone_movable_pfns_for_nodes()
530 kernelcore_remaining -= size_pages; in find_zone_movable_pfns_for_nodes()
542 usable_nodes--; in find_zone_movable_pfns_for_nodes()
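The loop above carves ZONE_MOVABLE out of each node, spreading the requested kernelcore across nodes and decrementing usable_nodes when a node cannot satisfy its share. A hedged arithmetic sketch of the top-level split, with invented totals:

    #include <stdio.h>

    int main(void)
    {
        /* all values invented: 8GiB of 4KiB pages, movablecore=2G */
        unsigned long totalpages = 2097152;
        unsigned long required_movablecore = 524288;
        int usable_nodes = 2;

        /* pages that must remain usable by the kernel */
        unsigned long corepages = totalpages - required_movablecore;
        /* spread evenly; a node that falls short returns its leftover
         * to the pool and usable_nodes shrinks, as in the listing */
        unsigned long kernelcore_node = corepages / usable_nodes;

        printf("kernelcore per node: %lu pages\n", kernelcore_node);
        return 0;
    }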
574 INIT_LIST_HEAD(&page->lru); in __init_single_page()
605 if (state->last_start <= pfn && pfn < state->last_end) in __early_pfn_to_nid()
606 return state->last_nid; in __early_pfn_to_nid()
610 state->last_start = start_pfn; in __early_pfn_to_nid()
611 state->last_end = end_pfn; in __early_pfn_to_nid()
612 state->last_nid = nid; in __early_pfn_to_nid()
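__early_pfn_to_nid() above is a classic one-entry memoization: remember the last [start, end) range and its nid so a lookup for a nearby pfn skips the full range walk. A standalone sketch of the pattern; the ranges table stands in for memblock and is invented:

    #include <stdio.h>

    struct nid_cache { unsigned long last_start, last_end; int last_nid; };
    struct mem_range { unsigned long start, end; int nid; };

    static const struct mem_range ranges[] = {
        { 0x00000, 0x40000, 0 },
        { 0x40000, 0x80000, 1 },
    };

    static int pfn_to_nid(struct nid_cache *state, unsigned long pfn)
    {
        if (state->last_start <= pfn && pfn < state->last_end)
            return state->last_nid;                 /* cache hit */

        for (unsigned i = 0; i < sizeof(ranges)/sizeof(ranges[0]); i++) {
            if (pfn >= ranges[i].start && pfn < ranges[i].end) {
                state->last_start = ranges[i].start; /* refill cache */
                state->last_end   = ranges[i].end;
                state->last_nid   = ranges[i].nid;
                return state->last_nid;
            }
        }
        return -1;
    }

    int main(void)
    {
        struct nid_cache c = { 0, 0, -1 };
        /* second call is served from the cache */
        printf("%d %d\n", pfn_to_nid(&c, 0x41000), pfn_to_nid(&c, 0x42000));
        return 0;
    }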
655 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
661 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_initialised()
687 /* Always populate low zones for address-constrained allocations */ in defer_init()
691 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) in defer_init()
699 (pfn & (PAGES_PER_SECTION - 1)) == 0) { in defer_init()
700 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
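defer_init() above only starts deferring at a section boundary, recording the cut-off in first_deferred_pfn so the eagerly initialised memmap stays section-aligned. The boundary test in isolation; PAGES_PER_SECTION here is a representative value, not necessarily your config's:

    #include <stdio.h>

    #define PAGES_PER_SECTION (1ul << 15)  /* representative: 128MiB of 4K pages */

    static int on_section_boundary(unsigned long pfn)
    {
        /* true exactly when pfn is a multiple of PAGES_PER_SECTION */
        return (pfn & (PAGES_PER_SECTION - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", on_section_boundary(1ul << 15),
                          on_section_boundary((1ul << 15) + 1));
        return 0;
    }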
717 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
760 /* Avoid false-positive PageTail() */ in reserve_bootmem_region()
761 INIT_LIST_HEAD(&page->lru); in reserve_bootmem_region()
802 * - physical memory bank size is not necessarily the exact multiple of the
804 * - early reserved memory may not be listed in memblock.memory
805 * - memory layouts defined with memmap= kernel parameter may not align
809 * - PG_Reserved is set
810 * - zone and node links point to zone and node that span the page if the
812 * - zone and node links point to adjacent zone/node if the hole falls on
826 pfn = pageblock_end_pfn(pfn) - 1; in init_unavailable_range()
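The comment block above explains why pages inside memory holes are still initialised (PG_Reserved set, zone/node links pointing at whatever spans or neighbours the hole); the matched line then jumps to the last pfn of the current pageblock so the loop advances a whole block per iteration. The rounding in isolation, with an invented pageblock order:

    #include <stdio.h>

    #define pageblock_order    9ul                      /* invented */
    #define pageblock_nr_pages (1ul << pageblock_order)

    /* first pfn of the pageblock after the one containing pfn */
    static unsigned long pageblock_end_pfn(unsigned long pfn)
    {
        return (pfn + pageblock_nr_pages) & ~(pageblock_nr_pages - 1);
    }

    int main(void)
    {
        unsigned long pfn = 1234;
        /* last pfn of the current pageblock, as in the listing */
        printf("%lu\n", pageblock_end_pfn(pfn) - 1);    /* prints 1535 */
        return 0;
    }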
840 * Initially all pages are reserved - free ones are freed
842 * done. Non-atomic initialization, single-pass.
856 if (highest_memmap_pfn < end_pfn - 1) in memmap_init_range()
857 highest_memmap_pfn = end_pfn - 1; in memmap_init_range()
871 if (start_pfn == altmap->base_pfn) in memmap_init_range()
872 start_pfn += altmap->reserve; in memmap_init_range()
873 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_range()
879 * There can be holes in boot-time mem_map[]s handed to this in memmap_init_range()
914 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
915 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
924 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, in memmap_init_zone_range()
943 struct zone *zone = node->node_zones + j; in memmap_init()
982 * We can use the non-atomic __set_bit operation for setting in __init_zone_device_page()
988 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer in __init_zone_device_page()
990 * ever freed or placed on a driver-private list. in __init_zone_device_page()
992 page->pgmap = pgmap; in __init_zone_device_page()
993 page->zone_device_data = NULL; in __init_zone_device_page()
999 * the address space during boot when many long-lived in __init_zone_device_page()
1014 if (pgmap->type == MEMORY_DEVICE_PRIVATE || in __init_zone_device_page()
1015 pgmap->type == MEMORY_DEVICE_COHERENT) in __init_zone_device_page()
1043 unsigned int order = pgmap->vmemmap_shift; in memmap_init_compound()
1050 prep_compound_tail(head, pfn - head_pfn); in memmap_init_compound()
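memmap_init_compound() above lays device pages out as a compound page: one head followed by (1 << order) - 1 tails, where the order comes from pgmap->vmemmap_shift and each tail records its offset from the head. A sketch of that pfn walk with an invented order:

    #include <stdio.h>

    int main(void)
    {
        unsigned long order = 4;           /* invented vmemmap_shift */
        unsigned long head_pfn = 0x1000;   /* invented */

        for (unsigned long pfn = head_pfn + 1;
             pfn < head_pfn + (1ul << order); pfn++)
            printf("pfn %#lx is tail #%lu of head %#lx\n",
                   pfn, pfn - head_pfn, head_pfn);
        return 0;
    }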
1069 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
1074 int nid = pgdat->node_id; in memmap_init_zone_device()
1085 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); in memmap_init_zone_device()
1086 nr_pages = end_pfn - start_pfn; in memmap_init_zone_device()
1102 nr_pages, jiffies_to_msecs(jiffies - start)); in memmap_init_zone_device()
1150 unsigned long nr_absent = range_end_pfn - range_start_pfn; in __absent_pages_in_range()
1157 nr_absent -= end_pfn - start_pfn; in __absent_pages_in_range()
1163 * absent_pages_in_range - Return number of page frames in holes within a range
1206 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1210 nr_absent += end_pfn - start_pfn; in zone_absent_pages_in_node()
1219 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1246 return *zone_end_pfn - *zone_start_pfn; in zone_spanned_pages_in_node()
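The identity on display in the lines above: present_pages is spanned_pages minus the pfns lost to holes. A worked example with invented numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long zone_start_pfn = 0x100000; /* invented */
        unsigned long zone_end_pfn   = 0x140000; /* invented */
        unsigned long hole_pages     = 0x08000;  /* invented hole in zone */

        unsigned long spanned = zone_end_pfn - zone_start_pfn;
        unsigned long present = spanned - hole_pages;

        printf("spanned=%lu present=%lu\n", spanned, present);
        return 0;
    }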
1253 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { in reset_memoryless_node_totalpages()
1254 z->zone_start_pfn = 0; in reset_memoryless_node_totalpages()
1255 z->spanned_pages = 0; in reset_memoryless_node_totalpages()
1256 z->present_pages = 0; in reset_memoryless_node_totalpages()
1258 z->present_early_pages = 0; in reset_memoryless_node_totalpages()
1262 pgdat->node_spanned_pages = 0; in reset_memoryless_node_totalpages()
1263 pgdat->node_present_pages = 0; in reset_memoryless_node_totalpages()
1264 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); in reset_memoryless_node_totalpages()
1275 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
1280 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1285 absent = zone_absent_pages_in_node(pgdat->node_id, i, in calculate_node_totalpages()
1289 real_size = spanned - absent; in calculate_node_totalpages()
1292 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
1294 zone->zone_start_pfn = 0; in calculate_node_totalpages()
1295 zone->spanned_pages = spanned; in calculate_node_totalpages()
1296 zone->present_pages = real_size; in calculate_node_totalpages()
1298 zone->present_early_pages = real_size; in calculate_node_totalpages()
1305 pgdat->node_spanned_pages = totalpages; in calculate_node_totalpages()
1306 pgdat->node_present_pages = realtotalpages; in calculate_node_totalpages()
1307 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); in calculate_node_totalpages()
1333 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; in pgdat_init_split_queue()
1335 spin_lock_init(&ds_queue->split_queue_lock); in pgdat_init_split_queue()
1336 INIT_LIST_HEAD(&ds_queue->split_queue); in pgdat_init_split_queue()
1337 ds_queue->split_queue_len = 0; in pgdat_init_split_queue()
1346 init_waitqueue_head(&pgdat->kcompactd_wait); in pgdat_init_kcompactd()
1362 init_waitqueue_head(&pgdat->kswapd_wait); in pgdat_init_internals()
1363 init_waitqueue_head(&pgdat->pfmemalloc_wait); in pgdat_init_internals()
1366 init_waitqueue_head(&pgdat->reclaim_wait[i]); in pgdat_init_internals()
1369 lruvec_init(&pgdat->__lruvec); in pgdat_init_internals()
1375 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
1377 zone->name = zone_names[idx]; in zone_init_internals()
1378 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
1379 spin_lock_init(&zone->lock); in zone_init_internals()
1388 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
1389 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
1393 INIT_LIST_HEAD(&zone->unaccepted_pages); in zone_init_free_lists()
1401 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
1404 if (zone_idx > pgdat->nr_zones) in init_currently_empty_zone()
1405 pgdat->nr_zones = zone_idx; in init_currently_empty_zone()
1407 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
1410 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
1411 pgdat->node_id, in init_currently_empty_zone()
1416 zone->initialized = 1; in init_currently_empty_zone()
1421 * Calculate the size of the zone->blockflags rounded to an unsigned long
1431 zonesize += zone_start_pfn & (pageblock_nr_pages-1); in usemap_size()
1442 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
1443 zone->spanned_pages); in setup_usemap()
1444 zone->pageblock_flags = NULL; in setup_usemap()
1446 zone->pageblock_flags = in setup_usemap()
1449 if (!zone->pageblock_flags) in setup_usemap()
1451 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
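usemap_size() above sizes zone->pageblock_flags: a zone that starts mid-pageblock is padded out, the page count is converted to pageblocks, each pageblock gets a few migratetype bits, and the bit total is rounded up to whole unsigned longs. A userspace reconstruction, assuming 4 bits per pageblock (a representative value, not authoritative):

    #include <stdio.h>

    #define pageblock_order     9ul
    #define pageblock_nr_pages  (1ul << pageblock_order)
    #define NR_PAGEBLOCK_BITS   4ul   /* assumed bits per pageblock */
    #define BITS_PER_LONG       64ul

    static unsigned long roundup_ul(unsigned long x, unsigned long to)
    {
        return ((x + to - 1) / to) * to;
    }

    static unsigned long usemap_size(unsigned long zone_start_pfn,
                                     unsigned long zonesize)
    {
        /* account for a zone that starts mid-pageblock */
        zonesize += zone_start_pfn & (pageblock_nr_pages - 1);
        unsigned long bits =
            (roundup_ul(zonesize, pageblock_nr_pages) >> pageblock_order)
            * NR_PAGEBLOCK_BITS;
        return roundup_ul(bits, BITS_PER_LONG) / 8;   /* bytes */
    }

    int main(void)
    {
        printf("%lu bytes\n", usemap_size(0, 1ul << 20)); /* 1M pages */
        return 0;
    }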
1484 * is unused as pageblock_order is set at compile-time. See
1485 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1496 * - init pgdat internals
1497 * - init all zones belonging to this node
1504 int nid = pgdat->node_id; in free_area_init_core_hotplug()
1510 if (pgdat->per_cpu_nodestats == &boot_nodestats) in free_area_init_core_hotplug()
1511 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); in free_area_init_core_hotplug()
1518 pgdat->nr_zones = 0; in free_area_init_core_hotplug()
1519 pgdat->kswapd_order = 0; in free_area_init_core_hotplug()
1520 pgdat->kswapd_highest_zoneidx = 0; in free_area_init_core_hotplug()
1521 pgdat->node_start_pfn = 0; in free_area_init_core_hotplug()
1522 pgdat->node_present_pages = 0; in free_area_init_core_hotplug()
1527 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); in free_area_init_core_hotplug()
1532 * When memory is hot-added, all the memory is in offline state. So in free_area_init_core_hotplug()
1537 struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug()
1539 zone->present_pages = 0; in free_area_init_core_hotplug()
1547 * - mark all pages reserved
1548 * - mark all memory queues empty
1549 * - clear the memory bitmaps
1557 int nid = pgdat->node_id; in free_area_init_core()
1560 pgdat->per_cpu_nodestats = &boot_nodestats; in free_area_init_core()
1563 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
1566 size = zone->spanned_pages; in free_area_init_core()
1567 freesize = zone->present_pages; in free_area_init_core()
1572 * and per-cpu initialisations in free_area_init_core()
1577 freesize -= memmap_pages; in free_area_init_core()
1588 freesize -= dma_reserve; in free_area_init_core()
1596 nr_kernel_pages -= memmap_pages; in free_area_init_core()
1610 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
1641 if (!pgdat->node_spanned_pages) in alloc_node_mem_map()
1644 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); in alloc_node_mem_map()
1645 offset = pgdat->node_start_pfn - start; in alloc_node_mem_map()
1647 if (!pgdat->node_mem_map) { in alloc_node_mem_map()
1658 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
1660 pgdat->node_id, false); in alloc_node_mem_map()
1663 size, pgdat->node_id); in alloc_node_mem_map()
1664 pgdat->node_mem_map = map + offset; in alloc_node_mem_map()
1667 __func__, pgdat->node_id, (unsigned long)pgdat, in alloc_node_mem_map()
1668 (unsigned long)pgdat->node_mem_map); in alloc_node_mem_map()
1674 mem_map = NODE_DATA(0)->node_mem_map; in alloc_node_mem_map()
1675 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) in alloc_node_mem_map()
1676 mem_map -= offset; in alloc_node_mem_map()
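alloc_node_mem_map() above allocates the struct page array starting from a MAX_ORDER-aligned pfn, then points node_mem_map 'offset' entries in so that index 0 corresponds to node_start_pfn. The alignment math with invented values:

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES (1ul << 10)   /* representative value */

    int main(void)
    {
        unsigned long node_start_pfn = 0x10432;              /* invented */
        unsigned long start  = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
        unsigned long offset = node_start_pfn - start;

        printf("allocate from pfn %#lx, point node_mem_map %lu entries in\n",
               start, offset);
        return 0;
    }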
1685 * get_pfn_range_for_nid - Return the start and end page frames for a node
1700 *start_pfn = -1UL; in get_pfn_range_for_nid()
1708 if (*start_pfn == -1UL) in get_pfn_range_for_nid()
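get_pfn_range_for_nid() above seeds start_pfn with -1UL, the largest unsigned long, so any real range lowers it; if the sentinel survives the scan, the node owns no memory. The pattern standalone, over an invented ranges table:

    #include <stdio.h>

    struct range { unsigned long start, end; };

    int main(void)
    {
        struct range ranges[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
        unsigned long start_pfn = -1UL, end_pfn = 0;

        for (unsigned i = 0; i < sizeof(ranges)/sizeof(ranges[0]); i++) {
            if (ranges[i].start < start_pfn)
                start_pfn = ranges[i].start;
            if (ranges[i].end > end_pfn)
                end_pfn = ranges[i].end;
        }

        if (start_pfn == -1UL)
            puts("node has no memory");
        else
            printf("node spans [%#lx, %#lx)\n", start_pfn, end_pfn);
        return 0;
    }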
1719 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); in free_area_init_node()
1723 pgdat->node_id = nid; in free_area_init_node()
1724 pgdat->node_start_pfn = start_pfn; in free_area_init_node()
1725 pgdat->per_cpu_nodestats = NULL; in free_area_init_node()
1728 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, in free_area_init_node()
1730 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); in free_area_init_node()
1751 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { in check_for_memory()
1752 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
1755 node_set_state(pgdat->node_id, N_HIGH_MEMORY); in check_for_memory()
1757 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); in check_for_memory()
1786 * free_area_init - Initialise all pg_data_t and zone data
1815 zone = MAX_NR_ZONES - i - 1; in free_area_init()
1838 pr_info(" %-8s ", zone_names[i]); in free_area_init()
1843 pr_cont("[mem %#018Lx-%#018Lx]\n", in free_area_init()
1847 << PAGE_SHIFT) - 1); in free_area_init()
1860 * subsection-map relative to active online memory ranges to in free_area_init()
1861 * enable future "sub-section" extensions of the memory map. in free_area_init()
1865 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, in free_area_init()
1867 ((u64)end_pfn << PAGE_SHIFT) - 1); in free_area_init()
1868 subsection_map_init(start_pfn, end_pfn - start_pfn); in free_area_init()
1907 if (pgdat->node_present_pages) in free_area_init()
1919 * node_map_pfn_alignment - determine the maximum internode alignment
1926 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
1930 * This is used to test whether pfn -> nid mapping of the chosen memory
1931 * model has fine enough granularity to avoid incorrect mapping for the
1952 * Start with a mask granular enough to pin-point to the in node_map_pfn_alignment()
1953 * start pfn and tick off bits one-by-one until it becomes in node_map_pfn_alignment()
1956 mask = ~((1 << __ffs(start)) - 1); in node_map_pfn_alignment()
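node_map_pfn_alignment() above starts with a mask fine enough to pin-point the start pfn, then widens it one bit at a time while the previous node still fits below the coarser bucket. Per the kernel-doc example quoted earlier, two nodes meeting at the 1GiB boundary with 4K pages yield 1 << (30 - PAGE_SHIFT) pages. A sketch of that case, using the compiler builtin __builtin_ctzl() in place of __ffs() and simplifying to a single node boundary:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;                   /* 4K pages */
        unsigned long start = 1ul << (30 - page_shift);  /* node 1 at 1GiB */
        unsigned long last_end = start;  /* node 0 ends where node 1 begins */
        unsigned long accl_mask = 0;

        /* mask granular enough to pin-point the start pfn ... */
        unsigned long mask = ~((1ul << __builtin_ctzl(start)) - 1);
        /* ... then tick bits off while the coarser bucket still separates
         * this node from the last */
        while (mask && last_end <= (start & (mask << 1)))
            mask <<= 1;
        accl_mask |= mask;

        /* convert the accumulated mask back to an alignment in pages */
        printf("alignment: %lu pages\n", ~accl_mask + 1); /* 262144 = 1GiB */
        return 0;
    }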
1980 /* Free a large naturally-aligned chunk if possible */ in deferred_free_range()
2032 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
2035 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
2042 deferred_free_range(pfn - nr_free, nr_free); in deferred_free_pages()
2075 * This function is meant to pre-load the iterator for the zone init.
2189 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
2202 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
2210 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
2211 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
2212 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_init_memmap()
2217 * pre-grown prior to start of deferred page initialization. in deferred_init_memmap()
2223 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2241 .size = epfn_align - spfn, in deferred_init_memmap()
2256 pgdat->node_id, jiffies_to_msecs(jiffies - start)); in deferred_init_memmap()
2280 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2281 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; in deferred_grow_zone()
2296 if (first_deferred_pfn != pgdat->first_deferred_pfn) { in deferred_grow_zone()
2304 pgdat->first_deferred_pfn = ULONG_MAX; in deferred_grow_zone()
2331 pgdat->first_deferred_pfn = spfn; in deferred_grow_zone()
2348 } while (++p, --i); in init_cma_reserved_pageblock()
2355 page_zone(page)->cma_pages += pageblock_nr_pages; in init_cma_reserved_pageblock()
2361 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
2378 zone->contiguous = true; in set_zone_contiguous()
2399 * on-demand struct page initialization. in page_alloc_init_late()
2442 * Because 32-bit systems cannot have large physical memory, where this scaling
2453 * - it is assumed that the hash table must contain an exact power-of-2
2455 * - limit is the number of hash buckets, not the total allocation size
2478 numentries -= arch_reserved_kernel_pages(); in alloc_large_system_hash()
2496 numentries >>= (scale - PAGE_SHIFT); in alloc_large_system_hash()
2498 numentries <<= (PAGE_SHIFT - scale); in alloc_large_system_hash()
2536 * If bucketsize is not a power-of-two, we may free in alloc_large_system_hash()
2543 } while (!table && size > PAGE_SIZE && --log2qty); in alloc_large_system_hash()
2549 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, in alloc_large_system_hash()
2555 *_hash_mask = (1 << log2qty) - 1; in alloc_large_system_hash()
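The scale logic above grants one hash entry per 2^scale bytes of memory; since numentries starts out in pages, it is shifted by (scale - PAGE_SHIFT) in whichever direction applies. A worked example with invented totals:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12, scale = 17;
        unsigned long numentries = 1ul << 20;  /* pages of memory (invented) */

        if (scale > page_shift)
            numentries >>= (scale - page_shift);
        else
            numentries <<= (page_shift - scale);

        printf("hash buckets: %lu\n", numentries);  /* 2^15 = 32768 */
        return 0;
    }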
2561 * set_dma_reserve - set the specified number of pages reserved in the first zone
2564 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2569 * smaller per-cpu batchsize.
2646 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " in mem_debugging_and_hardening_init()
2668 …pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running … in mem_debugging_and_hardening_init()
2689 /* Report memory auto-initialization states for this boot. */
2707 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", in report_meminit()
2711 pr_info("mem auto-init: clearing system memory may take some time...\n"); in report_meminit()
2720 codesize = _etext - _stext; in mem_init_print_info()
2721 datasize = _edata - _sdata; in mem_init_print_info()
2722 rosize = __end_rodata - __start_rodata; in mem_init_print_info()
2723 bss_size = __bss_stop - __bss_start; in mem_init_print_info()
2724 init_data_size = __init_end - __init_begin; in mem_init_print_info()
2725 init_code_size = _einittext - _sinittext; in mem_init_print_info()
2737 size -= adj; \ in mem_init_print_info()
2749 …(%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" in mem_init_print_info()
2757 K(physpages - totalram_pages() - totalcma_pages), in mem_init_print_info()
2800 /* Should be run before the first non-init thread is created */ in mm_core_init()