/mm/
memory_hotplug.c
  246   void register_page_bootmem_info_node(struct pglist_data *pgdat)   in register_page_bootmem_info_node() argument
  249   int node = pgdat->node_id;   in register_page_bootmem_info_node()
  254   page = virt_to_page(pgdat);   in register_page_bootmem_info_node()
  259   zone = &pgdat->node_zones[0];   in register_page_bootmem_info_node()
  260   for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {   in register_page_bootmem_info_node()
  272   pfn = pgdat->node_start_pfn;   in register_page_bootmem_info_node()
  273   end_pfn = pgdat_end_pfn(pgdat);   in register_page_bootmem_info_node()
  432   static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,   in grow_pgdat_span() argument
  435   unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);   in grow_pgdat_span()
  437   if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)   in grow_pgdat_span()
  [all …]
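The register_page_bootmem_info_node() hits above combine the two basic node traversals: walking pgdat->node_zones[] and walking the node's PFN span via node_start_pfn and pgdat_end_pfn(). A minimal sketch of that pattern follows; node_dump_spans() is a hypothetical helper, not a function from mm/.

    /*
     * Sketch only: walk a node's zone array and its PFN span, as the
     * memory_hotplug.c hits above do.  node_dump_spans() is illustrative.
     */
    #include <linux/mmzone.h>
    #include <linux/printk.h>

    static void node_dump_spans(struct pglist_data *pgdat)
    {
            struct zone *zone;
            unsigned long start_pfn = pgdat->node_start_pfn;
            unsigned long end_pfn = pgdat_end_pfn(pgdat);  /* start + node_spanned_pages */

            for (zone = &pgdat->node_zones[0];
                 zone < pgdat->node_zones + MAX_NR_ZONES; zone++)
                    pr_info("node %d zone %s: %lu spanned pages\n",
                            pgdat->node_id, zone->name, zone->spanned_pages);

            pr_info("node %d spans PFNs [%lu, %lu)\n",
                    pgdat->node_id, start_pfn, end_pfn);
    }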
nobootmem.c
  161   void reset_node_managed_pages(pg_data_t *pgdat)   in reset_node_managed_pages() argument
  165   for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)   in reset_node_managed_pages()
  171   struct pglist_data *pgdat;   in reset_all_zones_managed_pages() local
  176   for_each_online_pgdat(pgdat)   in reset_all_zones_managed_pages()
  177   reset_node_managed_pages(pgdat);   in reset_all_zones_managed_pages()
  214   void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,   in free_bootmem_node() argument
  316   void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,   in ___alloc_bootmem_node_nopanic() argument
  325   ptr = __alloc_memory_core_early(pgdat->node_id, size, align,   in ___alloc_bootmem_node_nopanic()
  343   void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,   in __alloc_bootmem_node_nopanic() argument
  347   return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);   in __alloc_bootmem_node_nopanic()
  [all …]
vmstat.c
  196   void set_pgdat_percpu_threshold(pg_data_t *pgdat,   in set_pgdat_percpu_threshold() argument
  204   for (i = 0; i < pgdat->nr_zones; i++) {   in set_pgdat_percpu_threshold()
  205   zone = &pgdat->node_zones[i];   in set_pgdat_percpu_threshold()
  879   pg_data_t *pgdat;   in frag_start() local
  882   for (pgdat = first_online_pgdat();   in frag_start()
  883   pgdat && node;   in frag_start()
  884   pgdat = next_online_pgdat(pgdat))   in frag_start()
  887   return pgdat;   in frag_start()
  892   pg_data_t *pgdat = (pg_data_t *)arg;   in frag_next() local
  895   return next_online_pgdat(pgdat);   in frag_next()
  [all …]
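The frag_start()/frag_next() hits above are the usual seq_file walk over online nodes, built from first_online_pgdat() and next_online_pgdat(). Reduced to a standalone helper it looks roughly like this; nth_online_pgdat() is a hypothetical name.

    /* Sketch only: return the pg_data_t of the node'th online node, or NULL. */
    #include <linux/mmzone.h>

    static pg_data_t *nth_online_pgdat(int node)
    {
            pg_data_t *pgdat;

            /* Count down while stepping through the online nodes in order. */
            for (pgdat = first_online_pgdat();
                 pgdat && node;
                 pgdat = next_online_pgdat(pgdat))
                    --node;

            return pgdat;   /* NULL when fewer than node + 1 nodes are online */
    }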
bootmem.c
  128   unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,   in init_bootmem_node() argument
  131   return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);   in init_bootmem_node()
  248   void reset_node_managed_pages(pg_data_t *pgdat)   in reset_node_managed_pages() argument
  252   for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)   in reset_node_managed_pages()
  258   struct pglist_data *pgdat;   in reset_all_zones_managed_pages() local
  263   for_each_online_pgdat(pgdat)   in reset_all_zones_managed_pages()
  264   reset_node_managed_pages(pgdat);   in reset_all_zones_managed_pages()
  400   void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,   in free_bootmem_node() argument
  410   mark_bootmem_node(pgdat->bdata, start, end, 0, 0);   in free_bootmem_node()
  445   int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,   in reserve_bootmem_node() argument
  [all …]
vmscan.c
  2707   static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)   in pfmemalloc_watermark_ok() argument
  2716   zone = &pgdat->node_zones[i];   in pfmemalloc_watermark_ok()
  2732   if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {   in pfmemalloc_watermark_ok()
  2733   pgdat->classzone_idx = min(pgdat->classzone_idx,   in pfmemalloc_watermark_ok()
  2735   wake_up_interruptible(&pgdat->kswapd_wait);   in pfmemalloc_watermark_ok()
  2755   pg_data_t *pgdat = NULL;   in throttle_direct_reclaim() local
  2794   pgdat = zone->zone_pgdat;   in throttle_direct_reclaim()
  2795   if (pfmemalloc_watermark_ok(pgdat))   in throttle_direct_reclaim()
  2801   if (!pgdat)   in throttle_direct_reclaim()
  2816   wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,   in throttle_direct_reclaim()
  [all …]
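The throttle_direct_reclaim()/pfmemalloc_watermark_ok() hits above pair wait_event_interruptible_timeout() on pgdat->pfmemalloc_wait with a waitqueue_active() check before waking. Stripped of the reclaim logic, the sleep/wake skeleton is roughly the following; the demo_* names are illustrative only.

    /* Sketch only: throttle on a waitqueue until a condition holds or a timeout expires. */
    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
    static bool demo_ok;

    /* Waiter: sleep interruptibly for at most one second waiting for demo_ok. */
    static void demo_throttle(void)
    {
            wait_event_interruptible_timeout(demo_wait, demo_ok, HZ);
    }

    /* Waker: only take the wake-up path if someone is actually waiting. */
    static void demo_release(void)
    {
            demo_ok = true;
            if (waitqueue_active(&demo_wait))
                    wake_up_interruptible(&demo_wait);
    }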
page_alloc.c
  289   static inline void reset_deferred_meminit(pg_data_t *pgdat)   in reset_deferred_meminit() argument
  300   (pgdat->node_spanned_pages >> 8));   in reset_deferred_meminit()
  307   start_addr = PFN_PHYS(pgdat->node_start_pfn);   in reset_deferred_meminit()
  308   end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);   in reset_deferred_meminit()
  312   pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);   in reset_deferred_meminit()
  313   pgdat->first_deferred_pfn = ULONG_MAX;   in reset_deferred_meminit()
  339   static inline bool update_defer_init(pg_data_t *pgdat,   in update_defer_init() argument
  344   if (zone_end < pgdat_end_pfn(pgdat))   in update_defer_init()
  348   if ((*nr_initialised > pgdat->static_init_pgcnt) &&   in update_defer_init()
  350   pgdat->first_deferred_pfn = pfn;   in update_defer_init()
  [all …]
mmzone.c
  17   struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)   in next_online_pgdat() argument
  19   int nid = next_online_node(pgdat->node_id);   in next_online_pgdat()
  31   pg_data_t *pgdat = zone->zone_pgdat;   in next_zone() local
  33   if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)   in next_zone()
  36   pgdat = next_online_pgdat(pgdat);   in next_zone()
  37   if (pgdat)   in next_zone()
  38   zone = pgdat->node_zones;   in next_zone()
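first_online_pgdat(), next_online_pgdat() and next_zone() from mmzone.c are the building blocks of the node and zone iterators used throughout this listing. Their composition in include/linux/mmzone.h is roughly as follows (paraphrased, not copied verbatim from any specific kernel version):

    #define for_each_online_pgdat(pgdat)                    \
            for (pgdat = first_online_pgdat();              \
                 pgdat;                                     \
                 pgdat = next_online_pgdat(pgdat))

    #define for_each_zone(zone)                             \
            for (zone = (first_online_pgdat())->node_zones; \
                 zone;                                      \
                 zone = next_zone(zone))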
sparse.c
  264   sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,   in sparse_early_usemaps_alloc_pgdat_section() argument
  280   goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);   in sparse_early_usemaps_alloc_pgdat_section()
  299   struct pglist_data *pgdat = NODE_DATA(nid);   in check_usemap_section_nr() local
  303   pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);   in check_usemap_section_nr()
  334   sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,   in sparse_early_usemaps_alloc_pgdat_section() argument
  337   return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);   in sparse_early_usemaps_alloc_pgdat_section()
  696   struct pglist_data *pgdat = zone->zone_pgdat;   in sparse_add_one_section() local
  707   ret = sparse_index_init(section_nr, pgdat->node_id);   in sparse_add_one_section()
  710   memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);   in sparse_add_one_section()
  719   pgdat_resize_lock(pgdat, &flags);   in sparse_add_one_section()
  [all …]
page_owner.c
  228   static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)   in init_pages_in_zone() argument
  288   pgdat->node_id, zone->name, count);   in init_pages_in_zone()
  291   static void init_zones_in_node(pg_data_t *pgdat)   in init_zones_in_node() argument
  294   struct zone *node_zones = pgdat->node_zones;   in init_zones_in_node()
  302   init_pages_in_zone(pgdat, zone);   in init_zones_in_node()
  309   pg_data_t *pgdat;   in init_early_allocated_pages() local
  312   for_each_online_pgdat(pgdat)   in init_early_allocated_pages()
  313   init_zones_in_node(pgdat);   in init_early_allocated_pages()
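init_zones_in_node() above shows the common per-node zone walk that skips zones with no present pages. Generalised into a callback-driven helper it looks roughly like this; visit_populated_zones() and the visit() callback are hypothetical names.

    /* Sketch only: invoke visit() for each populated zone of one node. */
    #include <linux/mmzone.h>

    static void visit_populated_zones(pg_data_t *pgdat,
                                      void (*visit)(pg_data_t *, struct zone *))
    {
            struct zone *node_zones = pgdat->node_zones;
            struct zone *zone;

            for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                    if (!populated_zone(zone))      /* no present pages in this zone */
                            continue;
                    visit(pgdat, zone);
            }
    }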
migrate.c
  1563   static bool migrate_balanced_pgdat(struct pglist_data *pgdat,   in migrate_balanced_pgdat() argument
  1567   for (z = pgdat->nr_zones - 1; z >= 0; z--) {   in migrate_balanced_pgdat()
  1568   struct zone *zone = pgdat->node_zones + z;   in migrate_balanced_pgdat()
  1612   static bool numamigrate_update_ratelimit(pg_data_t *pgdat,   in numamigrate_update_ratelimit() argument
  1620   if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {   in numamigrate_update_ratelimit()
  1621   spin_lock(&pgdat->numabalancing_migrate_lock);   in numamigrate_update_ratelimit()
  1622   pgdat->numabalancing_migrate_nr_pages = 0;   in numamigrate_update_ratelimit()
  1623   pgdat->numabalancing_migrate_next_window = jiffies +   in numamigrate_update_ratelimit()
  1625   spin_unlock(&pgdat->numabalancing_migrate_lock);   in numamigrate_update_ratelimit()
  1627   if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {   in numamigrate_update_ratelimit()
  [all …]
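The numamigrate_update_ratelimit() hits above show a windowed rate limiter keyed off jiffies: once the window expires the counter resets under a spinlock, and charges beyond the per-window budget are refused. A generic sketch of that pattern, with illustrative names (struct rate_window, window_charge()) that are not kernel API:

    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct rate_window {
            spinlock_t      lock;           /* initialise with spin_lock_init() */
            unsigned long   next_window;    /* jiffies at which the window resets */
            unsigned long   nr_charged;     /* units consumed in the current window */
    };

    static bool window_charge(struct rate_window *rw, unsigned long nr,
                              unsigned long limit, unsigned long interval)
    {
            /* Open a fresh window once the old one has expired. */
            if (time_after(jiffies, rw->next_window)) {
                    spin_lock(&rw->lock);
                    rw->nr_charged = 0;
                    rw->next_window = jiffies + interval;
                    spin_unlock(&rw->lock);
            }

            /* Refuse the charge when this window's budget is already spent. */
            if (rw->nr_charged > limit)
                    return false;

            rw->nr_charged += nr;   /* racy by design here; fine for an approximate limit */
            return true;
    }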
mm_init.c
  33   pg_data_t *pgdat = NODE_DATA(nid);   in mminit_verify_zonelist() local
  45   zonelist = &pgdat->node_zonelists[listid];   in mminit_verify_zonelist()
  46   zone = &pgdat->node_zones[zoneid];   in mminit_verify_zonelist()
highmem.c
  115   pg_data_t *pgdat;   in nr_free_highpages() local
  118   for_each_online_pgdat(pgdat) {   in nr_free_highpages()
  119   pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],   in nr_free_highpages()
  123   &pgdat->node_zones[ZONE_MOVABLE],   in nr_free_highpages()
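nr_free_highpages() above sums a per-zone vmstat counter (NR_FREE_PAGES) over selected zones of every online node. The same pattern, generalised to all zones so that no CONFIG_HIGHMEM guard is needed; total_free_pages() is a hypothetical helper, not the kernel's nr_free_pages().

    /* Sketch only: sum NR_FREE_PAGES over every zone of every online node. */
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    static unsigned long total_free_pages(void)
    {
            pg_data_t *pgdat;
            unsigned long pages = 0;
            int i;

            for_each_online_pgdat(pgdat)
                    for (i = 0; i < MAX_NR_ZONES; i++)
                            pages += zone_page_state(&pgdat->node_zones[i],
                                                     NR_FREE_PAGES);

            return pages;
    }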
page_ext.c
  97   void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)   in pgdat_page_ext_init() argument
  99   pgdat->node_page_ext = NULL;   in pgdat_page_ext_init()
  400   void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)   in pgdat_page_ext_init() argument
compaction.c
  239   void reset_isolation_suitable(pg_data_t *pgdat)   in reset_isolation_suitable() argument
  244   struct zone *zone = &pgdat->node_zones[zoneid];   in reset_isolation_suitable()
  1639   static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)   in __compact_pgdat() argument
  1646   zone = &pgdat->node_zones[zoneid];   in __compact_pgdat()
  1679   void compact_pgdat(pg_data_t *pgdat, int order)   in compact_pgdat() argument
  1689   __compact_pgdat(pgdat, &cc);   in compact_pgdat()