1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/page_alloc.c
4  *
5  *  Manages the free list; the system allocates free pages here.
6  *  Note that kmalloc() lives in slab.c
7  *
8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
9  *  Swap reorganised 29.12.95, Stephen Tweedie
10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16  */
17 
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/migrate.h>
61 #include <linux/hugetlb.h>
62 #include <linux/sched/rt.h>
63 #include <linux/sched/mm.h>
64 #include <linux/page_owner.h>
65 #include <linux/kthread.h>
66 #include <linux/memcontrol.h>
67 #include <linux/ftrace.h>
68 #include <linux/lockdep.h>
69 #include <linux/nmi.h>
70 #include <linux/psi.h>
71 #include <linux/padata.h>
72 #include <linux/khugepaged.h>
73 #include <linux/zswapd.h>
74 
75 #include <asm/sections.h>
76 #include <asm/tlbflush.h>
77 #include <asm/div64.h>
78 #include "internal.h"
79 #include "shuffle.h"
80 #include "page_reporting.h"
81 
82 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
83 typedef int __bitwise fpi_t;
84 
85 /* No special request */
86 #define FPI_NONE		((__force fpi_t)0)
87 
88 /*
89  * Skip free page reporting notification for the (possibly merged) page.
90  * This does not hinder free page reporting from grabbing the page,
91  * reporting it and marking it "reported" -  it only skips notifying
92  * the free page reporting infrastructure about a newly freed page. For
93  * example, used when temporarily pulling a page from a freelist and
94  * putting it back unmodified.
95  */
96 #define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))
97 
98 /*
99  * Place the (possibly merged) page to the tail of the freelist. Will ignore
100  * page shuffling (relevant code - e.g., memory onlining - is expected to
101  * shuffle the whole zone).
102  *
103  * Note: No code should rely on this flag for correctness - it's purely
104  *       to allow for optimizations when handing back either fresh pages
105  *       (memory onlining) or untouched pages (page isolation, free page
106  *       reporting).
107  */
108 #define FPI_TO_TAIL		((__force fpi_t)BIT(1))
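
/*
 * Illustrative sketch (editorial addition, not part of page_alloc.c):
 * fpi_t values are plain bit flags, so internal callers can OR them
 * together and the free path tests them with a simple mask, e.g.:
 *
 *	fpi_t fpi_flags = FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL;
 *
 *	if (fpi_flags & FPI_TO_TAIL)
 *		add_to_free_list_tail(page, zone, order, migratetype);
 */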
109 
110 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
111 static DEFINE_MUTEX(pcp_batch_high_lock);
112 #define MIN_PERCPU_PAGELIST_FRACTION	(8)
113 
114 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
115 DEFINE_PER_CPU(int, numa_node);
116 EXPORT_PER_CPU_SYMBOL(numa_node);
117 #endif
118 
119 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
120 
121 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
122 /*
123  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
124  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
125  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
126  * defined in <linux/topology.h>.
127  */
128 DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
129 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
130 #endif
131 
132 /* work_structs for global per-cpu drains */
133 struct pcpu_drain {
134 	struct zone *zone;
135 	struct work_struct work;
136 };
137 static DEFINE_MUTEX(pcpu_drain_mutex);
138 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
139 
140 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
141 volatile unsigned long latent_entropy __latent_entropy;
142 EXPORT_SYMBOL(latent_entropy);
143 #endif
144 
145 /*
146  * Array of node states.
147  */
148 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
149 	[N_POSSIBLE] = NODE_MASK_ALL,
150 	[N_ONLINE] = { { [0] = 1UL } },
151 #ifndef CONFIG_NUMA
152 	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
153 #ifdef CONFIG_HIGHMEM
154 	[N_HIGH_MEMORY] = { { [0] = 1UL } },
155 #endif
156 	[N_MEMORY] = { { [0] = 1UL } },
157 	[N_CPU] = { { [0] = 1UL } },
158 #endif	/* NUMA */
159 };
160 EXPORT_SYMBOL(node_states);
161 
162 atomic_long_t _totalram_pages __read_mostly;
163 EXPORT_SYMBOL(_totalram_pages);
164 unsigned long totalreserve_pages __read_mostly;
165 unsigned long totalcma_pages __read_mostly;
166 
167 int percpu_pagelist_fraction;
168 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
169 #ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
170 DEFINE_STATIC_KEY_TRUE(init_on_alloc);
171 #else
172 DEFINE_STATIC_KEY_FALSE(init_on_alloc);
173 #endif
174 EXPORT_SYMBOL(init_on_alloc);
175 
176 #ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
177 DEFINE_STATIC_KEY_TRUE(init_on_free);
178 #else
179 DEFINE_STATIC_KEY_FALSE(init_on_free);
180 #endif
181 EXPORT_SYMBOL(init_on_free);
182 
183 static int __init early_init_on_alloc(char *buf)
184 {
185 	int ret;
186 	bool bool_result;
187 
188 	ret = kstrtobool(buf, &bool_result);
189 	if (ret)
190 		return ret;
191 	if (bool_result && page_poisoning_enabled())
192 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_alloc\n");
193 	if (bool_result)
194 		static_branch_enable(&init_on_alloc);
195 	else
196 		static_branch_disable(&init_on_alloc);
197 	return 0;
198 }
199 early_param("init_on_alloc", early_init_on_alloc);
200 
201 static int __init early_init_on_free(char *buf)
202 {
203 	int ret;
204 	bool bool_result;
205 
206 	ret = kstrtobool(buf, &bool_result);
207 	if (ret)
208 		return ret;
209 	if (bool_result && page_poisoning_enabled())
210 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, will take precedence over init_on_free\n");
211 	if (bool_result)
212 		static_branch_enable(&init_on_free);
213 	else
214 		static_branch_disable(&init_on_free);
215 	return 0;
216 }
217 early_param("init_on_free", early_init_on_free);
218 
219 /*
220  * A cached value of the page's pageblock's migratetype, used when the page is
221  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
222  * freeing from pcplists in most cases, at the cost of possibly becoming stale.
223  * Also the migratetype set in the page does not necessarily match the pcplist
224  * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
225  * other index - this ensures that it will be put on the correct CMA freelist.
226  */
227 static inline int get_pcppage_migratetype(struct page *page)
228 {
229 	return page->index;
230 }
231 
232 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
233 {
234 	page->index = migratetype;
235 }
236 
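/*
 * Illustrative sketch (hypothetical caller, editorial addition): while a
 * page sits on a pcplist, the migratetype simply round-trips through
 * page->index, e.g.:
 *
 *	set_pcppage_migratetype(page, get_pageblock_migratetype(page));
 *	...
 *	mt = get_pcppage_migratetype(page);	-- may be stale vs. the pageblock
 */
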
237 #ifdef CONFIG_PM_SLEEP
238 /*
239  * The following functions are used by the suspend/hibernate code to temporarily
240  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
241  * while devices are suspended.  To avoid races with the suspend/hibernate code,
242  * they should always be called with system_transition_mutex held
243  * (gfp_allowed_mask also should only be modified with system_transition_mutex
244  * held, unless the suspend/hibernate code is guaranteed not to run in parallel
245  * with that modification).
246  */
247 
248 static gfp_t saved_gfp_mask;
249 
250 void pm_restore_gfp_mask(void)
251 {
252 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
253 	if (saved_gfp_mask) {
254 		gfp_allowed_mask = saved_gfp_mask;
255 		saved_gfp_mask = 0;
256 	}
257 }
258 
259 void pm_restrict_gfp_mask(void)
260 {
261 	WARN_ON(!mutex_is_locked(&system_transition_mutex));
262 	WARN_ON(saved_gfp_mask);
263 	saved_gfp_mask = gfp_allowed_mask;
264 	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
265 }
266 
267 bool pm_suspended_storage(void)
268 {
269 	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
270 		return false;
271 	return true;
272 }
273 #endif /* CONFIG_PM_SLEEP */
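
/*
 * Illustrative sketch (hypothetical caller, editorial addition): the
 * hibernation path is expected to bracket the "no I/O" window roughly
 * like this, with system_transition_mutex held as noted above:
 *
 *	pm_restrict_gfp_mask();		clears __GFP_IO and __GFP_FS
 *	... devices suspended; allocations must not touch storage ...
 *	pm_restore_gfp_mask();
 */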
274 
275 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
276 unsigned int pageblock_order __read_mostly;
277 #endif
278 
279 static void __free_pages_ok(struct page *page, unsigned int order,
280 			    fpi_t fpi_flags);
281 
282 /*
283  * results with 256, 32 in the lowmem_reserve sysctl:
284  *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
285  *	1G machine -> (16M dma, 784M normal, 224M high)
286  *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
287  *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
288  *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
289  *
290  * TBD: should special case ZONE_DMA32 machines here - in those we normally
291  * don't need any ZONE_NORMAL reservation
292  */
293 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
294 #ifdef CONFIG_ZONE_DMA
295 	[ZONE_DMA] = 256,
296 #endif
297 #ifdef CONFIG_ZONE_DMA32
298 	[ZONE_DMA32] = 256,
299 #endif
300 	[ZONE_NORMAL] = 32,
301 #ifdef CONFIG_HIGHMEM
302 	[ZONE_HIGHMEM] = 0,
303 #endif
304 	[ZONE_MOVABLE] = 0,
305 };
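
/*
 * Worked example (editorial addition): with the 1G split above of
 * 16M DMA / 784M Normal / 224M HighMem, a HIGHMEM allocation keeps
 * 224M/32 = 7M of ZONE_NORMAL in reserve, and a NORMAL allocation keeps
 * roughly 784M/256 ~= 3M of ZONE_DMA in reserve, so low zones are not
 * exhausted by requests that could have been satisfied from higher zones.
 */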
306 
307 static char * const zone_names[MAX_NR_ZONES] = {
308 #ifdef CONFIG_ZONE_DMA
309 	 "DMA",
310 #endif
311 #ifdef CONFIG_ZONE_DMA32
312 	 "DMA32",
313 #endif
314 	 "Normal",
315 #ifdef CONFIG_HIGHMEM
316 	 "HighMem",
317 #endif
318 	 "Movable",
319 #ifdef CONFIG_ZONE_DEVICE
320 	 "Device",
321 #endif
322 };
323 
324 const char * const migratetype_names[MIGRATE_TYPES] = {
325 	"Unmovable",
326 	"Movable",
327 	"Reclaimable",
328 #ifdef CONFIG_CMA_REUSE
329 	"CMA",
330 #endif
331 	"HighAtomic",
332 #if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
333 	"CMA",
334 #endif
335 #ifdef CONFIG_MEMORY_ISOLATION
336 	"Isolate",
337 #endif
338 };
339 
340 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
341 	[NULL_COMPOUND_DTOR] = NULL,
342 	[COMPOUND_PAGE_DTOR] = free_compound_page,
343 #ifdef CONFIG_HUGETLB_PAGE
344 	[HUGETLB_PAGE_DTOR] = free_huge_page,
345 #endif
346 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
347 	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
348 #endif
349 };
350 
351 int min_free_kbytes = 1024;
352 int user_min_free_kbytes = -1;
353 #ifdef CONFIG_DISCONTIGMEM
354 /*
355  * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
356  * are not on separate NUMA nodes. Functionally this works but with
357  * watermark_boost_factor, it can reclaim prematurely as the ranges can be
358  * quite small. By default, do not boost watermarks on discontigmem as in
359  * many cases very high-order allocations like THP are likely to be
360  * unsupported and the premature reclaim offsets the advantage of long-term
361  * fragmentation avoidance.
362  */
363 int watermark_boost_factor __read_mostly;
364 #else
365 int watermark_boost_factor __read_mostly = 15000;
366 #endif
367 int watermark_scale_factor = 10;
368 
369 static unsigned long nr_kernel_pages __initdata;
370 static unsigned long nr_all_pages __initdata;
371 static unsigned long dma_reserve __initdata;
372 
373 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
374 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
375 static unsigned long required_kernelcore __initdata;
376 static unsigned long required_kernelcore_percent __initdata;
377 static unsigned long required_movablecore __initdata;
378 static unsigned long required_movablecore_percent __initdata;
379 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
380 static bool mirrored_kernelcore __meminitdata;
381 
382 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
383 int movable_zone;
384 EXPORT_SYMBOL(movable_zone);
385 
386 #if MAX_NUMNODES > 1
387 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
388 unsigned int nr_online_nodes __read_mostly = 1;
389 EXPORT_SYMBOL(nr_node_ids);
390 EXPORT_SYMBOL(nr_online_nodes);
391 #endif
392 
393 int page_group_by_mobility_disabled __read_mostly;
394 
395 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
396 /*
397  * During boot we initialize deferred pages on-demand, as needed, but once
398  * page_alloc_init_late() has finished, the deferred pages are all initialized,
399  * and we can permanently disable that path.
400  */
401 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
402 
403 /*
404  * kasan_free_pages() is called only after deferred memory initialization
405  * has completed. Poisoning pages during deferred memory init would greatly
406  * lengthen the process and cause problems on large memory systems, as the
407  * deferred page initialization is done with interrupts disabled.
408  *
409  * Assuming that there will be no reference to those newly initialized
410  * pages before they are ever allocated, this should have no effect on
411  * KASAN memory tracking as the poison will be properly inserted at page
412  * allocation time. The only corner case is when pages are allocated by
413  * on-demand allocation and then freed again before the deferred pages
414  * initialization is done, but this is not likely to happen.
415  */
416 static inline void kasan_free_nondeferred_pages(struct page *page, int order)
417 {
418 	if (!static_branch_unlikely(&deferred_pages))
419 		kasan_free_pages(page, order);
420 }
421 
422 /* Returns true if the struct page for the pfn is uninitialised */
423 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
424 {
425 	int nid = early_pfn_to_nid(pfn);
426 
427 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
428 		return true;
429 
430 	return false;
431 }
432 
433 /*
434  * Returns true when the remaining initialisation should be deferred until
435  * later in the boot cycle when it can be parallelised.
436  */
437 static bool __meminit
438 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
439 {
440 	static unsigned long prev_end_pfn, nr_initialised;
441 
442 	/*
443 	 * The prev_end_pfn static contains the end of the previous zone; no
444 	 * locking is needed since this runs very early in boot, before smp_init().
445 	 */
446 	if (prev_end_pfn != end_pfn) {
447 		prev_end_pfn = end_pfn;
448 		nr_initialised = 0;
449 	}
450 
451 	/* Always populate low zones for address-constrained allocations */
452 	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
453 		return false;
454 
455 	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
456 		return true;
457 	/*
458 	 * We start only with one section of pages, more pages are added as
459 	 * needed until the rest of deferred pages are initialized.
460 	 */
461 	nr_initialised++;
462 	if ((nr_initialised > PAGES_PER_SECTION) &&
463 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
464 		NODE_DATA(nid)->first_deferred_pfn = pfn;
465 		return true;
466 	}
467 	return false;
468 }
469 #else
470 #define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)
471 
472 static inline bool early_page_uninitialised(unsigned long pfn)
473 {
474 	return false;
475 }
476 
477 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
478 {
479 	return false;
480 }
481 #endif
482 
483 /* Return a pointer to the bitmap storing bits affecting a block of pages */
484 static inline unsigned long *get_pageblock_bitmap(struct page *page,
485 							unsigned long pfn)
486 {
487 #ifdef CONFIG_SPARSEMEM
488 	return section_to_usemap(__pfn_to_section(pfn));
489 #else
490 	return page_zone(page)->pageblock_flags;
491 #endif /* CONFIG_SPARSEMEM */
492 }
493 
494 static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
495 {
496 #ifdef CONFIG_SPARSEMEM
497 	pfn &= (PAGES_PER_SECTION-1);
498 #else
499 	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
500 #endif /* CONFIG_SPARSEMEM */
501 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
502 }
503 
504 /**
505  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
506  * @page: The page within the block of interest
507  * @pfn: The target page frame number
508  * @mask: mask of bits that the caller is interested in
509  *
510  * Return: pageblock_bits flags
511  */
512 static __always_inline
513 unsigned long __get_pfnblock_flags_mask(struct page *page,
514 					unsigned long pfn,
515 					unsigned long mask)
516 {
517 	unsigned long *bitmap;
518 	unsigned long bitidx, word_bitidx;
519 	unsigned long word;
520 
521 	bitmap = get_pageblock_bitmap(page, pfn);
522 	bitidx = pfn_to_bitidx(page, pfn);
523 	word_bitidx = bitidx / BITS_PER_LONG;
524 	bitidx &= (BITS_PER_LONG-1);
525 
526 	word = bitmap[word_bitidx];
527 	return (word >> bitidx) & mask;
528 }
529 
530 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
531 					unsigned long mask)
532 {
533 	return __get_pfnblock_flags_mask(page, pfn, mask);
534 }
535 
536 static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
537 {
538 	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
539 }
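
/*
 * Illustrative sketch (editorial addition; the sparsemem/flatmem pfn
 * adjustment done by pfn_to_bitidx() is omitted): each pageblock owns
 * NR_PAGEBLOCK_BITS (4) consecutive bits in the bitmap, so reading a
 * block's migratetype boils down to
 *
 *	bitidx = (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 *	mt = (bitmap[bitidx / BITS_PER_LONG] >> (bitidx % BITS_PER_LONG))
 *		& MIGRATETYPE_MASK;
 */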
540 
541 /**
542  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
543  * @page: The page within the block of interest
544  * @flags: The flags to set
545  * @pfn: The target page frame number
546  * @mask: mask of bits that the caller is interested in
547  */
548 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
549 					unsigned long pfn,
550 					unsigned long mask)
551 {
552 	unsigned long *bitmap;
553 	unsigned long bitidx, word_bitidx;
554 	unsigned long old_word, word;
555 
556 	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
557 	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
558 
559 	bitmap = get_pageblock_bitmap(page, pfn);
560 	bitidx = pfn_to_bitidx(page, pfn);
561 	word_bitidx = bitidx / BITS_PER_LONG;
562 	bitidx &= (BITS_PER_LONG-1);
563 
564 	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
565 
566 	mask <<= bitidx;
567 	flags <<= bitidx;
568 
569 	word = READ_ONCE(bitmap[word_bitidx]);
570 	for (;;) {
571 		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
572 		if (word == old_word)
573 			break;
574 		word = old_word;
575 	}
576 }
577 
578 void set_pageblock_migratetype(struct page *page, int migratetype)
579 {
580 	if (unlikely(page_group_by_mobility_disabled &&
581 		     migratetype < MIGRATE_PCPTYPES))
582 		migratetype = MIGRATE_UNMOVABLE;
583 
584 	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
585 				page_to_pfn(page), MIGRATETYPE_MASK);
586 }
587 
588 #ifdef CONFIG_DEBUG_VM
589 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
590 {
591 	int ret = 0;
592 	unsigned seq;
593 	unsigned long pfn = page_to_pfn(page);
594 	unsigned long sp, start_pfn;
595 
596 	do {
597 		seq = zone_span_seqbegin(zone);
598 		start_pfn = zone->zone_start_pfn;
599 		sp = zone->spanned_pages;
600 		if (!zone_spans_pfn(zone, pfn))
601 			ret = 1;
602 	} while (zone_span_seqretry(zone, seq));
603 
604 	if (ret)
605 		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
606 			pfn, zone_to_nid(zone), zone->name,
607 			start_pfn, start_pfn + sp);
608 
609 	return ret;
610 }
611 
612 static int page_is_consistent(struct zone *zone, struct page *page)
613 {
614 	if (!pfn_valid_within(page_to_pfn(page)))
615 		return 0;
616 	if (zone != page_zone(page))
617 		return 0;
618 
619 	return 1;
620 }
621 /*
622  * Temporary debugging check for pages not lying within a given zone.
623  */
624 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
625 {
626 	if (page_outside_zone_boundaries(zone, page))
627 		return 1;
628 	if (!page_is_consistent(zone, page))
629 		return 1;
630 
631 	return 0;
632 }
633 #else
634 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
635 {
636 	return 0;
637 }
638 #endif
639 
640 static void bad_page(struct page *page, const char *reason)
641 {
642 	static unsigned long resume;
643 	static unsigned long nr_shown;
644 	static unsigned long nr_unshown;
645 
646 	/*
647 	 * Allow a burst of 60 reports, then keep quiet for that minute;
648 	 * or allow a steady drip of one report per second.
649 	 */
650 	if (nr_shown == 60) {
651 		if (time_before(jiffies, resume)) {
652 			nr_unshown++;
653 			goto out;
654 		}
655 		if (nr_unshown) {
656 			pr_alert(
657 			      "BUG: Bad page state: %lu messages suppressed\n",
658 				nr_unshown);
659 			nr_unshown = 0;
660 		}
661 		nr_shown = 0;
662 	}
663 	if (nr_shown++ == 0)
664 		resume = jiffies + 60 * HZ;
665 
666 	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
667 		current->comm, page_to_pfn(page));
668 	__dump_page(page, reason);
669 	dump_page_owner(page);
670 
671 	print_modules();
672 	dump_stack();
673 out:
674 	/* Leave bad fields for debug, except PageBuddy could make trouble */
675 	page_mapcount_reset(page); /* remove PageBuddy */
676 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
677 }
678 
679 /*
680  * Higher-order pages are called "compound pages".  They are structured thusly:
681  *
682  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
683  *
684  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
685  * in bit 0 of page->compound_head. The rest of the bits are a pointer to the head page.
686  *
687  * The first tail page's ->compound_dtor holds the offset in array of compound
688  * page destructors. See compound_page_dtors.
689  *
690  * The first tail page's ->compound_order holds the order of allocation.
691  * This usage means that zero-order pages may not be compound.
692  */
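
/*
 * Illustrative sketch (editorial addition): for an order-2 compound page,
 * prep_compound_page() below ends up with
 *
 *	head:    PG_head set, compound_order(head) == 2
 *	page[i]: compound_head == (unsigned long)head | 1   (i = 1..3)
 *
 * so compound_head(tail) only has to mask off bit 0 to recover the head.
 */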
693 
694 void free_compound_page(struct page *page)
695 {
696 	mem_cgroup_uncharge(page);
697 	__free_pages_ok(page, compound_order(page), FPI_NONE);
698 }
699 
700 void prep_compound_page(struct page *page, unsigned int order)
701 {
702 	int i;
703 	int nr_pages = 1 << order;
704 
705 	__SetPageHead(page);
706 	for (i = 1; i < nr_pages; i++) {
707 		struct page *p = page + i;
708 		set_page_count(p, 0);
709 		p->mapping = TAIL_MAPPING;
710 		set_compound_head(p, page);
711 	}
712 
713 	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
714 	set_compound_order(page, order);
715 	atomic_set(compound_mapcount_ptr(page), -1);
716 	if (hpage_pincount_available(page))
717 		atomic_set(compound_pincount_ptr(page), 0);
718 }
719 
720 #ifdef CONFIG_DEBUG_PAGEALLOC
721 unsigned int _debug_guardpage_minorder;
722 
723 bool _debug_pagealloc_enabled_early __read_mostly
724 			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
725 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
726 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
727 EXPORT_SYMBOL(_debug_pagealloc_enabled);
728 
729 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
730 
731 static int __init early_debug_pagealloc(char *buf)
732 {
733 	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
734 }
735 early_param("debug_pagealloc", early_debug_pagealloc);
736 
737 void init_debug_pagealloc(void)
738 {
739 	if (!debug_pagealloc_enabled())
740 		return;
741 
742 	static_branch_enable(&_debug_pagealloc_enabled);
743 
744 	if (!debug_guardpage_minorder())
745 		return;
746 
747 	static_branch_enable(&_debug_guardpage_enabled);
748 }
749 
750 static int __init debug_guardpage_minorder_setup(char *buf)
751 {
752 	unsigned long res;
753 
754 	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
755 		pr_err("Bad debug_guardpage_minorder value\n");
756 		return 0;
757 	}
758 	_debug_guardpage_minorder = res;
759 	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
760 	return 0;
761 }
762 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
763 
764 static inline bool set_page_guard(struct zone *zone, struct page *page,
765 				unsigned int order, int migratetype)
766 {
767 	if (!debug_guardpage_enabled())
768 		return false;
769 
770 	if (order >= debug_guardpage_minorder())
771 		return false;
772 
773 	__SetPageGuard(page);
774 	INIT_LIST_HEAD(&page->lru);
775 	set_page_private(page, order);
776 	/* Guard pages are not available for any usage */
777 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
778 
779 	return true;
780 }
781 
782 static inline void clear_page_guard(struct zone *zone, struct page *page,
783 				unsigned int order, int migratetype)
784 {
785 	if (!debug_guardpage_enabled())
786 		return;
787 
788 	__ClearPageGuard(page);
789 
790 	set_page_private(page, 0);
791 	if (!is_migrate_isolate(migratetype))
792 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
793 }
794 #else
795 static inline bool set_page_guard(struct zone *zone, struct page *page,
796 			unsigned int order, int migratetype) { return false; }
797 static inline void clear_page_guard(struct zone *zone, struct page *page,
798 				unsigned int order, int migratetype) {}
799 #endif
800 
801 static inline void set_buddy_order(struct page *page, unsigned int order)
802 {
803 	set_page_private(page, order);
804 	__SetPageBuddy(page);
805 }
806 
807 /*
808  * This function checks whether a page is free && is the buddy.
809  * We can coalesce a page and its buddy if
810  * (a) the buddy is not in a hole (check before calling!) &&
811  * (b) the buddy is in the buddy system &&
812  * (c) a page and its buddy have the same order &&
813  * (d) a page and its buddy are in the same zone.
814  *
815  * For recording whether a page is in the buddy system, we set PageBuddy.
816  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
817  *
818  * For recording page's order, we use page_private(page).
819  */
820 static inline bool page_is_buddy(struct page *page, struct page *buddy,
821 							unsigned int order)
822 {
823 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
824 		return false;
825 
826 	if (buddy_order(buddy) != order)
827 		return false;
828 
829 	/*
830 	 * zone check is done late to avoid uselessly calculating
831 	 * zone/node ids for pages that could never merge.
832 	 */
833 	if (page_zone_id(page) != page_zone_id(buddy))
834 		return false;
835 
836 	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
837 
838 	return true;
839 }
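
/*
 * Editorial note (paraphrase of __find_buddy_pfn() from mm/internal.h,
 * stated here as an assumption for the reader): the buddy of a block is
 * found by flipping the order bit of its pfn,
 *
 *	buddy_pfn = pfn ^ (1 << order);
 *
 * page_is_buddy() above then validates that candidate before merging.
 */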
840 
841 #ifdef CONFIG_COMPACTION
842 static inline struct capture_control *task_capc(struct zone *zone)
843 {
844 	struct capture_control *capc = current->capture_control;
845 
846 	return unlikely(capc) &&
847 		!(current->flags & PF_KTHREAD) &&
848 		!capc->page &&
849 		capc->cc->zone == zone ? capc : NULL;
850 }
851 
852 static inline bool
853 compaction_capture(struct capture_control *capc, struct page *page,
854 		   int order, int migratetype)
855 {
856 	if (!capc || order != capc->cc->order)
857 		return false;
858 
859 	/* Do not accidentally pollute CMA or isolated regions */
860 	if (is_migrate_cma(migratetype) ||
861 	    is_migrate_isolate(migratetype))
862 		return false;
863 
864 	/*
865 	 * Do not let lower order allocations pollute a movable pageblock.
866 	 * This might let an unmovable request use a reclaimable pageblock
867 	 * and vice-versa but no more than normal fallback logic which can
868 	 * have trouble finding a high-order free page.
869 	 */
870 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
871 		return false;
872 
873 	capc->page = page;
874 	return true;
875 }
876 
877 #else
878 static inline struct capture_control *task_capc(struct zone *zone)
879 {
880 	return NULL;
881 }
882 
883 static inline bool
884 compaction_capture(struct capture_control *capc, struct page *page,
885 		   int order, int migratetype)
886 {
887 	return false;
888 }
889 #endif /* CONFIG_COMPACTION */
890 
891 /* Used for pages not on another list */
892 static inline void add_to_free_list(struct page *page, struct zone *zone,
893 				    unsigned int order, int migratetype)
894 {
895 	struct free_area *area = &zone->free_area[order];
896 
897 	list_add(&page->lru, &area->free_list[migratetype]);
898 	area->nr_free++;
899 }
900 
901 /* Used for pages not on another list */
902 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
903 					 unsigned int order, int migratetype)
904 {
905 	struct free_area *area = &zone->free_area[order];
906 
907 	list_add_tail(&page->lru, &area->free_list[migratetype]);
908 	area->nr_free++;
909 }
910 
911 /*
912  * Used for pages which are on another list. Move the pages to the tail
913  * of the list - so the moved pages won't immediately be considered for
914  * allocation again (e.g., optimization for memory onlining).
915  */
916 static inline void move_to_free_list(struct page *page, struct zone *zone,
917 				     unsigned int order, int migratetype)
918 {
919 	struct free_area *area = &zone->free_area[order];
920 
921 	list_move_tail(&page->lru, &area->free_list[migratetype]);
922 }
923 
924 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
925 					   unsigned int order)
926 {
927 	/* clear reported state and update reported page count */
928 	if (page_reported(page))
929 		__ClearPageReported(page);
930 
931 	list_del(&page->lru);
932 	__ClearPageBuddy(page);
933 	set_page_private(page, 0);
934 	zone->free_area[order].nr_free--;
935 }
936 
937 /*
938  * If this is not the largest possible page, check if the buddy
939  * of the next-highest order is free. If it is, it's possible
940  * that pages are being freed that will coalesce soon. In case
941  * that is happening, add the free page to the tail of the list
942  * so it's less likely to be used soon and more likely to be merged
943  * as a higher order page
944  */
945 static inline bool
946 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
947 		   struct page *page, unsigned int order)
948 {
949 	struct page *higher_page, *higher_buddy;
950 	unsigned long combined_pfn;
951 
952 	if (order >= MAX_ORDER - 2)
953 		return false;
954 
955 	if (!pfn_valid_within(buddy_pfn))
956 		return false;
957 
958 	combined_pfn = buddy_pfn & pfn;
959 	higher_page = page + (combined_pfn - pfn);
960 	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
961 	higher_buddy = higher_page + (buddy_pfn - combined_pfn);
962 
963 	return pfn_valid_within(buddy_pfn) &&
964 	       page_is_buddy(higher_page, higher_buddy, order + 1);
965 }
966 
967 /*
968  * Freeing function for a buddy system allocator.
969  *
970  * The concept of a buddy system is to maintain direct-mapped table
971  * (containing bit values) for memory blocks of various "orders".
972  * The bottom level table contains the map for the smallest allocatable
973  * units of memory (here, pages), and each level above it describes
974  * pairs of units from the levels below, hence, "buddies".
975  * At a high level, all that happens here is marking the table entry
976  * at the bottom level available, and propagating the changes upward
977  * as necessary, plus some accounting needed to play nicely with other
978  * parts of the VM system.
979  * At each level, we keep a list of pages, which are heads of contiguous
980  * runs of free pages of length (1 << order), marked with PageBuddy.
981  * Page's order is recorded in page_private(page) field.
982  * So when we are allocating or freeing one, we can derive the state of the
983  * other.  That is, if we allocate a small block, and both were
984  * free, the remainder of the region must be split into blocks.
985  * If a block is freed, and its buddy is also free, then this
986  * triggers coalescing into a block of larger size.
987  *
988  * -- nyc
989  */
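
/*
 * Worked example (editorial addition): freeing pfn 8 at order 0 while
 * pfn 9 is already free merges them into an order-1 block at pfn 8; if
 * the order-1 buddy at pfn 10 is free as well, that becomes an order-2
 * block at pfn 8, and so on until a buddy is missing or the merge cap
 * in __free_one_page() below is reached.
 */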
990 
991 static inline void __free_one_page(struct page *page,
992 		unsigned long pfn,
993 		struct zone *zone, unsigned int order,
994 		int migratetype, fpi_t fpi_flags)
995 {
996 	struct capture_control *capc = task_capc(zone);
997 	unsigned long buddy_pfn;
998 	unsigned long combined_pfn;
999 	unsigned int max_order;
1000 	struct page *buddy;
1001 	bool to_tail;
1002 
1003 	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1004 
1005 	VM_BUG_ON(!zone_is_initialized(zone));
1006 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1007 
1008 	VM_BUG_ON(migratetype == -1);
1009 	if (likely(!is_migrate_isolate(migratetype)))
1010 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
1011 
1012 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1013 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
1014 
1015 continue_merging:
1016 	while (order < max_order) {
1017 		if (compaction_capture(capc, page, order, migratetype)) {
1018 			__mod_zone_freepage_state(zone, -(1 << order),
1019 								migratetype);
1020 			return;
1021 		}
1022 		buddy_pfn = __find_buddy_pfn(pfn, order);
1023 		buddy = page + (buddy_pfn - pfn);
1024 
1025 		if (!pfn_valid_within(buddy_pfn))
1026 			goto done_merging;
1027 		if (!page_is_buddy(page, buddy, order))
1028 			goto done_merging;
1029 		/*
1030 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1031 		 * merge with it and move up one order.
1032 		 */
1033 		if (page_is_guard(buddy))
1034 			clear_page_guard(zone, buddy, order, migratetype);
1035 		else
1036 			del_page_from_free_list(buddy, zone, order);
1037 		combined_pfn = buddy_pfn & pfn;
1038 		page = page + (combined_pfn - pfn);
1039 		pfn = combined_pfn;
1040 		order++;
1041 	}
1042 	if (order < MAX_ORDER - 1) {
1043 		/* If we are here, it means order is >= pageblock_order.
1044 		 * We want to prevent merge between freepages on isolate
1045 		 * pageblock and normal pageblock. Without this, pageblock
1046 		 * isolation could cause incorrect freepage or CMA accounting.
1047 		 *
1048 		 * We don't want to hit this code for the more frequent
1049 		 * low-order merging.
1050 		 */
1051 		if (unlikely(has_isolate_pageblock(zone))) {
1052 			int buddy_mt;
1053 
1054 			buddy_pfn = __find_buddy_pfn(pfn, order);
1055 			buddy = page + (buddy_pfn - pfn);
1056 			buddy_mt = get_pageblock_migratetype(buddy);
1057 
1058 			if (migratetype != buddy_mt
1059 					&& (is_migrate_isolate(migratetype) ||
1060 						is_migrate_isolate(buddy_mt)))
1061 				goto done_merging;
1062 		}
1063 		max_order = order + 1;
1064 		goto continue_merging;
1065 	}
1066 
1067 done_merging:
1068 	set_buddy_order(page, order);
1069 
1070 	if (fpi_flags & FPI_TO_TAIL)
1071 		to_tail = true;
1072 	else if (is_shuffle_order(order))
1073 		to_tail = shuffle_pick_tail();
1074 	else
1075 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1076 
1077 	if (to_tail)
1078 		add_to_free_list_tail(page, zone, order, migratetype);
1079 	else
1080 		add_to_free_list(page, zone, order, migratetype);
1081 
1082 	/* Notify page reporting subsystem of freed page */
1083 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1084 		page_reporting_notify_free(order);
1085 }
1086 
1087 /*
1088  * A bad page could be due to a number of fields. Instead of multiple branches,
1089  * try and check multiple fields with one check. The caller must do a detailed
1090  * check if necessary.
1091  */
1092 static inline bool page_expected_state(struct page *page,
1093 					unsigned long check_flags)
1094 {
1095 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1096 		return false;
1097 
1098 	if (unlikely((unsigned long)page->mapping |
1099 			page_ref_count(page) |
1100 #ifdef CONFIG_MEMCG
1101 			(unsigned long)page->mem_cgroup |
1102 #endif
1103 			(page->flags & check_flags)))
1104 		return false;
1105 
1106 	return true;
1107 }
1108 
1109 static const char *page_bad_reason(struct page *page, unsigned long flags)
1110 {
1111 	const char *bad_reason = NULL;
1112 
1113 	if (unlikely(atomic_read(&page->_mapcount) != -1))
1114 		bad_reason = "nonzero mapcount";
1115 	if (unlikely(page->mapping != NULL))
1116 		bad_reason = "non-NULL mapping";
1117 	if (unlikely(page_ref_count(page) != 0))
1118 		bad_reason = "nonzero _refcount";
1119 	if (unlikely(page->flags & flags)) {
1120 		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1121 			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1122 		else
1123 			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1124 	}
1125 #ifdef CONFIG_MEMCG
1126 	if (unlikely(page->mem_cgroup))
1127 		bad_reason = "page still charged to cgroup";
1128 #endif
1129 	return bad_reason;
1130 }
1131 
1132 static void check_free_page_bad(struct page *page)
1133 {
1134 	bad_page(page,
1135 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1136 }
1137 
1138 static inline int check_free_page(struct page *page)
1139 {
1140 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1141 		return 0;
1142 
1143 	/* Something has gone sideways, find it */
1144 	check_free_page_bad(page);
1145 	return 1;
1146 }
1147 
1148 static int free_tail_pages_check(struct page *head_page, struct page *page)
1149 {
1150 	int ret = 1;
1151 
1152 	/*
1153 	 * We rely on page->lru.next never having bit 0 set, unless the page
1154 	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1155 	 */
1156 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1157 
1158 	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1159 		ret = 0;
1160 		goto out;
1161 	}
1162 	switch (page - head_page) {
1163 	case 1:
1164 		/* the first tail page: ->mapping may be compound_mapcount() */
1165 		if (unlikely(compound_mapcount(page))) {
1166 			bad_page(page, "nonzero compound_mapcount");
1167 			goto out;
1168 		}
1169 		break;
1170 	case 2:
1171 		/*
1172 		 * the second tail page: ->mapping is
1173 		 * deferred_list.next -- ignore value.
1174 		 */
1175 		break;
1176 	default:
1177 		if (page->mapping != TAIL_MAPPING) {
1178 			bad_page(page, "corrupted mapping in tail page");
1179 			goto out;
1180 		}
1181 		break;
1182 	}
1183 	if (unlikely(!PageTail(page))) {
1184 		bad_page(page, "PageTail not set");
1185 		goto out;
1186 	}
1187 	if (unlikely(compound_head(page) != head_page)) {
1188 		bad_page(page, "compound_head not consistent");
1189 		goto out;
1190 	}
1191 	ret = 0;
1192 out:
1193 	page->mapping = NULL;
1194 	clear_compound_head(page);
1195 	return ret;
1196 }
1197 
1198 static void kernel_init_free_pages(struct page *page, int numpages)
1199 {
1200 	int i;
1201 
1202 	/* s390's use of memset() could override KASAN redzones. */
1203 	kasan_disable_current();
1204 	for (i = 0; i < numpages; i++)
1205 		clear_highpage(page + i);
1206 	kasan_enable_current();
1207 }
1208 
1209 static __always_inline bool free_pages_prepare(struct page *page,
1210 					unsigned int order, bool check_free)
1211 {
1212 	int bad = 0;
1213 
1214 	VM_BUG_ON_PAGE(PageTail(page), page);
1215 
1216 	trace_mm_page_free(page, order);
1217 
1218 	if (unlikely(PageHWPoison(page)) && !order) {
1219 		/*
1220 		 * Do not let hwpoison pages hit pcplists/buddy
1221 		 * Untie memcg state and reset page's owner
1222 		 */
1223 		if (memcg_kmem_enabled() && PageKmemcg(page))
1224 			__memcg_kmem_uncharge_page(page, order);
1225 		reset_page_owner(page, order);
1226 		return false;
1227 	}
1228 
1229 	/*
1230 	 * Check tail pages before head page information is cleared to
1231 	 * avoid checking PageCompound for order-0 pages.
1232 	 */
1233 	if (unlikely(order)) {
1234 		bool compound = PageCompound(page);
1235 		int i;
1236 
1237 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1238 
1239 		if (compound)
1240 			ClearPageDoubleMap(page);
1241 		for (i = 1; i < (1 << order); i++) {
1242 			if (compound)
1243 				bad += free_tail_pages_check(page, page + i);
1244 			if (unlikely(check_free_page(page + i))) {
1245 				bad++;
1246 				continue;
1247 			}
1248 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1249 		}
1250 	}
1251 	if (PageMappingFlags(page))
1252 		page->mapping = NULL;
1253 	if (memcg_kmem_enabled() && PageKmemcg(page))
1254 		__memcg_kmem_uncharge_page(page, order);
1255 	if (check_free)
1256 		bad += check_free_page(page);
1257 	if (bad)
1258 		return false;
1259 
1260 	page_cpupid_reset_last(page);
1261 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1262 	reset_page_owner(page, order);
1263 
1264 	if (!PageHighMem(page)) {
1265 		debug_check_no_locks_freed(page_address(page),
1266 					   PAGE_SIZE << order);
1267 		debug_check_no_obj_freed(page_address(page),
1268 					   PAGE_SIZE << order);
1269 	}
1270 	if (want_init_on_free())
1271 		kernel_init_free_pages(page, 1 << order);
1272 
1273 	kernel_poison_pages(page, 1 << order, 0);
1274 	/*
1275 	 * arch_free_page() can make the page's contents inaccessible.  s390
1276 	 * does this.  So nothing which can access the page's contents should
1277 	 * happen after this.
1278 	 */
1279 	arch_free_page(page, order);
1280 
1281 	if (debug_pagealloc_enabled_static())
1282 		kernel_map_pages(page, 1 << order, 0);
1283 
1284 	kasan_free_nondeferred_pages(page, order);
1285 
1286 	return true;
1287 }
1288 
1289 #ifdef CONFIG_DEBUG_VM
1290 /*
1291  * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1292  * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1293  * moved from pcp lists to free lists.
1294  */
1295 static bool free_pcp_prepare(struct page *page)
1296 {
1297 	return free_pages_prepare(page, 0, true);
1298 }
1299 
1300 static bool bulkfree_pcp_prepare(struct page *page)
1301 {
1302 	if (debug_pagealloc_enabled_static())
1303 		return check_free_page(page);
1304 	else
1305 		return false;
1306 }
1307 #else
1308 /*
1309  * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1310  * moving from pcp lists to free list in order to reduce overhead. With
1311  * debug_pagealloc enabled, they are checked also immediately when being freed
1312  * to the pcp lists.
1313  */
1314 static bool free_pcp_prepare(struct page *page)
1315 {
1316 	if (debug_pagealloc_enabled_static())
1317 		return free_pages_prepare(page, 0, true);
1318 	else
1319 		return free_pages_prepare(page, 0, false);
1320 }
1321 
1322 static bool bulkfree_pcp_prepare(struct page *page)
1323 {
1324 	return check_free_page(page);
1325 }
1326 #endif /* CONFIG_DEBUG_VM */
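
/*
 * Editorial summary of the two variants above: with CONFIG_DEBUG_VM,
 * order-0 pages are fully checked when freed to a pcplist and re-checked
 * on the pcplist -> buddy transition only if debug_pagealloc is enabled;
 * without CONFIG_DEBUG_VM it is the other way around.
 */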
1327 
1328 static inline void prefetch_buddy(struct page *page)
1329 {
1330 	unsigned long pfn = page_to_pfn(page);
1331 	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1332 	struct page *buddy = page + (buddy_pfn - pfn);
1333 
1334 	prefetch(buddy);
1335 }
1336 
1337 /*
1338  * Frees a number of pages from the PCP lists
1339  * Assumes all pages on list are in same zone, and of same order.
1340  * count is the number of pages to free.
1341  *
1342  * If the zone was previously in an "all pages pinned" state then look to
1343  * see if this freeing clears that state.
1344  *
1345  * And clear the zone's pages_scanned counter, to hold off the "all pages are
1346  * pinned" detection logic.
1347  */
1348 static void free_pcppages_bulk(struct zone *zone, int count,
1349 					struct per_cpu_pages *pcp)
1350 {
1351 	int migratetype = 0;
1352 	int batch_free = 0;
1353 	int prefetch_nr = 0;
1354 	bool isolated_pageblocks;
1355 	struct page *page, *tmp;
1356 	LIST_HEAD(head);
1357 
1358 	/*
1359 	 * Ensure a proper count is passed; otherwise we would get stuck in the
1360 	 * while (list_empty(list)) loop below.
1361 	 */
1362 	count = min(pcp->count, count);
1363 	while (count) {
1364 		struct list_head *list;
1365 
1366 		/*
1367 		 * Remove pages from lists in a round-robin fashion. A
1368 		 * batch_free count is maintained that is incremented when an
1369 		 * empty list is encountered.  This is so more pages are freed
1370 		 * off fuller lists instead of spinning excessively around empty
1371 		 * lists
1372 		 */
1373 		do {
1374 			batch_free++;
1375 			if (++migratetype == MIGRATE_PCPTYPES)
1376 				migratetype = 0;
1377 			list = &pcp->lists[migratetype];
1378 		} while (list_empty(list));
1379 
1380 		/* This is the only non-empty list. Free them all. */
1381 		if (batch_free == MIGRATE_PCPTYPES)
1382 			batch_free = count;
1383 
1384 		do {
1385 			page = list_last_entry(list, struct page, lru);
1386 			/* must delete to avoid corrupting pcp list */
1387 			list_del(&page->lru);
1388 			pcp->count--;
1389 
1390 			if (bulkfree_pcp_prepare(page))
1391 				continue;
1392 
1393 			list_add_tail(&page->lru, &head);
1394 
1395 			/*
1396 			 * We are going to put the page back to the global
1397 			 * pool, prefetch its buddy to speed up later access
1398 			 * under zone->lock. It is believed the overhead of
1399 			 * an additional test and calculating buddy_pfn here
1400 			 * can be offset by reduced memory latency later. To
1401 			 * avoid excessive prefetching due to large count, only
1402 			 * prefetch buddy for the first pcp->batch nr of pages.
1403 			 */
1404 			if (prefetch_nr++ < pcp->batch)
1405 				prefetch_buddy(page);
1406 		} while (--count && --batch_free && !list_empty(list));
1407 	}
1408 
1409 	spin_lock(&zone->lock);
1410 	isolated_pageblocks = has_isolate_pageblock(zone);
1411 
1412 	/*
1413 	 * Use safe version since after __free_one_page(),
1414 	 * page->lru.next will not point to original list.
1415 	 */
1416 	list_for_each_entry_safe(page, tmp, &head, lru) {
1417 		int mt = get_pcppage_migratetype(page);
1418 		/* MIGRATE_ISOLATE page should not go to pcplists */
1419 		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1420 		/* Pageblock could have been isolated meanwhile */
1421 		if (unlikely(isolated_pageblocks))
1422 			mt = get_pageblock_migratetype(page);
1423 
1424 		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
1425 		trace_mm_page_pcpu_drain(page, 0, mt);
1426 	}
1427 	spin_unlock(&zone->lock);
1428 }
1429 
1430 static void free_one_page(struct zone *zone,
1431 				struct page *page, unsigned long pfn,
1432 				unsigned int order,
1433 				int migratetype, fpi_t fpi_flags)
1434 {
1435 	spin_lock(&zone->lock);
1436 	if (unlikely(has_isolate_pageblock(zone) ||
1437 		is_migrate_isolate(migratetype))) {
1438 		migratetype = get_pfnblock_migratetype(page, pfn);
1439 	}
1440 	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1441 	spin_unlock(&zone->lock);
1442 }
1443 
1444 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1445 				unsigned long zone, int nid)
1446 {
1447 	mm_zero_struct_page(page);
1448 	set_page_links(page, zone, nid, pfn);
1449 	init_page_count(page);
1450 	page_mapcount_reset(page);
1451 	page_cpupid_reset_last(page);
1452 	page_kasan_tag_reset(page);
1453 
1454 	INIT_LIST_HEAD(&page->lru);
1455 #ifdef WANT_PAGE_VIRTUAL
1456 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
1457 	if (!is_highmem_idx(zone))
1458 		set_page_address(page, __va(pfn << PAGE_SHIFT));
1459 #endif
1460 }
1461 
1462 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1463 static void __meminit init_reserved_page(unsigned long pfn)
1464 {
1465 	pg_data_t *pgdat;
1466 	int nid, zid;
1467 
1468 	if (!early_page_uninitialised(pfn))
1469 		return;
1470 
1471 	nid = early_pfn_to_nid(pfn);
1472 	pgdat = NODE_DATA(nid);
1473 
1474 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1475 		struct zone *zone = &pgdat->node_zones[zid];
1476 
1477 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1478 			break;
1479 	}
1480 	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1481 }
1482 #else
1483 static inline void init_reserved_page(unsigned long pfn)
1484 {
1485 }
1486 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1487 
1488 /*
1489  * Initialised pages do not have PageReserved set. This function is
1490  * called for each range allocated by the bootmem allocator and
1491  * marks the pages PageReserved. The remaining valid pages are later
1492  * sent to the buddy page allocator.
1493  */
1494 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1495 {
1496 	unsigned long start_pfn = PFN_DOWN(start);
1497 	unsigned long end_pfn = PFN_UP(end);
1498 
1499 	for (; start_pfn < end_pfn; start_pfn++) {
1500 		if (pfn_valid(start_pfn)) {
1501 			struct page *page = pfn_to_page(start_pfn);
1502 
1503 			init_reserved_page(start_pfn);
1504 
1505 			/* Avoid false-positive PageTail() */
1506 			INIT_LIST_HEAD(&page->lru);
1507 
1508 			/*
1509 			 * no need for atomic set_bit because the struct
1510 			 * page is not visible yet so nobody should
1511 			 * access it yet.
1512 			 */
1513 			__SetPageReserved(page);
1514 		}
1515 	}
1516 }
1517 
1518 static void __free_pages_ok(struct page *page, unsigned int order,
1519 			    fpi_t fpi_flags)
1520 {
1521 	unsigned long flags;
1522 	int migratetype;
1523 	unsigned long pfn = page_to_pfn(page);
1524 
1525 	if (!free_pages_prepare(page, order, true))
1526 		return;
1527 
1528 	migratetype = get_pfnblock_migratetype(page, pfn);
1529 	local_irq_save(flags);
1530 	__count_vm_events(PGFREE, 1 << order);
1531 	free_one_page(page_zone(page), page, pfn, order, migratetype,
1532 		      fpi_flags);
1533 	local_irq_restore(flags);
1534 }
1535 
1536 void __free_pages_core(struct page *page, unsigned int order)
1537 {
1538 	unsigned int nr_pages = 1 << order;
1539 	struct page *p = page;
1540 	unsigned int loop;
1541 
1542 	/*
1543 	 * When initializing the memmap, __init_single_page() sets the refcount
1544 	 * of all pages to 1 ("allocated"/"not free"). We have to set the
1545 	 * refcount of all involved pages to 0.
1546 	 */
1547 	prefetchw(p);
1548 	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1549 		prefetchw(p + 1);
1550 		__ClearPageReserved(p);
1551 		set_page_count(p, 0);
1552 	}
1553 	__ClearPageReserved(p);
1554 	set_page_count(p, 0);
1555 
1556 	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1557 
1558 	/*
1559 	 * Bypass PCP and place fresh pages right to the tail, primarily
1560 	 * relevant for memory onlining.
1561 	 */
1562 	__free_pages_ok(page, order, FPI_TO_TAIL);
1563 }
1564 
1565 #ifdef CONFIG_NEED_MULTIPLE_NODES
1566 
1567 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1568 
1569 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
1570 
1571 /*
1572  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1573  */
1574 int __meminit __early_pfn_to_nid(unsigned long pfn,
1575 					struct mminit_pfnnid_cache *state)
1576 {
1577 	unsigned long start_pfn, end_pfn;
1578 	int nid;
1579 
1580 	if (state->last_start <= pfn && pfn < state->last_end)
1581 		return state->last_nid;
1582 
1583 	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1584 	if (nid != NUMA_NO_NODE) {
1585 		state->last_start = start_pfn;
1586 		state->last_end = end_pfn;
1587 		state->last_nid = nid;
1588 	}
1589 
1590 	return nid;
1591 }
1592 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
1593 
1594 int __meminit early_pfn_to_nid(unsigned long pfn)
1595 {
1596 	static DEFINE_SPINLOCK(early_pfn_lock);
1597 	int nid;
1598 
1599 	spin_lock(&early_pfn_lock);
1600 	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1601 	if (nid < 0)
1602 		nid = first_online_node;
1603 	spin_unlock(&early_pfn_lock);
1604 
1605 	return nid;
1606 }
1607 #endif /* CONFIG_NEED_MULTIPLE_NODES */
1608 
1609 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1610 							unsigned int order)
1611 {
1612 	if (early_page_uninitialised(pfn))
1613 		return;
1614 	__free_pages_core(page, order);
1615 }
1616 
1617 /*
1618  * Check that the whole (or subset of) a pageblock given by the interval of
1619  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1620  * with the migration of free compaction scanner. The scanners then need to
1621  * use only pfn_valid_within() check for arches that allow holes within
1622  * pageblocks.
1623  *
1624  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1625  *
1626  * It's possible on some configurations to have a setup like node0 node1 node0
1627  * i.e. it's possible that all pages within a zone's range of pages do not
1628  * belong to a single zone. We assume that a border between node0 and node1
1629  * can occur within a single pageblock, but not a node0 node1 node0
1630  * interleaving within a single pageblock. It is therefore sufficient to check
1631  * the first and last page of a pageblock and avoid checking each individual
1632  * page in a pageblock.
1633  */
1634 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1635 				     unsigned long end_pfn, struct zone *zone)
1636 {
1637 	struct page *start_page;
1638 	struct page *end_page;
1639 
1640 	/* end_pfn is one past the range we are checking */
1641 	end_pfn--;
1642 
1643 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1644 		return NULL;
1645 
1646 	start_page = pfn_to_online_page(start_pfn);
1647 	if (!start_page)
1648 		return NULL;
1649 
1650 	if (page_zone(start_page) != zone)
1651 		return NULL;
1652 
1653 	end_page = pfn_to_page(end_pfn);
1654 
1655 	/* This gives a shorter code than deriving page_zone(end_page) */
1656 	if (page_zone_id(start_page) != page_zone_id(end_page))
1657 		return NULL;
1658 
1659 	return start_page;
1660 }
1661 
1662 void set_zone_contiguous(struct zone *zone)
1663 {
1664 	unsigned long block_start_pfn = zone->zone_start_pfn;
1665 	unsigned long block_end_pfn;
1666 
1667 	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1668 	for (; block_start_pfn < zone_end_pfn(zone);
1669 			block_start_pfn = block_end_pfn,
1670 			 block_end_pfn += pageblock_nr_pages) {
1671 
1672 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1673 
1674 		if (!__pageblock_pfn_to_page(block_start_pfn,
1675 					     block_end_pfn, zone))
1676 			return;
1677 		cond_resched();
1678 	}
1679 
1680 	/* We confirm that there is no hole */
1681 	zone->contiguous = true;
1682 }
1683 
1684 void clear_zone_contiguous(struct zone *zone)
1685 {
1686 	zone->contiguous = false;
1687 }
1688 
1689 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1690 static void __init deferred_free_range(unsigned long pfn,
1691 				       unsigned long nr_pages)
1692 {
1693 	struct page *page;
1694 	unsigned long i;
1695 
1696 	if (!nr_pages)
1697 		return;
1698 
1699 	page = pfn_to_page(pfn);
1700 
1701 	/* Free a large naturally-aligned chunk if possible */
1702 	if (nr_pages == pageblock_nr_pages &&
1703 	    (pfn & (pageblock_nr_pages - 1)) == 0) {
1704 		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1705 		__free_pages_core(page, pageblock_order);
1706 		return;
1707 	}
1708 
1709 	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1710 		if ((pfn & (pageblock_nr_pages - 1)) == 0)
1711 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1712 		__free_pages_core(page, 0);
1713 	}
1714 }
1715 
1716 /* Completion tracking for deferred_init_memmap() threads */
1717 static atomic_t pgdat_init_n_undone __initdata;
1718 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1719 
1720 static inline void __init pgdat_init_report_one_done(void)
1721 {
1722 	if (atomic_dec_and_test(&pgdat_init_n_undone))
1723 		complete(&pgdat_init_all_done_comp);
1724 }
1725 
1726 /*
1727  * Returns true if page needs to be initialized or freed to buddy allocator.
1728  *
1729  * First we check if pfn is valid on architectures where it is possible to have
1730  * holes within pageblock_nr_pages. On systems where it is not possible, this
1731  * function is optimized out.
1732  *
1733  * Then, we check if the current large page is valid by only checking the validity
1734  * of the head pfn.
1735  */
1736 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1737 {
1738 	if (!pfn_valid_within(pfn))
1739 		return false;
1740 	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1741 		return false;
1742 	return true;
1743 }
1744 
1745 /*
1746  * Free pages to buddy allocator. Try to free aligned pages in
1747  * pageblock_nr_pages sizes.
1748  */
1749 static void __init deferred_free_pages(unsigned long pfn,
1750 				       unsigned long end_pfn)
1751 {
1752 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1753 	unsigned long nr_free = 0;
1754 
1755 	for (; pfn < end_pfn; pfn++) {
1756 		if (!deferred_pfn_valid(pfn)) {
1757 			deferred_free_range(pfn - nr_free, nr_free);
1758 			nr_free = 0;
1759 		} else if (!(pfn & nr_pgmask)) {
1760 			deferred_free_range(pfn - nr_free, nr_free);
1761 			nr_free = 1;
1762 		} else {
1763 			nr_free++;
1764 		}
1765 	}
1766 	/* Free the last block of pages to allocator */
1767 	deferred_free_range(pfn - nr_free, nr_free);
1768 }
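
/*
 * Illustrative walk-through (values assumed for the example, not taken from
 * this file): with pageblock_nr_pages == 512 and a valid run of pfns
 * 1000..2047, the loop above frees pfns 1000..1023 as order-0 pages when it
 * reaches the boundary at pfn 1024, then hands the fully aligned block
 * 1024..1535 to deferred_free_range() as a single pageblock-sized free, and
 * the trailing aligned block 1536..2047 is flushed by the call after the loop.
 */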
1769 
1770 /*
1771  * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
1772  * by performing them only once every pageblock_nr_pages.
1773  * Return number of pages initialized.
1774  */
1775 static unsigned long  __init deferred_init_pages(struct zone *zone,
1776 						 unsigned long pfn,
1777 						 unsigned long end_pfn)
1778 {
1779 	unsigned long nr_pgmask = pageblock_nr_pages - 1;
1780 	int nid = zone_to_nid(zone);
1781 	unsigned long nr_pages = 0;
1782 	int zid = zone_idx(zone);
1783 	struct page *page = NULL;
1784 
1785 	for (; pfn < end_pfn; pfn++) {
1786 		if (!deferred_pfn_valid(pfn)) {
1787 			page = NULL;
1788 			continue;
1789 		} else if (!page || !(pfn & nr_pgmask)) {
1790 			page = pfn_to_page(pfn);
1791 		} else {
1792 			page++;
1793 		}
1794 		__init_single_page(page, pfn, zid, nid);
1795 		nr_pages++;
1796 	}
1797 	return nr_pages;
1798 }
1799 
1800 /*
1801  * This function is meant to pre-load the iterator for the zone init.
1802  * Specifically it walks through the ranges until we are caught up to the
1803  * first_init_pfn value and exits there. If we never encounter the value we
1804  * return false indicating there are no valid ranges left.
1805  */
1806 static bool __init
1807 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1808 				    unsigned long *spfn, unsigned long *epfn,
1809 				    unsigned long first_init_pfn)
1810 {
1811 	u64 j;
1812 
1813 	/*
1814 	 * Start out by walking through the ranges in this zone that have
1815 	 * already been initialized. We don't need to do anything with them
1816 	 * so we just need to flush them out of the system.
1817 	 */
1818 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1819 		if (*epfn <= first_init_pfn)
1820 			continue;
1821 		if (*spfn < first_init_pfn)
1822 			*spfn = first_init_pfn;
1823 		*i = j;
1824 		return true;
1825 	}
1826 
1827 	return false;
1828 }
1829 
1830 /*
1831  * Initialize and free pages. We do it in two loops: first we initialize
1832  * struct page, then free to buddy allocator, because while we are
1833  * freeing pages we can access pages that are ahead (computing buddy
1834  * page in __free_one_page()).
1835  *
1836  * In order to try and keep some memory in the cache we have the loop
1837  * broken along max page order boundaries. This way we will not cause
1838  * any issues with the buddy page computation.
1839  */
1840 static unsigned long __init
1841 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1842 		       unsigned long *end_pfn)
1843 {
1844 	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1845 	unsigned long spfn = *start_pfn, epfn = *end_pfn;
1846 	unsigned long nr_pages = 0;
1847 	u64 j = *i;
1848 
1849 	/* First we loop through and initialize the page values */
1850 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1851 		unsigned long t;
1852 
1853 		if (mo_pfn <= *start_pfn)
1854 			break;
1855 
1856 		t = min(mo_pfn, *end_pfn);
1857 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
1858 
1859 		if (mo_pfn < *end_pfn) {
1860 			*start_pfn = mo_pfn;
1861 			break;
1862 		}
1863 	}
1864 
1865 	/* Reset values and now loop through freeing pages as needed */
1866 	swap(j, *i);
1867 
1868 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1869 		unsigned long t;
1870 
1871 		if (mo_pfn <= spfn)
1872 			break;
1873 
1874 		t = min(mo_pfn, epfn);
1875 		deferred_free_pages(spfn, t);
1876 
1877 		if (mo_pfn <= epfn)
1878 			break;
1879 	}
1880 
1881 	return nr_pages;
1882 }
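
/*
 * Illustrative values (assumed for the example, not taken from this file):
 * with MAX_ORDER_NR_PAGES == 1024, *start_pfn == 1000 and a memblock range
 * covering those pfns, mo_pfn becomes ALIGN(1001, 1024) == 1024, so the
 * first loop initializes struct pages for pfns 1000..1023 and the second
 * loop frees the same range, keeping both passes inside one MAX_ORDER
 * aligned chunk so the buddy computation never touches uninitialized
 * struct pages.
 */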
1883 
1884 static void __init
1885 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1886 			   void *arg)
1887 {
1888 	unsigned long spfn, epfn;
1889 	struct zone *zone = arg;
1890 	u64 i;
1891 
1892 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1893 
1894 	/*
1895 	 * Initialize and free pages in MAX_ORDER sized increments so that we
1896 	 * can avoid introducing any issues with the buddy allocator.
1897 	 */
1898 	while (spfn < end_pfn) {
1899 		deferred_init_maxorder(&i, zone, &spfn, &epfn);
1900 		cond_resched();
1901 	}
1902 }
1903 
1904 /* An arch may override for more concurrency. */
1905 __weak int __init
1906 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1907 {
1908 	return 1;
1909 }
1910 
1911 /* Initialise remaining memory on a node */
1912 static int __init deferred_init_memmap(void *data)
1913 {
1914 	pg_data_t *pgdat = data;
1915 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1916 	unsigned long spfn = 0, epfn = 0;
1917 	unsigned long first_init_pfn, flags;
1918 	unsigned long start = jiffies;
1919 	struct zone *zone;
1920 	int zid, max_threads;
1921 	u64 i;
1922 
1923 	/* Bind memory initialisation thread to a local node if possible */
1924 	if (!cpumask_empty(cpumask))
1925 		set_cpus_allowed_ptr(current, cpumask);
1926 
1927 	pgdat_resize_lock(pgdat, &flags);
1928 	first_init_pfn = pgdat->first_deferred_pfn;
1929 	if (first_init_pfn == ULONG_MAX) {
1930 		pgdat_resize_unlock(pgdat, &flags);
1931 		pgdat_init_report_one_done();
1932 		return 0;
1933 	}
1934 
1935 	/* Sanity check boundaries */
1936 	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1937 	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1938 	pgdat->first_deferred_pfn = ULONG_MAX;
1939 
1940 	/*
1941 	 * Once we unlock here, the zone cannot be grown anymore, thus if an
1942 	 * interrupt thread must allocate this early in boot, the zone must be
1943 	 * pre-grown prior to the start of deferred page initialization.
1944 	 */
1945 	pgdat_resize_unlock(pgdat, &flags);
1946 
1947 	/* Only the highest zone is deferred so find it */
1948 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1949 		zone = pgdat->node_zones + zid;
1950 		if (first_init_pfn < zone_end_pfn(zone))
1951 			break;
1952 	}
1953 
1954 	/* If the zone is empty somebody else may have cleared out the zone */
1955 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1956 						 first_init_pfn))
1957 		goto zone_empty;
1958 
1959 	max_threads = deferred_page_init_max_threads(cpumask);
1960 
1961 	while (spfn < epfn) {
1962 		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
1963 		struct padata_mt_job job = {
1964 			.thread_fn   = deferred_init_memmap_chunk,
1965 			.fn_arg      = zone,
1966 			.start       = spfn,
1967 			.size        = epfn_align - spfn,
1968 			.align       = PAGES_PER_SECTION,
1969 			.min_chunk   = PAGES_PER_SECTION,
1970 			.max_threads = max_threads,
1971 		};
1972 
1973 		padata_do_multithreaded(&job);
1974 		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
1975 						    epfn_align);
1976 	}
1977 zone_empty:
1978 	/* Sanity check that the next zone really is unpopulated */
1979 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
1980 
1981 	pr_info("node %d deferred pages initialised in %ums\n",
1982 		pgdat->node_id, jiffies_to_msecs(jiffies - start));
1983 
1984 	pgdat_init_report_one_done();
1985 	return 0;
1986 }
1987 
1988 /*
1989  * If this zone has deferred pages, try to grow it by initializing enough
1990  * deferred pages to satisfy the allocation specified by order, rounded up to
1991  * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
1992  * of SECTION_SIZE bytes by initializing struct pages in increments of
1993  * PAGES_PER_SECTION * sizeof(struct page) bytes.
1994  *
1995  * Return true when zone was grown, otherwise return false. We return true even
1996  * when we grow less than requested, to let the caller decide if there are
1997  * enough pages to satisfy the allocation.
1998  *
1999  * Note: We use noinline because this function is needed only during boot, and
2000  * it is called from a __ref function _deferred_grow_zone. This way we are
2001  * making sure that it is not inlined into permanent text section.
2002  */
2003 static noinline bool __init
2004 deferred_grow_zone(struct zone *zone, unsigned int order)
2005 {
2006 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2007 	pg_data_t *pgdat = zone->zone_pgdat;
2008 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2009 	unsigned long spfn, epfn, flags;
2010 	unsigned long nr_pages = 0;
2011 	u64 i;
2012 
2013 	/* Only the last zone may have deferred pages */
2014 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2015 		return false;
2016 
2017 	pgdat_resize_lock(pgdat, &flags);
2018 
2019 	/*
2020 	 * If someone grew this zone while we were waiting for spinlock, return
2021 	 * true, as there might be enough pages already.
2022 	 */
2023 	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2024 		pgdat_resize_unlock(pgdat, &flags);
2025 		return true;
2026 	}
2027 
2028 	/* If the zone is empty somebody else may have cleared out the zone */
2029 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2030 						 first_deferred_pfn)) {
2031 		pgdat->first_deferred_pfn = ULONG_MAX;
2032 		pgdat_resize_unlock(pgdat, &flags);
2033 		/* Retry only once. */
2034 		return first_deferred_pfn != ULONG_MAX;
2035 	}
2036 
2037 	/*
2038 	 * Initialize and free pages in MAX_ORDER sized increments so
2039 	 * that we can avoid introducing any issues with the buddy
2040 	 * allocator.
2041 	 */
2042 	while (spfn < epfn) {
2043 		/* update our first deferred PFN for this section */
2044 		first_deferred_pfn = spfn;
2045 
2046 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2047 		touch_nmi_watchdog();
2048 
2049 		/* Only stop along section boundaries: XOR < PAGES_PER_SECTION means both pfns share a section */
2050 		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2051 			continue;
2052 
2053 		/* If our quota has been met we can stop here */
2054 		if (nr_pages >= nr_pages_needed)
2055 			break;
2056 	}
2057 
2058 	pgdat->first_deferred_pfn = spfn;
2059 	pgdat_resize_unlock(pgdat, &flags);
2060 
2061 	return nr_pages > 0;
2062 }
2063 
2064 /*
2065  * deferred_grow_zone() is __init, but it is called from
2066  * get_page_from_freelist() during early boot until deferred_pages permanently
2067  * disables this call. This is why we have a __ref wrapper to avoid a
2068  * section-mismatch warning, and to ensure that the function body gets unloaded.
2069  */
2070 static bool __ref
2071 _deferred_grow_zone(struct zone *zone, unsigned int order)
2072 {
2073 	return deferred_grow_zone(zone, order);
2074 }
2075 
2076 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2077 
2078 void __init page_alloc_init_late(void)
2079 {
2080 	struct zone *zone;
2081 	int nid;
2082 
2083 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2084 
2085 	/* There will be num_node_state(N_MEMORY) threads */
2086 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2087 	for_each_node_state(nid, N_MEMORY) {
2088 		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2089 	}
2090 
2091 	/* Block until all are initialised */
2092 	wait_for_completion(&pgdat_init_all_done_comp);
2093 
2094 	/*
2095 	 * The number of managed pages has changed due to the initialisation
2096 	 * so the pcpu batch and high limits needs to be updated or the limits
2097 	 * will be artificially small.
2098 	 */
2099 	for_each_populated_zone(zone)
2100 		zone_pcp_update(zone);
2101 
2102 	/*
2103 	 * We initialized the rest of the deferred pages.  Permanently disable
2104 	 * on-demand struct page initialization.
2105 	 */
2106 	static_branch_disable(&deferred_pages);
2107 
2108 	/* Reinit limits that are based on free pages after the kernel is up */
2109 	files_maxfiles_init();
2110 #endif
2111 
2112 	/* Discard memblock private memory */
2113 	memblock_discard();
2114 
2115 	for_each_node_state(nid, N_MEMORY)
2116 		shuffle_free_memory(NODE_DATA(nid));
2117 
2118 	for_each_populated_zone(zone)
2119 		set_zone_contiguous(zone);
2120 }
2121 
2122 #ifdef CONFIG_CMA
2123 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2124 void __init init_cma_reserved_pageblock(struct page *page)
2125 {
2126 	unsigned i = pageblock_nr_pages;
2127 	struct page *p = page;
2128 
2129 	do {
2130 		__ClearPageReserved(p);
2131 		set_page_count(p, 0);
2132 	} while (++p, --i);
2133 
2134 	set_pageblock_migratetype(page, MIGRATE_CMA);
2135 
2136 	if (pageblock_order >= MAX_ORDER) {
2137 		i = pageblock_nr_pages;
2138 		p = page;
2139 		do {
2140 			set_page_refcounted(p);
2141 			__free_pages(p, MAX_ORDER - 1);
2142 			p += MAX_ORDER_NR_PAGES;
2143 		} while (i -= MAX_ORDER_NR_PAGES);
2144 	} else {
2145 		set_page_refcounted(page);
2146 		__free_pages(page, pageblock_order);
2147 	}
2148 
2149 	adjust_managed_page_count(page, pageblock_nr_pages);
2150 }
2151 #endif
2152 
2153 /*
2154  * The order of subdivision here is critical for the IO subsystem.
2155  * Please do not alter this order without good reasons and regression
2156  * testing. Specifically, as large blocks of memory are subdivided,
2157  * the order in which smaller blocks are delivered depends on the order
2158  * they're subdivided in this function. This is the primary factor
2159  * influencing the order in which pages are delivered to the IO
2160  * subsystem according to empirical testing, and this is also justified
2161  * by considering the behavior of a buddy system containing a single
2162  * large block of memory acted on by a series of small allocations.
2163  * This behavior is a critical factor in sglist merging's success.
2164  *
2165  * -- nyc
2166  */
2167 static inline void expand(struct zone *zone, struct page *page,
2168 	int low, int high, int migratetype)
2169 {
2170 	unsigned long size = 1 << high;
2171 
2172 	while (high > low) {
2173 		high--;
2174 		size >>= 1;
2175 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2176 
2177 		/*
2178 		 * Mark as guard pages (or page), so they can be merged
2179 		 * back into the allocator when the buddy is freed.
2180 		 * Corresponding page table entries will not be touched;
2181 		 * the pages will stay not present in the virtual address space.
2182 		 */
2183 		if (set_page_guard(zone, &page[size], high, migratetype))
2184 			continue;
2185 
2186 		add_to_free_list(&page[size], zone, high, migratetype);
2187 		set_buddy_order(&page[size], high);
2188 	}
2189 }
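
/*
 * Illustrative example (not from the original source): satisfying an order-0
 * request from an order-3 free block, expand(zone, page, 0, 3, migratetype)
 * iterates high = 2, 1, 0 and returns the unused halves to the free lists:
 * page[4..7] as an order-2 block, page[2..3] as an order-1 block and page[1]
 * as an order-0 page, leaving page[0] for the caller (a half is skipped when
 * set_page_guard() turns it into a debug guard page instead).
 */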
2190 
2191 static void check_new_page_bad(struct page *page)
2192 {
2193 	if (unlikely(page->flags & __PG_HWPOISON)) {
2194 		/* Don't complain about hwpoisoned pages */
2195 		page_mapcount_reset(page); /* remove PageBuddy */
2196 		return;
2197 	}
2198 
2199 	bad_page(page,
2200 		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2201 }
2202 
2203 /*
2204  * This page is about to be returned from the page allocator
2205  */
2206 static inline int check_new_page(struct page *page)
2207 {
2208 	if (likely(page_expected_state(page,
2209 				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2210 		return 0;
2211 
2212 	check_new_page_bad(page);
2213 	return 1;
2214 }
2215 
2216 static inline bool free_pages_prezeroed(void)
2217 {
2218 	return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
2219 		page_poisoning_enabled()) || want_init_on_free();
2220 }
2221 
2222 #ifdef CONFIG_DEBUG_VM
2223 /*
2224  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2225  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2226  * also checked when pcp lists are refilled from the free lists.
2227  */
2228 static inline bool check_pcp_refill(struct page *page)
2229 {
2230 	if (debug_pagealloc_enabled_static())
2231 		return check_new_page(page);
2232 	else
2233 		return false;
2234 }
2235 
2236 static inline bool check_new_pcp(struct page *page)
2237 {
2238 	return check_new_page(page);
2239 }
2240 #else
2241 /*
2242  * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2243  * when pcp lists are being refilled from the free lists. With debug_pagealloc
2244  * enabled, they are also checked when being allocated from the pcp lists.
2245  */
2246 static inline bool check_pcp_refill(struct page *page)
2247 {
2248 	return check_new_page(page);
2249 }
2250 static inline bool check_new_pcp(struct page *page)
2251 {
2252 	if (debug_pagealloc_enabled_static())
2253 		return check_new_page(page);
2254 	else
2255 		return false;
2256 }
2257 #endif /* CONFIG_DEBUG_VM */
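
/*
 * In short: with DEBUG_VM, order-0 pages are checked when allocated from the
 * pcp lists and, only if debug_pagealloc is enabled, also when the pcp lists
 * are refilled from the free lists. Without DEBUG_VM it is the other way
 * around: refills are always checked and pcp allocations only when
 * debug_pagealloc is enabled.
 */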
2258 
2259 static bool check_new_pages(struct page *page, unsigned int order)
2260 {
2261 	int i;
2262 	for (i = 0; i < (1 << order); i++) {
2263 		struct page *p = page + i;
2264 
2265 		if (unlikely(check_new_page(p)))
2266 			return true;
2267 	}
2268 
2269 	return false;
2270 }
2271 
2272 inline void post_alloc_hook(struct page *page, unsigned int order,
2273 				gfp_t gfp_flags)
2274 {
2275 	set_page_private(page, 0);
2276 	set_page_refcounted(page);
2277 
2278 	arch_alloc_page(page, order);
2279 	if (debug_pagealloc_enabled_static())
2280 		kernel_map_pages(page, 1 << order, 1);
2281 	kasan_alloc_pages(page, order);
2282 	kernel_poison_pages(page, 1 << order, 1);
2283 	set_page_owner(page, order, gfp_flags);
2284 }
2285 
2286 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2287 							unsigned int alloc_flags)
2288 {
2289 	post_alloc_hook(page, order, gfp_flags);
2290 
2291 	if (!free_pages_prezeroed() && want_init_on_alloc(gfp_flags))
2292 		kernel_init_free_pages(page, 1 << order);
2293 
2294 	if (order && (gfp_flags & __GFP_COMP))
2295 		prep_compound_page(page, order);
2296 
2297 	/*
2298 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2299 	 * allocate the page. The expectation is that the caller is taking
2300 	 * steps that will free more memory. The caller should avoid the page
2301 	 * being used for !PFMEMALLOC purposes.
2302 	 */
2303 	if (alloc_flags & ALLOC_NO_WATERMARKS)
2304 		set_page_pfmemalloc(page);
2305 	else
2306 		clear_page_pfmemalloc(page);
2307 }
2308 
2309 /*
2310  * Go through the free lists for the given migratetype and remove
2311  * the smallest available page from the freelists
2312  */
2313 static __always_inline
2314 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2315 						int migratetype)
2316 {
2317 	unsigned int current_order;
2318 	struct free_area *area;
2319 	struct page *page;
2320 
2321 	/* Find a page of the appropriate size in the preferred list */
2322 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2323 		area = &(zone->free_area[current_order]);
2324 		page = get_page_from_free_area(area, migratetype);
2325 		if (!page)
2326 			continue;
2327 		del_page_from_free_list(page, zone, current_order);
2328 		expand(zone, page, order, current_order, migratetype);
2329 		set_pcppage_migratetype(page, migratetype);
2330 		return page;
2331 	}
2332 
2333 	return NULL;
2334 }
2335 
2336 
2337 /*
2338  * This array describes the order in which free lists are fallen back to
2339  * when the free lists for the desired migratetype are depleted.
2340  */
2341 static int fallbacks[MIGRATE_TYPES][3] = {
2342 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2343 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2344 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2345 #ifdef CONFIG_CMA
2346 	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2347 #endif
2348 #ifdef CONFIG_MEMORY_ISOLATION
2349 	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2350 #endif
2351 };
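
/*
 * For example, per this table a MIGRATE_UNMOVABLE request falls back to
 * MIGRATE_RECLAIMABLE first, then to MIGRATE_MOVABLE, and gives up once the
 * MIGRATE_TYPES sentinel is reached; the MIGRATE_CMA and MIGRATE_ISOLATE
 * rows are never consulted.
 */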
2352 
2353 #ifdef CONFIG_CMA
2354 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2355 					unsigned int order)
2356 {
2357 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2358 }
2359 #else
2360 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2361 					unsigned int order) { return NULL; }
2362 #endif
2363 
2364 /*
2365  * Move the free pages in a range to the freelist tail of the requested type.
2366  * Note that start_pfn and end_pfn are not aligned on a pageblock
2367  * boundary. If alignment is required, use move_freepages_block()
2368  */
2369 static int move_freepages(struct zone *zone,
2370 			  unsigned long start_pfn, unsigned long end_pfn,
2371 			  int migratetype, int *num_movable)
2372 {
2373 	struct page *page;
2374 	unsigned long pfn;
2375 	unsigned int order;
2376 	int pages_moved = 0;
2377 
2378 	for (pfn = start_pfn; pfn <= end_pfn;) {
2379 		if (!pfn_valid_within(pfn)) {
2380 			pfn++;
2381 			continue;
2382 		}
2383 
2384 		page = pfn_to_page(pfn);
2385 		if (!PageBuddy(page)) {
2386 			/*
2387 			 * We assume that pages that could be isolated for
2388 			 * migration are movable. But we don't actually try
2389 			 * isolating, as that would be expensive.
2390 			 */
2391 			if (num_movable &&
2392 					(PageLRU(page) || __PageMovable(page)))
2393 				(*num_movable)++;
2394 			pfn++;
2395 			continue;
2396 		}
2397 
2398 		/* Make sure we are not inadvertently changing nodes */
2399 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2400 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2401 
2402 		order = buddy_order(page);
2403 		move_to_free_list(page, zone, order, migratetype);
2404 		pfn += 1 << order;
2405 		pages_moved += 1 << order;
2406 	}
2407 
2408 	return pages_moved;
2409 }
2410 
2411 int move_freepages_block(struct zone *zone, struct page *page,
2412 				int migratetype, int *num_movable)
2413 {
2414 	unsigned long start_pfn, end_pfn, pfn;
2415 
2416 	if (num_movable)
2417 		*num_movable = 0;
2418 
2419 	pfn = page_to_pfn(page);
2420 	start_pfn = pfn & ~(pageblock_nr_pages - 1);
2421 	end_pfn = start_pfn + pageblock_nr_pages - 1;
2422 
2423 	/* Do not cross zone boundaries */
2424 	if (!zone_spans_pfn(zone, start_pfn))
2425 		start_pfn = pfn;
2426 	if (!zone_spans_pfn(zone, end_pfn))
2427 		return 0;
2428 
2429 	return move_freepages(zone, start_pfn, end_pfn, migratetype,
2430 								num_movable);
2431 }
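
/*
 * Illustrative arithmetic (pageblock_nr_pages assumed to be 512 for this
 * example): for a page at pfn 1000, start_pfn = 1000 & ~511 = 512 and
 * end_pfn = 512 + 511 = 1023, so the whole surrounding pageblock is moved,
 * subject to the zone-boundary clamping above.
 */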
2432 
2433 static void change_pageblock_range(struct page *pageblock_page,
2434 					int start_order, int migratetype)
2435 {
2436 	int nr_pageblocks = 1 << (start_order - pageblock_order);
2437 
2438 	while (nr_pageblocks--) {
2439 		set_pageblock_migratetype(pageblock_page, migratetype);
2440 		pageblock_page += pageblock_nr_pages;
2441 	}
2442 }
2443 
2444 /*
2445  * When we are falling back to another migratetype during allocation, try to
2446  * steal extra free pages from the same pageblocks to satisfy further
2447  * allocations, instead of polluting multiple pageblocks.
2448  *
2449  * If we are stealing a relatively large buddy page, it is likely there will
2450  * be more free pages in the pageblock, so try to steal them all. For
2451  * reclaimable and unmovable allocations, we steal regardless of page size,
2452  * as fragmentation caused by those allocations polluting movable pageblocks
2453  * is worse than movable allocations stealing from unmovable and reclaimable
2454  * pageblocks.
2455  */
2456 static bool can_steal_fallback(unsigned int order, int start_mt)
2457 {
2458 	/*
2459 	 * This order check is intentionally kept even though the next
2460 	 * check uses a more relaxed order. The reason is that we can
2461 	 * actually steal a whole pageblock if this condition is met,
2462 	 * whereas the check below doesn't guarantee it and is just a
2463 	 * heuristic, so it could be changed at any time.
2464 	 */
2465 	if (order >= pageblock_order)
2466 		return true;
2467 
2468 	if (order >= pageblock_order / 2 ||
2469 		start_mt == MIGRATE_RECLAIMABLE ||
2470 		start_mt == MIGRATE_UNMOVABLE ||
2471 		page_group_by_mobility_disabled)
2472 		return true;
2473 
2474 	return false;
2475 }
2476 
2477 static inline bool boost_watermark(struct zone *zone)
2478 {
2479 	unsigned long max_boost;
2480 
2481 	if (!watermark_boost_factor)
2482 		return false;
2483 	/*
2484 	 * Don't bother in zones that are unlikely to produce results.
2485 	 * On small machines, including kdump capture kernels running
2486 	 * in a small area, boosting the watermark can cause an out of
2487 	 * memory situation immediately.
2488 	 */
2489 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2490 		return false;
2491 
2492 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2493 			watermark_boost_factor, 10000);
2494 
2495 	/*
2496 	 * The high watermark may be uninitialised if fragmentation occurs
2497 	 * very early in boot, so do not boost. We do not fall
2498 	 * through and boost by pageblock_nr_pages because failing
2499 	 * allocations that early means that reclaim is not going
2500 	 * to help, and it may even be impossible to reclaim the
2501 	 * boosted watermark, resulting in a hang.
2502 	 */
2503 	if (!max_boost)
2504 		return false;
2505 
2506 	max_boost = max(pageblock_nr_pages, max_boost);
2507 
2508 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2509 		max_boost);
2510 
2511 	return true;
2512 }
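
/*
 * Worked example (values assumed for illustration): with
 * watermark_boost_factor == 15000 and a high watermark of 10000 pages,
 * max_boost = 10000 * 15000 / 10000 = 15000 pages; each fallback event then
 * raises zone->watermark_boost by pageblock_nr_pages until that cap is hit.
 */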
2513 
2514 /*
2515  * This function implements actual steal behaviour. If order is large enough,
2516  * we can steal whole pageblock. If not, we first move freepages in this
2517  * pageblock to our migratetype and determine how many already-allocated pages
2518  * are there in the pageblock with a compatible migratetype. If at least half
2519  * of pages are free or compatible, we can change migratetype of the pageblock
2520  * itself, so pages freed in the future will be put on the correct free list.
2521  */
2522 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2523 		unsigned int alloc_flags, int start_type, bool whole_block)
2524 {
2525 	unsigned int current_order = buddy_order(page);
2526 	int free_pages, movable_pages, alike_pages;
2527 	int old_block_type;
2528 
2529 	old_block_type = get_pageblock_migratetype(page);
2530 
2531 	/*
2532 	 * This can happen due to races and we want to prevent broken
2533 	 * highatomic accounting.
2534 	 */
2535 	if (is_migrate_highatomic(old_block_type))
2536 		goto single_page;
2537 
2538 	/* Take ownership for orders >= pageblock_order */
2539 	if (current_order >= pageblock_order) {
2540 		change_pageblock_range(page, current_order, start_type);
2541 		goto single_page;
2542 	}
2543 
2544 	/*
2545 	 * Boost watermarks to increase reclaim pressure to reduce the
2546 	 * likelihood of future fallbacks. Wake kswapd now as the node
2547 	 * may be balanced overall and kswapd will not wake naturally.
2548 	 */
2549 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2550 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2551 
2552 	/* We are not allowed to try stealing from the whole block */
2553 	if (!whole_block)
2554 		goto single_page;
2555 
2556 	free_pages = move_freepages_block(zone, page, start_type,
2557 						&movable_pages);
2558 	/*
2559 	 * Determine how many pages are compatible with our allocation.
2560 	 * For movable allocation, it's the number of movable pages which
2561 	 * we just obtained. For other types it's a bit more tricky.
2562 	 */
2563 	if (start_type == MIGRATE_MOVABLE) {
2564 		alike_pages = movable_pages;
2565 	} else {
2566 		/*
2567 		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2568 		 * to MOVABLE pageblock, consider all non-movable pages as
2569 		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2570 		 * vice versa, be conservative since we can't distinguish the
2571 		 * exact migratetype of non-movable pages.
2572 		 */
2573 		if (old_block_type == MIGRATE_MOVABLE)
2574 			alike_pages = pageblock_nr_pages
2575 						- (free_pages + movable_pages);
2576 		else
2577 			alike_pages = 0;
2578 	}
2579 
2580 	/* moving whole block can fail due to zone boundary conditions */
2581 	if (!free_pages)
2582 		goto single_page;
2583 
2584 	/*
2585 	 * If a sufficient number of pages in the block are either free or of
2586 	 * comparable migratability as our allocation, claim the whole block.
2587 	 */
2588 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2589 			page_group_by_mobility_disabled)
2590 		set_pageblock_migratetype(page, start_type);
2591 
2592 	return;
2593 
2594 single_page:
2595 	move_to_free_list(page, zone, current_order, start_type);
2596 }
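
/*
 * Illustrative threshold (pageblock_order assumed to be 9): the pageblock is
 * claimed once free_pages + alike_pages >= 1 << 8 == 256, i.e. when at least
 * half of its 512 pages are free or of a compatible migratetype.
 */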
2597 
2598 /*
2599  * Check whether there is a suitable fallback freepage with requested order.
2600  * If only_stealable is true, this function returns fallback_mt only if
2601  * we can steal other freepages altogether. This would help to reduce
2602  * fragmentation due to mixed migratetype pages in one pageblock.
2603  */
2604 int find_suitable_fallback(struct free_area *area, unsigned int order,
2605 			int migratetype, bool only_stealable, bool *can_steal)
2606 {
2607 	int i;
2608 	int fallback_mt;
2609 
2610 	if (area->nr_free == 0)
2611 		return -1;
2612 
2613 	*can_steal = false;
2614 	for (i = 0;; i++) {
2615 		fallback_mt = fallbacks[migratetype][i];
2616 		if (fallback_mt == MIGRATE_TYPES)
2617 			break;
2618 
2619 		if (free_area_empty(area, fallback_mt))
2620 			continue;
2621 
2622 		if (can_steal_fallback(order, migratetype))
2623 			*can_steal = true;
2624 
2625 		if (!only_stealable)
2626 			return fallback_mt;
2627 
2628 		if (*can_steal)
2629 			return fallback_mt;
2630 	}
2631 
2632 	return -1;
2633 }
2634 
2635 /*
2636  * Reserve a pageblock for exclusive use of high-order atomic allocations if
2637  * there are no empty page blocks that contain a page with a suitable order
2638  */
2639 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2640 				unsigned int alloc_order)
2641 {
2642 	int mt;
2643 	unsigned long max_managed, flags;
2644 
2645 	/*
2646 	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2647 	 * Check is race-prone but harmless.
2648 	 */
2649 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2650 	if (zone->nr_reserved_highatomic >= max_managed)
2651 		return;
2652 
2653 	spin_lock_irqsave(&zone->lock, flags);
2654 
2655 	/* Recheck the nr_reserved_highatomic limit under the lock */
2656 	if (zone->nr_reserved_highatomic >= max_managed)
2657 		goto out_unlock;
2658 
2659 	/* Yoink! */
2660 	mt = get_pageblock_migratetype(page);
2661 	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2662 	    && !is_migrate_cma(mt)) {
2663 		zone->nr_reserved_highatomic += pageblock_nr_pages;
2664 		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2665 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2666 	}
2667 
2668 out_unlock:
2669 	spin_unlock_irqrestore(&zone->lock, flags);
2670 }
2671 
2672 /*
2673  * Used when an allocation is about to fail under memory pressure. This
2674  * potentially hurts the reliability of high-order allocations when under
2675  * intense memory pressure but failed atomic allocations should be easier
2676  * to recover from than an OOM.
2677  *
2678  * If @force is true, try to unreserve a pageblock even though highatomic
2679  * pageblock is exhausted.
2680  */
2681 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2682 						bool force)
2683 {
2684 	struct zonelist *zonelist = ac->zonelist;
2685 	unsigned long flags;
2686 	struct zoneref *z;
2687 	struct zone *zone;
2688 	struct page *page;
2689 	int order;
2690 	bool ret;
2691 
2692 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2693 								ac->nodemask) {
2694 		/*
2695 		 * Preserve at least one pageblock unless memory pressure
2696 		 * is really high.
2697 		 */
2698 		if (!force && zone->nr_reserved_highatomic <=
2699 					pageblock_nr_pages)
2700 			continue;
2701 
2702 		spin_lock_irqsave(&zone->lock, flags);
2703 		for (order = 0; order < MAX_ORDER; order++) {
2704 			struct free_area *area = &(zone->free_area[order]);
2705 
2706 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2707 			if (!page)
2708 				continue;
2709 
2710 			/*
2711 			 * In page freeing path, migratetype change is racy so
2712 			 * we can encounter several free pages in a pageblock
2713 			 * in this loop although we changed the pageblock type
2714 			 * from highatomic to ac->migratetype. So we should
2715 			 * adjust the count once.
2716 			 */
2717 			if (is_migrate_highatomic_page(page)) {
2718 				/*
2719 				 * It should never happen but changes to
2720 				 * locking could inadvertently allow a per-cpu
2721 				 * drain to add pages to MIGRATE_HIGHATOMIC
2722 				 * while unreserving so be safe and watch for
2723 				 * underflows.
2724 				 */
2725 				zone->nr_reserved_highatomic -= min(
2726 						pageblock_nr_pages,
2727 						zone->nr_reserved_highatomic);
2728 			}
2729 
2730 			/*
2731 			 * Convert to ac->migratetype and avoid the normal
2732 			 * pageblock stealing heuristics. Minimally, the caller
2733 			 * is doing the work and needs the pages. More
2734 			 * importantly, if the block was always converted to
2735 			 * MIGRATE_UNMOVABLE or another type then the number
2736 			 * of pageblocks that cannot be completely freed
2737 			 * may increase.
2738 			 */
2739 			set_pageblock_migratetype(page, ac->migratetype);
2740 			ret = move_freepages_block(zone, page, ac->migratetype,
2741 									NULL);
2742 			if (ret) {
2743 				spin_unlock_irqrestore(&zone->lock, flags);
2744 				return ret;
2745 			}
2746 		}
2747 		spin_unlock_irqrestore(&zone->lock, flags);
2748 	}
2749 
2750 	return false;
2751 }
2752 
2753 /*
2754  * Try finding a free buddy page on the fallback list and put it on the free
2755  * list of requested migratetype, possibly along with other pages from the same
2756  * block, depending on fragmentation avoidance heuristics. Returns true if
2757  * fallback was found so that __rmqueue_smallest() can grab it.
2758  *
2759  * The use of signed ints for order and current_order is a deliberate
2760  * deviation from the rest of this file, to make the for loop
2761  * condition simpler.
2762  */
2763 static __always_inline bool
2764 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2765 						unsigned int alloc_flags)
2766 {
2767 	struct free_area *area;
2768 	int current_order;
2769 	int min_order = order;
2770 	struct page *page;
2771 	int fallback_mt;
2772 	bool can_steal;
2773 
2774 	/*
2775 	 * Do not steal pages from freelists belonging to other pageblocks
2776 	 * i.e. orders < pageblock_order. If there are no local zones free,
2777 	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2778 	 */
2779 	if (alloc_flags & ALLOC_NOFRAGMENT)
2780 		min_order = pageblock_order;
2781 
2782 	/*
2783 	 * Find the largest available free page in the other list. This roughly
2784 	 * approximates finding the pageblock with the most free pages, which
2785 	 * would be too costly to do exactly.
2786 	 */
2787 	for (current_order = MAX_ORDER - 1; current_order >= min_order;
2788 				--current_order) {
2789 		area = &(zone->free_area[current_order]);
2790 		fallback_mt = find_suitable_fallback(area, current_order,
2791 				start_migratetype, false, &can_steal);
2792 		if (fallback_mt == -1)
2793 			continue;
2794 
2795 		/*
2796 		 * We cannot steal all free pages from the pageblock and the
2797 		 * requested migratetype is movable. In that case it's better to
2798 		 * steal and split the smallest available page instead of the
2799 		 * largest available page, because even if the next movable
2800 		 * allocation falls back into a different pageblock than this
2801 		 * one, it won't cause permanent fragmentation.
2802 		 */
2803 		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2804 					&& current_order > order)
2805 			goto find_smallest;
2806 
2807 		goto do_steal;
2808 	}
2809 
2810 	return false;
2811 
2812 find_smallest:
2813 	for (current_order = order; current_order < MAX_ORDER;
2814 							current_order++) {
2815 		area = &(zone->free_area[current_order]);
2816 		fallback_mt = find_suitable_fallback(area, current_order,
2817 				start_migratetype, false, &can_steal);
2818 		if (fallback_mt != -1)
2819 			break;
2820 	}
2821 
2822 	/*
2823 	 * This should not happen - we already found a suitable fallback
2824 	 * when looking for the largest page.
2825 	 */
2826 	VM_BUG_ON(current_order == MAX_ORDER);
2827 
2828 do_steal:
2829 	page = get_page_from_free_area(area, fallback_mt);
2830 
2831 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2832 								can_steal);
2833 
2834 	trace_mm_page_alloc_extfrag(page, order, current_order,
2835 		start_migratetype, fallback_mt);
2836 
2837 	return true;
2838 
2839 }
2840 
2841 static __always_inline struct page *
2842 __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
2843 					int migratetype, unsigned int alloc_flags)
2844 {
2845 	struct page *page = NULL;
2846 retry:
2847 	page = __rmqueue_smallest(zone, order, migratetype);
2848 
2849 	if (unlikely(!page) && is_migrate_cma(migratetype)) {
2850 		migratetype = MIGRATE_MOVABLE;
2851 		alloc_flags &= ~ALLOC_CMA;
2852 		page = __rmqueue_smallest(zone, order, migratetype);
2853 	}
2854 
2855 	if (unlikely(!page) &&
2856 		__rmqueue_fallback(zone, order, migratetype, alloc_flags))
2857 		goto retry;
2858 
2859 	return page;
2860 }
2861 
2862 /*
2863  * Do the hard work of removing an element from the buddy allocator.
2864  * Call me with the zone->lock already held.
2865  */
2866 static __always_inline struct page *
2867 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2868 						unsigned int alloc_flags)
2869 {
2870 	struct page *page;
2871 
2872 #ifdef CONFIG_CMA_REUSE
2873 	page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
2874 	goto out;
2875 #endif
2876 
2877 	if (IS_ENABLED(CONFIG_CMA)) {
2878 		/*
2879 		 * Balance movable allocations between regular and CMA areas by
2880 		 * allocating from CMA when over half of the zone's free memory
2881 		 * is in the CMA area.
2882 		 */
2883 		if (alloc_flags & ALLOC_CMA &&
2884 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2885 		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2886 			page = __rmqueue_cma_fallback(zone, order);
2887 			if (page)
2888 				goto out;
2889 		}
2890 	}
2891 retry:
2892 	page = __rmqueue_smallest(zone, order, migratetype);
2893 	if (unlikely(!page)) {
2894 		if (alloc_flags & ALLOC_CMA)
2895 			page = __rmqueue_cma_fallback(zone, order);
2896 
2897 		if (!page && __rmqueue_fallback(zone, order, migratetype,
2898 								alloc_flags))
2899 			goto retry;
2900 	}
2901 out:
2902 	if (page)
2903 		trace_mm_page_alloc_zone_locked(page, order, migratetype);
2904 	return page;
2905 }
2906 
2907 /*
2908  * Obtain a specified number of elements from the buddy allocator, all under
2909  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2910  * Returns the number of new pages which were placed at *list.
2911  */
2912 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2913 			unsigned long count, struct list_head *list,
2914 			int migratetype, unsigned int alloc_flags)
2915 {
2916 	int i, alloced = 0;
2917 
2918 	spin_lock(&zone->lock);
2919 	for (i = 0; i < count; ++i) {
2920 		struct page *page = __rmqueue(zone, order, migratetype,
2921 								alloc_flags);
2922 		if (unlikely(page == NULL))
2923 			break;
2924 
2925 		if (unlikely(check_pcp_refill(page)))
2926 			continue;
2927 
2928 		/*
2929 		 * Split buddy pages returned by expand() are received here in
2930 		 * physical page order. The page is added to the tail of
2931 		 * the caller's list. From the caller's perspective, the linked
2932 		 * list is ordered by page number under some conditions. This is
2933 		 * useful for IO devices that consume the list in forward order
2934 		 * from the head, thus also in physical page order, and for IO
2935 		 * devices that can merge IO requests if the physical pages are
2936 		 * ordered properly.
2937 		 */
2938 		list_add_tail(&page->lru, list);
2939 		alloced++;
2940 		if (is_migrate_cma(get_pcppage_migratetype(page)))
2941 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2942 					      -(1 << order));
2943 	}
2944 
2945 	/*
2946 	 * i pages were removed from the buddy list even if some leak due
2947 	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2948 	 * on i. Do not confuse with 'alloced' which is the number of
2949 	 * pages added to the pcp list.
2950 	 */
2951 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2952 	spin_unlock(&zone->lock);
2953 	return alloced;
2954 }
2955 
2956 #ifdef CONFIG_NUMA
2957 /*
2958  * Called from the vmstat counter updater to drain pagesets of this
2959  * currently executing processor on remote nodes after they have
2960  * expired.
2961  *
2962  * Note that this function must be called with the thread pinned to
2963  * a single processor.
2964  */
2965 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2966 {
2967 	unsigned long flags;
2968 	int to_drain, batch;
2969 
2970 	local_irq_save(flags);
2971 	batch = READ_ONCE(pcp->batch);
2972 	to_drain = min(pcp->count, batch);
2973 	if (to_drain > 0)
2974 		free_pcppages_bulk(zone, to_drain, pcp);
2975 	local_irq_restore(flags);
2976 }
2977 #endif
2978 
2979 /*
2980  * Drain pcplists of the indicated processor and zone.
2981  *
2982  * The processor must either be the current processor and the
2983  * thread pinned to the current processor or a processor that
2984  * is not online.
2985  */
2986 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2987 {
2988 	unsigned long flags;
2989 	struct per_cpu_pageset *pset;
2990 	struct per_cpu_pages *pcp;
2991 
2992 	local_irq_save(flags);
2993 	pset = per_cpu_ptr(zone->pageset, cpu);
2994 
2995 	pcp = &pset->pcp;
2996 	if (pcp->count)
2997 		free_pcppages_bulk(zone, pcp->count, pcp);
2998 	local_irq_restore(flags);
2999 }
3000 
3001 /*
3002  * Drain pcplists of all zones on the indicated processor.
3003  *
3004  * The processor must either be the current processor and the
3005  * thread pinned to the current processor or a processor that
3006  * is not online.
3007  */
3008 static void drain_pages(unsigned int cpu)
3009 {
3010 	struct zone *zone;
3011 
3012 	for_each_populated_zone(zone) {
3013 		drain_pages_zone(cpu, zone);
3014 	}
3015 }
3016 
3017 /*
3018  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3019  *
3020  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3021  * the single zone's pages.
3022  */
3023 void drain_local_pages(struct zone *zone)
3024 {
3025 	int cpu = smp_processor_id();
3026 
3027 	if (zone)
3028 		drain_pages_zone(cpu, zone);
3029 	else
3030 		drain_pages(cpu);
3031 }
3032 
3033 static void drain_local_pages_wq(struct work_struct *work)
3034 {
3035 	struct pcpu_drain *drain;
3036 
3037 	drain = container_of(work, struct pcpu_drain, work);
3038 
3039 	/*
3040 	 * drain_all_pages doesn't use proper cpu hotplug protection so
3041 	 * we can race with cpu offline when the WQ can move this from
3042 	 * a cpu pinned worker to an unbound one. We can operate on a different
3043 	 * cpu, which is all right, but we also have to make sure not to move
3044 	 * to yet another one while draining.
3045 	 */
3046 	preempt_disable();
3047 	drain_local_pages(drain->zone);
3048 	preempt_enable();
3049 }
3050 
3051 /*
3052  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3053  *
3054  * When zone parameter is non-NULL, spill just the single zone's pages.
3055  *
3056  * Note that this can be extremely slow as the draining happens in a workqueue.
3057  */
3058 void drain_all_pages(struct zone *zone)
3059 {
3060 	int cpu;
3061 
3062 	/*
3063 	 * Allocate in the BSS so we won't require allocation in
3064 	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3065 	 */
3066 	static cpumask_t cpus_with_pcps;
3067 
3068 	/*
3069 	 * Make sure nobody triggers this path before mm_percpu_wq is fully
3070 	 * initialized.
3071 	 */
3072 	if (WARN_ON_ONCE(!mm_percpu_wq))
3073 		return;
3074 
3075 	/*
3076 	 * Do not drain if one is already in progress unless it's specific to
3077 	 * a zone. Such callers are primarily CMA and memory hotplug and need
3078 	 * the drain to be complete when the call returns.
3079 	 */
3080 	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3081 		if (!zone)
3082 			return;
3083 		mutex_lock(&pcpu_drain_mutex);
3084 	}
3085 
3086 	/*
3087 	 * We don't care about racing with a CPU hotplug event
3088 	 * as the offline notification will cause the notified
3089 	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
3090 	 * disables preemption as part of its processing.
3091 	 */
3092 	for_each_online_cpu(cpu) {
3093 		struct per_cpu_pageset *pcp;
3094 		struct zone *z;
3095 		bool has_pcps = false;
3096 
3097 		if (zone) {
3098 			pcp = per_cpu_ptr(zone->pageset, cpu);
3099 			if (pcp->pcp.count)
3100 				has_pcps = true;
3101 		} else {
3102 			for_each_populated_zone(z) {
3103 				pcp = per_cpu_ptr(z->pageset, cpu);
3104 				if (pcp->pcp.count) {
3105 					has_pcps = true;
3106 					break;
3107 				}
3108 			}
3109 		}
3110 
3111 		if (has_pcps)
3112 			cpumask_set_cpu(cpu, &cpus_with_pcps);
3113 		else
3114 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
3115 	}
3116 
3117 	for_each_cpu(cpu, &cpus_with_pcps) {
3118 		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3119 
3120 		drain->zone = zone;
3121 		INIT_WORK(&drain->work, drain_local_pages_wq);
3122 		queue_work_on(cpu, mm_percpu_wq, &drain->work);
3123 	}
3124 	for_each_cpu(cpu, &cpus_with_pcps)
3125 		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3126 
3127 	mutex_unlock(&pcpu_drain_mutex);
3128 }
3129 
3130 #ifdef CONFIG_HIBERNATION
3131 
3132 /*
3133  * Touch the watchdog for every WD_PAGE_COUNT pages.
3134  */
3135 #define WD_PAGE_COUNT	(128*1024)
3136 
3137 void mark_free_pages(struct zone *zone)
3138 {
3139 	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3140 	unsigned long flags;
3141 	unsigned int order, t;
3142 	struct page *page;
3143 
3144 	if (zone_is_empty(zone))
3145 		return;
3146 
3147 	spin_lock_irqsave(&zone->lock, flags);
3148 
3149 	max_zone_pfn = zone_end_pfn(zone);
3150 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3151 		if (pfn_valid(pfn)) {
3152 			page = pfn_to_page(pfn);
3153 
3154 			if (!--page_count) {
3155 				touch_nmi_watchdog();
3156 				page_count = WD_PAGE_COUNT;
3157 			}
3158 
3159 			if (page_zone(page) != zone)
3160 				continue;
3161 
3162 			if (!swsusp_page_is_forbidden(page))
3163 				swsusp_unset_page_free(page);
3164 		}
3165 
3166 	for_each_migratetype_order(order, t) {
3167 		list_for_each_entry(page,
3168 				&zone->free_area[order].free_list[t], lru) {
3169 			unsigned long i;
3170 
3171 			pfn = page_to_pfn(page);
3172 			for (i = 0; i < (1UL << order); i++) {
3173 				if (!--page_count) {
3174 					touch_nmi_watchdog();
3175 					page_count = WD_PAGE_COUNT;
3176 				}
3177 				swsusp_set_page_free(pfn_to_page(pfn + i));
3178 			}
3179 		}
3180 	}
3181 	spin_unlock_irqrestore(&zone->lock, flags);
3182 }
3183 #endif /* CONFIG_HIBERNATION */
3184 
3185 static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
3186 {
3187 	int migratetype;
3188 
3189 	if (!free_pcp_prepare(page))
3190 		return false;
3191 
3192 	migratetype = get_pfnblock_migratetype(page, pfn);
3193 	set_pcppage_migratetype(page, migratetype);
3194 	return true;
3195 }
3196 
3197 static void free_unref_page_commit(struct page *page, unsigned long pfn)
3198 {
3199 	struct zone *zone = page_zone(page);
3200 	struct per_cpu_pages *pcp;
3201 	int migratetype;
3202 
3203 	migratetype = get_pcppage_migratetype(page);
3204 	__count_vm_event(PGFREE);
3205 
3206 	/*
3207 	 * We only track unmovable, reclaimable and movable on pcp lists.
3208 	 * Free ISOLATE pages back to the allocator because they are being
3209 	 * offlined but treat HIGHATOMIC as movable pages so we can get those
3210 	 * areas back if necessary. Otherwise, we may have to free
3211 	 * excessively into the page allocator.
3212 	 */
3213 	if (migratetype >= MIGRATE_PCPTYPES) {
3214 		if (unlikely(is_migrate_isolate(migratetype))) {
3215 			free_one_page(zone, page, pfn, 0, migratetype,
3216 				      FPI_NONE);
3217 			return;
3218 		}
3219 		migratetype = MIGRATE_MOVABLE;
3220 	}
3221 
3222 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3223 	list_add(&page->lru, &pcp->lists[migratetype]);
3224 	pcp->count++;
3225 	if (pcp->count >= pcp->high) {
3226 		unsigned long batch = READ_ONCE(pcp->batch);
3227 		free_pcppages_bulk(zone, batch, pcp);
3228 	}
3229 }
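
/*
 * For illustration (pcp->high and pcp->batch values assumed, not taken from
 * this file): with pcp->high == 378 and pcp->batch == 63, once a CPU's list
 * reaches 378 pages free_pcppages_bulk() returns a batch of 63 of them to
 * the buddy allocator while the rest stay cached on the pcp list.
 */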
3230 
3231 /*
3232  * Free a 0-order page
3233  */
3234 void free_unref_page(struct page *page)
3235 {
3236 	unsigned long flags;
3237 	unsigned long pfn = page_to_pfn(page);
3238 
3239 	if (!free_unref_page_prepare(page, pfn))
3240 		return;
3241 
3242 	local_irq_save(flags);
3243 	free_unref_page_commit(page, pfn);
3244 	local_irq_restore(flags);
3245 }
3246 
3247 /*
3248  * Free a list of 0-order pages
3249  */
3250 void free_unref_page_list(struct list_head *list)
3251 {
3252 	struct page *page, *next;
3253 	unsigned long flags, pfn;
3254 	int batch_count = 0;
3255 
3256 	/* Prepare pages for freeing */
3257 	list_for_each_entry_safe(page, next, list, lru) {
3258 		pfn = page_to_pfn(page);
3259 		if (!free_unref_page_prepare(page, pfn))
3260 			list_del(&page->lru);
3261 		set_page_private(page, pfn);
3262 	}
3263 
3264 	local_irq_save(flags);
3265 	list_for_each_entry_safe(page, next, list, lru) {
3266 		unsigned long pfn = page_private(page);
3267 
3268 		set_page_private(page, 0);
3269 		trace_mm_page_free_batched(page);
3270 		free_unref_page_commit(page, pfn);
3271 
3272 		/*
3273 		 * Guard against excessive IRQ disabled times when we get
3274 		 * a large list of pages to free.
3275 		 */
3276 		if (++batch_count == SWAP_CLUSTER_MAX) {
3277 			local_irq_restore(flags);
3278 			batch_count = 0;
3279 			local_irq_save(flags);
3280 		}
3281 	}
3282 	local_irq_restore(flags);
3283 }
3284 
3285 /*
3286  * split_page takes a non-compound higher-order page, and splits it into
3287  * n (1<<order) sub-pages: page[0..n-1]
3288  * Each sub-page must be freed individually.
3289  *
3290  * Note: this is probably too low level an operation for use in drivers.
3291  * Please consult with lkml before using this in your driver.
3292  */
3293 void split_page(struct page *page, unsigned int order)
3294 {
3295 	int i;
3296 
3297 	VM_BUG_ON_PAGE(PageCompound(page), page);
3298 	VM_BUG_ON_PAGE(!page_count(page), page);
3299 
3300 	for (i = 1; i < (1 << order); i++)
3301 		set_page_refcounted(page + i);
3302 	split_page_owner(page, 1 << order);
3303 	split_page_memcg(page, 1 << order);
3304 }
3305 EXPORT_SYMBOL_GPL(split_page);
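
/*
 * Illustrative usage sketch for split_page() (not taken from this file;
 * assumes a caller that owns a non-compound order-2 allocation):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	// page[0]..page[3] can now be used and freed independently
 *	for (i = 0; i < 4; i++)
 *		__free_page(page + i);
 */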
3306 
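/*
 * Expects a buddy page with zone->lock held. Returns the number of base
 * pages removed from the free list (1 << order) on success, or 0 when
 * taking the page would push the zone below its min watermark.
 */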
3307 int __isolate_free_page(struct page *page, unsigned int order)
3308 {
3309 	unsigned long watermark;
3310 	struct zone *zone;
3311 	int mt;
3312 
3313 	BUG_ON(!PageBuddy(page));
3314 
3315 	zone = page_zone(page);
3316 	mt = get_pageblock_migratetype(page);
3317 
3318 	if (!is_migrate_isolate(mt)) {
3319 		/*
3320 		 * Obey watermarks as if the page was being allocated. We can
3321 		 * emulate a high-order watermark check with a raised order-0
3322 		 * watermark, because we already know our high-order page
3323 		 * exists.
3324 		 */
3325 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3326 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3327 			return 0;
3328 
3329 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
3330 	}
3331 
3332 	/* Remove page from free list */
3333 
3334 	del_page_from_free_list(page, zone, order);
3335 
3336 	/*
3337 	 * Set the pageblock's migratetype if the isolated page covers at
3338 	 * least half of a pageblock
3339 	 */
3340 	if (order >= pageblock_order - 1) {
3341 		struct page *endpage = page + (1 << order) - 1;
3342 		for (; page < endpage; page += pageblock_nr_pages) {
3343 			int mt = get_pageblock_migratetype(page);
3344 			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3345 			    && !is_migrate_highatomic(mt))
3346 				set_pageblock_migratetype(page,
3347 							  MIGRATE_MOVABLE);
3348 		}
3349 	}
3350 
3351 
3352 	return 1UL << order;
3353 }
3354 
3355 /**
3356  * __putback_isolated_page - Return a now-isolated page back where we got it
3357  * @page: Page that was isolated
3358  * @order: Order of the isolated page
3359  * @mt: The page's pageblock's migratetype
3360  *
3361  * This function is meant to return a page pulled from the free lists via
3362  * __isolate_free_page back to the free lists they were pulled from.
3363  */
3364 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3365 {
3366 	struct zone *zone = page_zone(page);
3367 
3368 	/* zone lock should be held when this function is called */
3369 	lockdep_assert_held(&zone->lock);
3370 
3371 	/* Return isolated page to tail of freelist. */
3372 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
3373 			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3374 }
3375 
3376 /*
3377  * Update NUMA hit/miss statistics
3378  *
3379  * Must be called with interrupts disabled.
3380  */
3381 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3382 {
3383 #ifdef CONFIG_NUMA
3384 	enum numa_stat_item local_stat = NUMA_LOCAL;
3385 
3386 	/* skip numa counters update if numa stats is disabled */
3387 	if (!static_branch_likely(&vm_numa_stat_key))
3388 		return;
3389 
3390 	if (zone_to_nid(z) != numa_node_id())
3391 		local_stat = NUMA_OTHER;
3392 
3393 	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3394 		__inc_numa_state(z, NUMA_HIT);
3395 	else {
3396 		__inc_numa_state(z, NUMA_MISS);
3397 		__inc_numa_state(preferred_zone, NUMA_FOREIGN);
3398 	}
3399 	__inc_numa_state(z, local_stat);
3400 #endif
3401 }
3402 
3403 /* Remove page from the per-cpu list, caller must protect the list */
3404 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3405 			unsigned int alloc_flags,
3406 			struct per_cpu_pages *pcp,
3407 			struct list_head *list)
3408 {
3409 	struct page *page;
3410 
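	/*
	 * Refill from the buddy lists in pcp->batch sized chunks when the
	 * pcp list is empty; a page that fails the debug sanity check has
	 * already been unlinked, so simply retry with the next one.
	 */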
3411 	do {
3412 		if (list_empty(list)) {
3413 			pcp->count += rmqueue_bulk(zone, 0,
3414 					pcp->batch, list,
3415 					migratetype, alloc_flags);
3416 			if (unlikely(list_empty(list)))
3417 				return NULL;
3418 		}
3419 
3420 		page = list_first_entry(list, struct page, lru);
3421 		list_del(&page->lru);
3422 		pcp->count--;
3423 	} while (check_new_pcp(page));
3424 
3425 	return page;
3426 }
3427 
3428 /* Lock and remove page from the per-cpu list */
3429 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3430 			struct zone *zone, gfp_t gfp_flags,
3431 			int migratetype, unsigned int alloc_flags)
3432 {
3433 	struct per_cpu_pages *pcp;
3434 	struct list_head *list;
3435 	struct page *page;
3436 	unsigned long flags;
3437 
3438 	local_irq_save(flags);
3439 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
3440 	list = &pcp->lists[migratetype];
3441 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3442 	if (page) {
3443 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3444 		zone_statistics(preferred_zone, zone);
3445 	}
3446 	local_irq_restore(flags);
3447 	return page;
3448 }
3449 
3450 /*
3451  * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3452  */
3453 static inline
3454 struct page *rmqueue(struct zone *preferred_zone,
3455 			struct zone *zone, unsigned int order,
3456 			gfp_t gfp_flags, unsigned int alloc_flags,
3457 			int migratetype)
3458 {
3459 	unsigned long flags;
3460 	struct page *page;
3461 
3462 	if (likely(order == 0)) {
3463 		/*
3464 		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3465 		 * we need to skip it when CMA area isn't allowed.
3466 		 */
3467 		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3468 				migratetype != MIGRATE_MOVABLE ||
3469 				IS_ENABLED(CONFIG_CMA_REUSE)) {
3470 			page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3471 					migratetype, alloc_flags);
3472 			goto out;
3473 		}
3474 	}
3475 
3476 	/*
3477 	 * We most definitely don't want callers attempting to
3478 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
3479 	 */
3480 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3481 	spin_lock_irqsave(&zone->lock, flags);
3482 
3483 	do {
3484 		page = NULL;
3485 		/*
3486 		 * order-0 request can reach here when the pcplist is skipped
3487 		 * due to non-CMA allocation context. HIGHATOMIC area is
3488 		 * reserved for high-order atomic allocation, so order-0
3489 		 * request should skip it.
3490 		 */
3491 		if (order > 0 && alloc_flags & ALLOC_HARDER) {
3492 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3493 			if (page)
3494 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
3495 		}
3496 		if (!page)
3497 			page = __rmqueue(zone, order, migratetype, alloc_flags);
3498 	} while (page && check_new_pages(page, order));
3499 	spin_unlock(&zone->lock);
3500 	if (!page)
3501 		goto failed;
3502 	__mod_zone_freepage_state(zone, -(1 << order),
3503 				  get_pcppage_migratetype(page));
3504 
3505 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3506 	zone_statistics(preferred_zone, zone);
3507 	local_irq_restore(flags);
3508 
3509 out:
3510 	/* Separate test+clear to avoid unnecessary atomics */
3511 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3512 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3513 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3514 	}
3515 
3516 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3517 	return page;
3518 
3519 failed:
3520 	local_irq_restore(flags);
3521 	return NULL;
3522 }
3523 
3524 #ifdef CONFIG_FAIL_PAGE_ALLOC
3525 
3526 static struct {
3527 	struct fault_attr attr;
3528 
3529 	bool ignore_gfp_highmem;
3530 	bool ignore_gfp_reclaim;
3531 	u32 min_order;
3532 } fail_page_alloc = {
3533 	.attr = FAULT_ATTR_INITIALIZER,
3534 	.ignore_gfp_reclaim = true,
3535 	.ignore_gfp_highmem = true,
3536 	.min_order = 1,
3537 };
3538 
3539 static int __init setup_fail_page_alloc(char *str)
3540 {
3541 	return setup_fault_attr(&fail_page_alloc.attr, str);
3542 }
3543 __setup("fail_page_alloc=", setup_fail_page_alloc);
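/*
 * Illustrative (see Documentation/fault-injection): booting with
 * fail_page_alloc=<interval>,<probability>,<space>,<times> arms the fault
 * attribute, and the debugfs knobs created below allow runtime tuning when
 * CONFIG_FAULT_INJECTION_DEBUG_FS is enabled.
 */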
3544 
3545 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3546 {
3547 	if (order < fail_page_alloc.min_order)
3548 		return false;
3549 	if (gfp_mask & __GFP_NOFAIL)
3550 		return false;
3551 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3552 		return false;
3553 	if (fail_page_alloc.ignore_gfp_reclaim &&
3554 			(gfp_mask & __GFP_DIRECT_RECLAIM))
3555 		return false;
3556 
3557 	return should_fail(&fail_page_alloc.attr, 1 << order);
3558 }
3559 
3560 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3561 
3562 static int __init fail_page_alloc_debugfs(void)
3563 {
3564 	umode_t mode = S_IFREG | 0600;
3565 	struct dentry *dir;
3566 
3567 	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3568 					&fail_page_alloc.attr);
3569 
3570 	debugfs_create_bool("ignore-gfp-wait", mode, dir,
3571 			    &fail_page_alloc.ignore_gfp_reclaim);
3572 	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3573 			    &fail_page_alloc.ignore_gfp_highmem);
3574 	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3575 
3576 	return 0;
3577 }
3578 
3579 late_initcall(fail_page_alloc_debugfs);
3580 
3581 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3582 
3583 #else /* CONFIG_FAIL_PAGE_ALLOC */
3584 
3585 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3586 {
3587 	return false;
3588 }
3589 
3590 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3591 
3592 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3593 {
3594 	return __should_fail_alloc_page(gfp_mask, order);
3595 }
3596 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3597 
3598 static inline long __zone_watermark_unusable_free(struct zone *z,
3599 				unsigned int order, unsigned int alloc_flags)
3600 {
3601 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3602 	long unusable_free = (1 << order) - 1;
3603 
3604 	/*
3605 	 * If the caller does not have rights to ALLOC_HARDER then subtract
3606 	 * the high-atomic reserves. This will over-estimate the size of the
3607 	 * atomic reserve but it avoids a search.
3608 	 */
3609 	if (likely(!alloc_harder))
3610 		unusable_free += z->nr_reserved_highatomic;
3611 
3612 #ifdef CONFIG_CMA
3613 	/* If allocation can't use CMA areas don't use free CMA pages */
3614 	if (!(alloc_flags & ALLOC_CMA))
3615 		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3616 #endif
3617 
3618 	return unusable_free;
3619 }
3620 
3621 /*
3622  * Return true if free base pages are above 'mark'. For high-order checks it
3623  * will return true if the order-0 watermark is reached and there is at least
3624  * one free page of a suitable size. Checking now avoids taking the zone lock
3625  * to check in the allocation paths if no pages are free.
3626  */
3627 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3628 			 int highest_zoneidx, unsigned int alloc_flags,
3629 			 long free_pages)
3630 {
3631 	long min = mark;
3632 	int o;
3633 	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3634 
3635 	/* free_pages may go negative - that's OK */
3636 	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3637 
3638 	if (alloc_flags & ALLOC_HIGH)
3639 		min -= min / 2;
3640 
3641 	if (unlikely(alloc_harder)) {
3642 		/*
3643 		 * OOM victims can try even harder than normal ALLOC_HARDER
3644 		 * users on the grounds that it's definitely going to be in
3645 		 * the exit path shortly and free memory. Any allocation it
3646 		 * makes during the free path will be small and short-lived.
3647 		 */
3648 		if (alloc_flags & ALLOC_OOM)
3649 			min -= min / 2;
3650 		else
3651 			min -= min / 4;
3652 	}
3653 
3654 	/*
3655 	 * Check watermarks for an order-0 allocation request. If these
3656 	 * are not met, then a high-order request also cannot go ahead
3657 	 * even if a suitable page happened to be free.
3658 	 */
3659 	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3660 		return false;
3661 
3662 	/* If this is an order-0 request then the watermark is fine */
3663 	if (!order)
3664 		return true;
3665 
3666 	/* For a high-order request, check at least one suitable page is free */
3667 	for (o = order; o < MAX_ORDER; o++) {
3668 		struct free_area *area = &z->free_area[o];
3669 		int mt;
3670 
3671 		if (!area->nr_free)
3672 			continue;
3673 
3674 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3675 			if (!free_area_empty(area, mt))
3676 				return true;
3677 		}
3678 
3679 #ifdef CONFIG_CMA
3680 		if ((alloc_flags & ALLOC_CMA) &&
3681 		    !free_area_empty(area, MIGRATE_CMA)) {
3682 			return true;
3683 		}
3684 #endif
3685 		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3686 			return true;
3687 	}
3688 	return false;
3689 }
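
/*
 * Illustrative arithmetic for the checks above (made-up numbers): with
 * mark = 1000, ALLOC_HIGH halves min to 500; ALLOC_HARDER then drops it to
 * 375 (ALLOC_OOM would drop it to 250). An order-3 request additionally
 * subtracts (1 << 3) - 1 = 7 unusable pages, plus any highatomic/CMA
 * reserves it may not use, from free_pages before comparing against
 * min + lowmem_reserve.
 */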
3690 
3691 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3692 		      int highest_zoneidx, unsigned int alloc_flags)
3693 {
3694 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3695 					zone_page_state(z, NR_FREE_PAGES));
3696 }
3697 
3698 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3699 				unsigned long mark, int highest_zoneidx,
3700 				unsigned int alloc_flags, gfp_t gfp_mask)
3701 {
3702 	long free_pages;
3703 
3704 	free_pages = zone_page_state(z, NR_FREE_PAGES);
3705 
3706 	/*
3707 	 * Fast check for order-0 only. If this fails then the reserves
3708 	 * need to be calculated.
3709 	 */
3710 	if (!order) {
3711 		long fast_free;
3712 
3713 		fast_free = free_pages;
3714 		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3715 		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3716 			return true;
3717 	}
3718 
3719 	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3720 					free_pages))
3721 		return true;
3722 	/*
3723 	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3724 	 * when checking the min watermark. The min watermark is the
3725 	 * point where boosting is ignored so that kswapd is woken up
3726 	 * when below the low watermark.
3727 	 */
3728 	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3729 		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3730 		mark = z->_watermark[WMARK_MIN];
3731 		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3732 					alloc_flags, free_pages);
3733 	}
3734 
3735 	return false;
3736 }
3737 
3738 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3739 			unsigned long mark, int highest_zoneidx)
3740 {
3741 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
3742 
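	/*
	 * NR_FREE_PAGES is a cheap per-cpu estimate that can drift; fall back
	 * to the exact (and more expensive) snapshot when the estimate is
	 * close enough to the watermark for the drift to matter.
	 */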
3743 	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3744 		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3745 
3746 	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3747 								free_pages);
3748 }
3749 
3750 #ifdef CONFIG_NUMA
3751 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3752 {
3753 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3754 				node_reclaim_distance;
3755 }
3756 #else	/* CONFIG_NUMA */
3757 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3758 {
3759 	return true;
3760 }
3761 #endif	/* CONFIG_NUMA */
3762 
3763 /*
3764  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3765  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3766  * premature use of a lower zone may cause lowmem pressure problems that
3767  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3768  * probably too small. It only makes sense to spread allocations to avoid
3769  * fragmentation between the Normal and DMA32 zones.
3770  */
3771 static inline unsigned int
3772 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3773 {
3774 	unsigned int alloc_flags;
3775 
3776 	/*
3777 	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3778 	 * to save a branch.
3779 	 */
3780 	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3781 
3782 #ifdef CONFIG_ZONE_DMA32
3783 	if (!zone)
3784 		return alloc_flags;
3785 
3786 	if (zone_idx(zone) != ZONE_NORMAL)
3787 		return alloc_flags;
3788 
3789 	/*
3790 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3791 	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3792 	 * on UMA that if Normal is populated then so is DMA32.
3793 	 */
3794 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3795 	if (nr_online_nodes > 1 && !populated_zone(--zone))
3796 		return alloc_flags;
3797 
3798 	alloc_flags |= ALLOC_NOFRAGMENT;
3799 #endif /* CONFIG_ZONE_DMA32 */
3800 	return alloc_flags;
3801 }
3802 
3803 static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
3804 					unsigned int alloc_flags)
3805 {
3806 #ifdef CONFIG_CMA
3807 	unsigned int pflags = current->flags;
3808 
3809 	if (!(pflags & PF_MEMALLOC_NOCMA) &&
3810 			gfp_migratetype(gfp_mask) == get_cma_migratetype())
3811 		alloc_flags |= ALLOC_CMA;
3812 
3813 #endif
3814 	return alloc_flags;
3815 }
3816 
3817 /*
3818  * get_page_from_freelist goes through the zonelist trying to allocate
3819  * a page.
3820  */
3821 static struct page *
3822 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3823 						const struct alloc_context *ac)
3824 {
3825 	struct zoneref *z;
3826 	struct zone *zone;
3827 	struct pglist_data *last_pgdat_dirty_limit = NULL;
3828 	bool no_fallback;
3829 
3830 retry:
3831 	/*
3832 	 * Scan zonelist, looking for a zone with enough free.
3833 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3834 	 */
3835 	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3836 	z = ac->preferred_zoneref;
3837 	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3838 					ac->nodemask) {
3839 		struct page *page;
3840 		unsigned long mark;
3841 
3842 		if (cpusets_enabled() &&
3843 			(alloc_flags & ALLOC_CPUSET) &&
3844 			!__cpuset_zone_allowed(zone, gfp_mask))
3845 				continue;
3846 		/*
3847 		 * When allocating a page cache page for writing, we
3848 		 * want to get it from a node that is within its dirty
3849 		 * limit, such that no single node holds more than its
3850 		 * proportional share of globally allowed dirty pages.
3851 		 * The dirty limits take into account the node's
3852 		 * lowmem reserves and high watermark so that kswapd
3853 		 * should be able to balance it without having to
3854 		 * write pages from its LRU list.
3855 		 *
3856 		 * XXX: For now, allow allocations to potentially
3857 		 * exceed the per-node dirty limit in the slowpath
3858 		 * (spread_dirty_pages unset) before going into reclaim,
3859 		 * which is important when on a NUMA setup the allowed
3860 		 * nodes are together not big enough to reach the
3861 		 * global limit.  The proper fix for these situations
3862 		 * will require awareness of nodes in the
3863 		 * dirty-throttling and the flusher threads.
3864 		 */
3865 		if (ac->spread_dirty_pages) {
3866 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
3867 				continue;
3868 
3869 			if (!node_dirty_ok(zone->zone_pgdat)) {
3870 				last_pgdat_dirty_limit = zone->zone_pgdat;
3871 				continue;
3872 			}
3873 		}
3874 
3875 		if (no_fallback && nr_online_nodes > 1 &&
3876 		    zone != ac->preferred_zoneref->zone) {
3877 			int local_nid;
3878 
3879 			/*
3880 			 * If moving to a remote node, retry but allow
3881 			 * fragmenting fallbacks. Locality is more important
3882 			 * than fragmentation avoidance.
3883 			 */
3884 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3885 			if (zone_to_nid(zone) != local_nid) {
3886 				alloc_flags &= ~ALLOC_NOFRAGMENT;
3887 				goto retry;
3888 			}
3889 		}
3890 
3891 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3892 		if (!zone_watermark_fast(zone, order, mark,
3893 				       ac->highest_zoneidx, alloc_flags,
3894 				       gfp_mask)) {
3895 			int ret;
3896 
3897 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3898 			/*
3899 			 * Watermark failed for this zone, but see if we can
3900 			 * grow this zone if it contains deferred pages.
3901 			 */
3902 			if (static_branch_unlikely(&deferred_pages)) {
3903 				if (_deferred_grow_zone(zone, order))
3904 					goto try_this_zone;
3905 			}
3906 #endif
3907 			/* Checked here to keep the fast path fast */
3908 			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3909 			if (alloc_flags & ALLOC_NO_WATERMARKS)
3910 				goto try_this_zone;
3911 
3912 			if (node_reclaim_mode == 0 ||
3913 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3914 				continue;
3915 
3916 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3917 			switch (ret) {
3918 			case NODE_RECLAIM_NOSCAN:
3919 				/* did not scan */
3920 				continue;
3921 			case NODE_RECLAIM_FULL:
3922 				/* scanned but unreclaimable */
3923 				continue;
3924 			default:
3925 				/* did we reclaim enough */
3926 				if (zone_watermark_ok(zone, order, mark,
3927 					ac->highest_zoneidx, alloc_flags))
3928 					goto try_this_zone;
3929 
3930 				continue;
3931 			}
3932 		}
3933 
3934 try_this_zone:
3935 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3936 				gfp_mask, alloc_flags, ac->migratetype);
3937 		if (page) {
3938 			prep_new_page(page, order, gfp_mask, alloc_flags);
3939 
3940 			/*
3941 			 * If this is a high-order atomic allocation then check
3942 			 * if the pageblock should be reserved for the future
3943 			 */
3944 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
3945 				reserve_highatomic_pageblock(page, zone, order);
3946 
3947 			return page;
3948 		} else {
3949 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3950 			/* Try again if zone has deferred pages */
3951 			if (static_branch_unlikely(&deferred_pages)) {
3952 				if (_deferred_grow_zone(zone, order))
3953 					goto try_this_zone;
3954 			}
3955 #endif
3956 		}
3957 	}
3958 
3959 	/*
3960 	 * It's possible on a UMA machine to get through all zones that are
3961 	 * fragmented. If avoiding fragmentation, reset and try again.
3962 	 */
3963 	if (no_fallback) {
3964 		alloc_flags &= ~ALLOC_NOFRAGMENT;
3965 		goto retry;
3966 	}
3967 
3968 	return NULL;
3969 }
3970 
3971 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3972 {
3973 	unsigned int filter = SHOW_MEM_FILTER_NODES;
3974 
3975 	/*
3976 	 * This documents exceptions given to allocations in certain
3977 	 * contexts that are allowed to allocate outside current's set
3978 	 * of allowed nodes.
3979 	 */
3980 	if (!(gfp_mask & __GFP_NOMEMALLOC))
3981 		if (tsk_is_oom_victim(current) ||
3982 		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
3983 			filter &= ~SHOW_MEM_FILTER_NODES;
3984 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3985 		filter &= ~SHOW_MEM_FILTER_NODES;
3986 
3987 	show_mem(filter, nodemask);
3988 }
3989 
3990 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3991 {
3992 	struct va_format vaf;
3993 	va_list args;
3994 	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3995 
3996 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3997 		return;
3998 
3999 	va_start(args, fmt);
4000 	vaf.fmt = fmt;
4001 	vaf.va = &args;
4002 	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4003 			current->comm, &vaf, gfp_mask, &gfp_mask,
4004 			nodemask_pr_args(nodemask));
4005 	va_end(args);
4006 
4007 	cpuset_print_current_mems_allowed();
4008 	pr_cont("\n");
4009 	dump_stack();
4010 	warn_alloc_show_mem(gfp_mask, nodemask);
4011 }
4012 
4013 static inline struct page *
4014 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4015 			      unsigned int alloc_flags,
4016 			      const struct alloc_context *ac)
4017 {
4018 	struct page *page;
4019 
4020 	page = get_page_from_freelist(gfp_mask, order,
4021 			alloc_flags|ALLOC_CPUSET, ac);
4022 	/*
4023 	 * fallback to ignore cpuset restriction if our nodes
4024 	 * are depleted
4025 	 */
4026 	if (!page)
4027 		page = get_page_from_freelist(gfp_mask, order,
4028 				alloc_flags, ac);
4029 
4030 	return page;
4031 }
4032 
4033 static inline struct page *
4034 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4035 	const struct alloc_context *ac, unsigned long *did_some_progress)
4036 {
4037 	struct oom_control oc = {
4038 		.zonelist = ac->zonelist,
4039 		.nodemask = ac->nodemask,
4040 		.memcg = NULL,
4041 		.gfp_mask = gfp_mask,
4042 		.order = order,
4043 	};
4044 	struct page *page;
4045 
4046 	*did_some_progress = 0;
4047 
4048 	/*
4049 	 * Acquire the oom lock.  If that fails, somebody else is
4050 	 * making progress for us.
4051 	 */
4052 	if (!mutex_trylock(&oom_lock)) {
4053 		*did_some_progress = 1;
4054 		schedule_timeout_uninterruptible(1);
4055 		return NULL;
4056 	}
4057 
4058 	/*
4059 	 * Go through the zonelist yet one more time, keep very high watermark
4060 	 * here, this is only to catch a parallel oom killing, we must fail if
4061 	 * we're still under heavy pressure. But make sure that this reclaim
4062 	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
4063 	 * allocation which will never fail due to oom_lock already held.
4064 	 */
4065 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4066 				      ~__GFP_DIRECT_RECLAIM, order,
4067 				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4068 	if (page)
4069 		goto out;
4070 
4071 	/* Coredumps can quickly deplete all memory reserves */
4072 	if (current->flags & PF_DUMPCORE)
4073 		goto out;
4074 	/* The OOM killer will not help higher order allocs */
4075 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4076 		goto out;
4077 	/*
4078 	 * We have already exhausted all our reclaim opportunities without any
4079 	 * success so it is time to admit defeat. We will skip the OOM killer
4080 	 * because it is very likely that the caller has a more reasonable
4081 	 * fallback than shooting a random task.
4082 	 *
4083 	 * The OOM killer may not free memory on a specific node.
4084 	 */
4085 	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4086 		goto out;
4087 	/* The OOM killer does not needlessly kill tasks for lowmem */
4088 	if (ac->highest_zoneidx < ZONE_NORMAL)
4089 		goto out;
4090 	if (pm_suspended_storage())
4091 		goto out;
4092 	/*
4093 	 * XXX: GFP_NOFS allocations should rather fail than rely on
4094 	 * other requests to make forward progress.
4095 	 * We are in an unfortunate situation where out_of_memory cannot
4096 	 * do much for this context but let's try it to at least get
4097 	 * access to memory reserves if the current task is killed (see
4098 	 * out_of_memory). Once filesystems are ready to handle allocation
4099 	 * failures more gracefully we should just bail out here.
4100 	 */
4101 
4102 	/* Exhausted what can be done so it's blame time */
4103 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4104 		*did_some_progress = 1;
4105 
4106 		/*
4107 		 * Help non-failing allocations by giving them access to memory
4108 		 * reserves
4109 		 */
4110 		if (gfp_mask & __GFP_NOFAIL)
4111 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4112 					ALLOC_NO_WATERMARKS, ac);
4113 	}
4114 out:
4115 	mutex_unlock(&oom_lock);
4116 	return page;
4117 }
4118 
4119 /*
4120  * Maximum number of compaction retries with progress before the OOM
4121  * killer is considered the only way to move forward.
4122  */
4123 #define MAX_COMPACT_RETRIES 16
4124 
4125 #ifdef CONFIG_COMPACTION
4126 /* Try memory compaction for high-order allocations before reclaim */
4127 static struct page *
4128 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4129 		unsigned int alloc_flags, const struct alloc_context *ac,
4130 		enum compact_priority prio, enum compact_result *compact_result)
4131 {
4132 	struct page *page = NULL;
4133 	unsigned long pflags;
4134 	unsigned int noreclaim_flag;
4135 
4136 	if (!order)
4137 		return NULL;
4138 
4139 	psi_memstall_enter(&pflags);
4140 	noreclaim_flag = memalloc_noreclaim_save();
4141 
4142 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4143 								prio, &page);
4144 
4145 	memalloc_noreclaim_restore(noreclaim_flag);
4146 	psi_memstall_leave(&pflags);
4147 
4148 	/*
4149 	 * At least in one zone compaction wasn't deferred or skipped, so let's
4150 	 * count a compaction stall
4151 	 */
4152 	count_vm_event(COMPACTSTALL);
4153 
4154 	/* Prep a captured page if available */
4155 	if (page)
4156 		prep_new_page(page, order, gfp_mask, alloc_flags);
4157 
4158 	/* Try get a page from the freelist if available */
4159 	if (!page)
4160 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4161 
4162 	if (page) {
4163 		struct zone *zone = page_zone(page);
4164 
4165 		zone->compact_blockskip_flush = false;
4166 		compaction_defer_reset(zone, order, true);
4167 		count_vm_event(COMPACTSUCCESS);
4168 		return page;
4169 	}
4170 
4171 	/*
4172 	 * It's bad if compaction run occurs and fails. The most likely reason
4173 	 * is that pages exist, but not enough to satisfy watermarks.
4174 	 */
4175 	count_vm_event(COMPACTFAIL);
4176 
4177 	cond_resched();
4178 
4179 	return NULL;
4180 }
4181 
4182 static inline bool
4183 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4184 		     enum compact_result compact_result,
4185 		     enum compact_priority *compact_priority,
4186 		     int *compaction_retries)
4187 {
4188 	int max_retries = MAX_COMPACT_RETRIES;
4189 	int min_priority;
4190 	bool ret = false;
4191 	int retries = *compaction_retries;
4192 	enum compact_priority priority = *compact_priority;
4193 
4194 	if (!order)
4195 		return false;
4196 
4197 	if (compaction_made_progress(compact_result))
4198 		(*compaction_retries)++;
4199 
4200 	/*
4201  * compaction considers all the zones as desperately out of memory
4202 	 * so it doesn't really make much sense to retry except when the
4203 	 * failure could be caused by insufficient priority
4204 	 */
4205 	if (compaction_failed(compact_result))
4206 		goto check_priority;
4207 
4208 	/*
4209 	 * compaction was skipped because there are not enough order-0 pages
4210 	 * to work with, so we retry only if it looks like reclaim can help.
4211 	 */
4212 	if (compaction_needs_reclaim(compact_result)) {
4213 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4214 		goto out;
4215 	}
4216 
4217 	/*
4218 	 * make sure the compaction wasn't deferred or didn't bail out early
4219  * due to lock contention before we declare that we should give up.
4220 	 * But the next retry should use a higher priority if allowed, so
4221 	 * we don't just keep bailing out endlessly.
4222 	 */
4223 	if (compaction_withdrawn(compact_result)) {
4224 		goto check_priority;
4225 	}
4226 
4227 	/*
4228 	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4229 	 * costly ones because they are de facto nofail and invoke OOM
4230 	 * killer to move on while costly can fail and users are ready
4231 	 * to cope with that. 1/4 retries is rather arbitrary but we
4232 	 * would need much more detailed feedback from compaction to
4233 	 * make a better decision.
4234 	 */
4235 	if (order > PAGE_ALLOC_COSTLY_ORDER)
4236 		max_retries /= 4;
4237 	if (*compaction_retries <= max_retries) {
4238 		ret = true;
4239 		goto out;
4240 	}
4241 
4242 	/*
4243 	 * Make sure there are attempts at the highest priority if we exhausted
4244 	 * all retries or failed at the lower priorities.
4245 	 */
4246 check_priority:
4247 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4248 			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4249 
4250 	if (*compact_priority > min_priority) {
4251 		(*compact_priority)--;
4252 		*compaction_retries = 0;
4253 		ret = true;
4254 	}
4255 out:
4256 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4257 	return ret;
4258 }
4259 #else
4260 static inline struct page *
4261 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4262 		unsigned int alloc_flags, const struct alloc_context *ac,
4263 		enum compact_priority prio, enum compact_result *compact_result)
4264 {
4265 	*compact_result = COMPACT_SKIPPED;
4266 	return NULL;
4267 }
4268 
4269 static inline bool
4270 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4271 		     enum compact_result compact_result,
4272 		     enum compact_priority *compact_priority,
4273 		     int *compaction_retries)
4274 {
4275 	struct zone *zone;
4276 	struct zoneref *z;
4277 
4278 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4279 		return false;
4280 
4281 	/*
4282 	 * There are setups with compaction disabled which would prefer to loop
4283 	 * inside the allocator rather than hit the oom killer prematurely.
4284 	 * Let's give them a good hope and keep retrying while the order-0
4285 	 * watermarks are OK.
4286 	 */
4287 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4288 				ac->highest_zoneidx, ac->nodemask) {
4289 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4290 					ac->highest_zoneidx, alloc_flags))
4291 			return true;
4292 	}
4293 	return false;
4294 }
4295 #endif /* CONFIG_COMPACTION */
4296 
4297 #ifdef CONFIG_LOCKDEP
4298 static struct lockdep_map __fs_reclaim_map =
4299 	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4300 
4301 static bool __need_fs_reclaim(gfp_t gfp_mask)
4302 {
4303 	gfp_mask = current_gfp_context(gfp_mask);
4304 
4305 	/* no reclaim without waiting on it */
4306 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4307 		return false;
4308 
4309 	/* this guy won't enter reclaim */
4310 	if (current->flags & PF_MEMALLOC)
4311 		return false;
4312 
4313 	/* We're only interested __GFP_FS allocations for now */
4314 	if (!(gfp_mask & __GFP_FS))
4315 		return false;
4316 
4317 	if (gfp_mask & __GFP_NOLOCKDEP)
4318 		return false;
4319 
4320 	return true;
4321 }
4322 
4323 void __fs_reclaim_acquire(void)
4324 {
4325 	lock_map_acquire(&__fs_reclaim_map);
4326 }
4327 
4328 void __fs_reclaim_release(void)
4329 {
4330 	lock_map_release(&__fs_reclaim_map);
4331 }
4332 
4333 void fs_reclaim_acquire(gfp_t gfp_mask)
4334 {
4335 	if (__need_fs_reclaim(gfp_mask))
4336 		__fs_reclaim_acquire();
4337 }
4338 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4339 
4340 void fs_reclaim_release(gfp_t gfp_mask)
4341 {
4342 	if (__need_fs_reclaim(gfp_mask))
4343 		__fs_reclaim_release();
4344 }
4345 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4346 #endif
4347 
4348 /* Perform direct synchronous page reclaim */
4349 static unsigned long
4350 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4351 					const struct alloc_context *ac)
4352 {
4353 	unsigned int noreclaim_flag;
4354 	unsigned long pflags, progress;
4355 
4356 	cond_resched();
4357 
4358 	/* We now go into synchronous reclaim */
4359 	cpuset_memory_pressure_bump();
4360 	psi_memstall_enter(&pflags);
4361 	fs_reclaim_acquire(gfp_mask);
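	/*
	 * memalloc_noreclaim_save() sets PF_MEMALLOC, which both prevents
	 * recursive direct reclaim and lets the reclaim work itself dip into
	 * the memory reserves.
	 */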
4362 	noreclaim_flag = memalloc_noreclaim_save();
4363 
4364 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4365 								ac->nodemask);
4366 
4367 	memalloc_noreclaim_restore(noreclaim_flag);
4368 	fs_reclaim_release(gfp_mask);
4369 	psi_memstall_leave(&pflags);
4370 
4371 	cond_resched();
4372 
4373 	return progress;
4374 }
4375 
4376 /* The really slow allocator path where we enter direct reclaim */
4377 static inline struct page *
4378 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4379 		unsigned int alloc_flags, const struct alloc_context *ac,
4380 		unsigned long *did_some_progress)
4381 {
4382 	struct page *page = NULL;
4383 	bool drained = false;
4384 
4385 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4386 	if (unlikely(!(*did_some_progress)))
4387 		return NULL;
4388 
4389 retry:
4390 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4391 
4392 	/*
4393 	 * If an allocation failed after direct reclaim, it could be because
4394 	 * pages are pinned on the per-cpu lists or in high alloc reserves.
4395 	 * Shrink them and try again
4396 	 */
4397 	if (!page && !drained) {
4398 		unreserve_highatomic_pageblock(ac, false);
4399 		drain_all_pages(NULL);
4400 		drained = true;
4401 		goto retry;
4402 	}
4403 
4404 	return page;
4405 }
4406 
4407 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4408 			     const struct alloc_context *ac)
4409 {
4410 	struct zoneref *z;
4411 	struct zone *zone;
4412 	pg_data_t *last_pgdat = NULL;
4413 	enum zone_type highest_zoneidx = ac->highest_zoneidx;
4414 
4415 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4416 					ac->nodemask) {
4417 		if (last_pgdat != zone->zone_pgdat)
4418 			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4419 		last_pgdat = zone->zone_pgdat;
4420 	}
4421 }
4422 
4423 static inline unsigned int
4424 gfp_to_alloc_flags(gfp_t gfp_mask)
4425 {
4426 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4427 
4428 	/*
4429 	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4430 	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4431 	 * to save two branches.
4432 	 */
4433 	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4434 	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4435 
4436 	/*
4437 	 * The caller may dip into page reserves a bit more if the caller
4438 	 * cannot run direct reclaim, or if the caller has realtime scheduling
4439 	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
4440 	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4441 	 */
4442 	alloc_flags |= (__force int)
4443 		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4444 
4445 	if (gfp_mask & __GFP_ATOMIC) {
4446 		/*
4447 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4448 		 * if it can't schedule.
4449 		 */
4450 		if (!(gfp_mask & __GFP_NOMEMALLOC))
4451 			alloc_flags |= ALLOC_HARDER;
4452 		/*
4453 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4454 		 * comment for __cpuset_node_allowed().
4455 		 */
4456 		alloc_flags &= ~ALLOC_CPUSET;
4457 	} else if (unlikely(rt_task(current)) && !in_interrupt())
4458 		alloc_flags |= ALLOC_HARDER;
4459 
4460 	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
4461 
4462 	return alloc_flags;
4463 }
4464 
4465 static bool oom_reserves_allowed(struct task_struct *tsk)
4466 {
4467 	if (!tsk_is_oom_victim(tsk))
4468 		return false;
4469 
4470 	/*
4471 	 * !MMU doesn't have oom reaper so give access to memory reserves
4472 	 * only to the thread with TIF_MEMDIE set
4473 	 */
4474 	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4475 		return false;
4476 
4477 	return true;
4478 }
4479 
4480 /*
4481  * Distinguish requests which really need access to full memory
4482  * reserves from oom victims which can live with a portion of it
4483  */
4484 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4485 {
4486 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4487 		return 0;
4488 	if (gfp_mask & __GFP_MEMALLOC)
4489 		return ALLOC_NO_WATERMARKS;
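	/*
	 * Softirq processing that interrupts a PF_MEMALLOC task keeps
	 * unrestricted access to the reserves.
	 */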
4490 	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4491 		return ALLOC_NO_WATERMARKS;
4492 	if (!in_interrupt()) {
4493 		if (current->flags & PF_MEMALLOC)
4494 			return ALLOC_NO_WATERMARKS;
4495 		else if (oom_reserves_allowed(current))
4496 			return ALLOC_OOM;
4497 	}
4498 
4499 	return 0;
4500 }
4501 
4502 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4503 {
4504 	return !!__gfp_pfmemalloc_flags(gfp_mask);
4505 }
4506 
4507 /*
4508  * Checks whether it makes sense to retry the reclaim to make a forward progress
4509  * for the given allocation request.
4510  *
4511  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4512  * without success, or when we couldn't even meet the watermark if we
4513  * reclaimed all remaining pages on the LRU lists.
4514  *
4515  * Returns true if a retry is viable or false to enter the oom path.
4516  */
4517 static inline bool
4518 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4519 		     struct alloc_context *ac, int alloc_flags,
4520 		     bool did_some_progress, int *no_progress_loops)
4521 {
4522 	struct zone *zone;
4523 	struct zoneref *z;
4524 	bool ret = false;
4525 
4526 	/*
4527 	 * Costly allocations might have made progress but this doesn't mean
4528 	 * their order will become available due to high fragmentation so
4529 	 * always increment the no progress counter for them
4530 	 */
4531 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4532 		*no_progress_loops = 0;
4533 	else
4534 		(*no_progress_loops)++;
4535 
4536 	/*
4537 	 * Make sure we converge to OOM if we cannot make any progress
4538 	 * several times in a row.
4539 	 */
4540 	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4541 		/* Before OOM, exhaust highatomic_reserve */
4542 		return unreserve_highatomic_pageblock(ac, true);
4543 	}
4544 
4545 	/*
4546 	 * Keep reclaiming pages while there is a chance this will lead
4547 	 * somewhere.  If none of the target zones can satisfy our allocation
4548 	 * request even if all reclaimable pages are considered then we are
4549 	 * screwed and have to go OOM.
4550 	 */
4551 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4552 				ac->highest_zoneidx, ac->nodemask) {
4553 		unsigned long available;
4554 		unsigned long reclaimable;
4555 		unsigned long min_wmark = min_wmark_pages(zone);
4556 		bool wmark;
4557 
4558 		available = reclaimable = zone_reclaimable_pages(zone);
4559 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4560 
4561 		/*
4562 		 * Would the allocation succeed if we reclaimed all
4563 		 * reclaimable pages?
4564 		 */
4565 		wmark = __zone_watermark_ok(zone, order, min_wmark,
4566 				ac->highest_zoneidx, alloc_flags, available);
4567 		trace_reclaim_retry_zone(z, order, reclaimable,
4568 				available, min_wmark, *no_progress_loops, wmark);
4569 		if (wmark) {
4570 			/*
4571 			 * If we didn't make any progress and have a lot of
4572 			 * dirty + writeback pages then we should wait for
4573 			 * an IO to complete to slow down the reclaim and
4574 			 * prevent premature OOM
4575 			 */
4576 			if (!did_some_progress) {
4577 				unsigned long write_pending;
4578 
4579 				write_pending = zone_page_state_snapshot(zone,
4580 							NR_ZONE_WRITE_PENDING);
4581 
4582 				if (2 * write_pending > reclaimable) {
4583 					congestion_wait(BLK_RW_ASYNC, HZ/10);
4584 					return true;
4585 				}
4586 			}
4587 
4588 			ret = true;
4589 			goto out;
4590 		}
4591 	}
4592 
4593 out:
4594 	/*
4595 	 * Memory allocation/reclaim might be called from a WQ context and the
4596 	 * current implementation of the WQ concurrency control doesn't
4597 	 * recognize that a particular WQ is congested if the worker thread is
4598 	 * looping without ever sleeping. Therefore we have to do a short sleep
4599 	 * here rather than calling cond_resched().
4600 	 */
4601 	if (current->flags & PF_WQ_WORKER)
4602 		schedule_timeout_uninterruptible(1);
4603 	else
4604 		cond_resched();
4605 	return ret;
4606 }
4607 
4608 static inline bool
4609 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4610 {
4611 	/*
4612 	 * It's possible that cpuset's mems_allowed and the nodemask from
4613 	 * mempolicy don't intersect. This should be normally dealt with by
4614 	 * policy_nodemask(), but it's possible to race with cpuset update in
4615 	 * such a way the check therein was true, and then it became false
4616 	 * before we got our cpuset_mems_cookie here.
4617 	 * This assumes that for all allocations, ac->nodemask can come only
4618 	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4619 	 * when it does not intersect with the cpuset restrictions) or the
4620 	 * caller can deal with a violated nodemask.
4621 	 */
4622 	if (cpusets_enabled() && ac->nodemask &&
4623 			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4624 		ac->nodemask = NULL;
4625 		return true;
4626 	}
4627 
4628 	/*
4629 	 * When updating a task's mems_allowed or mempolicy nodemask, it is
4630 	 * possible to race with parallel threads in such a way that our
4631 	 * allocation can fail while the mask is being updated. If we are about
4632 	 * to fail, check if the cpuset changed during allocation and if so,
4633 	 * retry.
4634 	 */
4635 	if (read_mems_allowed_retry(cpuset_mems_cookie))
4636 		return true;
4637 
4638 	return false;
4639 }
4640 
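/*
 * Slow path overview: wake kswapd, retry the freelists with adjusted
 * alloc_flags, try async compaction for costly orders, then loop over
 * direct reclaim and direct compaction with retry heuristics, and only
 * fall back to the OOM killer once no further progress can be made.
 */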
4641 static inline struct page *
4642 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4643 						struct alloc_context *ac)
4644 {
4645 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4646 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4647 	struct page *page = NULL;
4648 	unsigned int alloc_flags;
4649 	unsigned long did_some_progress;
4650 	enum compact_priority compact_priority;
4651 	enum compact_result compact_result;
4652 	int compaction_retries;
4653 	int no_progress_loops;
4654 	unsigned int cpuset_mems_cookie;
4655 	int reserve_flags;
4656 
4657 	/*
4658 	 * We also sanity check to catch abuse of atomic reserves being used by
4659 	 * callers that are not in atomic context.
4660 	 */
4661 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4662 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4663 		gfp_mask &= ~__GFP_ATOMIC;
4664 
4665 retry_cpuset:
4666 	compaction_retries = 0;
4667 	no_progress_loops = 0;
4668 	compact_priority = DEF_COMPACT_PRIORITY;
4669 	cpuset_mems_cookie = read_mems_allowed_begin();
4670 
4671 	/*
4672 	 * The fast path uses conservative alloc_flags to succeed only until
4673 	 * kswapd needs to be woken up, and to avoid the cost of setting up
4674 	 * alloc_flags precisely. So we do that now.
4675 	 */
4676 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
4677 
4678 	/*
4679 	 * We need to recalculate the starting point for the zonelist iterator
4680 	 * because we might have used different nodemask in the fast path, or
4681 	 * there was a cpuset modification and we are retrying - otherwise we
4682 	 * could end up iterating over non-eligible zones endlessly.
4683 	 */
4684 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4685 					ac->highest_zoneidx, ac->nodemask);
4686 	if (!ac->preferred_zoneref->zone)
4687 		goto nopage;
4688 
4689 	if (alloc_flags & ALLOC_KSWAPD)
4690 		wake_all_kswapds(order, gfp_mask, ac);
4691 
4692 	/*
4693 	 * The adjusted alloc_flags might result in immediate success, so try
4694 	 * that first
4695 	 */
4696 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4697 	if (page)
4698 		goto got_pg;
4699 
4700 	/*
4701 	 * For costly allocations, try direct compaction first, as it's likely
4702 	 * that we have enough base pages and don't need to reclaim. For non-
4703 	 * movable high-order allocations, do that as well, as compaction will
4704 	 * try prevent permanent fragmentation by migrating from blocks of the
4705 	 * same migratetype.
4706 	 * Don't try this for allocations that are allowed to ignore
4707 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4708 	 */
4709 	if (can_direct_reclaim &&
4710 			(costly_order ||
4711 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4712 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
4713 		page = __alloc_pages_direct_compact(gfp_mask, order,
4714 						alloc_flags, ac,
4715 						INIT_COMPACT_PRIORITY,
4716 						&compact_result);
4717 		if (page)
4718 			goto got_pg;
4719 
4720 		/*
4721 		 * Checks for costly allocations with __GFP_NORETRY, which
4722 		 * includes some THP page fault allocations
4723 		 */
4724 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4725 			/*
4726 			 * If allocating entire pageblock(s) and compaction
4727 			 * failed because all zones are below low watermarks
4728 			 * or is prohibited because it recently failed at this
4729 			 * order, fail immediately unless the allocator has
4730 			 * requested compaction and reclaim retry.
4731 			 *
4732 			 * Reclaim is
4733 			 *  - potentially very expensive because zones are far
4734 			 *    below their low watermarks or this is part of very
4735 			 *    bursty high order allocations,
4736 			 *  - not guaranteed to help because isolate_freepages()
4737 			 *    may not iterate over freed pages as part of its
4738 			 *    linear scan, and
4739 			 *  - unlikely to make entire pageblocks free on its
4740 			 *    own.
4741 			 */
4742 			if (compact_result == COMPACT_SKIPPED ||
4743 			    compact_result == COMPACT_DEFERRED)
4744 				goto nopage;
4745 
4746 			/*
4747 			 * Looks like reclaim/compaction is worth trying, but
4748 			 * sync compaction could be very expensive, so keep
4749 			 * using async compaction.
4750 			 */
4751 			compact_priority = INIT_COMPACT_PRIORITY;
4752 		}
4753 	}
4754 
4755 retry:
4756 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4757 	if (alloc_flags & ALLOC_KSWAPD)
4758 		wake_all_kswapds(order, gfp_mask, ac);
4759 
4760 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4761 	if (reserve_flags)
4762 		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
4763 
4764 	/*
4765 	 * Reset the nodemask and zonelist iterators if memory policies can be
4766 	 * ignored. These allocations are high priority and system rather than
4767 	 * user oriented.
4768 	 */
4769 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4770 		ac->nodemask = NULL;
4771 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4772 					ac->highest_zoneidx, ac->nodemask);
4773 	}
4774 
4775 	/* Attempt with potentially adjusted zonelist and alloc_flags */
4776 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4777 	if (page)
4778 		goto got_pg;
4779 
4780 	/* Caller is not willing to reclaim, we can't balance anything */
4781 	if (!can_direct_reclaim)
4782 		goto nopage;
4783 
4784 	/* Avoid recursion of direct reclaim */
4785 	if (current->flags & PF_MEMALLOC)
4786 		goto nopage;
4787 
4788 	/* Try direct reclaim and then allocating */
4789 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4790 							&did_some_progress);
4791 	if (page)
4792 		goto got_pg;
4793 
4794 	/* Try direct compaction and then allocating */
4795 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4796 					compact_priority, &compact_result);
4797 	if (page)
4798 		goto got_pg;
4799 
4800 	/* Do not loop if specifically requested */
4801 	if (gfp_mask & __GFP_NORETRY)
4802 		goto nopage;
4803 
4804 	/*
4805 	 * Do not retry costly high order allocations unless they are
4806 	 * __GFP_RETRY_MAYFAIL
4807 	 */
4808 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4809 		goto nopage;
4810 
4811 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4812 				 did_some_progress > 0, &no_progress_loops))
4813 		goto retry;
4814 
4815 	/*
4816 	 * It doesn't make any sense to retry for the compaction if the order-0
4817 	 * reclaim is not able to make any progress because the current
4818 	 * implementation of the compaction depends on the sufficient amount
4819 	 * of free memory (see __compaction_suitable)
4820 	 */
4821 	if (did_some_progress > 0 &&
4822 			should_compact_retry(ac, order, alloc_flags,
4823 				compact_result, &compact_priority,
4824 				&compaction_retries))
4825 		goto retry;
4826 
4827 
4828 	/* Deal with possible cpuset update races before we start OOM killing */
4829 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4830 		goto retry_cpuset;
4831 
4832 	/* Reclaim has failed us, start killing things */
4833 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4834 	if (page)
4835 		goto got_pg;
4836 
4837 	/* Avoid allocations with no watermarks from looping endlessly */
4838 	if (tsk_is_oom_victim(current) &&
4839 	    (alloc_flags & ALLOC_OOM ||
4840 	     (gfp_mask & __GFP_NOMEMALLOC)))
4841 		goto nopage;
4842 
4843 	/* Retry as long as the OOM killer is making progress */
4844 	if (did_some_progress) {
4845 		no_progress_loops = 0;
4846 		goto retry;
4847 	}
4848 
4849 nopage:
4850 	/* Deal with possible cpuset update races before we fail */
4851 	if (check_retry_cpuset(cpuset_mems_cookie, ac))
4852 		goto retry_cpuset;
4853 
4854 	/*
4855 	 * Make sure that a __GFP_NOFAIL request doesn't leak out and that we
4856 	 * always retry.
4857 	 */
4858 	if (gfp_mask & __GFP_NOFAIL) {
4859 		/*
4860 		 * All existing users of __GFP_NOFAIL are blockable, so warn
4861 		 * about any new users that actually require GFP_NOWAIT
4862 		 */
4863 		if (WARN_ON_ONCE(!can_direct_reclaim))
4864 			goto fail;
4865 
4866 		/*
4867 		 * PF_MEMALLOC request from this context is rather bizarre
4868 		 * because we cannot reclaim anything and can only loop waiting
4869 		 * for somebody else to do the work for us.
4870 		 */
4871 		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4872 
4873 		/*
4874 		 * Non-failing costly orders are a hard requirement which we
4875 		 * are not well prepared for, so let's warn about such users
4876 		 * so that we can identify them and convert them to something
4877 		 * else.
4878 		 */
4879 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4880 
4881 		/*
4882 		 * Help non-failing allocations by giving them access to memory
4883 		 * reserves but do not use ALLOC_NO_WATERMARKS because this
4884 		 * could deplete whole memory reserves which would just make
4885 		 * the situation worse
4886 		 */
4887 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4888 		if (page)
4889 			goto got_pg;
4890 
4891 		cond_resched();
4892 		goto retry;
4893 	}
4894 fail:
4895 	warn_alloc(gfp_mask, ac->nodemask,
4896 			"page allocation failure: order:%u", order);
4897 got_pg:
4898 	return page;
4899 }
4900 
4901 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4902 		int preferred_nid, nodemask_t *nodemask,
4903 		struct alloc_context *ac, gfp_t *alloc_mask,
4904 		unsigned int *alloc_flags)
4905 {
4906 	ac->highest_zoneidx = gfp_zone(gfp_mask);
4907 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4908 	ac->nodemask = nodemask;
4909 	ac->migratetype = gfp_migratetype(gfp_mask);
4910 
4911 	if (cpusets_enabled()) {
4912 		*alloc_mask |= __GFP_HARDWALL;
4913 		/*
4914 		 * When we are in interrupt context, the cpuset of the current
4915 		 * task is irrelevant, so any node is acceptable.
4916 		 */
4917 		if (!in_interrupt() && !ac->nodemask)
4918 			ac->nodemask = &cpuset_current_mems_allowed;
4919 		else
4920 			*alloc_flags |= ALLOC_CPUSET;
4921 	}
4922 
4923 	fs_reclaim_acquire(gfp_mask);
4924 	fs_reclaim_release(gfp_mask);
4925 
4926 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4927 
4928 #ifdef CONFIG_HYPERHOLD_ZSWAPD
4929 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4930 		wake_all_zswapd();
4931 #endif
4932 
4933 	if (should_fail_alloc_page(gfp_mask, order))
4934 		return false;
4935 
4936 	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
4937 
4938 	/* Dirty zone balancing only done in the fast path */
4939 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4940 
4941 	/*
4942 	 * The preferred zone is used for statistics but crucially it is
4943 	 * also used as the starting point for the zonelist iterator. It
4944 	 * may get reset for allocations that ignore memory policies.
4945 	 */
4946 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4947 					ac->highest_zoneidx, ac->nodemask);
4948 
4949 	return true;
4950 }
4951 
4952 /*
4953  * This is the 'heart' of the zoned buddy allocator.
4954  */
4955 struct page *
4956 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
4957 							nodemask_t *nodemask)
4958 {
4959 	struct page *page;
4960 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4961 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
4962 	struct alloc_context ac = { };
4963 
4964 	/*
4965 	 * There are several places where we assume that the order value is sane
4966 	 * so bail out early if the request is out of bound.
4967 	 */
4968 	if (unlikely(order >= MAX_ORDER)) {
4969 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
4970 		return NULL;
4971 	}
4972 
4973 	gfp_mask &= gfp_allowed_mask;
4974 	alloc_mask = gfp_mask;
4975 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
4976 		return NULL;
4977 
4978 	/*
4979 	 * Forbid the first pass from falling back to types that fragment
4980 	 * memory until all local zones are considered.
4981 	 */
4982 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
4983 
4984 	/* First allocation attempt */
4985 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
4986 	if (likely(page))
4987 		goto out;
4988 
4989 	/*
4990 	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4991 	 * resp. GFP_NOIO which has to be inherited for all allocation requests
4992 	 * from a particular context which has been marked by
4993 	 * memalloc_no{fs,io}_{save,restore}.
4994 	 */
4995 	alloc_mask = current_gfp_context(gfp_mask);
4996 	ac.spread_dirty_pages = false;
4997 
4998 	/*
4999 	 * Restore the original nodemask if it was potentially replaced with
5000 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5001 	 */
5002 	ac.nodemask = nodemask;
5003 
5004 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
5005 
5006 out:
5007 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
5008 	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
5009 		__free_pages(page, order);
5010 		page = NULL;
5011 	}
5012 
5013 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
5014 
5015 	return page;
5016 }
5017 EXPORT_SYMBOL(__alloc_pages_nodemask);
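
/*
 * Editor's illustrative sketch (not part of the allocator): a typical caller
 * obtains pages through alloc_pages(), which funnels into
 * __alloc_pages_nodemask() above, and releases them with __free_pages().
 * The function name below is hypothetical.
 */
static void __maybe_unused example_buddy_alloc(void)
{
	/* Ask for a physically contiguous order-2 block (4 pages). */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return;		/* allocation can fail; callers must check */

	/* page_address() is valid here because GFP_KERNEL excludes highmem. */
	memset(page_address(page), 0, 4 * PAGE_SIZE);

	/* Free with the same order that was allocated. */
	__free_pages(page, 2);
}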
5018 
5019 /*
5020  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5021  * address cannot represent highmem pages. Use alloc_pages and then kmap if
5022  * you need to access high mem.
5023  */
5024 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5025 {
5026 	struct page *page;
5027 
5028 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5029 	if (!page)
5030 		return 0;
5031 	return (unsigned long) page_address(page);
5032 }
5033 EXPORT_SYMBOL(__get_free_pages);
5034 
5035 unsigned long get_zeroed_page(gfp_t gfp_mask)
5036 {
5037 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5038 }
5039 EXPORT_SYMBOL(get_zeroed_page);
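
/*
 * Editor's illustrative sketch (not part of the allocator): __get_free_pages()
 * and get_zeroed_page() return a kernel virtual address instead of a
 * struct page, which is convenient when only directly addressable
 * (non-highmem) memory is needed. The function name below is hypothetical.
 */
static void __maybe_unused example_get_free_pages(void)
{
	/* One zeroed, directly mapped page. */
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return;

	/* ... use the page through (void *)addr ... */

	/* free_pages() takes the address and the original order (0 here). */
	free_pages(addr, 0);
}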
5040 
5041 static inline void free_the_page(struct page *page, unsigned int order)
5042 {
5043 	if (order == 0)		/* Via pcp? */
5044 		free_unref_page(page);
5045 	else
5046 		__free_pages_ok(page, order, FPI_NONE);
5047 }
5048 
5049 void __free_pages(struct page *page, unsigned int order)
5050 {
5051 	if (put_page_testzero(page))
5052 		free_the_page(page, order);
5053 	else if (!PageHead(page))
5054 		while (order-- > 0)
5055 			free_the_page(page + (1 << order), order);
5056 }
5057 EXPORT_SYMBOL(__free_pages);
5058 
5059 void free_pages(unsigned long addr, unsigned int order)
5060 {
5061 	if (addr != 0) {
5062 		VM_BUG_ON(!virt_addr_valid((void *)addr));
5063 		__free_pages(virt_to_page((void *)addr), order);
5064 	}
5065 }
5066 
5067 EXPORT_SYMBOL(free_pages);
5068 
5069 /*
5070  * Page Fragment:
5071  *  An arbitrary-length arbitrary-offset area of memory which resides
5072  *  within a 0 or higher order page.  Multiple fragments within that page
5073  *  are individually refcounted, in the page's reference counter.
5074  *
5075  * The page_frag functions below provide a simple allocation framework for
5076  * page fragments.  This is used by the network stack and network device
5077  * drivers to provide a backing region of memory for use as either an
5078  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5079  */
5080 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5081 					     gfp_t gfp_mask)
5082 {
5083 	struct page *page = NULL;
5084 	gfp_t gfp = gfp_mask;
5085 
5086 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5087 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5088 		    __GFP_NOMEMALLOC;
5089 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5090 				PAGE_FRAG_CACHE_MAX_ORDER);
5091 	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5092 #endif
5093 	if (unlikely(!page))
5094 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5095 
5096 	nc->va = page ? page_address(page) : NULL;
5097 
5098 	return page;
5099 }
5100 
5101 void __page_frag_cache_drain(struct page *page, unsigned int count)
5102 {
5103 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5104 
5105 	if (page_ref_sub_and_test(page, count))
5106 		free_the_page(page, compound_order(page));
5107 }
5108 EXPORT_SYMBOL(__page_frag_cache_drain);
5109 
5110 void *page_frag_alloc(struct page_frag_cache *nc,
5111 		      unsigned int fragsz, gfp_t gfp_mask)
5112 {
5113 	unsigned int size = PAGE_SIZE;
5114 	struct page *page;
5115 	int offset;
5116 
5117 	if (unlikely(!nc->va)) {
5118 refill:
5119 		page = __page_frag_cache_refill(nc, gfp_mask);
5120 		if (!page)
5121 			return NULL;
5122 
5123 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5124 		/* if size can vary use size else just use PAGE_SIZE */
5125 		size = nc->size;
5126 #endif
5127 		/* Even if we own the page, we do not use atomic_set().
5128 		 * This would break get_page_unless_zero() users.
5129 		 */
5130 		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5131 
5132 		/* reset page count bias and offset to start of new frag */
5133 		nc->pfmemalloc = page_is_pfmemalloc(page);
5134 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5135 		nc->offset = size;
5136 	}
5137 
5138 	offset = nc->offset - fragsz;
5139 	if (unlikely(offset < 0)) {
5140 		page = virt_to_page(nc->va);
5141 
5142 		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5143 			goto refill;
5144 
5145 		if (unlikely(nc->pfmemalloc)) {
5146 			free_the_page(page, compound_order(page));
5147 			goto refill;
5148 		}
5149 
5150 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5151 		/* if size can vary use size else just use PAGE_SIZE */
5152 		size = nc->size;
5153 #endif
5154 		/* OK, page count is 0, we can safely set it */
5155 		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5156 
5157 		/* reset page count bias and offset to start of new frag */
5158 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5159 		offset = size - fragsz;
5160 	}
5161 
5162 	nc->pagecnt_bias--;
5163 	nc->offset = offset;
5164 
5165 	return nc->va + offset;
5166 }
5167 EXPORT_SYMBOL(page_frag_alloc);
5168 
5169 /*
5170  * Frees a page fragment allocated out of either a compound or order 0 page.
5171  */
5172 void page_frag_free(void *addr)
5173 {
5174 	struct page *page = virt_to_head_page(addr);
5175 
5176 	if (unlikely(put_page_testzero(page)))
5177 		free_the_page(page, compound_order(page));
5178 }
5179 EXPORT_SYMBOL(page_frag_free);
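
/*
 * Editor's illustrative sketch (not part of the allocator): a page_frag_cache
 * is typically a zero-initialized per-CPU or per-device structure;
 * page_frag_alloc() carves small fragments out of a cached higher-order page
 * and page_frag_free() drops a fragment's reference. The cache and the
 * fragment size below are hypothetical.
 */
static void __maybe_unused example_page_frag(void)
{
	static struct page_frag_cache frag_cache;	/* zeroed => empty cache */
	void *frag;

	/* Carve out a 256-byte fragment; refills the backing page if needed. */
	frag = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
	if (!frag)
		return;

	/* ... fill the fragment, e.g. as packet data ... */

	/* Drop this fragment's reference on the backing page. */
	page_frag_free(frag);
}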
5180 
5181 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5182 		size_t size)
5183 {
5184 	if (addr) {
5185 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
5186 		unsigned long used = addr + PAGE_ALIGN(size);
5187 
5188 		split_page(virt_to_page((void *)addr), order);
5189 		while (used < alloc_end) {
5190 			free_page(used);
5191 			used += PAGE_SIZE;
5192 		}
5193 	}
5194 	return (void *)addr;
5195 }
5196 
5197 /**
5198  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5199  * @size: the number of bytes to allocate
5200  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5201  *
5202  * This function is similar to alloc_pages(), except that it allocates the
5203  * minimum number of pages to satisfy the request.  alloc_pages() can only
5204  * allocate memory in power-of-two pages.
5205  *
5206  * This function is also limited by MAX_ORDER.
5207  *
5208  * Memory allocated by this function must be released by free_pages_exact().
5209  *
5210  * Return: pointer to the allocated area or %NULL in case of error.
5211  */
5212 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5213 {
5214 	unsigned int order = get_order(size);
5215 	unsigned long addr;
5216 
5217 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5218 		gfp_mask &= ~__GFP_COMP;
5219 
5220 	addr = __get_free_pages(gfp_mask, order);
5221 	return make_alloc_exact(addr, order, size);
5222 }
5223 EXPORT_SYMBOL(alloc_pages_exact);
5224 
5225 /**
5226  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5227  *			   pages on a node.
5228  * @nid: the preferred node ID where memory should be allocated
5229  * @size: the number of bytes to allocate
5230  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5231  *
5232  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5233  * back.
5234  *
5235  * Return: pointer to the allocated area or %NULL in case of error.
5236  */
5237 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5238 {
5239 	unsigned int order = get_order(size);
5240 	struct page *p;
5241 
5242 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5243 		gfp_mask &= ~__GFP_COMP;
5244 
5245 	p = alloc_pages_node(nid, gfp_mask, order);
5246 	if (!p)
5247 		return NULL;
5248 	return make_alloc_exact((unsigned long)page_address(p), order, size);
5249 }
5250 
5251 /**
5252  * free_pages_exact - release memory allocated via alloc_pages_exact()
5253  * @virt: the value returned by alloc_pages_exact.
5254  * @size: size of allocation, same value as passed to alloc_pages_exact().
5255  *
5256  * Release the memory allocated by a previous call to alloc_pages_exact.
5257  */
5258 void free_pages_exact(void *virt, size_t size)
5259 {
5260 	unsigned long addr = (unsigned long)virt;
5261 	unsigned long end = addr + PAGE_ALIGN(size);
5262 
5263 	while (addr < end) {
5264 		free_page(addr);
5265 		addr += PAGE_SIZE;
5266 	}
5267 }
5268 EXPORT_SYMBOL(free_pages_exact);
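
/*
 * Editor's illustrative sketch (not part of the allocator): alloc_pages_exact()
 * is useful when the needed size is not a power-of-two number of pages; the
 * helper splits the underlying higher-order allocation and returns the unused
 * tail pages to the buddy allocator. The size below is hypothetical.
 */
static void __maybe_unused example_alloc_exact(void)
{
	/* Five pages of data: alloc_pages() would have to allocate eight. */
	size_t size = 5 * PAGE_SIZE;
	void *buf = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return;

	/* ... use buf ... */

	/* Must be paired with free_pages_exact() using the same size. */
	free_pages_exact(buf, size);
}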
5269 
5270 /**
5271  * nr_free_zone_pages - count number of pages beyond high watermark
5272  * @offset: The zone index of the highest zone
5273  *
5274  * nr_free_zone_pages() counts the number of pages which are beyond the
5275  * high watermark within all zones at or below a given zone index.  For each
5276  * zone, the number of pages is calculated as:
5277  *
5278  *     nr_free_zone_pages = managed_pages - high_pages
5279  *
5280  * Return: number of pages beyond high watermark.
5281  */
5282 static unsigned long nr_free_zone_pages(int offset)
5283 {
5284 	struct zoneref *z;
5285 	struct zone *zone;
5286 
5287 	/* Just pick one node, since fallback list is circular */
5288 	unsigned long sum = 0;
5289 
5290 	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5291 
5292 	for_each_zone_zonelist(zone, z, zonelist, offset) {
5293 		unsigned long size = zone_managed_pages(zone);
5294 		unsigned long high = high_wmark_pages(zone);
5295 		if (size > high)
5296 			sum += size - high;
5297 	}
5298 
5299 	return sum;
5300 }
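
/*
 * Editor's note, worked example with assumed numbers: a zone with
 * managed_pages = 1,000,000 and a high watermark of 20,000 pages contributes
 * 1,000,000 - 20,000 = 980,000 pages to the sum above; a zone at or below its
 * high watermark contributes nothing.
 */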
5301 
5302 /**
5303  * nr_free_buffer_pages - count number of pages beyond high watermark
5304  *
5305  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5306  * watermark within ZONE_DMA and ZONE_NORMAL.
5307  *
5308  * Return: number of pages beyond high watermark within ZONE_DMA and
5309  * ZONE_NORMAL.
5310  */
5311 unsigned long nr_free_buffer_pages(void)
5312 {
5313 	return nr_free_zone_pages(gfp_zone(GFP_USER));
5314 }
5315 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5316 
5317 static inline void show_node(struct zone *zone)
5318 {
5319 	if (IS_ENABLED(CONFIG_NUMA))
5320 		printk("Node %d ", zone_to_nid(zone));
5321 }
5322 
5323 long si_mem_available(void)
5324 {
5325 	long available;
5326 	unsigned long pagecache;
5327 	unsigned long wmark_low = 0;
5328 	unsigned long pages[NR_LRU_LISTS];
5329 	unsigned long reclaimable;
5330 	struct zone *zone;
5331 	int lru;
5332 
5333 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5334 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5335 
5336 	for_each_zone(zone)
5337 		wmark_low += low_wmark_pages(zone);
5338 
5339 	/*
5340 	 * Estimate the amount of memory available for userspace allocations,
5341 	 * without causing swapping.
5342 	 */
5343 	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5344 
5345 	/*
5346 	 * Not all the page cache can be freed, otherwise the system will
5347 	 * start swapping. Assume at least half of the page cache, or the
5348 	 * low watermark worth of cache, needs to stay.
5349 	 */
5350 	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5351 	pagecache -= min(pagecache / 2, wmark_low);
5352 	available += pagecache;
5353 
5354 	/*
5355 	 * Part of the reclaimable slab and other kernel memory consists of
5356 	 * items that are in use, and cannot be freed. Cap this estimate at the
5357 	 * low watermark.
5358 	 */
5359 	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5360 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5361 	available += reclaimable - min(reclaimable / 2, wmark_low);
5362 
5363 	if (available < 0)
5364 		available = 0;
5365 	return available;
5366 }
5367 EXPORT_SYMBOL_GPL(si_mem_available);
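
/*
 * Editor's note: the estimate above can be summarised as
 *
 *   available = free - totalreserve
 *             + (pagecache   - min(pagecache   / 2, wmark_low))
 *             + (reclaimable - min(reclaimable / 2, wmark_low))
 *
 * With assumed numbers free = 100,000, totalreserve = 10,000,
 * pagecache = 50,000, reclaimable = 20,000 and wmark_low = 8,000 this gives
 * 90,000 + 42,000 + 12,000 = 144,000 pages.
 */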
5368 
5369 void si_meminfo(struct sysinfo *val)
5370 {
5371 	val->totalram = totalram_pages();
5372 	val->sharedram = global_node_page_state(NR_SHMEM);
5373 	val->freeram = global_zone_page_state(NR_FREE_PAGES);
5374 	val->bufferram = nr_blockdev_pages();
5375 	val->totalhigh = totalhigh_pages();
5376 	val->freehigh = nr_free_highpages();
5377 	val->mem_unit = PAGE_SIZE;
5378 }
5379 
5380 EXPORT_SYMBOL(si_meminfo);
5381 
5382 #ifdef CONFIG_NUMA
5383 void si_meminfo_node(struct sysinfo *val, int nid)
5384 {
5385 	int zone_type;		/* needs to be signed */
5386 	unsigned long managed_pages = 0;
5387 	unsigned long managed_highpages = 0;
5388 	unsigned long free_highpages = 0;
5389 	pg_data_t *pgdat = NODE_DATA(nid);
5390 
5391 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5392 		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5393 	val->totalram = managed_pages;
5394 	val->sharedram = node_page_state(pgdat, NR_SHMEM);
5395 	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5396 #ifdef CONFIG_HIGHMEM
5397 	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5398 		struct zone *zone = &pgdat->node_zones[zone_type];
5399 
5400 		if (is_highmem(zone)) {
5401 			managed_highpages += zone_managed_pages(zone);
5402 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5403 		}
5404 	}
5405 	val->totalhigh = managed_highpages;
5406 	val->freehigh = free_highpages;
5407 #else
5408 	val->totalhigh = managed_highpages;
5409 	val->freehigh = free_highpages;
5410 #endif
5411 	val->mem_unit = PAGE_SIZE;
5412 }
5413 #endif
5414 
5415 /*
5416  * Determine whether the node should be displayed or not, depending on whether
5417  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5418  */
5419 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5420 {
5421 	if (!(flags & SHOW_MEM_FILTER_NODES))
5422 		return false;
5423 
5424 	/*
5425 	 * no node mask - aka implicit memory numa policy. Do not bother with
5426 	 * the synchronization - read_mems_allowed_begin - because we do not
5427 	 * have to be precise here.
5428 	 */
5429 	if (!nodemask)
5430 		nodemask = &cpuset_current_mems_allowed;
5431 
5432 	return !node_isset(nid, *nodemask);
5433 }
5434 
5435 #define K(x) ((x) << (PAGE_SHIFT-10))
5436 
5437 static void show_migration_types(unsigned char type)
5438 {
5439 	static const char types[MIGRATE_TYPES] = {
5440 		[MIGRATE_UNMOVABLE]	= 'U',
5441 		[MIGRATE_MOVABLE]	= 'M',
5442 		[MIGRATE_RECLAIMABLE]	= 'E',
5443 		[MIGRATE_HIGHATOMIC]	= 'H',
5444 #ifdef CONFIG_CMA
5445 		[MIGRATE_CMA]		= 'C',
5446 #endif
5447 #ifdef CONFIG_MEMORY_ISOLATION
5448 		[MIGRATE_ISOLATE]	= 'I',
5449 #endif
5450 	};
5451 	char tmp[MIGRATE_TYPES + 1];
5452 	char *p = tmp;
5453 	int i;
5454 
5455 	for (i = 0; i < MIGRATE_TYPES; i++) {
5456 		if (type & (1 << i))
5457 			*p++ = types[i];
5458 	}
5459 
5460 	*p = '\0';
5461 	printk(KERN_CONT "(%s) ", tmp);
5462 }
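
/*
 * Editor's note: with the letters above, the per-order free-list dump printed
 * by show_free_areas() might, for example, show "(UM)" after an order whose
 * free blocks are only of the unmovable and movable migrate types (sample
 * notation, not taken from a real log).
 */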
5463 
5464 /*
5465  * Show free area list (used inside shift_scroll-lock stuff)
5466  * We also calculate the percentage fragmentation. We do this by counting the
5467  * memory on each free list with the exception of the first item on the list.
5468  *
5469  * Bits in @filter:
5470  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5471  *   cpuset.
5472  */
5473 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5474 {
5475 	unsigned long free_pcp = 0;
5476 	int cpu;
5477 	struct zone *zone;
5478 	pg_data_t *pgdat;
5479 
5480 	for_each_populated_zone(zone) {
5481 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5482 			continue;
5483 
5484 		for_each_online_cpu(cpu)
5485 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5486 	}
5487 
5488 	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5489 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5490 		" unevictable:%lu dirty:%lu writeback:%lu\n"
5491 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5492 		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5493 		" free:%lu free_pcp:%lu free_cma:%lu\n",
5494 		global_node_page_state(NR_ACTIVE_ANON),
5495 		global_node_page_state(NR_INACTIVE_ANON),
5496 		global_node_page_state(NR_ISOLATED_ANON),
5497 		global_node_page_state(NR_ACTIVE_FILE),
5498 		global_node_page_state(NR_INACTIVE_FILE),
5499 		global_node_page_state(NR_ISOLATED_FILE),
5500 		global_node_page_state(NR_UNEVICTABLE),
5501 		global_node_page_state(NR_FILE_DIRTY),
5502 		global_node_page_state(NR_WRITEBACK),
5503 		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5504 		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5505 		global_node_page_state(NR_FILE_MAPPED),
5506 		global_node_page_state(NR_SHMEM),
5507 		global_zone_page_state(NR_PAGETABLE),
5508 		global_zone_page_state(NR_BOUNCE),
5509 		global_zone_page_state(NR_FREE_PAGES),
5510 		free_pcp,
5511 		global_zone_page_state(NR_FREE_CMA_PAGES));
5512 
5513 	for_each_online_pgdat(pgdat) {
5514 		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5515 			continue;
5516 
5517 		printk("Node %d"
5518 			" active_anon:%lukB"
5519 			" inactive_anon:%lukB"
5520 			" active_file:%lukB"
5521 			" inactive_file:%lukB"
5522 			" unevictable:%lukB"
5523 			" isolated(anon):%lukB"
5524 			" isolated(file):%lukB"
5525 			" mapped:%lukB"
5526 			" dirty:%lukB"
5527 			" writeback:%lukB"
5528 			" shmem:%lukB"
5529 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5530 			" shmem_thp: %lukB"
5531 			" shmem_pmdmapped: %lukB"
5532 			" anon_thp: %lukB"
5533 #endif
5534 			" writeback_tmp:%lukB"
5535 			" kernel_stack:%lukB"
5536 #ifdef CONFIG_SHADOW_CALL_STACK
5537 			" shadow_call_stack:%lukB"
5538 #endif
5539 			" all_unreclaimable? %s"
5540 			"\n",
5541 			pgdat->node_id,
5542 			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5543 			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5544 			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5545 			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5546 			K(node_page_state(pgdat, NR_UNEVICTABLE)),
5547 			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5548 			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5549 			K(node_page_state(pgdat, NR_FILE_MAPPED)),
5550 			K(node_page_state(pgdat, NR_FILE_DIRTY)),
5551 			K(node_page_state(pgdat, NR_WRITEBACK)),
5552 			K(node_page_state(pgdat, NR_SHMEM)),
5553 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5554 			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
5555 			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
5556 					* HPAGE_PMD_NR),
5557 			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
5558 #endif
5559 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5560 			node_page_state(pgdat, NR_KERNEL_STACK_KB),
5561 #ifdef CONFIG_SHADOW_CALL_STACK
5562 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
5563 #endif
5564 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5565 				"yes" : "no");
5566 	}
5567 
5568 	for_each_populated_zone(zone) {
5569 		int i;
5570 
5571 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5572 			continue;
5573 
5574 		free_pcp = 0;
5575 		for_each_online_cpu(cpu)
5576 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5577 
5578 		show_node(zone);
5579 		printk(KERN_CONT
5580 			"%s"
5581 			" free:%lukB"
5582 			" min:%lukB"
5583 			" low:%lukB"
5584 			" high:%lukB"
5585 			" reserved_highatomic:%luKB"
5586 			" active_anon:%lukB"
5587 			" inactive_anon:%lukB"
5588 			" active_file:%lukB"
5589 			" inactive_file:%lukB"
5590 			" unevictable:%lukB"
5591 			" writepending:%lukB"
5592 			" present:%lukB"
5593 			" managed:%lukB"
5594 			" mlocked:%lukB"
5595 			" pagetables:%lukB"
5596 			" bounce:%lukB"
5597 			" free_pcp:%lukB"
5598 			" local_pcp:%ukB"
5599 			" free_cma:%lukB"
5600 			"\n",
5601 			zone->name,
5602 			K(zone_page_state(zone, NR_FREE_PAGES)),
5603 			K(min_wmark_pages(zone)),
5604 			K(low_wmark_pages(zone)),
5605 			K(high_wmark_pages(zone)),
5606 			K(zone->nr_reserved_highatomic),
5607 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5608 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5609 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5610 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5611 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5612 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5613 			K(zone->present_pages),
5614 			K(zone_managed_pages(zone)),
5615 			K(zone_page_state(zone, NR_MLOCK)),
5616 			K(zone_page_state(zone, NR_PAGETABLE)),
5617 			K(zone_page_state(zone, NR_BOUNCE)),
5618 			K(free_pcp),
5619 			K(this_cpu_read(zone->pageset->pcp.count)),
5620 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5621 		printk("lowmem_reserve[]:");
5622 		for (i = 0; i < MAX_NR_ZONES; i++)
5623 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5624 		printk(KERN_CONT "\n");
5625 	}
5626 
5627 	for_each_populated_zone(zone) {
5628 		unsigned int order;
5629 		unsigned long nr[MAX_ORDER], flags, total = 0;
5630 		unsigned char types[MAX_ORDER];
5631 
5632 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5633 			continue;
5634 		show_node(zone);
5635 		printk(KERN_CONT "%s: ", zone->name);
5636 
5637 		spin_lock_irqsave(&zone->lock, flags);
5638 		for (order = 0; order < MAX_ORDER; order++) {
5639 			struct free_area *area = &zone->free_area[order];
5640 			int type;
5641 
5642 			nr[order] = area->nr_free;
5643 			total += nr[order] << order;
5644 
5645 			types[order] = 0;
5646 			for (type = 0; type < MIGRATE_TYPES; type++) {
5647 				if (!free_area_empty(area, type))
5648 					types[order] |= 1 << type;
5649 			}
5650 		}
5651 		spin_unlock_irqrestore(&zone->lock, flags);
5652 		for (order = 0; order < MAX_ORDER; order++) {
5653 			printk(KERN_CONT "%lu*%lukB ",
5654 			       nr[order], K(1UL) << order);
5655 			if (nr[order])
5656 				show_migration_types(types[order]);
5657 		}
5658 		printk(KERN_CONT "= %lukB\n", K(total));
5659 	}
5660 
5661 	hugetlb_show_meminfo();
5662 
5663 	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5664 
5665 	show_swap_cache_info();
5666 }
5667 
5668 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5669 {
5670 	zoneref->zone = zone;
5671 	zoneref->zone_idx = zone_idx(zone);
5672 }
5673 
5674 /*
5675  * Builds allocation fallback zone lists.
5676  *
5677  * Add all populated zones of a node to the zonelist.
5678  */
5679 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5680 {
5681 	struct zone *zone;
5682 	enum zone_type zone_type = MAX_NR_ZONES;
5683 	int nr_zones = 0;
5684 
5685 	do {
5686 		zone_type--;
5687 		zone = pgdat->node_zones + zone_type;
5688 		if (managed_zone(zone)) {
5689 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5690 			check_highest_zone(zone_type);
5691 		}
5692 	} while (zone_type);
5693 
5694 	return nr_zones;
5695 }
5696 
5697 #ifdef CONFIG_NUMA
5698 
5699 static int __parse_numa_zonelist_order(char *s)
5700 {
5701 	/*
5702 	 * We used to support different zonelist modes but they turned
5703 	 * out to be just not useful. Let's keep the warning in place
5704 	 * if somebody still uses the cmd line parameter so that we do
5705 	 * not fail it silently.
5706 	 */
5707 	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5708 		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5709 		return -EINVAL;
5710 	}
5711 	return 0;
5712 }
5713 
5714 char numa_zonelist_order[] = "Node";
5715 
5716 /*
5717  * sysctl handler for numa_zonelist_order
5718  */
5719 int numa_zonelist_order_handler(struct ctl_table *table, int write,
5720 		void *buffer, size_t *length, loff_t *ppos)
5721 {
5722 	if (write)
5723 		return __parse_numa_zonelist_order(buffer);
5724 	return proc_dostring(table, write, buffer, length, ppos);
5725 }
5726 
5727 
5728 #define MAX_NODE_LOAD (nr_online_nodes)
5729 static int node_load[MAX_NUMNODES];
5730 
5731 /**
5732  * find_next_best_node - find the next node that should appear in a given node's fallback list
5733  * @node: node whose fallback list we're appending
5734  * @used_node_mask: nodemask_t of already used nodes
5735  *
5736  * We use a number of factors to determine which is the next node that should
5737  * appear on a given node's fallback list.  The node should not have appeared
5738  * already in @node's fallback list, and it should be the next closest node
5739  * according to the distance array (which contains arbitrary distance values
5740  * from each node to each node in the system), and should also prefer nodes
5741  * with no CPUs, since presumably they'll have very little allocation pressure
5742  * on them otherwise.
5743  *
5744  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5745  */
5746 static int find_next_best_node(int node, nodemask_t *used_node_mask)
5747 {
5748 	int n, val;
5749 	int min_val = INT_MAX;
5750 	int best_node = NUMA_NO_NODE;
5751 
5752 	/* Use the local node if we haven't already */
5753 	if (!node_isset(node, *used_node_mask)) {
5754 		node_set(node, *used_node_mask);
5755 		return node;
5756 	}
5757 
5758 	for_each_node_state(n, N_MEMORY) {
5759 
5760 		/* Don't want a node to appear more than once */
5761 		if (node_isset(n, *used_node_mask))
5762 			continue;
5763 
5764 		/* Use the distance array to find the distance */
5765 		val = node_distance(node, n);
5766 
5767 		/* Penalize nodes under us ("prefer the next node") */
5768 		val += (n < node);
5769 
5770 		/* Give preference to headless and unused nodes */
5771 		if (!cpumask_empty(cpumask_of_node(n)))
5772 			val += PENALTY_FOR_NODE_WITH_CPUS;
5773 
5774 		/* Slight preference for less loaded node */
5775 		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
5776 		val += node_load[n];
5777 
5778 		if (val < min_val) {
5779 			min_val = val;
5780 			best_node = n;
5781 		}
5782 	}
5783 
5784 	if (best_node >= 0)
5785 		node_set(best_node, *used_node_mask);
5786 
5787 	return best_node;
5788 }
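
/*
 * Editor's note, worked example with assumed numbers: when building node 2's
 * fallback list, candidate node 0 (distance 20, has CPUs) scores
 * 20 + 1 + PENALTY_FOR_NODE_WITH_CPUS while candidate node 1 (distance 20,
 * no CPUs) scores 20 + 1 before the load term, so the CPU-less node 1 is
 * appended first.
 */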
5789 
5790 
5791 /*
5792  * Build zonelists ordered by node and zones within node.
5793  * This results in maximum locality--normal zone overflows into local
5794  * DMA zone, if any--but risks exhausting DMA zone.
5795  */
5796 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5797 		unsigned nr_nodes)
5798 {
5799 	struct zoneref *zonerefs;
5800 	int i;
5801 
5802 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5803 
5804 	for (i = 0; i < nr_nodes; i++) {
5805 		int nr_zones;
5806 
5807 		pg_data_t *node = NODE_DATA(node_order[i]);
5808 
5809 		nr_zones = build_zonerefs_node(node, zonerefs);
5810 		zonerefs += nr_zones;
5811 	}
5812 	zonerefs->zone = NULL;
5813 	zonerefs->zone_idx = 0;
5814 }
5815 
5816 /*
5817  * Build gfp_thisnode zonelists
5818  */
5819 static void build_thisnode_zonelists(pg_data_t *pgdat)
5820 {
5821 	struct zoneref *zonerefs;
5822 	int nr_zones;
5823 
5824 	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5825 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5826 	zonerefs += nr_zones;
5827 	zonerefs->zone = NULL;
5828 	zonerefs->zone_idx = 0;
5829 }
5830 
5831 /*
5832  * Build zonelists ordered by zone and nodes within zones.
5833  * This results in conserving DMA zone[s] until all Normal memory is
5834  * exhausted, but results in overflowing to remote node while memory
5835  * may still exist in local DMA zone.
5836  */
5837 
5838 static void build_zonelists(pg_data_t *pgdat)
5839 {
5840 	static int node_order[MAX_NUMNODES];
5841 	int node, load, nr_nodes = 0;
5842 	nodemask_t used_mask = NODE_MASK_NONE;
5843 	int local_node, prev_node;
5844 
5845 	/* NUMA-aware ordering of nodes */
5846 	local_node = pgdat->node_id;
5847 	load = nr_online_nodes;
5848 	prev_node = local_node;
5849 
5850 	memset(node_order, 0, sizeof(node_order));
5851 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5852 		/*
5853 		 * We don't want to pressure a particular node.
5854 		 * So we add a penalty to the first node in the same
5855 		 * distance group to make the selection round-robin.
5856 		 */
5857 		if (node_distance(local_node, node) !=
5858 		    node_distance(local_node, prev_node))
5859 			node_load[node] = load;
5860 
5861 		node_order[nr_nodes++] = node;
5862 		prev_node = node;
5863 		load--;
5864 	}
5865 
5866 	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5867 	build_thisnode_zonelists(pgdat);
5868 }
5869 
5870 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5871 /*
5872  * Return node id of node used for "local" allocations.
5873  * I.e., first node id of first zone in arg node's generic zonelist.
5874  * Used for initializing percpu 'numa_mem', which is used primarily
5875  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5876  */
5877 int local_memory_node(int node)
5878 {
5879 	struct zoneref *z;
5880 
5881 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5882 				   gfp_zone(GFP_KERNEL),
5883 				   NULL);
5884 	return zone_to_nid(z->zone);
5885 }
5886 #endif
5887 
5888 static void setup_min_unmapped_ratio(void);
5889 static void setup_min_slab_ratio(void);
5890 #else	/* CONFIG_NUMA */
5891 
5892 static void build_zonelists(pg_data_t *pgdat)
5893 {
5894 	int node, local_node;
5895 	struct zoneref *zonerefs;
5896 	int nr_zones;
5897 
5898 	local_node = pgdat->node_id;
5899 
5900 	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5901 	nr_zones = build_zonerefs_node(pgdat, zonerefs);
5902 	zonerefs += nr_zones;
5903 
5904 	/*
5905 	 * Now we build the zonelist so that it contains the zones
5906 	 * of all the other nodes.
5907 	 * We don't want to pressure a particular node, so when
5908 	 * building the zones for node N, we make sure that the
5909 	 * zones coming right after the local ones are those from
5910 	 * node N+1 (modulo N)
5911 	 */
5912 	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
5913 		if (!node_online(node))
5914 			continue;
5915 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5916 		zonerefs += nr_zones;
5917 	}
5918 	for (node = 0; node < local_node; node++) {
5919 		if (!node_online(node))
5920 			continue;
5921 		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
5922 		zonerefs += nr_zones;
5923 	}
5924 
5925 	zonerefs->zone = NULL;
5926 	zonerefs->zone_idx = 0;
5927 }
5928 
5929 #endif	/* CONFIG_NUMA */
5930 
5931 /*
5932  * Boot pageset table. One per cpu which is going to be used for all
5933  * zones and all nodes. The parameters will be set in such a way
5934  * that an item put on a list will immediately be handed over to
5935  * the buddy list. This is safe since pageset manipulation is done
5936  * with interrupts disabled.
5937  *
5938  * The boot_pagesets must be kept even after bootup is complete for
5939  * unused processors and/or zones. They do play a role for bootstrapping
5940  * hotplugged processors.
5941  *
5942  * zoneinfo_show() and maybe other functions do
5943  * not check if the processor is online before following the pageset pointer.
5944  * Other parts of the kernel may not check if the zone is available.
5945  */
5946 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
5947 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
5948 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
5949 
5950 static void __build_all_zonelists(void *data)
5951 {
5952 	int nid;
5953 	int __maybe_unused cpu;
5954 	pg_data_t *self = data;
5955 	static DEFINE_SPINLOCK(lock);
5956 
5957 	spin_lock(&lock);
5958 
5959 #ifdef CONFIG_NUMA
5960 	memset(node_load, 0, sizeof(node_load));
5961 #endif
5962 
5963 	/*
5964 	 * This node is hotadded and no memory is yet present.   So just
5965 	 * building zonelists is fine - no need to touch other nodes.
5966 	 */
5967 	if (self && !node_online(self->node_id)) {
5968 		build_zonelists(self);
5969 	} else {
5970 		for_each_online_node(nid) {
5971 			pg_data_t *pgdat = NODE_DATA(nid);
5972 
5973 			build_zonelists(pgdat);
5974 		}
5975 
5976 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5977 		/*
5978 		 * We now know the "local memory node" for each node--
5979 		 * i.e., the node of the first zone in the generic zonelist.
5980 		 * Set up numa_mem percpu variable for on-line cpus.  During
5981 		 * boot, only the boot cpu should be on-line;  we'll init the
5982 		 * secondary cpus' numa_mem as they come on-line.  During
5983 		 * node/memory hotplug, we'll fixup all on-line cpus.
5984 		 */
5985 		for_each_online_cpu(cpu)
5986 			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5987 #endif
5988 	}
5989 
5990 	spin_unlock(&lock);
5991 }
5992 
5993 static noinline void __init
5994 build_all_zonelists_init(void)
5995 {
5996 	int cpu;
5997 
5998 	__build_all_zonelists(NULL);
5999 
6000 	/*
6001 	 * Initialize the boot_pagesets that are going to be used
6002 	 * for bootstrapping processors. The real pagesets for
6003 	 * each zone will be allocated later when the per cpu
6004 	 * allocator is available.
6005 	 *
6006 	 * boot_pagesets are used also for bootstrapping offline
6007 	 * cpus if the system is already booted because the pagesets
6008 	 * are needed to initialize allocators on a specific cpu too.
6009 	 * E.g. the percpu allocator needs the page allocator, which in
6010 	 * turn needs the percpu allocator in order to allocate its pagesets
6011 	 * (a chicken-and-egg dilemma).
6012 	 */
6013 	for_each_possible_cpu(cpu)
6014 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
6015 
6016 	mminit_verify_zonelist();
6017 	cpuset_init_current_mems_allowed();
6018 }
6019 
6020 /*
6021  * unless system_state == SYSTEM_BOOTING.
6022  *
6023  * __ref due to call of __init annotated helper build_all_zonelists_init
6024  * [protected by SYSTEM_BOOTING].
6025  */
6026 void __ref build_all_zonelists(pg_data_t *pgdat)
6027 {
6028 	unsigned long vm_total_pages;
6029 
6030 	if (system_state == SYSTEM_BOOTING) {
6031 		build_all_zonelists_init();
6032 	} else {
6033 		__build_all_zonelists(pgdat);
6034 		/* cpuset refresh routine should be here */
6035 	}
6036 	/* Get the number of free pages beyond high watermark in all zones. */
6037 	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6038 	/*
6039 	 * Disable grouping by mobility if the number of pages in the
6040 	 * system is too low to allow the mechanism to work. It would be
6041 	 * more accurate, but expensive to check per-zone. This check is
6042 	 * made on memory-hotadd so a system can start with mobility
6043 	 * disabled and enable it later
6044 	 */
6045 	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6046 		page_group_by_mobility_disabled = 1;
6047 	else
6048 		page_group_by_mobility_disabled = 0;
6049 
6050 	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6051 		nr_online_nodes,
6052 		page_group_by_mobility_disabled ? "off" : "on",
6053 		vm_total_pages);
6054 #ifdef CONFIG_NUMA
6055 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6056 #endif
6057 }
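
/*
 * Editor's note, worked example with assumed values: with 4KiB pages,
 * pageblock_nr_pages = 512 and MIGRATE_TYPES = 6, grouping by mobility is
 * disabled when fewer than 512 * 6 = 3072 pages (roughly 12MiB) sit above
 * the high watermarks system-wide.
 */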
6058 
6059 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6060 static bool __meminit
6061 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6062 {
6063 	static struct memblock_region *r;
6064 
6065 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6066 		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6067 			for_each_mem_region(r) {
6068 				if (*pfn < memblock_region_memory_end_pfn(r))
6069 					break;
6070 			}
6071 		}
6072 		if (*pfn >= memblock_region_memory_base_pfn(r) &&
6073 		    memblock_is_mirror(r)) {
6074 			*pfn = memblock_region_memory_end_pfn(r);
6075 			return true;
6076 		}
6077 	}
6078 	return false;
6079 }
6080 
6081 /*
6082  * Initially all pages are reserved - free ones are freed
6083  * up by memblock_free_all() once the early boot process is
6084  * done. Non-atomic initialization, single-pass.
6085  *
6086  * All aligned pageblocks are initialized to the specified migratetype
6087  * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6088  * zone stats (e.g., nr_isolate_pageblock) are touched.
6089  */
6090 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
6091 		unsigned long start_pfn, unsigned long zone_end_pfn,
6092 		enum meminit_context context,
6093 		struct vmem_altmap *altmap, int migratetype)
6094 {
6095 	unsigned long pfn, end_pfn = start_pfn + size;
6096 	struct page *page;
6097 
6098 	if (highest_memmap_pfn < end_pfn - 1)
6099 		highest_memmap_pfn = end_pfn - 1;
6100 
6101 #ifdef CONFIG_ZONE_DEVICE
6102 	/*
6103 	 * Honor reservation requested by the driver for this ZONE_DEVICE
6104 	 * memory. We limit the total number of pages to initialize to just
6105 	 * those that might contain the memory mapping. We will defer the
6106 	 * ZONE_DEVICE page initialization until after we have released
6107 	 * the hotplug lock.
6108 	 */
6109 	if (zone == ZONE_DEVICE) {
6110 		if (!altmap)
6111 			return;
6112 
6113 		if (start_pfn == altmap->base_pfn)
6114 			start_pfn += altmap->reserve;
6115 		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6116 	}
6117 #endif
6118 
6119 	for (pfn = start_pfn; pfn < end_pfn; ) {
6120 		/*
6121 		 * There can be holes in boot-time mem_map[]s handed to this
6122 		 * function.  They do not exist on hotplugged memory.
6123 		 */
6124 		if (context == MEMINIT_EARLY) {
6125 			if (overlap_memmap_init(zone, &pfn))
6126 				continue;
6127 			if (defer_init(nid, pfn, zone_end_pfn))
6128 				break;
6129 		}
6130 
6131 		page = pfn_to_page(pfn);
6132 		__init_single_page(page, pfn, zone, nid);
6133 		if (context == MEMINIT_HOTPLUG)
6134 			__SetPageReserved(page);
6135 
6136 		/*
6137 		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6138 		 * such that unmovable allocations won't be scattered all
6139 		 * over the place during system boot.
6140 		 */
6141 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6142 			set_pageblock_migratetype(page, migratetype);
6143 			cond_resched();
6144 		}
6145 		pfn++;
6146 	}
6147 }
6148 
6149 #ifdef CONFIG_ZONE_DEVICE
6150 void __ref memmap_init_zone_device(struct zone *zone,
6151 				   unsigned long start_pfn,
6152 				   unsigned long nr_pages,
6153 				   struct dev_pagemap *pgmap)
6154 {
6155 	unsigned long pfn, end_pfn = start_pfn + nr_pages;
6156 	struct pglist_data *pgdat = zone->zone_pgdat;
6157 	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6158 	unsigned long zone_idx = zone_idx(zone);
6159 	unsigned long start = jiffies;
6160 	int nid = pgdat->node_id;
6161 
6162 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6163 		return;
6164 
6165 	/*
6166 	 * The call to memmap_init should have already taken care
6167 	 * of the pages reserved for the memmap, so we can just jump to
6168 	 * the end of that region and start processing the device pages.
6169 	 */
6170 	if (altmap) {
6171 		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6172 		nr_pages = end_pfn - start_pfn;
6173 	}
6174 
6175 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6176 		struct page *page = pfn_to_page(pfn);
6177 
6178 		__init_single_page(page, pfn, zone_idx, nid);
6179 
6180 		/*
6181 		 * Mark page reserved as it will need to wait for onlining
6182 		 * phase for it to be fully associated with a zone.
6183 		 *
6184 		 * We can use the non-atomic __set_bit operation for setting
6185 		 * the flag as we are still initializing the pages.
6186 		 */
6187 		__SetPageReserved(page);
6188 
6189 		/*
6190 		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6191 		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6192 		 * ever freed or placed on a driver-private list.
6193 		 */
6194 		page->pgmap = pgmap;
6195 		page->zone_device_data = NULL;
6196 
6197 		/*
6198 		 * Mark the block movable so that blocks are reserved for
6199 		 * movable at startup. This will force kernel allocations
6200 		 * to reserve their blocks rather than leaking throughout
6201 		 * the address space during boot when many long-lived
6202 		 * kernel allocations are made.
6203 		 *
6204 		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6205 		 * because this is done early in section_activate()
6206 		 */
6207 		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6208 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6209 			cond_resched();
6210 		}
6211 	}
6212 
6213 	pr_info("%s initialised %lu pages in %ums\n", __func__,
6214 		nr_pages, jiffies_to_msecs(jiffies - start));
6215 }
6216 
6217 #endif
6218 static void __meminit zone_init_free_lists(struct zone *zone)
6219 {
6220 	unsigned int order, t;
6221 	for_each_migratetype_order(order, t) {
6222 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6223 		zone->free_area[order].nr_free = 0;
6224 	}
6225 }
6226 
6227 #if !defined(CONFIG_FLAT_NODE_MEM_MAP)
6228 /*
6229  * Only struct pages that correspond to ranges defined by memblock.memory
6230  * are zeroed and initialized by going through __init_single_page() during
6231  * memmap_init_zone_range().
6232  *
6233  * But, there could be struct pages that correspond to holes in
6234  * memblock.memory. This can happen because of the following reasons:
6235  * - physical memory bank size is not necessarily the exact multiple of the
6236  *   arbitrary section size
6237  * - early reserved memory may not be listed in memblock.memory
6238  * - memory layouts defined with memmap= kernel parameter may not align
6239  *   nicely with memmap sections
6240  *
6241  * Explicitly initialize those struct pages so that:
6242  * - PG_Reserved is set
6243  * - zone and node links point to zone and node that span the page if the
6244  *   hole is in the middle of a zone
6245  * - zone and node links point to adjacent zone/node if the hole falls on
6246  *   the zone boundary; the pages in such holes will be prepended to the
6247  *   zone/node above the hole except for the trailing pages in the last
6248  *   section that will be appended to the zone/node below.
6249  */
6250 static void __init init_unavailable_range(unsigned long spfn,
6251 					  unsigned long epfn,
6252 					  int zone, int node)
6253 {
6254 	unsigned long pfn;
6255 	u64 pgcnt = 0;
6256 
6257 	for (pfn = spfn; pfn < epfn; pfn++) {
6258 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6259 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6260 				+ pageblock_nr_pages - 1;
6261 			continue;
6262 		}
6263 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
6264 		__SetPageReserved(pfn_to_page(pfn));
6265 		pgcnt++;
6266 	}
6267 
6268 	if (pgcnt)
6269 		pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6270 			node, zone_names[zone], pgcnt);
6271 }
6272 #else
6273 static inline void init_unavailable_range(unsigned long spfn,
6274 					  unsigned long epfn,
6275 					  int zone, int node)
6276 {
6277 }
6278 #endif
6279 
6280 static void __init memmap_init_zone_range(struct zone *zone,
6281 					  unsigned long start_pfn,
6282 					  unsigned long end_pfn,
6283 					  unsigned long *hole_pfn)
6284 {
6285 	unsigned long zone_start_pfn = zone->zone_start_pfn;
6286 	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6287 	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6288 
6289 	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6290 	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6291 
6292 	if (start_pfn >= end_pfn)
6293 		return;
6294 
6295 	memmap_init_zone(end_pfn - start_pfn, nid, zone_id, start_pfn,
6296 			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6297 
6298 	if (*hole_pfn < start_pfn)
6299 		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6300 
6301 	*hole_pfn = end_pfn;
6302 }
6303 
6304 void __init __weak memmap_init(void)
6305 {
6306 	unsigned long start_pfn, end_pfn;
6307 	unsigned long hole_pfn = 0;
6308 	int i, j, zone_id, nid;
6309 
6310 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6311 		struct pglist_data *node = NODE_DATA(nid);
6312 
6313 		for (j = 0; j < MAX_NR_ZONES; j++) {
6314 			struct zone *zone = node->node_zones + j;
6315 
6316 			if (!populated_zone(zone))
6317 				continue;
6318 
6319 			memmap_init_zone_range(zone, start_pfn, end_pfn,
6320 					       &hole_pfn);
6321 			zone_id = j;
6322 		}
6323 	}
6324 
6325 #ifdef CONFIG_SPARSEMEM
6326 	/*
6327 	 * Initialize the memory map for hole in the range [memory_end,
6328 	 * section_end].
6329 	 * Append the pages in this hole to the highest zone in the last
6330 	 * node.
6331 	 * The call to init_unavailable_range() is outside the ifdef to
6332 	 * silence the compiler warning about zone_id set but not used;
6333 	 * for FLATMEM it is a nop anyway
6334 	 */
6335 	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6336 	if (hole_pfn < end_pfn)
6337 #endif
6338 		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
6339 }
6340 
6341 /* A stub for backwards compatibility with custom implementation on IA-64 */
6342 void __meminit __weak arch_memmap_init(unsigned long size, int nid,
6343 				       unsigned long zone,
6344 				       unsigned long range_start_pfn)
6345 {
6346 }
6347 
6348 static int zone_batchsize(struct zone *zone)
6349 {
6350 #ifdef CONFIG_MMU
6351 	int batch;
6352 
6353 	/*
6354 	 * The per-cpu-pages pools are set to around 1/1000th of the
6355 	 * size of the zone.
6356 	 */
6357 	batch = zone_managed_pages(zone) / 1024;
6358 	/* But no more than a meg. */
6359 	if (batch * PAGE_SIZE > 1024 * 1024)
6360 		batch = (1024 * 1024) / PAGE_SIZE;
6361 	batch /= 4;		/* We effectively *= 4 below */
6362 	if (batch < 1)
6363 		batch = 1;
6364 
6365 	/*
6366 	 * Clamp the batch to a 2^n - 1 value. Having a power
6367 	 * of 2 value was found to be more likely to have
6368 	 * suboptimal cache aliasing properties in some cases.
6369 	 *
6370 	 * For example if 2 tasks are alternately allocating
6371 	 * batches of pages, one task can end up with a lot
6372 	 * of pages of one half of the possible page colors
6373 	 * and the other with pages of the other colors.
6374 	 */
6375 	batch = rounddown_pow_of_two(batch + batch/2) - 1;
6376 
6377 	return batch;
6378 
6379 #else
6380 	/* The deferral and batching of frees should be suppressed under NOMMU
6381 	 * conditions.
6382 	 *
6383 	 * The problem is that NOMMU needs to be able to allocate large chunks
6384 	 * of contiguous memory as there's no hardware page translation to
6385 	 * assemble apparent contiguous memory from discontiguous pages.
6386 	 *
6387 	 * Queueing large contiguous runs of pages for batching, however,
6388 	 * causes the pages to actually be freed in smaller chunks.  As there
6389 	 * can be a significant delay between the individual batches being
6390 	 * recycled, this leads to the once large chunks of space being
6391 	 * fragmented and becoming unavailable for high-order allocations.
6392 	 */
6393 	return 0;
6394 #endif
6395 }
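
/*
 * Editor's note, worked example with assumed values: for a zone with
 * 1,048,576 managed 4KiB pages, batch starts at 1024, is capped at
 * 1MiB / 4KiB = 256, divided by 4 down to 64, and finally clamped to
 * rounddown_pow_of_two(64 + 32) - 1 = 63.
 */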
6396 
6397 /*
6398  * pcp->high and pcp->batch values are related and dependent on one another:
6399  * ->batch must never be higher than ->high.
6400  * The following function updates them in a safe manner without read side
6401  * locking.
6402  *
6403  * Any new users of pcp->batch and pcp->high should ensure they can cope with
6404  * those fields changing asynchronously (according to the above rule).
6405  *
6406  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6407  * outside of boot time (or some other assurance that no concurrent updaters
6408  * exist).
6409  */
6410 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6411 		unsigned long batch)
6412 {
6413 	/* start with a fail-safe value for batch */
6414 	pcp->batch = 1;
6415 	smp_wmb();
6416 
6417 	/* Update high, then batch, in order */
6418 	pcp->high = high;
6419 	smp_wmb();
6420 
6421 	pcp->batch = batch;
6422 }
6423 
6424 /* a companion to pageset_set_high() */
6425 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
6426 {
6427 	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
6428 }
6429 
6430 static void pageset_init(struct per_cpu_pageset *p)
6431 {
6432 	struct per_cpu_pages *pcp;
6433 	int migratetype;
6434 
6435 	memset(p, 0, sizeof(*p));
6436 
6437 	pcp = &p->pcp;
6438 	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6439 		INIT_LIST_HEAD(&pcp->lists[migratetype]);
6440 }
6441 
6442 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
6443 {
6444 	pageset_init(p);
6445 	pageset_set_batch(p, batch);
6446 }
6447 
6448 /*
6449  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
6450  * to the value high for the pageset p.
6451  */
6452 static void pageset_set_high(struct per_cpu_pageset *p,
6453 				unsigned long high)
6454 {
6455 	unsigned long batch = max(1UL, high / 4);
6456 	if ((high / 4) > (PAGE_SHIFT * 8))
6457 		batch = PAGE_SHIFT * 8;
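	/* With 4 KiB pages (PAGE_SHIFT == 12) this caps the batch at 96 pages. */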
6458 
6459 	pageset_update(&p->pcp, high, batch);
6460 }
6461 
6462 static void pageset_set_high_and_batch(struct zone *zone,
6463 				       struct per_cpu_pageset *pcp)
6464 {
6465 	if (percpu_pagelist_fraction)
6466 		pageset_set_high(pcp,
6467 			(zone_managed_pages(zone) /
6468 				percpu_pagelist_fraction));
6469 	else
6470 		pageset_set_batch(pcp, zone_batchsize(zone));
6471 }
6472 
6473 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
6474 {
6475 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6476 
6477 	pageset_init(pcp);
6478 	pageset_set_high_and_batch(zone, pcp);
6479 }
6480 
6481 void __meminit setup_zone_pageset(struct zone *zone)
6482 {
6483 	int cpu;
6484 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
6485 	for_each_possible_cpu(cpu)
6486 		zone_pageset_init(zone, cpu);
6487 }
6488 
6489 /*
6490  * Allocate per cpu pagesets and initialize them.
6491  * Before this call only boot pagesets were available.
6492  */
6493 void __init setup_per_cpu_pageset(void)
6494 {
6495 	struct pglist_data *pgdat;
6496 	struct zone *zone;
6497 	int __maybe_unused cpu;
6498 
6499 	for_each_populated_zone(zone)
6500 		setup_zone_pageset(zone);
6501 
6502 #ifdef CONFIG_NUMA
6503 	/*
6504 	 * Unpopulated zones continue using the boot pagesets.
6505 	 * The numa stats for these pagesets need to be reset.
6506 	 * Otherwise, they will end up skewing the stats of
6507 	 * the nodes these zones are associated with.
6508 	 */
6509 	for_each_possible_cpu(cpu) {
6510 		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6511 		memset(pcp->vm_numa_stat_diff, 0,
6512 		       sizeof(pcp->vm_numa_stat_diff));
6513 	}
6514 #endif
6515 
6516 	for_each_online_pgdat(pgdat)
6517 		pgdat->per_cpu_nodestats =
6518 			alloc_percpu(struct per_cpu_nodestat);
6519 }
6520 
6521 static __meminit void zone_pcp_init(struct zone *zone)
6522 {
6523 	/*
6524 	 * per cpu subsystem is not up at this point. The following code
6525 	 * relies on the ability of the linker to provide the
6526 	 * offset of a (static) per cpu variable into the per cpu area.
6527 	 */
6528 	zone->pageset = &boot_pageset;
6529 
6530 	if (populated_zone(zone))
6531 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
6532 			zone->name, zone->present_pages,
6533 					 zone_batchsize(zone));
6534 }
6535 
6536 void __meminit init_currently_empty_zone(struct zone *zone,
6537 					unsigned long zone_start_pfn,
6538 					unsigned long size)
6539 {
6540 	struct pglist_data *pgdat = zone->zone_pgdat;
6541 	int zone_idx = zone_idx(zone) + 1;
6542 
6543 	if (zone_idx > pgdat->nr_zones)
6544 		pgdat->nr_zones = zone_idx;
6545 
6546 	zone->zone_start_pfn = zone_start_pfn;
6547 
6548 	mminit_dprintk(MMINIT_TRACE, "memmap_init",
6549 			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
6550 			pgdat->node_id,
6551 			(unsigned long)zone_idx(zone),
6552 			zone_start_pfn, (zone_start_pfn + size));
6553 
6554 	zone_init_free_lists(zone);
6555 	zone->initialized = 1;
6556 }
6557 
6558 /**
6559  * get_pfn_range_for_nid - Return the start and end page frames for a node
6560  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6561  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6562  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6563  *
6564  * It returns the start and end page frame of a node based on information
6565  * provided by memblock_set_node(). If called for a node
6566  * with no available memory, a warning is printed and the start and end
6567  * PFNs will be 0.
6568  */
6569 void __init get_pfn_range_for_nid(unsigned int nid,
6570 			unsigned long *start_pfn, unsigned long *end_pfn)
6571 {
6572 	unsigned long this_start_pfn, this_end_pfn;
6573 	int i;
6574 
6575 	*start_pfn = -1UL;
6576 	*end_pfn = 0;
6577 
6578 	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6579 		*start_pfn = min(*start_pfn, this_start_pfn);
6580 		*end_pfn = max(*end_pfn, this_end_pfn);
6581 	}
6582 
6583 	if (*start_pfn == -1UL)
6584 		*start_pfn = 0;
6585 }
6586 
6587 /*
6588  * This finds a zone that can be used for ZONE_MOVABLE pages. The
6589  * assumption is made that zones within a node are ordered in monotonic
6590  * increasing memory addresses so that the "highest" populated zone is used
6591  */
6592 static void __init find_usable_zone_for_movable(void)
6593 {
6594 	int zone_index;
6595 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6596 		if (zone_index == ZONE_MOVABLE)
6597 			continue;
6598 
6599 		if (arch_zone_highest_possible_pfn[zone_index] >
6600 				arch_zone_lowest_possible_pfn[zone_index])
6601 			break;
6602 	}
6603 
6604 	VM_BUG_ON(zone_index == -1);
6605 	movable_zone = zone_index;
6606 }
6607 
6608 /*
6609  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6610  * because it is sized independent of architecture. Unlike the other zones,
6611  * the starting point for ZONE_MOVABLE is not fixed. It may be different
6612  * in each node depending on the size of each node and how evenly kernelcore
6613  * is distributed. This helper function adjusts the zone ranges
6614  * provided by the architecture for a given node by using the end of the
6615  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6616  * zones within a node are in order of monotonically increasing memory addresses
6617  */
6618 static void __init adjust_zone_range_for_zone_movable(int nid,
6619 					unsigned long zone_type,
6620 					unsigned long node_start_pfn,
6621 					unsigned long node_end_pfn,
6622 					unsigned long *zone_start_pfn,
6623 					unsigned long *zone_end_pfn)
6624 {
6625 	/* Only adjust if ZONE_MOVABLE is on this node */
6626 	if (zone_movable_pfn[nid]) {
6627 		/* Size ZONE_MOVABLE */
6628 		if (zone_type == ZONE_MOVABLE) {
6629 			*zone_start_pfn = zone_movable_pfn[nid];
6630 			*zone_end_pfn = min(node_end_pfn,
6631 				arch_zone_highest_possible_pfn[movable_zone]);
6632 
6633 		/* Adjust for ZONE_MOVABLE starting within this range */
6634 		} else if (!mirrored_kernelcore &&
6635 			*zone_start_pfn < zone_movable_pfn[nid] &&
6636 			*zone_end_pfn > zone_movable_pfn[nid]) {
6637 			*zone_end_pfn = zone_movable_pfn[nid];
6638 
6639 		/* Check if this whole range is within ZONE_MOVABLE */
6640 		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
6641 			*zone_start_pfn = *zone_end_pfn;
6642 	}
6643 }
6644 
6645 /*
6646  * Return the number of pages a zone spans in a node, including holes
6647  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6648  */
6649 static unsigned long __init zone_spanned_pages_in_node(int nid,
6650 					unsigned long zone_type,
6651 					unsigned long node_start_pfn,
6652 					unsigned long node_end_pfn,
6653 					unsigned long *zone_start_pfn,
6654 					unsigned long *zone_end_pfn)
6655 {
6656 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6657 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6658 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6659 	if (!node_start_pfn && !node_end_pfn)
6660 		return 0;
6661 
6662 	/* Get the start and end of the zone */
6663 	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6664 	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6665 	adjust_zone_range_for_zone_movable(nid, zone_type,
6666 				node_start_pfn, node_end_pfn,
6667 				zone_start_pfn, zone_end_pfn);
6668 
6669 	/* Check that this node has pages within the zone's required range */
6670 	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6671 		return 0;
6672 
6673 	/* Move the zone boundaries inside the node if necessary */
6674 	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6675 	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6676 
6677 	/* Return the spanned pages */
6678 	return *zone_end_pfn - *zone_start_pfn;
6679 }
6680 
6681 /*
6682  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6683  * then all holes in the requested range will be accounted for.
6684  */
6685 unsigned long __init __absent_pages_in_range(int nid,
6686 				unsigned long range_start_pfn,
6687 				unsigned long range_end_pfn)
6688 {
6689 	unsigned long nr_absent = range_end_pfn - range_start_pfn;
6690 	unsigned long start_pfn, end_pfn;
6691 	int i;
6692 
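	/* Assume the whole range is a hole, then subtract every present memory region that intersects it. */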
6693 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6694 		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6695 		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6696 		nr_absent -= end_pfn - start_pfn;
6697 	}
6698 	return nr_absent;
6699 }
6700 
6701 /**
6702  * absent_pages_in_range - Return number of page frames in holes within a range
6703  * @start_pfn: The start PFN to start searching for holes
6704  * @end_pfn: The end PFN to stop searching for holes
6705  *
6706  * Return: the number of page frames in memory holes within a range.
6707  */
6708 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6709 							unsigned long end_pfn)
6710 {
6711 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6712 }
6713 
6714 /* Return the number of page frames in holes in a zone on a node */
6715 static unsigned long __init zone_absent_pages_in_node(int nid,
6716 					unsigned long zone_type,
6717 					unsigned long node_start_pfn,
6718 					unsigned long node_end_pfn)
6719 {
6720 	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6721 	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6722 	unsigned long zone_start_pfn, zone_end_pfn;
6723 	unsigned long nr_absent;
6724 
6725 	/* When hot-adding a new node from cpu_up(), the node should be empty */
6726 	if (!node_start_pfn && !node_end_pfn)
6727 		return 0;
6728 
6729 	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6730 	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6731 
6732 	adjust_zone_range_for_zone_movable(nid, zone_type,
6733 			node_start_pfn, node_end_pfn,
6734 			&zone_start_pfn, &zone_end_pfn);
6735 	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6736 
6737 	/*
6738 	 * ZONE_MOVABLE handling.
6739 	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6740 	 * and vice versa.
6741 	 */
6742 	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6743 		unsigned long start_pfn, end_pfn;
6744 		struct memblock_region *r;
6745 
6746 		for_each_mem_region(r) {
6747 			start_pfn = clamp(memblock_region_memory_base_pfn(r),
6748 					  zone_start_pfn, zone_end_pfn);
6749 			end_pfn = clamp(memblock_region_memory_end_pfn(r),
6750 					zone_start_pfn, zone_end_pfn);
6751 
6752 			if (zone_type == ZONE_MOVABLE &&
6753 			    memblock_is_mirror(r))
6754 				nr_absent += end_pfn - start_pfn;
6755 
6756 			if (zone_type == ZONE_NORMAL &&
6757 			    !memblock_is_mirror(r))
6758 				nr_absent += end_pfn - start_pfn;
6759 		}
6760 	}
6761 
6762 	return nr_absent;
6763 }
6764 
6765 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
6766 						unsigned long node_start_pfn,
6767 						unsigned long node_end_pfn)
6768 {
6769 	unsigned long realtotalpages = 0, totalpages = 0;
6770 	enum zone_type i;
6771 
6772 	for (i = 0; i < MAX_NR_ZONES; i++) {
6773 		struct zone *zone = pgdat->node_zones + i;
6774 		unsigned long zone_start_pfn, zone_end_pfn;
6775 		unsigned long spanned, absent;
6776 		unsigned long size, real_size;
6777 
6778 		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
6779 						     node_start_pfn,
6780 						     node_end_pfn,
6781 						     &zone_start_pfn,
6782 						     &zone_end_pfn);
6783 		absent = zone_absent_pages_in_node(pgdat->node_id, i,
6784 						   node_start_pfn,
6785 						   node_end_pfn);
6786 
6787 		size = spanned;
6788 		real_size = size - absent;
6789 
6790 		if (size)
6791 			zone->zone_start_pfn = zone_start_pfn;
6792 		else
6793 			zone->zone_start_pfn = 0;
6794 		zone->spanned_pages = size;
6795 		zone->present_pages = real_size;
6796 
6797 		totalpages += size;
6798 		realtotalpages += real_size;
6799 	}
6800 
6801 	pgdat->node_spanned_pages = totalpages;
6802 	pgdat->node_present_pages = realtotalpages;
6803 	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6804 							realtotalpages);
6805 }
6806 
6807 #ifndef CONFIG_SPARSEMEM
6808 /*
6809  * Calculate the size of the zone->blockflags rounded to an unsigned long
6810  * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
6811  * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, and finally
6812  * round what is now in bits to nearest long in bits, then return it in
6813  * bytes.
6814  */
6815 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
6816 {
6817 	unsigned long usemapsize;
6818 
6819 	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
6820 	usemapsize = roundup(zonesize, pageblock_nr_pages);
6821 	usemapsize = usemapsize >> pageblock_order;
6822 	usemapsize *= NR_PAGEBLOCK_BITS;
6823 	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
6824 
6825 	return usemapsize / 8;
6826 }
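/*
 * Worked example (assuming 4 KiB pages, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a 1 GiB zone spans 262144 pages, i.e. 512
 * pageblocks, so 2048 bits of pageblock flags are needed, which rounds
 * up to 256 bytes.
 */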
6827 
6828 static void __ref setup_usemap(struct pglist_data *pgdat,
6829 				struct zone *zone,
6830 				unsigned long zone_start_pfn,
6831 				unsigned long zonesize)
6832 {
6833 	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
6834 	zone->pageblock_flags = NULL;
6835 	if (usemapsize) {
6836 		zone->pageblock_flags =
6837 			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
6838 					    pgdat->node_id);
6839 		if (!zone->pageblock_flags)
6840 			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
6841 			      usemapsize, zone->name, pgdat->node_id);
6842 	}
6843 }
6844 #else
6845 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
6846 				unsigned long zone_start_pfn, unsigned long zonesize) {}
6847 #endif /* CONFIG_SPARSEMEM */
6848 
6849 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
6850 
6851 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
6852 void __init set_pageblock_order(void)
6853 {
6854 	unsigned int order;
6855 
6856 	/* Check that pageblock_nr_pages has not already been setup */
6857 	if (pageblock_order)
6858 		return;
6859 
6860 	if (HPAGE_SHIFT > PAGE_SHIFT)
6861 		order = HUGETLB_PAGE_ORDER;
6862 	else
6863 		order = MAX_ORDER - 1;
6864 
6865 	/*
6866 	 * Assume the largest contiguous order of interest is a huge page.
6867 	 * This value may be variable depending on boot parameters on IA64 and
6868 	 * powerpc.
6869 	 */
6870 	pageblock_order = order;
6871 }
6872 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6873 
6874 /*
6875  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
6876  * is unused as pageblock_order is set at compile-time. See
6877  * include/linux/pageblock-flags.h for the values of pageblock_order based on
6878  * the kernel config
6879  */
6880 void __init set_pageblock_order(void)
6881 {
6882 }
6883 
6884 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
6885 
6886 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6887 						unsigned long present_pages)
6888 {
6889 	unsigned long pages = spanned_pages;
6890 
6891 	/*
6892 	 * Provide a more accurate estimation if there are holes within
6893 	 * the zone and SPARSEMEM is in use. If there are holes within the
6894 	 * zone, each populated memory region may cost us one or two extra
6895 	 * memmap pages due to alignment because memmap pages for each
6896  * populated region may not be naturally aligned on a page boundary.
6897 	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
6898 	 */
6899 	if (spanned_pages > present_pages + (present_pages >> 4) &&
6900 	    IS_ENABLED(CONFIG_SPARSEMEM))
6901 		pages = present_pages;
6902 
6903 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
6904 }
6905 
6906 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6907 static void pgdat_init_split_queue(struct pglist_data *pgdat)
6908 {
6909 	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
6910 
6911 	spin_lock_init(&ds_queue->split_queue_lock);
6912 	INIT_LIST_HEAD(&ds_queue->split_queue);
6913 	ds_queue->split_queue_len = 0;
6914 }
6915 #else
6916 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
6917 #endif
6918 
6919 #ifdef CONFIG_COMPACTION
6920 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
6921 {
6922 	init_waitqueue_head(&pgdat->kcompactd_wait);
6923 }
6924 #else
6925 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
6926 #endif
6927 
6928 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
6929 {
6930 	pgdat_resize_init(pgdat);
6931 
6932 	pgdat_init_split_queue(pgdat);
6933 	pgdat_init_kcompactd(pgdat);
6934 
6935 	init_waitqueue_head(&pgdat->kswapd_wait);
6936 	init_waitqueue_head(&pgdat->pfmemalloc_wait);
6937 #ifdef CONFIG_HYPERHOLD_ZSWAPD
6938 	init_waitqueue_head(&pgdat->zswapd_wait);
6939 #endif
6940 
6941 	pgdat_page_ext_init(pgdat);
6942 	spin_lock_init(&pgdat->lru_lock);
6943 	lruvec_init(&pgdat->__lruvec);
6944 #if defined(CONFIG_HYPERHOLD_FILE_LRU) && defined(CONFIG_MEMCG)
6945 	pgdat->__lruvec.pgdat = pgdat;
6946 #endif
6947 }
6948 
6949 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
6950 							unsigned long remaining_pages)
6951 {
6952 	atomic_long_set(&zone->managed_pages, remaining_pages);
6953 	zone_set_nid(zone, nid);
6954 	zone->name = zone_names[idx];
6955 	zone->zone_pgdat = NODE_DATA(nid);
6956 	spin_lock_init(&zone->lock);
6957 	zone_seqlock_init(zone);
6958 	zone_pcp_init(zone);
6959 }
6960 
6961 /*
6962  * Set up the zone data structures
6963  * - init pgdat internals
6964  * - init all zones belonging to this node
6965  *
6966  * NOTE: this function is only called during memory hotplug
6967  */
6968 #ifdef CONFIG_MEMORY_HOTPLUG
6969 void __ref free_area_init_core_hotplug(int nid)
6970 {
6971 	enum zone_type z;
6972 	pg_data_t *pgdat = NODE_DATA(nid);
6973 
6974 	pgdat_init_internals(pgdat);
6975 	for (z = 0; z < MAX_NR_ZONES; z++)
6976 		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
6977 }
6978 #endif
6979 
6980 /*
6981  * Set up the zone data structures:
6982  *   - mark all pages reserved
6983  *   - mark all memory queues empty
6984  *   - clear the memory bitmaps
6985  *
6986  * NOTE: pgdat should get zeroed by caller.
6987  * NOTE: this function is only called during early init.
6988  */
6989 static void __init free_area_init_core(struct pglist_data *pgdat)
6990 {
6991 	enum zone_type j;
6992 	int nid = pgdat->node_id;
6993 
6994 	pgdat_init_internals(pgdat);
6995 	pgdat->per_cpu_nodestats = &boot_nodestats;
6996 
6997 	for (j = 0; j < MAX_NR_ZONES; j++) {
6998 		struct zone *zone = pgdat->node_zones + j;
6999 		unsigned long size, freesize, memmap_pages;
7000 		unsigned long zone_start_pfn = zone->zone_start_pfn;
7001 
7002 		size = zone->spanned_pages;
7003 		freesize = zone->present_pages;
7004 
7005 		/*
7006 		 * Adjust freesize so that it accounts for how much memory
7007 		 * is used by this zone for memmap. This affects the watermark
7008 		 * and per-cpu initialisations
7009 		 */
7010 		memmap_pages = calc_memmap_size(size, freesize);
7011 		if (!is_highmem_idx(j)) {
7012 			if (freesize >= memmap_pages) {
7013 				freesize -= memmap_pages;
7014 				if (memmap_pages)
7015 					printk(KERN_DEBUG
7016 					       "  %s zone: %lu pages used for memmap\n",
7017 					       zone_names[j], memmap_pages);
7018 			} else
7019 				pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
7020 					zone_names[j], memmap_pages, freesize);
7021 		}
7022 
7023 		/* Account for reserved pages */
7024 		if (j == 0 && freesize > dma_reserve) {
7025 			freesize -= dma_reserve;
7026 			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
7027 					zone_names[0], dma_reserve);
7028 		}
7029 
7030 		if (!is_highmem_idx(j))
7031 			nr_kernel_pages += freesize;
7032 		/* Charge for highmem memmap if there are enough kernel pages */
7033 		else if (nr_kernel_pages > memmap_pages * 2)
7034 			nr_kernel_pages -= memmap_pages;
7035 		nr_all_pages += freesize;
7036 
7037 		/*
7038 		 * Set an approximate value for lowmem here; it will be adjusted
7039 		 * when the bootmem allocator frees pages into the buddy system.
7040 		 * And all highmem pages will be managed by the buddy system.
7041 		 */
7042 		zone_init_internals(zone, j, nid, freesize);
7043 
7044 		if (!size)
7045 			continue;
7046 
7047 		set_pageblock_order();
7048 		setup_usemap(pgdat, zone, zone_start_pfn, size);
7049 		init_currently_empty_zone(zone, zone_start_pfn, size);
7050 		arch_memmap_init(size, nid, j, zone_start_pfn);
7051 	}
7052 }
7053 
7054 #ifdef CONFIG_FLAT_NODE_MEM_MAP
7055 static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
7056 {
7057 	unsigned long __maybe_unused start = 0;
7058 	unsigned long __maybe_unused offset = 0;
7059 
7060 	/* Skip empty nodes */
7061 	if (!pgdat->node_spanned_pages)
7062 		return;
7063 
7064 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7065 	offset = pgdat->node_start_pfn - start;
7066 	/* ia64 gets its own node_mem_map, before this, without bootmem */
7067 	if (!pgdat->node_mem_map) {
7068 		unsigned long size, end;
7069 		struct page *map;
7070 
7071 		/*
7072 		 * The zone's endpoints aren't required to be MAX_ORDER
7073 		 * aligned but the node_mem_map endpoints must be in order
7074 		 * for the buddy allocator to function correctly.
7075 		 */
7076 		end = pgdat_end_pfn(pgdat);
7077 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
7078 		size =  (end - start) * sizeof(struct page);
7079 		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
7080 					  pgdat->node_id);
7081 		if (!map)
7082 			panic("Failed to allocate %ld bytes for node %d memory map\n",
7083 			      size, pgdat->node_id);
7084 		pgdat->node_mem_map = map + offset;
7085 	}
7086 	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7087 				__func__, pgdat->node_id, (unsigned long)pgdat,
7088 				(unsigned long)pgdat->node_mem_map);
7089 #ifndef CONFIG_NEED_MULTIPLE_NODES
7090 	/*
7091 	 * With no DISCONTIG, the global mem_map is just set as node 0's
7092 	 */
7093 	if (pgdat == NODE_DATA(0)) {
7094 		mem_map = NODE_DATA(0)->node_mem_map;
7095 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7096 			mem_map -= offset;
7097 	}
7098 #endif
7099 }
7100 #else
7101 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
7102 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
7103 
7104 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7105 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7106 {
7107 	pgdat->first_deferred_pfn = ULONG_MAX;
7108 }
7109 #else
7110 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7111 #endif
7112 
7113 static void __init free_area_init_node(int nid)
7114 {
7115 	pg_data_t *pgdat = NODE_DATA(nid);
7116 	unsigned long start_pfn = 0;
7117 	unsigned long end_pfn = 0;
7118 
7119 	/* pg_data_t should be reset to zero when it's allocated */
7120 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7121 
7122 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7123 
7124 	pgdat->node_id = nid;
7125 	pgdat->node_start_pfn = start_pfn;
7126 	pgdat->per_cpu_nodestats = NULL;
7127 
7128 	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7129 		(u64)start_pfn << PAGE_SHIFT,
7130 		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7131 	calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7132 
7133 	alloc_node_mem_map(pgdat);
7134 	pgdat_set_deferred_range(pgdat);
7135 
7136 	free_area_init_core(pgdat);
7137 }
7138 
7139 void __init free_area_init_memoryless_node(int nid)
7140 {
7141 	free_area_init_node(nid);
7142 }
7143 
7144 #if MAX_NUMNODES > 1
7145 /*
7146  * Figure out the number of possible node ids.
7147  */
7148 void __init setup_nr_node_ids(void)
7149 {
7150 	unsigned int highest;
7151 
7152 	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7153 	nr_node_ids = highest + 1;
7154 }
7155 #endif
7156 
7157 /**
7158  * node_map_pfn_alignment - determine the maximum internode alignment
7159  *
7160  * This function should be called after node map is populated and sorted.
7161  * It calculates the maximum power of two alignment which can distinguish
7162  * all the nodes.
7163  *
7164  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7165  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7166  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7167  * shifted, 1GiB is enough and this function will indicate so.
7168  *
7169  * This is used to test whether pfn -> nid mapping of the chosen memory
7170  * model has fine enough granularity to avoid incorrect mapping for the
7171  * populated node map.
7172  *
7173  * Return: the determined alignment in pfn's.  0 if there is no alignment
7174  * requirement (single node).
7175  */
7176 unsigned long __init node_map_pfn_alignment(void)
7177 {
7178 	unsigned long accl_mask = 0, last_end = 0;
7179 	unsigned long start, end, mask;
7180 	int last_nid = NUMA_NO_NODE;
7181 	int i, nid;
7182 
7183 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7184 		if (!start || last_nid < 0 || last_nid == nid) {
7185 			last_nid = nid;
7186 			last_end = end;
7187 			continue;
7188 		}
7189 
7190 		/*
7191 		 * Start with a mask granular enough to pin-point to the
7192 		 * start pfn and tick off bits one-by-one until it becomes
7193 		 * too coarse to separate the current node from the last.
7194 		 */
7195 		mask = ~((1 << __ffs(start)) - 1);
7196 		while (mask && last_end <= (start & (mask << 1)))
7197 			mask <<= 1;
7198 
7199 		/* accumulate all internode masks */
7200 		accl_mask |= mask;
7201 	}
7202 
7203 	/* convert mask to number of pages */
7204 	return ~accl_mask + 1;
7205 }
7206 
7207 /**
7208  * find_min_pfn_with_active_regions - Find the minimum PFN registered
7209  *
7210  * Return: the minimum PFN based on information provided via
7211  * memblock_set_node().
7212  */
7213 unsigned long __init find_min_pfn_with_active_regions(void)
7214 {
7215 	return PHYS_PFN(memblock_start_of_DRAM());
7216 }
7217 
7218 /*
7219  * early_calculate_totalpages()
7220  * Sum pages in active regions for movable zone.
7221  * Populate N_MEMORY for calculating usable_nodes.
7222  */
7223 static unsigned long __init early_calculate_totalpages(void)
7224 {
7225 	unsigned long totalpages = 0;
7226 	unsigned long start_pfn, end_pfn;
7227 	int i, nid;
7228 
7229 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7230 		unsigned long pages = end_pfn - start_pfn;
7231 
7232 		totalpages += pages;
7233 		if (pages)
7234 			node_set_state(nid, N_MEMORY);
7235 	}
7236 	return totalpages;
7237 }
7238 
7239 /*
7240  * Find the PFN the Movable zone begins in each node. Kernel memory
7241  * is spread evenly between nodes as long as the nodes have enough
7242  * memory. When they don't, some nodes will have more kernelcore than
7243  * others
7244  */
7245 static void __init find_zone_movable_pfns_for_nodes(void)
7246 {
7247 	int i, nid;
7248 	unsigned long usable_startpfn;
7249 	unsigned long kernelcore_node, kernelcore_remaining;
7250 	/* save the state before borrowing the nodemask */
7251 	nodemask_t saved_node_state = node_states[N_MEMORY];
7252 	unsigned long totalpages = early_calculate_totalpages();
7253 	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7254 	struct memblock_region *r;
7255 
7256 	/* Need to find movable_zone earlier when movable_node is specified. */
7257 	find_usable_zone_for_movable();
7258 
7259 	/*
7260 	 * If movable_node is specified, ignore kernelcore and movablecore
7261 	 * options.
7262 	 */
7263 	if (movable_node_is_enabled()) {
7264 		for_each_mem_region(r) {
7265 			if (!memblock_is_hotpluggable(r))
7266 				continue;
7267 
7268 			nid = memblock_get_region_node(r);
7269 
7270 			usable_startpfn = PFN_DOWN(r->base);
7271 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7272 				min(usable_startpfn, zone_movable_pfn[nid]) :
7273 				usable_startpfn;
7274 		}
7275 
7276 		goto out2;
7277 	}
7278 
7279 	/*
7280 	 * If kernelcore=mirror is specified, ignore movablecore option
7281 	 */
7282 	if (mirrored_kernelcore) {
7283 		bool mem_below_4gb_not_mirrored = false;
7284 
7285 		for_each_mem_region(r) {
7286 			if (memblock_is_mirror(r))
7287 				continue;
7288 
7289 			nid = memblock_get_region_node(r);
7290 
7291 			usable_startpfn = memblock_region_memory_base_pfn(r);
7292 
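			/* A PFN of 0x100000 is the 4 GiB boundary with 4 KiB pages. */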
7293 			if (usable_startpfn < 0x100000) {
7294 				mem_below_4gb_not_mirrored = true;
7295 				continue;
7296 			}
7297 
7298 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7299 				min(usable_startpfn, zone_movable_pfn[nid]) :
7300 				usable_startpfn;
7301 		}
7302 
7303 		if (mem_below_4gb_not_mirrored)
7304 			pr_warn("This configuration results in unmirrored kernel memory.\n");
7305 
7306 		goto out2;
7307 	}
7308 
7309 	/*
7310 	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7311 	 * amount of necessary memory.
7312 	 */
7313 	if (required_kernelcore_percent)
7314 		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7315 				       10000UL;
7316 	if (required_movablecore_percent)
7317 		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7318 					10000UL;
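	/* Both expressions reduce to totalpages * percent / 100. */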
7319 
7320 	/*
7321 	 * If movablecore= was specified, calculate what size of
7322 	 * kernelcore that corresponds so that memory usable for
7323 	 * any allocation type is evenly spread. If both kernelcore
7324 	 * and movablecore are specified, then the value of kernelcore
7325 	 * will be used for required_kernelcore if it's greater than
7326 	 * what movablecore would have allowed.
7327 	 */
7328 	if (required_movablecore) {
7329 		unsigned long corepages;
7330 
7331 		/*
7332 		 * Round-up so that ZONE_MOVABLE is at least as large as what
7333 		 * was requested by the user
7334 		 */
7335 		required_movablecore =
7336 			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7337 		required_movablecore = min(totalpages, required_movablecore);
7338 		corepages = totalpages - required_movablecore;
7339 
7340 		required_kernelcore = max(required_kernelcore, corepages);
7341 	}
7342 
7343 	/*
7344 	 * If kernelcore was not specified or kernelcore size is larger
7345 	 * than totalpages, there is no ZONE_MOVABLE.
7346 	 */
7347 	if (!required_kernelcore || required_kernelcore >= totalpages)
7348 		goto out;
7349 
7350 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7351 	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7352 
7353 restart:
7354 	/* Spread kernelcore memory as evenly as possible throughout nodes */
7355 	kernelcore_node = required_kernelcore / usable_nodes;
7356 	for_each_node_state(nid, N_MEMORY) {
7357 		unsigned long start_pfn, end_pfn;
7358 
7359 		/*
7360 		 * Recalculate kernelcore_node if the division per node
7361 		 * now exceeds what is necessary to satisfy the requested
7362 		 * amount of memory for the kernel
7363 		 */
7364 		if (required_kernelcore < kernelcore_node)
7365 			kernelcore_node = required_kernelcore / usable_nodes;
7366 
7367 		/*
7368 		 * As the map is walked, we track how much memory is usable
7369 		 * by the kernel using kernelcore_remaining. When it is
7370 		 * 0, the rest of the node is usable by ZONE_MOVABLE
7371 		 */
7372 		kernelcore_remaining = kernelcore_node;
7373 
7374 		/* Go through each range of PFNs within this node */
7375 		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7376 			unsigned long size_pages;
7377 
7378 			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7379 			if (start_pfn >= end_pfn)
7380 				continue;
7381 
7382 			/* Account for what is only usable for kernelcore */
7383 			if (start_pfn < usable_startpfn) {
7384 				unsigned long kernel_pages;
7385 				kernel_pages = min(end_pfn, usable_startpfn)
7386 								- start_pfn;
7387 
7388 				kernelcore_remaining -= min(kernel_pages,
7389 							kernelcore_remaining);
7390 				required_kernelcore -= min(kernel_pages,
7391 							required_kernelcore);
7392 
7393 				/* Continue if range is now fully accounted */
7394 				if (end_pfn <= usable_startpfn) {
7395 
7396 					/*
7397 					 * Push zone_movable_pfn to the end so
7398 					 * that if we have to rebalance
7399 					 * kernelcore across nodes, we will
7400 					 * not double account here
7401 					 */
7402 					zone_movable_pfn[nid] = end_pfn;
7403 					continue;
7404 				}
7405 				start_pfn = usable_startpfn;
7406 			}
7407 
7408 			/*
7409 			 * The usable PFN range for ZONE_MOVABLE is from
7410 			 * start_pfn->end_pfn. Calculate size_pages as the
7411 			 * number of pages used as kernelcore
7412 			 */
7413 			size_pages = end_pfn - start_pfn;
7414 			if (size_pages > kernelcore_remaining)
7415 				size_pages = kernelcore_remaining;
7416 			zone_movable_pfn[nid] = start_pfn + size_pages;
7417 
7418 			/*
7419 			 * Some kernelcore has been met, update counts and
7420 			 * break if the kernelcore for this node has been
7421 			 * satisfied
7422 			 */
7423 			required_kernelcore -= min(required_kernelcore,
7424 								size_pages);
7425 			kernelcore_remaining -= size_pages;
7426 			if (!kernelcore_remaining)
7427 				break;
7428 		}
7429 	}
7430 
7431 	/*
7432 	 * If there is still required_kernelcore, we do another pass with one
7433 	 * less node in the count. This will push zone_movable_pfn[nid] further
7434 	 * along on the nodes that still have memory until kernelcore is
7435 	 * satisfied
7436 	 */
7437 	usable_nodes--;
7438 	if (usable_nodes && required_kernelcore > usable_nodes)
7439 		goto restart;
7440 
7441 out2:
7442 	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7443 	for (nid = 0; nid < MAX_NUMNODES; nid++)
7444 		zone_movable_pfn[nid] =
7445 			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7446 
7447 out:
7448 	/* restore the node_state */
7449 	node_states[N_MEMORY] = saved_node_state;
7450 }
7451 
7452 /* Any regular or high memory on that node? */
7453 static void check_for_memory(pg_data_t *pgdat, int nid)
7454 {
7455 	enum zone_type zone_type;
7456 
7457 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7458 		struct zone *zone = &pgdat->node_zones[zone_type];
7459 		if (populated_zone(zone)) {
7460 			if (IS_ENABLED(CONFIG_HIGHMEM))
7461 				node_set_state(nid, N_HIGH_MEMORY);
7462 			if (zone_type <= ZONE_NORMAL)
7463 				node_set_state(nid, N_NORMAL_MEMORY);
7464 			break;
7465 		}
7466 	}
7467 }
7468 
7469 /*
7470  * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
7471  * such cases we allow the max_zone_pfn array to be sorted in descending order
7472  */
7473 bool __weak arch_has_descending_max_zone_pfns(void)
7474 {
7475 	return false;
7476 }
7477 
7478 /**
7479  * free_area_init - Initialise all pg_data_t and zone data
7480  * @max_zone_pfn: an array of max PFNs for each zone
7481  *
7482  * This will call free_area_init_node() for each active node in the system.
7483  * Using the page ranges provided by memblock_set_node(), the size of each
7484  * zone in each node and their holes is calculated. If the maximum PFN
7485  * between two adjacent zones match, it is assumed that the zone is empty.
7486  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7487  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7488  * starts where the previous one ended. For example, ZONE_DMA32 starts
7489  * at arch_max_dma_pfn.
7490  */
7491 void __init free_area_init(unsigned long *max_zone_pfn)
7492 {
7493 	unsigned long start_pfn, end_pfn;
7494 	int i, nid, zone;
7495 	bool descending;
7496 
7497 	/* Record where the zone boundaries are */
7498 	memset(arch_zone_lowest_possible_pfn, 0,
7499 				sizeof(arch_zone_lowest_possible_pfn));
7500 	memset(arch_zone_highest_possible_pfn, 0,
7501 				sizeof(arch_zone_highest_possible_pfn));
7502 
7503 	start_pfn = find_min_pfn_with_active_regions();
7504 	descending = arch_has_descending_max_zone_pfns();
7505 
7506 	for (i = 0; i < MAX_NR_ZONES; i++) {
7507 		if (descending)
7508 			zone = MAX_NR_ZONES - i - 1;
7509 		else
7510 			zone = i;
7511 
7512 		if (zone == ZONE_MOVABLE)
7513 			continue;
7514 
7515 		end_pfn = max(max_zone_pfn[zone], start_pfn);
7516 		arch_zone_lowest_possible_pfn[zone] = start_pfn;
7517 		arch_zone_highest_possible_pfn[zone] = end_pfn;
7518 
7519 		start_pfn = end_pfn;
7520 	}
7521 
7522 	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
7523 	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7524 	find_zone_movable_pfns_for_nodes();
7525 
7526 	/* Print out the zone ranges */
7527 	pr_info("Zone ranges:\n");
7528 	for (i = 0; i < MAX_NR_ZONES; i++) {
7529 		if (i == ZONE_MOVABLE)
7530 			continue;
7531 		pr_info("  %-8s ", zone_names[i]);
7532 		if (arch_zone_lowest_possible_pfn[i] ==
7533 				arch_zone_highest_possible_pfn[i])
7534 			pr_cont("empty\n");
7535 		else
7536 			pr_cont("[mem %#018Lx-%#018Lx]\n",
7537 				(u64)arch_zone_lowest_possible_pfn[i]
7538 					<< PAGE_SHIFT,
7539 				((u64)arch_zone_highest_possible_pfn[i]
7540 					<< PAGE_SHIFT) - 1);
7541 	}
7542 
7543 	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
7544 	pr_info("Movable zone start for each node\n");
7545 	for (i = 0; i < MAX_NUMNODES; i++) {
7546 		if (zone_movable_pfn[i])
7547 			pr_info("  Node %d: %#018Lx\n", i,
7548 			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7549 	}
7550 
7551 	/*
7552 	 * Print out the early node map, and initialize the
7553 	 * subsection-map relative to active online memory ranges to
7554 	 * enable future "sub-section" extensions of the memory map.
7555 	 */
7556 	pr_info("Early memory node ranges\n");
7557 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7558 		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7559 			(u64)start_pfn << PAGE_SHIFT,
7560 			((u64)end_pfn << PAGE_SHIFT) - 1);
7561 		subsection_map_init(start_pfn, end_pfn - start_pfn);
7562 	}
7563 
7564 	/* Initialise every node */
7565 	mminit_verify_pageflags_layout();
7566 	setup_nr_node_ids();
7567 	for_each_online_node(nid) {
7568 		pg_data_t *pgdat = NODE_DATA(nid);
7569 		free_area_init_node(nid);
7570 
7571 		/* Any memory on that node */
7572 		if (pgdat->node_present_pages)
7573 			node_set_state(nid, N_MEMORY);
7574 		check_for_memory(pgdat, nid);
7575 	}
7576 
7577 	memmap_init();
7578 }
7579 
7580 static int __init cmdline_parse_core(char *p, unsigned long *core,
7581 				     unsigned long *percent)
7582 {
7583 	unsigned long long coremem;
7584 	char *endptr;
7585 
7586 	if (!p)
7587 		return -EINVAL;
7588 
7589 	/* Value may be a percentage of total memory, otherwise bytes */
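	/*
	 * For example, "kernelcore=50%" takes the '%' branch below, while
	 * "kernelcore=512M" falls through to memparse(), which converts the
	 * size suffix to bytes before the shift down to pages.
	 */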
7590 	coremem = simple_strtoull(p, &endptr, 0);
7591 	if (*endptr == '%') {
7592 		/* Paranoid check for percent values greater than 100 */
7593 		WARN_ON(coremem > 100);
7594 
7595 		*percent = coremem;
7596 	} else {
7597 		coremem = memparse(p, &p);
7598 		/* Paranoid check that UL is enough for the coremem value */
7599 		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7600 
7601 		*core = coremem >> PAGE_SHIFT;
7602 		*percent = 0UL;
7603 	}
7604 	return 0;
7605 }
7606 
7607 /*
7608  * kernelcore=size sets the amount of memory for use for allocations that
7609  * cannot be reclaimed or migrated.
7610  */
7611 static int __init cmdline_parse_kernelcore(char *p)
7612 {
7613 	/* parse kernelcore=mirror */
7614 	if (parse_option_str(p, "mirror")) {
7615 		mirrored_kernelcore = true;
7616 		return 0;
7617 	}
7618 
7619 	return cmdline_parse_core(p, &required_kernelcore,
7620 				  &required_kernelcore_percent);
7621 }
7622 
7623 /*
7624  * movablecore=size sets the amount of memory for use for allocations that
7625  * can be reclaimed or migrated.
7626  */
7627 static int __init cmdline_parse_movablecore(char *p)
7628 {
7629 	return cmdline_parse_core(p, &required_movablecore,
7630 				  &required_movablecore_percent);
7631 }
7632 
7633 early_param("kernelcore", cmdline_parse_kernelcore);
7634 early_param("movablecore", cmdline_parse_movablecore);
7635 
7636 void adjust_managed_page_count(struct page *page, long count)
7637 {
7638 	atomic_long_add(count, &page_zone(page)->managed_pages);
7639 	totalram_pages_add(count);
7640 #ifdef CONFIG_HIGHMEM
7641 	if (PageHighMem(page))
7642 		totalhigh_pages_add(count);
7643 #endif
7644 }
7645 EXPORT_SYMBOL(adjust_managed_page_count);
7646 
7647 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7648 {
7649 	void *pos;
7650 	unsigned long pages = 0;
7651 
7652 	start = (void *)PAGE_ALIGN((unsigned long)start);
7653 	end = (void *)((unsigned long)end & PAGE_MASK);
7654 	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7655 		struct page *page = virt_to_page(pos);
7656 		void *direct_map_addr;
7657 
7658 		/*
7659 		 * 'direct_map_addr' might be different from 'pos'
7660 		 * because some architectures' virt_to_page()
7661 		 * work with aliases.  Getting the direct map
7662 		 * address ensures that we get a _writeable_
7663 		 * alias for the memset().
7664 		 */
7665 		direct_map_addr = page_address(page);
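		/* A negative poison value (e.g. -1) becomes > 0xFF after the unsigned cast, so the memset below is skipped. */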
7666 		if ((unsigned int)poison <= 0xFF)
7667 			memset(direct_map_addr, poison, PAGE_SIZE);
7668 
7669 		free_reserved_page(page);
7670 	}
7671 
7672 	if (pages && s)
7673 		pr_info("Freeing %s memory: %ldK\n",
7674 			s, pages << (PAGE_SHIFT - 10));
7675 
7676 	return pages;
7677 }
7678 
7679 #ifdef	CONFIG_HIGHMEM
7680 void free_highmem_page(struct page *page)
7681 {
7682 	__free_reserved_page(page);
7683 	totalram_pages_inc();
7684 	atomic_long_inc(&page_zone(page)->managed_pages);
7685 	totalhigh_pages_inc();
7686 }
7687 #endif
7688 
7689 
7690 void __init mem_init_print_info(const char *str)
7691 {
7692 	unsigned long physpages, codesize, datasize, rosize, bss_size;
7693 	unsigned long init_code_size, init_data_size;
7694 
7695 	physpages = get_num_physpages();
7696 	codesize = _etext - _stext;
7697 	datasize = _edata - _sdata;
7698 	rosize = __end_rodata - __start_rodata;
7699 	bss_size = __bss_stop - __bss_start;
7700 	init_data_size = __init_end - __init_begin;
7701 	init_code_size = _einittext - _sinittext;
7702 
7703 	/*
7704 	 * Detect special cases and adjust section sizes accordingly:
7705 	 * 1) .init.* may be embedded into .data sections
7706 	 * 2) .init.text.* may be out of [__init_begin, __init_end],
7707 	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
7708 	 * 3) .rodata.* may be embedded into .text or .data sections.
7709 	 */
7710 #define adj_init_size(start, end, size, pos, adj) \
7711 	do { \
7712 		if (start <= pos && pos < end && size > adj) \
7713 			size -= adj; \
7714 	} while (0)
7715 
7716 	adj_init_size(__init_begin, __init_end, init_data_size,
7717 		     _sinittext, init_code_size);
7718 	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7719 	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7720 	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7721 	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7722 
7723 #undef	adj_init_size
7724 
7725 	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7726 #ifdef	CONFIG_HIGHMEM
7727 		", %luK highmem"
7728 #endif
7729 		"%s%s)\n",
7730 		nr_free_pages() << (PAGE_SHIFT - 10),
7731 		physpages << (PAGE_SHIFT - 10),
7732 		codesize >> 10, datasize >> 10, rosize >> 10,
7733 		(init_data_size + init_code_size) >> 10, bss_size >> 10,
7734 		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7735 		totalcma_pages << (PAGE_SHIFT - 10),
7736 #ifdef	CONFIG_HIGHMEM
7737 		totalhigh_pages() << (PAGE_SHIFT - 10),
7738 #endif
7739 		str ? ", " : "", str ? str : "");
7740 }
7741 
7742 /**
7743  * set_dma_reserve - set the specified number of pages reserved in the first zone
7744  * @new_dma_reserve: The number of pages to mark reserved
7745  *
7746  * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7747  * In the DMA zone, a significant percentage may be consumed by kernel image
7748  * and other unfreeable allocations which can skew the watermarks badly. This
7749  * function may optionally be used to account for unfreeable pages in the
7750  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7751  * smaller per-cpu batchsize.
7752  */
7753 void __init set_dma_reserve(unsigned long new_dma_reserve)
7754 {
7755 	dma_reserve = new_dma_reserve;
7756 }
7757 
7758 static int page_alloc_cpu_dead(unsigned int cpu)
7759 {
7760 
7761 	lru_add_drain_cpu(cpu);
7762 	drain_pages(cpu);
7763 
7764 	/*
7765 	 * Spill the event counters of the dead processor
7766 	 * into the current processor's event counters.
7767 	 * This artificially elevates the count of the current
7768 	 * processor.
7769 	 */
7770 	vm_events_fold_cpu(cpu);
7771 
7772 	/*
7773 	 * Zero the differential counters of the dead processor
7774 	 * so that the vm statistics are consistent.
7775 	 *
7776 	 * This is only okay since the processor is dead and cannot
7777 	 * race with what we are doing.
7778 	 */
7779 	cpu_vm_stats_fold(cpu);
7780 	return 0;
7781 }
7782 
7783 #ifdef CONFIG_NUMA
7784 int hashdist = HASHDIST_DEFAULT;
7785 
7786 static int __init set_hashdist(char *str)
7787 {
7788 	if (!str)
7789 		return 0;
7790 	hashdist = simple_strtoul(str, &str, 0);
7791 	return 1;
7792 }
7793 __setup("hashdist=", set_hashdist);
7794 #endif
7795 
7796 void __init page_alloc_init(void)
7797 {
7798 	int ret;
7799 
7800 #ifdef CONFIG_NUMA
7801 	if (num_node_state(N_MEMORY) == 1)
7802 		hashdist = 0;
7803 #endif
7804 
7805 	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
7806 					"mm/page_alloc:dead", NULL,
7807 					page_alloc_cpu_dead);
7808 	WARN_ON(ret < 0);
7809 }
7810 
7811 /*
7812  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
7813  *	or min_free_kbytes changes.
7814  */
7815 static void calculate_totalreserve_pages(void)
7816 {
7817 	struct pglist_data *pgdat;
7818 	unsigned long reserve_pages = 0;
7819 	enum zone_type i, j;
7820 
7821 	for_each_online_pgdat(pgdat) {
7822 
7823 		pgdat->totalreserve_pages = 0;
7824 
7825 		for (i = 0; i < MAX_NR_ZONES; i++) {
7826 			struct zone *zone = pgdat->node_zones + i;
7827 			long max = 0;
7828 			unsigned long managed_pages = zone_managed_pages(zone);
7829 
7830 			/* Find valid and maximum lowmem_reserve in the zone */
7831 			for (j = i; j < MAX_NR_ZONES; j++) {
7832 				if (zone->lowmem_reserve[j] > max)
7833 					max = zone->lowmem_reserve[j];
7834 			}
7835 
7836 			/* we treat the high watermark as reserved pages. */
7837 			max += high_wmark_pages(zone);
7838 
7839 			if (max > managed_pages)
7840 				max = managed_pages;
7841 
7842 			pgdat->totalreserve_pages += max;
7843 
7844 			reserve_pages += max;
7845 		}
7846 	}
7847 	totalreserve_pages = reserve_pages;
7848 }
7849 
7850 /*
7851  * setup_per_zone_lowmem_reserve - called whenever
7852  *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
7853  *	has a correct pages reserved value, so an adequate number of
7854  *	pages are left in the zone after a successful __alloc_pages().
7855  */
7856 static void setup_per_zone_lowmem_reserve(void)
7857 {
7858 	struct pglist_data *pgdat;
7859 	enum zone_type i, j;
7860 
7861 	for_each_online_pgdat(pgdat) {
7862 		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
7863 			struct zone *zone = &pgdat->node_zones[i];
7864 			int ratio = sysctl_lowmem_reserve_ratio[i];
7865 			bool clear = !ratio || !zone_managed_pages(zone);
7866 			unsigned long managed_pages = 0;
7867 
7868 			for (j = i + 1; j < MAX_NR_ZONES; j++) {
7869 				struct zone *upper_zone = &pgdat->node_zones[j];
7870 
7871 				managed_pages += zone_managed_pages(upper_zone);
7872 
7873 				if (clear)
7874 					zone->lowmem_reserve[j] = 0;
7875 				else
7876 					zone->lowmem_reserve[j] = managed_pages / ratio;
7877 			}
7878 		}
7879 	}
7880 
7881 	/* update totalreserve_pages */
7882 	calculate_totalreserve_pages();
7883 }
7884 
7885 static void __setup_per_zone_wmarks(void)
7886 {
7887 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
7888 	unsigned long lowmem_pages = 0;
7889 	struct zone *zone;
7890 	unsigned long flags;
7891 
7892 	/* Calculate total number of !ZONE_HIGHMEM pages */
7893 	for_each_zone(zone) {
7894 		if (!is_highmem(zone))
7895 			lowmem_pages += zone_managed_pages(zone);
7896 	}
7897 
7898 	for_each_zone(zone) {
7899 		u64 tmp;
7900 
7901 		spin_lock_irqsave(&zone->lock, flags);
7902 		tmp = (u64)pages_min * zone_managed_pages(zone);
7903 		do_div(tmp, lowmem_pages);
7904 		if (is_highmem(zone)) {
7905 			/*
7906 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
7907 			 * need highmem pages, so cap pages_min to a small
7908 			 * value here.
7909 			 *
7910 			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
7911 			 * deltas control async page reclaim, and so should
7912 			 * not be capped for highmem.
7913 			 */
7914 			unsigned long min_pages;
7915 
7916 			min_pages = zone_managed_pages(zone) / 1024;
7917 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
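			/* SWAP_CLUSTER_MAX is 32 pages, so the highmem minimum stays between 32 and 128 pages. */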
7918 			zone->_watermark[WMARK_MIN] = min_pages;
7919 		} else {
7920 			/*
7921 			 * If it's a lowmem zone, reserve a number of pages
7922 			 * proportionate to the zone's size.
7923 			 */
7924 			zone->_watermark[WMARK_MIN] = tmp;
7925 		}
7926 
7927 		/*
7928 		 * Set the kswapd watermarks distance according to the
7929 		 * scale factor in proportion to available memory, but
7930 		 * ensure a minimum size on small systems.
7931 		 */
7932 		tmp = max_t(u64, tmp >> 2,
7933 			    mult_frac(zone_managed_pages(zone),
7934 				      watermark_scale_factor, 10000));
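		/* With the default watermark_scale_factor of 10, this gap is at least 0.1% of the zone's managed pages. */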
7935 
7936 		zone->watermark_boost = 0;
7937 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
7938 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
7939 
7940 		spin_unlock_irqrestore(&zone->lock, flags);
7941 	}
7942 
7943 	/* update totalreserve_pages */
7944 	calculate_totalreserve_pages();
7945 }
7946 
7947 /**
7948  * setup_per_zone_wmarks - called when min_free_kbytes changes
7949  * or when memory is hot-{added|removed}
7950  *
7951  * Ensures that the watermark[min,low,high] values for each zone are set
7952  * correctly with respect to min_free_kbytes.
7953  */
7954 void setup_per_zone_wmarks(void)
7955 {
7956 	static DEFINE_SPINLOCK(lock);
7957 
7958 	spin_lock(&lock);
7959 	__setup_per_zone_wmarks();
7960 	spin_unlock(&lock);
7961 }
7962 
7963 /*
7964  * Initialise min_free_kbytes.
7965  *
7966  * For small machines we want it small (128k min).  For large machines
7967  * we want it large (256MB max).  But it is not linear, because network
7968  * bandwidth does not increase linearly with machine size.  We use
7969  *
7970  *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
7971  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
7972  *
7973  * which yields
7974  *
7975  * 16MB:	512k
7976  * 32MB:	724k
7977  * 64MB:	1024k
7978  * 128MB:	1448k
7979  * 256MB:	2048k
7980  * 512MB:	2896k
7981  * 1024MB:	4096k
7982  * 2048MB:	5792k
7983  * 4096MB:	8192k
7984  * 8192MB:	11584k
7985  * 16384MB:	16384k
7986  */
7987 int __meminit init_per_zone_wmark_min(void)
7988 {
7989 	unsigned long lowmem_kbytes;
7990 	int new_min_free_kbytes;
7991 
7992 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
7993 	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
7994 
7995 	if (new_min_free_kbytes > user_min_free_kbytes) {
7996 		min_free_kbytes = new_min_free_kbytes;
7997 		if (min_free_kbytes < 128)
7998 			min_free_kbytes = 128;
7999 		if (min_free_kbytes > 262144)
8000 			min_free_kbytes = 262144;
8001 	} else {
8002 		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8003 				new_min_free_kbytes, user_min_free_kbytes);
8004 	}
8005 	setup_per_zone_wmarks();
8006 	refresh_zone_stat_thresholds();
8007 	setup_per_zone_lowmem_reserve();
8008 
8009 #ifdef CONFIG_NUMA
8010 	setup_min_unmapped_ratio();
8011 	setup_min_slab_ratio();
8012 #endif
8013 
8014 	khugepaged_min_free_kbytes_update();
8015 
8016 	return 0;
8017 }
8018 postcore_initcall(init_per_zone_wmark_min)
8019 
8020 /*
8021  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8022  *	that we can call two helper functions whenever min_free_kbytes
8023  *	changes.
8024  */
8025 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8026 		void *buffer, size_t *length, loff_t *ppos)
8027 {
8028 	int rc;
8029 
8030 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8031 	if (rc)
8032 		return rc;
8033 
8034 	if (write) {
8035 		user_min_free_kbytes = min_free_kbytes;
8036 		setup_per_zone_wmarks();
8037 	}
8038 	return 0;
8039 }
8040 
8041 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8042 		void *buffer, size_t *length, loff_t *ppos)
8043 {
8044 	int rc;
8045 
8046 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8047 	if (rc)
8048 		return rc;
8049 
8050 	if (write)
8051 		setup_per_zone_wmarks();
8052 
8053 	return 0;
8054 }
8055 
8056 #ifdef CONFIG_NUMA
8057 static void setup_min_unmapped_ratio(void)
8058 {
8059 	pg_data_t *pgdat;
8060 	struct zone *zone;
8061 
8062 	for_each_online_pgdat(pgdat)
8063 		pgdat->min_unmapped_pages = 0;
8064 
8065 	for_each_zone(zone)
8066 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8067 						         sysctl_min_unmapped_ratio) / 100;
8068 }
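/*
 * Rough example (illustrative numbers): with sysctl_min_unmapped_ratio = 1
 * (i.e. 1%) and a node whose zones manage 4,000,000 pages in total, the
 * node's min_unmapped_pages ends up at roughly 40,000 pages, accumulated
 * zone by zone in the loop above.
 */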
8069 
8070 
8071 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8072 		void *buffer, size_t *length, loff_t *ppos)
8073 {
8074 	int rc;
8075 
8076 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8077 	if (rc)
8078 		return rc;
8079 
8080 	setup_min_unmapped_ratio();
8081 
8082 	return 0;
8083 }
8084 
8085 static void setup_min_slab_ratio(void)
8086 {
8087 	pg_data_t *pgdat;
8088 	struct zone *zone;
8089 
8090 	for_each_online_pgdat(pgdat)
8091 		pgdat->min_slab_pages = 0;
8092 
8093 	for_each_zone(zone)
8094 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8095 						     sysctl_min_slab_ratio) / 100;
8096 }
8097 
8098 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8099 		void *buffer, size_t *length, loff_t *ppos)
8100 {
8101 	int rc;
8102 
8103 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8104 	if (rc)
8105 		return rc;
8106 
8107 	setup_min_slab_ratio();
8108 
8109 	return 0;
8110 }
8111 #endif
8112 
8113 /*
8114  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8115  *	proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
8116  *	whenever sysctl_lowmem_reserve_ratio changes.
8117  *
8118  * The reserve ratio has no relation to the minimum watermarks; it can
8119  * only be interpreted in relation to the zone sizes established at
8120  * boot time.
8121  */
8122 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8123 		void *buffer, size_t *length, loff_t *ppos)
8124 {
8125 	int i;
8126 
8127 	proc_dointvec_minmax(table, write, buffer, length, ppos);
8128 
8129 	for (i = 0; i < MAX_NR_ZONES; i++) {
8130 		if (sysctl_lowmem_reserve_ratio[i] < 1)
8131 			sysctl_lowmem_reserve_ratio[i] = 0;
8132 	}
8133 
8134 	setup_per_zone_lowmem_reserve();
8135 	return 0;
8136 }
8137 
8138 static void __zone_pcp_update(struct zone *zone)
8139 {
8140 	unsigned int cpu;
8141 
8142 	for_each_possible_cpu(cpu)
8143 		pageset_set_high_and_batch(zone,
8144 				per_cpu_ptr(zone->pageset, cpu));
8145 }
8146 
8147 /*
8148  * percpu_pagelist_fraction - changes pcp->high for each zone on each
8149  * CPU.  It is the fraction of a zone's total pages that a hot per-cpu
8150  * pagelist may hold before it gets flushed back to the buddy allocator.
8151  */
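/*
 * Rough example (assuming 4KiB pages): for a zone managing 1,048,576 pages
 * (4GiB) and percpu_pagelist_fraction = 8 (MIN_PERCPU_PAGELIST_FRACTION),
 * each CPU's pcp->high becomes 1,048,576 / 8 = 131,072 pages, i.e. a per-cpu
 * pagelist is drained back to the buddy allocator once it grows past 512MiB.
 */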
8152 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8153 		void *buffer, size_t *length, loff_t *ppos)
8154 {
8155 	struct zone *zone;
8156 	int old_percpu_pagelist_fraction;
8157 	int ret;
8158 
8159 	mutex_lock(&pcp_batch_high_lock);
8160 	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8161 
8162 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8163 	if (!write || ret < 0)
8164 		goto out;
8165 
8166 	/* Sanity checking to avoid pcp imbalance */
8167 	if (percpu_pagelist_fraction &&
8168 	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8169 		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8170 		ret = -EINVAL;
8171 		goto out;
8172 	}
8173 
8174 	/* No change? */
8175 	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8176 		goto out;
8177 
8178 	for_each_populated_zone(zone)
8179 		__zone_pcp_update(zone);
8180 out:
8181 	mutex_unlock(&pcp_batch_high_lock);
8182 	return ret;
8183 }
8184 
8185 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8186 /*
8187  * Returns the number of pages that the architecture has reserved but
8188  * that are not known to alloc_large_system_hash().
8189  */
8190 static unsigned long __init arch_reserved_kernel_pages(void)
8191 {
8192 	return 0;
8193 }
8194 #endif
8195 
8196 /*
8197  * Adaptive scale is meant to reduce sizes of hash tables on large memory
8198  * machines. As memory size is increased the scale is also increased but at
8199  * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8200  * quadruples the scale is increased by one, which means the size of hash table
8201  * only doubles, instead of quadrupling as well.
8202  * Because 32-bit systems cannot have large physical memory, where this scaling
8203  * makes sense, it is disabled on such platforms.
8204  */
8205 #if __BITS_PER_LONG > 32
8206 #define ADAPT_SCALE_BASE	(64ul << 30)
8207 #define ADAPT_SCALE_SHIFT	2
8208 #define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
8209 #endif
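/*
 * Worked example (illustrative): on a 1TiB machine the loop in
 * alloc_large_system_hash() below sees adapt at 64GiB and 256GiB worth of
 * pages, both below the machine's page count, so scale is bumped by 2.
 * Memory grew 16x over the 64GiB base while the hash table, all else being
 * equal, grows only 4x.
 */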
8210 
8211 /*
8212  * allocate a large system hash table from bootmem
8213  * - it is assumed that the hash table must contain an exact power-of-2
8214  *   quantity of entries
8215  * - limit is the number of hash buckets, not the total allocation size
8216  */
8217 void *__init alloc_large_system_hash(const char *tablename,
8218 				     unsigned long bucketsize,
8219 				     unsigned long numentries,
8220 				     int scale,
8221 				     int flags,
8222 				     unsigned int *_hash_shift,
8223 				     unsigned int *_hash_mask,
8224 				     unsigned long low_limit,
8225 				     unsigned long high_limit)
8226 {
8227 	unsigned long long max = high_limit;
8228 	unsigned long log2qty, size;
8229 	void *table = NULL;
8230 	gfp_t gfp_flags;
8231 	bool virt;
8232 
8233 	/* allow the kernel cmdline to have a say */
8234 	if (!numentries) {
8235 		/* round applicable memory size up to nearest megabyte */
8236 		numentries = nr_kernel_pages;
8237 		numentries -= arch_reserved_kernel_pages();
8238 
8239 		/* It isn't necessary when PAGE_SIZE >= 1MB */
8240 		if (PAGE_SHIFT < 20)
8241 			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8242 
8243 #if __BITS_PER_LONG > 32
8244 		if (!high_limit) {
8245 			unsigned long adapt;
8246 
8247 			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8248 			     adapt <<= ADAPT_SCALE_SHIFT)
8249 				scale++;
8250 		}
8251 #endif
8252 
8253 		/* limit to 1 bucket per 2^scale bytes of low memory */
8254 		if (scale > PAGE_SHIFT)
8255 			numentries >>= (scale - PAGE_SHIFT);
8256 		else
8257 			numentries <<= (PAGE_SHIFT - scale);
8258 
8259 		/* Make sure we've got at least a 0-order allocation. */
8260 		if (unlikely(flags & HASH_SMALL)) {
8261 			/* Makes no sense without HASH_EARLY */
8262 			WARN_ON(!(flags & HASH_EARLY));
8263 			if (!(numentries >> *_hash_shift)) {
8264 				numentries = 1UL << *_hash_shift;
8265 				BUG_ON(!numentries);
8266 			}
8267 		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8268 			numentries = PAGE_SIZE / bucketsize;
8269 	}
8270 	numentries = roundup_pow_of_two(numentries);
8271 
8272 	/* limit allocation size to 1/16 total memory by default */
8273 	if (max == 0) {
8274 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8275 		do_div(max, bucketsize);
8276 	}
8277 	max = min(max, 0x80000000ULL);
8278 
8279 	if (numentries < low_limit)
8280 		numentries = low_limit;
8281 	if (numentries > max)
8282 		numentries = max;
8283 
8284 	log2qty = ilog2(numentries);
8285 
8286 	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8287 	do {
8288 		virt = false;
8289 		size = bucketsize << log2qty;
8290 		if (flags & HASH_EARLY) {
8291 			if (flags & HASH_ZERO)
8292 				table = memblock_alloc(size, SMP_CACHE_BYTES);
8293 			else
8294 				table = memblock_alloc_raw(size,
8295 							   SMP_CACHE_BYTES);
8296 		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8297 			table = __vmalloc(size, gfp_flags);
8298 			virt = true;
8299 		} else {
8300 			/*
8301 			 * If bucketsize is not a power of two, we may free
8302 			 * some pages at the end of the hash table, which
8303 			 * alloc_pages_exact() does automatically.
8304 			 */
8305 			table = alloc_pages_exact(size, gfp_flags);
8306 			kmemleak_alloc(table, size, 1, gfp_flags);
8307 		}
8308 	} while (!table && size > PAGE_SIZE && --log2qty);
8309 
8310 	if (!table)
8311 		panic("Failed to allocate %s hash table\n", tablename);
8312 
8313 	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8314 		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8315 		virt ? "vmalloc" : "linear");
8316 
8317 	if (_hash_shift)
8318 		*_hash_shift = log2qty;
8319 	if (_hash_mask)
8320 		*_hash_mask = (1 << log2qty) - 1;
8321 
8322 	return table;
8323 }
8324 
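/*
 * Typical usage sketch (modelled on the dentry cache setup in fs/dcache.c;
 * the exact arguments there may differ):
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *						   sizeof(struct hlist_bl_head),
 *						   dhash_entries,
 *						   13,
 *						   HASH_ZERO,
 *						   &d_hash_shift,
 *						   NULL,
 *						   0, 0);
 *
 * dhash_entries is 0 unless set on the command line, which lets the table be
 * sized from the amount of memory; a scale of 13 limits it to one bucket per
 * 2^13 bytes of low memory, and HASH_ZERO hands back pre-zeroed buckets.
 */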
8325 /*
8326  * This function checks whether a pageblock contains unmovable pages.
8327  *
8328  * The PageLRU check without isolation or the lru_lock can race, so a
8329  * MIGRATE_MOVABLE block might still include unmovable pages. Likewise,
8330  * the __PageMovable check without lock_page may miss some movable
8331  * non-LRU pages under racing conditions, so this function is not exact.
8332  *
8333  * Returns a page without holding a reference. If the caller wants to
8334  * dereference that page (e.g., dumping), it has to make sure that it
8335  * cannot get removed (e.g., via memory unplug) concurrently.
8336  *
8337  */
8338 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8339 				 int migratetype, int flags)
8340 {
8341 	unsigned long iter = 0;
8342 	unsigned long pfn = page_to_pfn(page);
8343 	unsigned long offset = pfn % pageblock_nr_pages;
8344 
8345 	if (is_migrate_cma_page(page)) {
8346 		/*
8347 		 * CMA allocations (alloc_contig_range) really need to isolate
8348 		 * CMA pageblocks even when the pages are not movable in fact,
8349 		 * so consider them movable here.
8350 		 */
8351 		if (is_migrate_cma(migratetype))
8352 			return NULL;
8353 
8354 		return page;
8355 	}
8356 
8357 	for (; iter < pageblock_nr_pages - offset; iter++) {
8358 		if (!pfn_valid_within(pfn + iter))
8359 			continue;
8360 
8361 		page = pfn_to_page(pfn + iter);
8362 
8363 		/*
8364 		 * Both bootmem allocations and memory holes are marked
8365 		 * PG_reserved and are unmovable. We can even have unmovable
8366 		 * allocations inside ZONE_MOVABLE, for example when
8367 		 * specifying "movablecore".
8368 		 */
8369 		if (PageReserved(page))
8370 			return page;
8371 
8372 		/*
8373 		 * If the zone is movable and we have ruled out all reserved
8374 		 * pages then it should be reasonably safe to assume the rest
8375 		 * is movable.
8376 		 */
8377 		if (zone_idx(zone) == ZONE_MOVABLE)
8378 			continue;
8379 
8380 		/*
8381 		 * Hugepages are not in LRU lists, but they're movable.
8382 		 * THPs are on the LRU, but need to be counted as #small pages.
8383 		 * We need not scan over tail pages because we don't
8384 		 * handle each tail page individually in migration.
8385 		 */
8386 		if (PageHuge(page) || PageTransCompound(page)) {
8387 			struct page *head = compound_head(page);
8388 			unsigned int skip_pages;
8389 
8390 			if (PageHuge(page)) {
8391 				if (!hugepage_migration_supported(page_hstate(head)))
8392 					return page;
8393 			} else if (!PageLRU(head) && !__PageMovable(head)) {
8394 				return page;
8395 			}
8396 
8397 			skip_pages = compound_nr(head) - (page - head);
8398 			iter += skip_pages - 1;
8399 			continue;
8400 		}
8401 
8402 		/*
8403 		 * We can't use page_count without pinning the page
8404 		 * because another CPU can free the compound page.
8405 		 * This check already skips compound tails of THPs
8406 		 * because their page->_refcount is zero at all times.
8407 		 */
8408 		if (!page_ref_count(page)) {
8409 			if (PageBuddy(page))
8410 				iter += (1 << buddy_order(page)) - 1;
8411 			continue;
8412 		}
8413 
8414 		/*
8415 		 * The HWPoisoned page may not be in the buddy system, and
8416 		 * its page_count() is not 0.
8417 		 */
8418 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8419 			continue;
8420 
8421 		/*
8422 		 * We treat all PageOffline() pages as movable when offlining
8423 		 * to give drivers a chance to decrement their reference count
8424 		 * in MEM_GOING_OFFLINE in order to indicate that these pages
8425 		 * can be offlined as there are no direct references anymore.
8426 		 * For actually unmovable PageOffline() where the driver does
8427 		 * not support this, we will fail later when trying to actually
8428 		 * move these pages that still have a reference count > 0.
8429 		 * (false negatives in this function only)
8430 		 */
8431 		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8432 			continue;
8433 
8434 		if (__PageMovable(page) || PageLRU(page))
8435 			continue;
8436 
8437 		/*
8438 		 * If there are RECLAIMABLE pages, we need to check
8439 		 * them.  But for now memory offlining itself doesn't call
8440 		 * shrink_node_slabs(), and that still needs to be fixed.
8441 		 */
8442 		return page;
8443 	}
8444 	return NULL;
8445 }
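/*
 * Caller sketch (roughly how set_migratetype_isolate() in
 * mm/page_isolation.c consumes the return value; details there may differ):
 *
 *	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
 *	if (!unmovable) {
 *		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 *	} else if (isol_flags & REPORT_FAILURE) {
 *		dump_page(unmovable, "unmovable page");
 *	}
 */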
8446 
8447 #ifdef CONFIG_CONTIG_ALLOC
8448 static unsigned long pfn_max_align_down(unsigned long pfn)
8449 {
8450 	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8451 			     pageblock_nr_pages) - 1);
8452 }
8453 
8454 static unsigned long pfn_max_align_up(unsigned long pfn)
8455 {
8456 	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8457 				pageblock_nr_pages));
8458 }
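/*
 * Worked example (assuming 4KiB pages and the default MAX_ORDER of 11, so
 * the larger of MAX_ORDER_NR_PAGES and pageblock_nr_pages is 1024):
 * pfn_max_align_down(5000) = 4096 and pfn_max_align_up(5000) = 5120, i.e.
 * the requested range is widened to whole 4MiB-aligned chunks.
 */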
8459 
8460 /* [start, end) must belong to a single zone. */
8461 static int __alloc_contig_migrate_range(struct compact_control *cc,
8462 					unsigned long start, unsigned long end)
8463 {
8464 	/* This function is based on compact_zone() from compaction.c. */
8465 	unsigned int nr_reclaimed;
8466 	unsigned long pfn = start;
8467 	unsigned int tries = 0;
8468 	int ret = 0;
8469 	struct migration_target_control mtc = {
8470 		.nid = zone_to_nid(cc->zone),
8471 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8472 	};
8473 
8474 	migrate_prep();
8475 
8476 	while (pfn < end || !list_empty(&cc->migratepages)) {
8477 		if (fatal_signal_pending(current)) {
8478 			ret = -EINTR;
8479 			break;
8480 		}
8481 
8482 		if (list_empty(&cc->migratepages)) {
8483 			cc->nr_migratepages = 0;
8484 			pfn = isolate_migratepages_range(cc, pfn, end);
8485 			if (!pfn) {
8486 				ret = -EINTR;
8487 				break;
8488 			}
8489 			tries = 0;
8490 		} else if (++tries == 5) {
8491 			ret = ret < 0 ? ret : -EBUSY;
8492 			break;
8493 		}
8494 
8495 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8496 							&cc->migratepages);
8497 		cc->nr_migratepages -= nr_reclaimed;
8498 
8499 		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8500 				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
8501 	}
8502 	if (ret < 0) {
8503 		putback_movable_pages(&cc->migratepages);
8504 		return ret;
8505 	}
8506 	return 0;
8507 }
8508 
8509 /**
8510  * alloc_contig_range() -- tries to allocate given range of pages
8511  * @start:	start PFN to allocate
8512  * @end:	one-past-the-last PFN to allocate
8513  * @migratetype:	migratetype of the underlying pageblocks (either
8514  *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8515  *			in range must have the same migratetype and it must
8516  *			be either of the two.
8517  * @gfp_mask:	GFP mask to use during compaction
8518  *
8519  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8520  * aligned.  The PFN range must belong to a single zone.
8521  *
8522  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8523  * pageblocks in the range.  Once isolated, the pageblocks should not
8524  * be modified by others.
8525  *
8526  * Return: zero on success or negative error code.  On success all
8527  * pages which PFN is in [start, end) are allocated for the caller and
8528  * need to be freed with free_contig_range().
8529  */
8530 int alloc_contig_range(unsigned long start, unsigned long end,
8531 		       unsigned migratetype, gfp_t gfp_mask)
8532 {
8533 	unsigned long outer_start, outer_end;
8534 	unsigned int order;
8535 	int ret = 0;
8536 
8537 	struct compact_control cc = {
8538 		.nr_migratepages = 0,
8539 		.order = -1,
8540 		.zone = page_zone(pfn_to_page(start)),
8541 		.mode = MIGRATE_SYNC,
8542 		.ignore_skip_hint = true,
8543 		.no_set_skip_hint = true,
8544 		.gfp_mask = current_gfp_context(gfp_mask),
8545 		.alloc_contig = true,
8546 	};
8547 	INIT_LIST_HEAD(&cc.migratepages);
8548 
8549 	/*
8550 	 * What we do here is we mark all pageblocks in range as
8551 	 * MIGRATE_ISOLATE.  Because pageblocks and max order pages may
8552 	 * have different sizes, and due to the way the page allocator
8553 	 * works, we align the range to the bigger of the two so
8554 	 * that the page allocator won't try to merge buddies from
8555 	 * different pageblocks and change MIGRATE_ISOLATE to some
8556 	 * other migration type.
8557 	 *
8558 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8559 	 * migrate the pages from an unaligned range (ie. pages that
8560 	 * we are interested in).  This will put all the pages in
8561 	 * range back to page allocator as MIGRATE_ISOLATE.
8562 	 *
8563 	 * When this is done, we take the pages in range from page
8564 	 * allocator removing them from the buddy system.  This way
8565 	 * page allocator will never consider using them.
8566 	 *
8567 	 * This lets us mark the pageblocks back as
8568 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8569 	 * aligned range but not in the unaligned, original range are
8570 	 * put back to page allocator so that buddy can use them.
8571 	 */
8572 
8573 	ret = start_isolate_page_range(pfn_max_align_down(start),
8574 				       pfn_max_align_up(end), migratetype, 0);
8575 	if (ret)
8576 		return ret;
8577 
8578 	/*
8579 	 * In case of -EBUSY, we'd like to know which page causes the problem.
8580 	 * So, just fall through. test_pages_isolated() has a tracepoint
8581 	 * which will report the busy page.
8582 	 *
8583 	 * It is possible that busy pages could become available before
8584 	 * the call to test_pages_isolated, and the range will actually be
8585 	 * allocated.  So, if we fall through be sure to clear ret so that
8586 	 * -EBUSY is not accidentally used or returned to the caller.
8587 	 */
8588 	ret = __alloc_contig_migrate_range(&cc, start, end);
8589 	if (ret && ret != -EBUSY)
8590 		goto done;
8591 	ret = 0;
8592 
8593 	/*
8594 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
8595 	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
8596 	 * more, all pages in [start, end) are free in page allocator.
8597 	 * What we are going to do is to allocate all pages from
8598 	 * [start, end) (that is remove them from page allocator).
8599 	 *
8600 	 * The only problem is that pages at the beginning and at the
8601 	 * end of the interesting range may not be aligned with pages that
8602 	 * the page allocator holds, ie. they can be part of higher order
8603 	 * pages.  Because of this, we reserve the bigger range and
8604 	 * once this is done free the pages we are not interested in.
8605 	 *
8606 	 * We don't have to hold zone->lock here because the pages are
8607 	 * isolated thus they won't get removed from buddy.
8608 	 */
8609 
8610 	lru_add_drain_all();
8611 
8612 	order = 0;
8613 	outer_start = start;
8614 	while (!PageBuddy(pfn_to_page(outer_start))) {
8615 		if (++order >= MAX_ORDER) {
8616 			outer_start = start;
8617 			break;
8618 		}
8619 		outer_start &= ~0UL << order;
8620 	}
8621 
8622 	if (outer_start != start) {
8623 		order = buddy_order(pfn_to_page(outer_start));
8624 
8625 		/*
8626 		 * outer_start page could be small order buddy page and
8627 		 * it doesn't include start page. Adjust outer_start
8628 		 * in this case to report failed page properly
8629 		 * on tracepoint in test_pages_isolated()
8630 		 */
8631 		if (outer_start + (1UL << order) <= start)
8632 			outer_start = start;
8633 	}
8634 
8635 	/* Make sure the range is really isolated. */
8636 	if (test_pages_isolated(outer_start, end, 0)) {
8637 		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
8638 			__func__, outer_start, end);
8639 		ret = -EBUSY;
8640 		goto done;
8641 	}
8642 
8643 	/* Grab isolated pages from freelists. */
8644 	outer_end = isolate_freepages_range(&cc, outer_start, end);
8645 	if (!outer_end) {
8646 		ret = -EBUSY;
8647 		goto done;
8648 	}
8649 
8650 	/* Free head and tail (if any) */
8651 	if (start != outer_start)
8652 		free_contig_range(outer_start, start - outer_start);
8653 	if (end != outer_end)
8654 		free_contig_range(end, outer_end - end);
8655 
8656 done:
8657 	undo_isolate_page_range(pfn_max_align_down(start),
8658 				pfn_max_align_up(end), migratetype);
8659 	return ret;
8660 }
8661 EXPORT_SYMBOL(alloc_contig_range);
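/*
 * Usage sketch (in the spirit of the CMA allocator in mm/cma.c; the real
 * caller adds locking, retries and accounting):
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA, GFP_KERNEL);
 *	if (!ret) {
 *		... use pages [pfn, pfn + nr_pages) ...
 *		free_contig_range(pfn, nr_pages);
 *	}
 */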
8662 
8663 static int __alloc_contig_pages(unsigned long start_pfn,
8664 				unsigned long nr_pages, gfp_t gfp_mask)
8665 {
8666 	unsigned long end_pfn = start_pfn + nr_pages;
8667 
8668 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8669 				  gfp_mask);
8670 }
8671 
8672 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8673 				   unsigned long nr_pages)
8674 {
8675 	unsigned long i, end_pfn = start_pfn + nr_pages;
8676 	struct page *page;
8677 
8678 	for (i = start_pfn; i < end_pfn; i++) {
8679 		page = pfn_to_online_page(i);
8680 		if (!page)
8681 			return false;
8682 
8683 		if (page_zone(page) != z)
8684 			return false;
8685 
8686 		if (PageReserved(page))
8687 			return false;
8688 
8689 		if (page_count(page) > 0)
8690 			return false;
8691 
8692 		if (PageHuge(page))
8693 			return false;
8694 	}
8695 	return true;
8696 }
8697 
8698 static bool zone_spans_last_pfn(const struct zone *zone,
8699 				unsigned long start_pfn, unsigned long nr_pages)
8700 {
8701 	unsigned long last_pfn = start_pfn + nr_pages - 1;
8702 
8703 	return zone_spans_pfn(zone, last_pfn);
8704 }
8705 
8706 /**
8707  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
8708  * @nr_pages:	Number of contiguous pages to allocate
8709  * @gfp_mask:	GFP mask to limit search and used during compaction
8710  * @nid:	Target node
8711  * @nodemask:	Mask for other possible nodes
8712  *
8713  * This routine is a wrapper around alloc_contig_range(). It scans over zones
8714  * on an applicable zonelist to find a contiguous pfn range which can then be
8715  * tried for allocation with alloc_contig_range(). This routine is intended
8716  * for allocation requests which can not be fulfilled with the buddy allocator.
8717  *
8718  * The allocated memory is always aligned to a page boundary. If nr_pages is a
8719  * power of two then the alignment is guaranteed to be to the given nr_pages
8720  * (e.g. 1GB request would be aligned to 1GB).
8721  *
8722  * Allocated pages can be freed with free_contig_range() or by manually calling
8723  * __free_page() on each allocated page.
8724  *
8725  * Return: pointer to contiguous pages on success, or NULL if not successful.
8726  */
8727 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8728 				int nid, nodemask_t *nodemask)
8729 {
8730 	unsigned long ret, pfn, flags;
8731 	struct zonelist *zonelist;
8732 	struct zone *zone;
8733 	struct zoneref *z;
8734 
8735 	zonelist = node_zonelist(nid, gfp_mask);
8736 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
8737 					gfp_zone(gfp_mask), nodemask) {
8738 		spin_lock_irqsave(&zone->lock, flags);
8739 
8740 		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8741 		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8742 			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8743 				/*
8744 				 * We release the zone lock here because
8745 				 * alloc_contig_range() will also lock the zone
8746 				 * at some point. If there's an allocation
8747 				 * spinning on this lock, it may win the race
8748 				 * and cause alloc_contig_range() to fail...
8749 				 */
8750 				spin_unlock_irqrestore(&zone->lock, flags);
8751 				ret = __alloc_contig_pages(pfn, nr_pages,
8752 							gfp_mask);
8753 				if (!ret)
8754 					return pfn_to_page(pfn);
8755 				spin_lock_irqsave(&zone->lock, flags);
8756 			}
8757 			pfn += nr_pages;
8758 		}
8759 		spin_unlock_irqrestore(&zone->lock, flags);
8760 	}
8761 	return NULL;
8762 }
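/*
 * Usage sketch (similar in shape to how hugetlb allocates gigantic pages;
 * the surrounding code there differs):
 *
 *	unsigned long nr_pages = 1UL << (30 - PAGE_SHIFT);
 *	struct page *page;
 *
 *	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
 *				  nid, NULL);
 *	if (page) {
 *		... use the 1GiB of physically contiguous memory ...
 *		free_contig_range(page_to_pfn(page), nr_pages);
 *	}
 */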
8763 #endif /* CONFIG_CONTIG_ALLOC */
8764 
8765 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8766 {
8767 	unsigned int count = 0;
8768 
8769 	for (; nr_pages--; pfn++) {
8770 		struct page *page = pfn_to_page(pfn);
8771 
8772 		count += page_count(page) != 1;
8773 		__free_page(page);
8774 	}
8775 	WARN(count != 0, "%d pages are still in use!\n", count);
8776 }
8777 EXPORT_SYMBOL(free_contig_range);
8778 
8779 /*
8780  * The zone indicated has a new number of managed_pages; batch sizes and percpu
8781  * page high values need to be recalculated.
8782  */
8783 void __meminit zone_pcp_update(struct zone *zone)
8784 {
8785 	mutex_lock(&pcp_batch_high_lock);
8786 	__zone_pcp_update(zone);
8787 	mutex_unlock(&pcp_batch_high_lock);
8788 }
8789 
8790 void zone_pcp_reset(struct zone *zone)
8791 {
8792 	unsigned long flags;
8793 	int cpu;
8794 	struct per_cpu_pageset *pset;
8795 
8796 	/* avoid races with drain_pages()  */
8797 	local_irq_save(flags);
8798 	if (zone->pageset != &boot_pageset) {
8799 		for_each_online_cpu(cpu) {
8800 			pset = per_cpu_ptr(zone->pageset, cpu);
8801 			drain_zonestat(zone, pset);
8802 		}
8803 		free_percpu(zone->pageset);
8804 		zone->pageset = &boot_pageset;
8805 	}
8806 	local_irq_restore(flags);
8807 }
8808 
8809 #ifdef CONFIG_MEMORY_HOTREMOVE
8810 /*
8811  * All pages in the range must be in a single zone; the range must not contain
8812  * holes, must span full sections, and must be isolated before calling this function.
8813  */
8814 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
8815 {
8816 	unsigned long pfn = start_pfn;
8817 	struct page *page;
8818 	struct zone *zone;
8819 	unsigned int order;
8820 	unsigned long flags;
8821 
8822 	offline_mem_sections(pfn, end_pfn);
8823 	zone = page_zone(pfn_to_page(pfn));
8824 	spin_lock_irqsave(&zone->lock, flags);
8825 	while (pfn < end_pfn) {
8826 		page = pfn_to_page(pfn);
8827 		/*
8828 		 * The HWPoisoned page may be not in buddy system, and
8829 		 * page_count() is not 0.
8830 		 */
8831 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
8832 			pfn++;
8833 			continue;
8834 		}
8835 		/*
8836 		 * At this point all remaining PageOffline() pages have a
8837 		 * reference count of 0 and can simply be skipped.
8838 		 */
8839 		if (PageOffline(page)) {
8840 			BUG_ON(page_count(page));
8841 			BUG_ON(PageBuddy(page));
8842 			pfn++;
8843 			continue;
8844 		}
8845 
8846 		BUG_ON(page_count(page));
8847 		BUG_ON(!PageBuddy(page));
8848 		order = buddy_order(page);
8849 		del_page_from_free_list(page, zone, order);
8850 		pfn += (1 << order);
8851 	}
8852 	spin_unlock_irqrestore(&zone->lock, flags);
8853 }
8854 #endif
8855 
8856 bool is_free_buddy_page(struct page *page)
8857 {
8858 	struct zone *zone = page_zone(page);
8859 	unsigned long pfn = page_to_pfn(page);
8860 	unsigned long flags;
8861 	unsigned int order;
8862 
8863 	spin_lock_irqsave(&zone->lock, flags);
8864 	for (order = 0; order < MAX_ORDER; order++) {
8865 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8866 
8867 		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
8868 			break;
8869 	}
8870 	spin_unlock_irqrestore(&zone->lock, flags);
8871 
8872 	return order < MAX_ORDER;
8873 }
8874 
8875 #ifdef CONFIG_MEMORY_FAILURE
8876 /*
8877  * Break down a higher-order page into sub-pages, and keep our target page
8878  * out of the buddy allocator.
8879  */
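/*
 * Example (illustrative): if the target sits in the upper half of an order-3
 * block, the lower order-2 half goes back on the free list, the walk then
 * repeats inside the upper half, and so on until only the order-0 target
 * page itself is left off the buddy lists.
 */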
8880 static void break_down_buddy_pages(struct zone *zone, struct page *page,
8881 				   struct page *target, int low, int high,
8882 				   int migratetype)
8883 {
8884 	unsigned long size = 1 << high;
8885 	struct page *current_buddy, *next_page;
8886 
8887 	while (high > low) {
8888 		high--;
8889 		size >>= 1;
8890 
8891 		if (target >= &page[size]) {
8892 			next_page = page + size;
8893 			current_buddy = page;
8894 		} else {
8895 			next_page = page;
8896 			current_buddy = page + size;
8897 		}
8898 
8899 		if (set_page_guard(zone, current_buddy, high, migratetype))
8900 			continue;
8901 
8902 		if (current_buddy != target) {
8903 			add_to_free_list(current_buddy, zone, high, migratetype);
8904 			set_buddy_order(current_buddy, high);
8905 			page = next_page;
8906 		}
8907 	}
8908 }
8909 
8910 /*
8911  * Take a page that will be marked as poisoned off the buddy allocator.
8912  */
8913 bool take_page_off_buddy(struct page *page)
8914 {
8915 	struct zone *zone = page_zone(page);
8916 	unsigned long pfn = page_to_pfn(page);
8917 	unsigned long flags;
8918 	unsigned int order;
8919 	bool ret = false;
8920 
8921 	spin_lock_irqsave(&zone->lock, flags);
8922 	for (order = 0; order < MAX_ORDER; order++) {
8923 		struct page *page_head = page - (pfn & ((1 << order) - 1));
8924 		int page_order = buddy_order(page_head);
8925 
8926 		if (PageBuddy(page_head) && page_order >= order) {
8927 			unsigned long pfn_head = page_to_pfn(page_head);
8928 			int migratetype = get_pfnblock_migratetype(page_head,
8929 								   pfn_head);
8930 
8931 			del_page_from_free_list(page_head, zone, page_order);
8932 			break_down_buddy_pages(zone, page_head, page, 0,
8933 						page_order, migratetype);
8934 			if (!is_migrate_isolate(migratetype))
8935 				__mod_zone_freepage_state(zone, -1, migratetype);
8936 			ret = true;
8937 			break;
8938 		}
8939 		if (page_count(page_head) > 0)
8940 			break;
8941 	}
8942 	spin_unlock_irqrestore(&zone->lock, flags);
8943 	return ret;
8944 }
8945 #endif
8946