1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/page_alloc.c
4 *
5 * Manages the free list, the system allocates free pages here.
6 * Note that kmalloc() lives in slab.c
7 *
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 * Swap reorganised 29.12.95, Stephen Tweedie
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
16 */
17
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/highmem.h>
21 #include <linux/swap.h>
22 #include <linux/interrupt.h>
23 #include <linux/pagemap.h>
24 #include <linux/jiffies.h>
25 #include <linux/memblock.h>
26 #include <linux/compiler.h>
27 #include <linux/kernel.h>
28 #include <linux/kasan.h>
29 #include <linux/module.h>
30 #include <linux/suspend.h>
31 #include <linux/pagevec.h>
32 #include <linux/blkdev.h>
33 #include <linux/slab.h>
34 #include <linux/ratelimit.h>
35 #include <linux/oom.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/memremap.h>
46 #include <linux/stop_machine.h>
47 #include <linux/random.h>
48 #include <linux/sort.h>
49 #include <linux/pfn.h>
50 #include <linux/backing-dev.h>
51 #include <linux/fault-inject.h>
52 #include <linux/page-isolation.h>
53 #include <linux/debugobjects.h>
54 #include <linux/kmemleak.h>
55 #include <linux/compaction.h>
56 #include <trace/events/kmem.h>
57 #include <trace/events/oom.h>
58 #include <linux/prefetch.h>
59 #include <linux/mm_inline.h>
60 #include <linux/mmu_notifier.h>
61 #include <linux/migrate.h>
62 #include <linux/hugetlb.h>
63 #include <linux/sched/rt.h>
64 #include <linux/sched/mm.h>
65 #include <linux/page_owner.h>
66 #include <linux/page_pinner.h>
67 #include <linux/kthread.h>
68 #include <linux/memcontrol.h>
69 #include <linux/ftrace.h>
70 #include <linux/lockdep.h>
71 #include <linux/nmi.h>
72 #include <linux/psi.h>
73 #include <linux/padata.h>
74 #include <linux/khugepaged.h>
75 #include <linux/buffer_head.h>
76 #include <trace/hooks/mm.h>
77 #include <trace/hooks/vmscan.h>
78
79 #include <asm/sections.h>
80 #include <asm/tlbflush.h>
81 #include <asm/div64.h>
82 #include "internal.h"
83 #include "shuffle.h"
84 #include "page_reporting.h"
85
86 EXPORT_TRACEPOINT_SYMBOL_GPL(mm_page_alloc);
87
88 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
89 typedef int __bitwise fpi_t;
90
91 static inline struct per_cpu_pages_ext *pcp_to_pcpext(struct per_cpu_pages *pcp)
92 {
93 return container_of(pcp, struct per_cpu_pages_ext, pcp);
94 }
95
96 static inline
97 struct per_cpu_pages_ext __percpu *zone_per_cpu_pageset(struct zone *zone)
98 {
99 return (struct per_cpu_pages_ext __percpu *)zone->per_cpu_pageset;
100 }
101
102 /* No special request */
103 #define FPI_NONE ((__force fpi_t)0)
104
105 /*
106 * Skip free page reporting notification for the (possibly merged) page.
107 * This does not hinder free page reporting from grabbing the page,
108 * reporting it and marking it "reported" - it only skips notifying
109 * the free page reporting infrastructure about a newly freed page. For
110 * example, used when temporarily pulling a page from a freelist and
111 * putting it back unmodified.
112 */
113 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0))
114
115 /*
116 * Place the (possibly merged) page to the tail of the freelist. Will ignore
117 * page shuffling (relevant code - e.g., memory onlining - is expected to
118 * shuffle the whole zone).
119 *
120 * Note: No code should rely on this flag for correctness - it's purely
121 * to allow for optimizations when handing back either fresh pages
122 * (memory onlining) or untouched pages (page isolation, free page
123 * reporting).
124 */
125 #define FPI_TO_TAIL ((__force fpi_t)BIT(1))
126
127 /*
128 * Don't poison memory with KASAN (only for the tag-based modes).
129 * During boot, all non-reserved memblock memory is exposed to page_alloc.
130 * Poisoning all that memory lengthens boot time, especially on systems with
131 * a large amount of RAM. This flag is used to skip that poisoning.
132 * This is only done for the tag-based KASAN modes, as those are able to
133 * detect memory corruptions with the memory tags assigned by default.
134 * All memory allocated normally after boot gets poisoned as usual.
135 */
136 #define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2))
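
/*
 * Illustrative sketch (not a real call site in this file): fpi_t values are
 * plain bit flags and may be OR-ed together when calling the internal free
 * paths, e.g.:
 *
 *	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_REPORT_NOTIFY);
 */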
137
138 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
139 static DEFINE_MUTEX(pcp_batch_high_lock);
140 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
141
142 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
143 /*
144 * On SMP, spin_trylock is sufficient protection.
145 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
146 */
147 #define pcp_trylock_prepare(flags) do { } while (0)
148 #define pcp_trylock_finish(flag) do { } while (0)
149 #else
150
151 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
152 #define pcp_trylock_prepare(flags) local_irq_save(flags)
153 #define pcp_trylock_finish(flags) local_irq_restore(flags)
154 #endif
155
156 /*
157 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
158 * a migration causing the wrong PCP to be locked and remote memory being
159 * potentially allocated, pin the task to the CPU for the lookup+lock.
160 * preempt_disable is used on !RT because it is faster than migrate_disable.
161 * migrate_disable is used on RT because otherwise RT spinlock usage is
162 * interfered with and a high priority task cannot preempt the allocator.
163 */
164 #ifndef CONFIG_PREEMPT_RT
165 #define pcpu_task_pin() preempt_disable()
166 #define pcpu_task_unpin() preempt_enable()
167 #else
168 #define pcpu_task_pin() migrate_disable()
169 #define pcpu_task_unpin() migrate_enable()
170 #endif
171
172 /*
173 * Generic helper to look up and lock a per-cpu variable with an embedded spinlock.
174 * The return value should be used with the equivalent unlock helper.
175 */
176 #define pcpu_spin_lock(type, member, ptr) \
177 ({ \
178 type *_ret; \
179 pcpu_task_pin(); \
180 _ret = this_cpu_ptr(ptr); \
181 spin_lock(&_ret->member); \
182 &_ret->pcp; \
183 })
184
185 #define pcpu_spin_lock_irqsave(type, member, ptr, flags) \
186 ({ \
187 type *_ret; \
188 pcpu_task_pin(); \
189 _ret = this_cpu_ptr(ptr); \
190 spin_lock_irqsave(&_ret->member, flags); \
191 &_ret->pcp; \
192 })
193
194 #define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \
195 ({ \
196 type *_ret; \
197 pcpu_task_pin(); \
198 _ret = this_cpu_ptr(ptr); \
199 if (!spin_trylock_irqsave(&_ret->member, flags)) { \
200 pcpu_task_unpin(); \
201 _ret = NULL; \
202 } \
203 _ret ? &_ret->pcp : NULL; \
204 })
205
206 #define pcpu_spin_unlock(member, ptr) \
207 ({ \
208 spin_unlock(&ptr->member); \
209 pcpu_task_unpin(); \
210 })
211
212 #define pcpu_spin_unlock_irqrestore(member, ptr, flags) \
213 ({ \
214 spin_unlock_irqrestore(&ptr->member, flags); \
215 pcpu_task_unpin(); \
216 })
217
218 /* struct per_cpu_pages_ext specific helpers. */
219 #define pcp_spin_lock(ptr) \
220 pcpu_spin_lock(struct per_cpu_pages_ext, lock, ptr)
221
222 #define pcp_spin_lock_irqsave(ptr, flags) \
223 pcpu_spin_lock_irqsave(struct per_cpu_pages_ext, lock, ptr, flags)
224
225 #define pcp_spin_trylock_irqsave(ptr, flags) \
226 pcpu_spin_trylock_irqsave(struct per_cpu_pages_ext, lock, ptr, flags)
227
228 #define pcp_spin_unlock(ptr) \
229 pcpu_spin_unlock(lock, ptr)
230
231 #define pcp_spin_unlock_irqrestore(ptr, flags) \
232 pcpu_spin_unlock_irqrestore(lock, pcp_to_pcpext(ptr), flags)
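
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 * the lookup and the lock both happen while the task is pinned, which is what
 * the wrappers above guarantee, so callers only see the pattern:
 *
 *	unsigned long flags;
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock_irqsave(zone_per_cpu_pageset(zone), flags);
 *	if (pcp) {
 *		... operate on pcp->lists / pcp->count ...
 *		pcp_spin_unlock_irqrestore(pcp, flags);
 *	}
 */
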
233 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
234 DEFINE_PER_CPU(int, numa_node);
235 EXPORT_PER_CPU_SYMBOL(numa_node);
236 #endif
237
238 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
239
240 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
241 /*
242 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
243 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
244 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
245 * defined in <linux/topology.h>.
246 */
247 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
248 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
249 #endif
250
251 static DEFINE_MUTEX(pcpu_drain_mutex);
252
253 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
254 volatile unsigned long latent_entropy __latent_entropy;
255 EXPORT_SYMBOL(latent_entropy);
256 #endif
257
258 /*
259 * Array of node states.
260 */
261 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
262 [N_POSSIBLE] = NODE_MASK_ALL,
263 [N_ONLINE] = { { [0] = 1UL } },
264 #ifndef CONFIG_NUMA
265 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
266 #ifdef CONFIG_HIGHMEM
267 [N_HIGH_MEMORY] = { { [0] = 1UL } },
268 #endif
269 [N_MEMORY] = { { [0] = 1UL } },
270 [N_CPU] = { { [0] = 1UL } },
271 #endif /* NUMA */
272 };
273 EXPORT_SYMBOL(node_states);
274
275 atomic_long_t _totalram_pages __read_mostly;
276 EXPORT_SYMBOL(_totalram_pages);
277 unsigned long totalreserve_pages __read_mostly;
278 unsigned long totalcma_pages __read_mostly;
279
280 int percpu_pagelist_high_fraction;
281 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
282 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
283 EXPORT_SYMBOL(init_on_alloc);
284
285 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
286 EXPORT_SYMBOL(init_on_free);
287
288 static bool _init_on_alloc_enabled_early __read_mostly
289 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
290 static int __init early_init_on_alloc(char *buf)
291 {
292
293 return kstrtobool(buf, &_init_on_alloc_enabled_early);
294 }
295 early_param("init_on_alloc", early_init_on_alloc);
296
297 static bool _init_on_free_enabled_early __read_mostly
298 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
299 static int __init early_init_on_free(char *buf)
300 {
301 return kstrtobool(buf, &_init_on_free_enabled_early);
302 }
303 early_param("init_on_free", early_init_on_free);
304
305 /*
306 * A cached value of the page's pageblock's migratetype, used when the page is
307 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
308 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
309 * Also the migratetype set in the page does not necessarily match the pcplist
310 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
311 * other index - this ensures that it will be put on the correct CMA freelist.
312 */
313 static inline int get_pcppage_migratetype(struct page *page)
314 {
315 return page->index;
316 }
317
318 static inline void set_pcppage_migratetype(struct page *page, int migratetype)
319 {
320 page->index = migratetype;
321 }
322
323 #ifdef CONFIG_PM_SLEEP
324 /*
325 * The following functions are used by the suspend/hibernate code to temporarily
326 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
327 * while devices are suspended. To avoid races with the suspend/hibernate code,
328 * they should always be called with system_transition_mutex held
329 * (gfp_allowed_mask also should only be modified with system_transition_mutex
330 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
331 * with that modification).
332 */
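
/*
 * Illustrative ordering sketch (hypothetical caller, shown only to make the
 * locking rule above concrete):
 *
 *	mutex_lock(&system_transition_mutex);
 *	pm_restrict_gfp_mask();    (allocations lose __GFP_IO | __GFP_FS)
 *	... suspend/hibernate with devices quiesced ...
 *	pm_restore_gfp_mask();     (previous gfp_allowed_mask is restored)
 *	mutex_unlock(&system_transition_mutex);
 */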
333
334 static gfp_t saved_gfp_mask;
335
336 void pm_restore_gfp_mask(void)
337 {
338 WARN_ON(!mutex_is_locked(&system_transition_mutex));
339 if (saved_gfp_mask) {
340 gfp_allowed_mask = saved_gfp_mask;
341 saved_gfp_mask = 0;
342 }
343 }
344
345 void pm_restrict_gfp_mask(void)
346 {
347 WARN_ON(!mutex_is_locked(&system_transition_mutex));
348 WARN_ON(saved_gfp_mask);
349 saved_gfp_mask = gfp_allowed_mask;
350 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
351 }
352
353 bool pm_suspended_storage(void)
354 {
355 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
356 return false;
357 return true;
358 }
359 #endif /* CONFIG_PM_SLEEP */
360
361 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
362 unsigned int pageblock_order __read_mostly;
363 #endif
364
365 static void __free_pages_ok(struct page *page, unsigned int order,
366 fpi_t fpi_flags);
367
368 /*
369 * results with 256, 32 in the lowmem_reserve sysctl:
370 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
371 * 1G machine -> (16M dma, 784M normal, 224M high)
372 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
373 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
374 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
375 *
376 * TBD: should special case ZONE_DMA32 machines here - in those we normally
377 * don't need any ZONE_NORMAL reservation
378 */
379 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
380 #ifdef CONFIG_ZONE_DMA
381 [ZONE_DMA] = 256,
382 #endif
383 #ifdef CONFIG_ZONE_DMA32
384 [ZONE_DMA32] = 256,
385 #endif
386 [ZONE_NORMAL] = 32,
387 #ifdef CONFIG_HIGHMEM
388 [ZONE_HIGHMEM] = 0,
389 #endif
390 [ZONE_MOVABLE] = 0,
391 };
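
/*
 * Worked example (the numbers just restate the comment above): for each zone
 * i, zone_i->lowmem_reserve[j] is roughly the sum of the managed pages of
 * zones i+1..j divided by sysctl_lowmem_reserve_ratio[i]. With the 1G split
 * above, a HIGHMEM allocation must leave about (784M + 224M) / 256 ~= 4M of
 * ZONE_DMA free, and about 224M / 32 = 7M of ZONE_NORMAL free.
 */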
392
393 static char * const zone_names[MAX_NR_ZONES] = {
394 #ifdef CONFIG_ZONE_DMA
395 "DMA",
396 #endif
397 #ifdef CONFIG_ZONE_DMA32
398 "DMA32",
399 #endif
400 "Normal",
401 #ifdef CONFIG_HIGHMEM
402 "HighMem",
403 #endif
404 "Movable",
405 #ifdef CONFIG_ZONE_DEVICE
406 "Device",
407 #endif
408 };
409
410 const char * const migratetype_names[MIGRATE_TYPES] = {
411 "Unmovable",
412 "Movable",
413 "Reclaimable",
414 #ifdef CONFIG_CMA
415 "CMA",
416 #endif
417 "HighAtomic",
418 #ifdef CONFIG_MEMORY_ISOLATION
419 "Isolate",
420 #endif
421 };
422
423 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
424 [NULL_COMPOUND_DTOR] = NULL,
425 [COMPOUND_PAGE_DTOR] = free_compound_page,
426 #ifdef CONFIG_HUGETLB_PAGE
427 [HUGETLB_PAGE_DTOR] = free_huge_page,
428 #endif
429 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
430 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
431 #endif
432 };
433
434 int min_free_kbytes = 1024;
435 int user_min_free_kbytes = -1;
436 int watermark_boost_factor __read_mostly = 15000;
437 int watermark_scale_factor = 10;
438
439 static unsigned long nr_kernel_pages __initdata;
440 static unsigned long nr_all_pages __initdata;
441 static unsigned long dma_reserve __initdata;
442
443 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
444 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
445 static unsigned long required_kernelcore __initdata;
446 static unsigned long required_kernelcore_percent __initdata;
447 static unsigned long required_movablecore __initdata;
448 static unsigned long required_movablecore_percent __initdata;
449 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
450 static bool mirrored_kernelcore __meminitdata;
451
452 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
453 int movable_zone;
454 EXPORT_SYMBOL(movable_zone);
455
456 #if MAX_NUMNODES > 1
457 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
458 unsigned int nr_online_nodes __read_mostly = 1;
459 EXPORT_SYMBOL(nr_node_ids);
460 EXPORT_SYMBOL(nr_online_nodes);
461 #endif
462
463 int page_group_by_mobility_disabled __read_mostly;
464
465 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
466 /*
467 * During boot we initialize deferred pages on-demand, as needed, but once
468 * page_alloc_init_late() has finished, the deferred pages are all initialized,
469 * and we can permanently disable that path.
470 */
471 static DEFINE_STATIC_KEY_TRUE(deferred_pages);
472
473 static inline bool deferred_pages_enabled(void)
474 {
475 return static_branch_unlikely(&deferred_pages);
476 }
477
478 /* Returns true if the struct page for the pfn is uninitialised */
479 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
480 {
481 int nid = early_pfn_to_nid(pfn);
482
483 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
484 return true;
485
486 return false;
487 }
488
489 /*
490 * Returns true when the remaining initialisation should be deferred until
491 * later in the boot cycle when it can be parallelised.
492 */
493 static bool __meminit
494 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
495 {
496 static unsigned long prev_end_pfn, nr_initialised;
497
498 /*
499 * prev_end_pfn is a static that holds the end of the previous zone.
500 * No locking is needed because this is called very early in boot, before smp_init.
501 */
502 if (prev_end_pfn != end_pfn) {
503 prev_end_pfn = end_pfn;
504 nr_initialised = 0;
505 }
506
507 /* Always populate low zones for address-constrained allocations */
508 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
509 return false;
510
511 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
512 return true;
513 /*
514 * We start only with one section of pages, more pages are added as
515 * needed until the rest of the deferred pages are initialized.
516 */
517 nr_initialised++;
518 if ((nr_initialised > PAGES_PER_SECTION) &&
519 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
520 NODE_DATA(nid)->first_deferred_pfn = pfn;
521 return true;
522 }
523 return false;
524 }
525 #else
526 static inline bool deferred_pages_enabled(void)
527 {
528 return false;
529 }
530
531 static inline bool early_page_uninitialised(unsigned long pfn)
532 {
533 return false;
534 }
535
536 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
537 {
538 return false;
539 }
540 #endif
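
/*
 * Worked example of the deferral cut-off in defer_init() above (assuming a
 * 128M section size, i.e. PAGES_PER_SECTION == 32768 with 4K pages - this is
 * arch dependent): low zones are always fully initialised; in the last zone,
 * once more than one section's worth of pages has been set up, the next
 * section-aligned pfn is recorded in first_deferred_pfn and everything after
 * it is left for page_alloc_init_late() to initialise in parallel.
 */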
541
542 /* Return a pointer to the bitmap storing bits affecting a block of pages */
543 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
544 unsigned long pfn)
545 {
546 #ifdef CONFIG_SPARSEMEM
547 return section_to_usemap(__pfn_to_section(pfn));
548 #else
549 return page_zone(page)->pageblock_flags;
550 #endif /* CONFIG_SPARSEMEM */
551 }
552
553 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
554 {
555 #ifdef CONFIG_SPARSEMEM
556 pfn &= (PAGES_PER_SECTION-1);
557 #else
558 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
559 #endif /* CONFIG_SPARSEMEM */
560 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
561 }
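
/*
 * Worked example (assuming SPARSEMEM, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): a pfn at offset 0x2000 within its section maps to
 * pageblock 0x2000 >> 9 = 16, i.e. bitidx = 16 * 4 = 64, which is bit 0 of
 * word 1 of the section's usemap on a 64-bit kernel.
 */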
562
563 static __always_inline
564 unsigned long __get_pfnblock_flags_mask(const struct page *page,
565 unsigned long pfn,
566 unsigned long mask)
567 {
568 unsigned long *bitmap;
569 unsigned long bitidx, word_bitidx;
570 unsigned long word;
571
572 bitmap = get_pageblock_bitmap(page, pfn);
573 bitidx = pfn_to_bitidx(page, pfn);
574 word_bitidx = bitidx / BITS_PER_LONG;
575 bitidx &= (BITS_PER_LONG-1);
576 /*
577 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
578 * a consistent read of the memory array, so that results, even though
579 * racy, are not corrupted.
580 */
581 word = READ_ONCE(bitmap[word_bitidx]);
582 return (word >> bitidx) & mask;
583 }
584
585 /**
586 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
587 * @page: The page within the block of interest
588 * @pfn: The target page frame number
589 * @mask: mask of bits that the caller is interested in
590 *
591 * Return: pageblock_bits flags
592 */
593 unsigned long get_pfnblock_flags_mask(const struct page *page,
594 unsigned long pfn, unsigned long mask)
595 {
596 return __get_pfnblock_flags_mask(page, pfn, mask);
597 }
598 EXPORT_SYMBOL_GPL(get_pfnblock_flags_mask);
599
600 int isolate_anon_lru_page(struct page *page)
601 {
602 int ret;
603
604 if (!PageLRU(page) || !PageAnon(page))
605 return -EINVAL;
606
607 if (!get_page_unless_zero(page))
608 return -EINVAL;
609
610 ret = isolate_lru_page(page);
611 put_page(page);
612
613 return ret;
614 }
615 EXPORT_SYMBOL_GPL(isolate_anon_lru_page);
616
617 static __always_inline int get_pfnblock_migratetype(const struct page *page,
618 unsigned long pfn)
619 {
620 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
621 }
622
623 /**
624 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
625 * @page: The page within the block of interest
626 * @flags: The flags to set
627 * @pfn: The target page frame number
628 * @mask: mask of bits that the caller is interested in
629 */
630 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
631 unsigned long pfn,
632 unsigned long mask)
633 {
634 unsigned long *bitmap;
635 unsigned long bitidx, word_bitidx;
636 unsigned long old_word, word;
637
638 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
639 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
640
641 bitmap = get_pageblock_bitmap(page, pfn);
642 bitidx = pfn_to_bitidx(page, pfn);
643 word_bitidx = bitidx / BITS_PER_LONG;
644 bitidx &= (BITS_PER_LONG-1);
645
646 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
647
648 mask <<= bitidx;
649 flags <<= bitidx;
650
651 word = READ_ONCE(bitmap[word_bitidx]);
652 for (;;) {
653 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
654 if (word == old_word)
655 break;
656 word = old_word;
657 }
658 }
659
660 void set_pageblock_migratetype(struct page *page, int migratetype)
661 {
662 if (unlikely(page_group_by_mobility_disabled &&
663 migratetype < MIGRATE_PCPTYPES))
664 migratetype = MIGRATE_UNMOVABLE;
665
666 set_pfnblock_flags_mask(page, (unsigned long)migratetype,
667 page_to_pfn(page), MIGRATETYPE_MASK);
668 }
669
670 #ifdef CONFIG_DEBUG_VM
671 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
672 {
673 int ret = 0;
674 unsigned seq;
675 unsigned long pfn = page_to_pfn(page);
676 unsigned long sp, start_pfn;
677
678 do {
679 seq = zone_span_seqbegin(zone);
680 start_pfn = zone->zone_start_pfn;
681 sp = zone->spanned_pages;
682 if (!zone_spans_pfn(zone, pfn))
683 ret = 1;
684 } while (zone_span_seqretry(zone, seq));
685
686 if (ret)
687 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
688 pfn, zone_to_nid(zone), zone->name,
689 start_pfn, start_pfn + sp);
690
691 return ret;
692 }
693
694 static int page_is_consistent(struct zone *zone, struct page *page)
695 {
696 if (zone != page_zone(page))
697 return 0;
698
699 return 1;
700 }
701 /*
702 * Temporary debugging check for pages not lying within a given zone.
703 */
704 static int __maybe_unused bad_range(struct zone *zone, struct page *page)
705 {
706 if (page_outside_zone_boundaries(zone, page))
707 return 1;
708 if (!page_is_consistent(zone, page))
709 return 1;
710
711 return 0;
712 }
713 #else
714 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
715 {
716 return 0;
717 }
718 #endif
719
720 static void bad_page(struct page *page, const char *reason)
721 {
722 static unsigned long resume;
723 static unsigned long nr_shown;
724 static unsigned long nr_unshown;
725
726 /*
727 * Allow a burst of 60 reports, then keep quiet for that minute;
728 * or allow a steady drip of one report per second.
729 */
730 if (nr_shown == 60) {
731 if (time_before(jiffies, resume)) {
732 nr_unshown++;
733 goto out;
734 }
735 if (nr_unshown) {
736 pr_alert(
737 "BUG: Bad page state: %lu messages suppressed\n",
738 nr_unshown);
739 nr_unshown = 0;
740 }
741 nr_shown = 0;
742 }
743 if (nr_shown++ == 0)
744 resume = jiffies + 60 * HZ;
745
746 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
747 current->comm, page_to_pfn(page));
748 dump_page(page, reason);
749
750 print_modules();
751 dump_stack();
752 out:
753 /* Leave bad fields for debug, except PageBuddy could make trouble */
754 page_mapcount_reset(page); /* remove PageBuddy */
755 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
756 }
757
758 static inline unsigned int order_to_pindex(int migratetype, int order)
759 {
760 int base = order;
761
762 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
763 if (order > PAGE_ALLOC_COSTLY_ORDER) {
764 VM_BUG_ON(order != pageblock_order);
765 base = PAGE_ALLOC_COSTLY_ORDER + 1;
766 }
767 #else
768 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
769 #endif
770
771 return (MIGRATE_PCPTYPES * base) + migratetype;
772 }
773
774 static inline int pindex_to_order(unsigned int pindex)
775 {
776 int order = pindex / MIGRATE_PCPTYPES;
777
778 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
779 if (order > PAGE_ALLOC_COSTLY_ORDER) {
780 order = pageblock_order;
781 VM_BUG_ON(order != pageblock_order);
782 }
783 #else
784 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
785 #endif
786
787 return order;
788 }
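
/*
 * Illustrative mapping (symbolic, exact values depend on the config): an
 * order-2 MIGRATE_MOVABLE page lives on pcp list index
 * 2 * MIGRATE_PCPTYPES + MIGRATE_MOVABLE, while a THP-sized page
 * (order == pageblock_order) is folded onto the single extra
 * "PAGE_ALLOC_COSTLY_ORDER + 1" slot, so pindex_to_order() maps any such
 * index back to pageblock_order.
 */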
789
790 static inline bool pcp_allowed_order(unsigned int order)
791 {
792 if (order <= PAGE_ALLOC_COSTLY_ORDER)
793 return true;
794 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
795 if (order == pageblock_order)
796 return true;
797 #endif
798 return false;
799 }
800
801 static inline void free_the_page(struct page *page, unsigned int order)
802 {
803 if (pcp_allowed_order(order)) /* Via pcp? */
804 free_unref_page(page, order);
805 else
806 __free_pages_ok(page, order, FPI_NONE);
807 }
808
809 /*
810 * Higher-order pages are called "compound pages". They are structured thusly:
811 *
812 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
813 *
814 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
815 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the head page.
816 *
817 * The first tail page's ->compound_dtor holds the offset into the array of compound
818 * page destructors. See compound_page_dtors.
819 *
820 * The first tail page's ->compound_order holds the order of allocation.
821 * This usage means that zero-order pages may not be compound.
822 */
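
/*
 * Illustrative layout for an order-2 compound page, as set up by
 * prep_compound_page() below: page[0] has PG_head set; page[1..3] have
 * page->compound_head == (unsigned long)&page[0] | 1 (bit 0 marks a tail);
 * page[1] additionally carries compound_dtor, compound_order == 2 and the
 * compound mapcount (initialised to -1).
 */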
823
824 void free_compound_page(struct page *page)
825 {
826 mem_cgroup_uncharge(page);
827 free_the_page(page, compound_order(page));
828 }
829
830 void prep_compound_page(struct page *page, unsigned int order)
831 {
832 int i;
833 int nr_pages = 1 << order;
834
835 __SetPageHead(page);
836 for (i = 1; i < nr_pages; i++) {
837 struct page *p = page + i;
838 p->mapping = TAIL_MAPPING;
839 set_compound_head(p, page);
840 }
841
842 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
843 set_compound_order(page, order);
844 atomic_set(compound_mapcount_ptr(page), -1);
845 if (hpage_pincount_available(page))
846 atomic_set(compound_pincount_ptr(page), 0);
847 }
848
849 #ifdef CONFIG_DEBUG_PAGEALLOC
850 unsigned int _debug_guardpage_minorder;
851
852 bool _debug_pagealloc_enabled_early __read_mostly
853 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
854 EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
855 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
856 EXPORT_SYMBOL(_debug_pagealloc_enabled);
857
858 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
859
860 static int __init early_debug_pagealloc(char *buf)
861 {
862 return kstrtobool(buf, &_debug_pagealloc_enabled_early);
863 }
864 early_param("debug_pagealloc", early_debug_pagealloc);
865
866 static int __init debug_guardpage_minorder_setup(char *buf)
867 {
868 unsigned long res;
869
870 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
871 pr_err("Bad debug_guardpage_minorder value\n");
872 return 0;
873 }
874 _debug_guardpage_minorder = res;
875 pr_info("Setting debug_guardpage_minorder to %lu\n", res);
876 return 0;
877 }
878 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
879
880 static inline bool set_page_guard(struct zone *zone, struct page *page,
881 unsigned int order, int migratetype)
882 {
883 if (!debug_guardpage_enabled())
884 return false;
885
886 if (order >= debug_guardpage_minorder())
887 return false;
888
889 __SetPageGuard(page);
890 INIT_LIST_HEAD(&page->lru);
891 set_page_private(page, order);
892 /* Guard pages are not available for any usage */
893 __mod_zone_freepage_state(zone, -(1 << order), migratetype);
894
895 return true;
896 }
897
898 static inline void clear_page_guard(struct zone *zone, struct page *page,
899 unsigned int order, int migratetype)
900 {
901 if (!debug_guardpage_enabled())
902 return;
903
904 __ClearPageGuard(page);
905
906 set_page_private(page, 0);
907 if (!is_migrate_isolate(migratetype))
908 __mod_zone_freepage_state(zone, (1 << order), migratetype);
909 }
910 #else
911 static inline bool set_page_guard(struct zone *zone, struct page *page,
912 unsigned int order, int migratetype) { return false; }
913 static inline void clear_page_guard(struct zone *zone, struct page *page,
914 unsigned int order, int migratetype) {}
915 #endif
916
917 /*
918 * Enable static keys related to various memory debugging and hardening options.
919 * Some override others, and depend on early params that are evaluated in the
920 * order of appearance. So we need to first gather the full picture of what was
921 * enabled, and then make decisions.
922 */
923 void init_mem_debugging_and_hardening(void)
924 {
925 bool page_poisoning_requested = false;
926
927 #ifdef CONFIG_PAGE_POISONING
928 /*
929 * Page poisoning is debug page alloc for some arches. If
930 * either of those options are enabled, enable poisoning.
931 */
932 if (page_poisoning_enabled() ||
933 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
934 debug_pagealloc_enabled())) {
935 static_branch_enable(&_page_poisoning_enabled);
936 page_poisoning_requested = true;
937 }
938 #endif
939
940 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
941 page_poisoning_requested) {
942 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
943 "will take precedence over init_on_alloc and init_on_free\n");
944 _init_on_alloc_enabled_early = false;
945 _init_on_free_enabled_early = false;
946 }
947
948 if (_init_on_alloc_enabled_early)
949 static_branch_enable(&init_on_alloc);
950 else
951 static_branch_disable(&init_on_alloc);
952
953 if (_init_on_free_enabled_early)
954 static_branch_enable(&init_on_free);
955 else
956 static_branch_disable(&init_on_free);
957
958 #ifdef CONFIG_DEBUG_PAGEALLOC
959 if (!debug_pagealloc_enabled())
960 return;
961
962 static_branch_enable(&_debug_pagealloc_enabled);
963
964 if (!debug_guardpage_minorder())
965 return;
966
967 static_branch_enable(&_debug_guardpage_enabled);
968 #endif
969 }
970
971 static inline void set_buddy_order(struct page *page, unsigned int order)
972 {
973 set_page_private(page, order);
974 __SetPageBuddy(page);
975 }
976
977 /*
978 * This function checks whether a page is free and is the buddy of the given page.
979 * We can coalesce a page and its buddy if
980 * (a) the buddy is not in a hole (check before calling!) &&
981 * (b) the buddy is in the buddy system &&
982 * (c) a page and its buddy have the same order &&
983 * (d) a page and its buddy are in the same zone.
984 *
985 * For recording whether a page is in the buddy system, we set PageBuddy.
986 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
987 *
988 * For recording page's order, we use page_private(page).
989 */
990 static inline bool page_is_buddy(struct page *page, struct page *buddy,
991 unsigned int order)
992 {
993 if (!page_is_guard(buddy) && !PageBuddy(buddy))
994 return false;
995
996 if (buddy_order(buddy) != order)
997 return false;
998
999 /*
1000 * zone check is done late to avoid uselessly calculating
1001 * zone/node ids for pages that could never merge.
1002 */
1003 if (page_zone_id(page) != page_zone_id(buddy))
1004 return false;
1005
1006 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
1007
1008 return true;
1009 }
1010
1011 #ifdef CONFIG_COMPACTION
1012 static inline struct capture_control *task_capc(struct zone *zone)
1013 {
1014 struct capture_control *capc = current->capture_control;
1015
1016 return unlikely(capc) &&
1017 !(current->flags & PF_KTHREAD) &&
1018 !capc->page &&
1019 capc->cc->zone == zone ? capc : NULL;
1020 }
1021
1022 static inline bool
1023 compaction_capture(struct capture_control *capc, struct page *page,
1024 int order, int migratetype)
1025 {
1026 if (!capc || order != capc->cc->order)
1027 return false;
1028
1029 /* Do not accidentally pollute CMA or isolated regions */
1030 if (is_migrate_cma(migratetype) ||
1031 is_migrate_isolate(migratetype))
1032 return false;
1033
1034 /*
1035 * Do not let lower order allocations pollute a movable pageblock.
1036 * This might let an unmovable request use a reclaimable pageblock
1037 * and vice-versa but no more than normal fallback logic which can
1038 * have trouble finding a high-order free page.
1039 */
1040 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
1041 return false;
1042
1043 capc->page = page;
1044 return true;
1045 }
1046
1047 #else
1048 static inline struct capture_control *task_capc(struct zone *zone)
1049 {
1050 return NULL;
1051 }
1052
1053 static inline bool
1054 compaction_capture(struct capture_control *capc, struct page *page,
1055 int order, int migratetype)
1056 {
1057 return false;
1058 }
1059 #endif /* CONFIG_COMPACTION */
1060
1061 /* Used for pages not on another list */
1062 static inline void add_to_free_list(struct page *page, struct zone *zone,
1063 unsigned int order, int migratetype)
1064 {
1065 struct free_area *area = &zone->free_area[order];
1066
1067 list_add(&page->lru, &area->free_list[migratetype]);
1068 area->nr_free++;
1069 }
1070
1071 /* Used for pages not on another list */
1072 static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
1073 unsigned int order, int migratetype)
1074 {
1075 struct free_area *area = &zone->free_area[order];
1076
1077 list_add_tail(&page->lru, &area->free_list[migratetype]);
1078 area->nr_free++;
1079 }
1080
1081 /*
1082 * Used for pages which are on another list. Move the pages to the tail
1083 * of the list - so the moved pages won't immediately be considered for
1084 * allocation again (e.g., optimization for memory onlining).
1085 */
1086 static inline void move_to_free_list(struct page *page, struct zone *zone,
1087 unsigned int order, int migratetype)
1088 {
1089 struct free_area *area = &zone->free_area[order];
1090
1091 list_move_tail(&page->lru, &area->free_list[migratetype]);
1092 }
1093
1094 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
1095 unsigned int order)
1096 {
1097 /* clear reported state and update reported page count */
1098 if (page_reported(page))
1099 __ClearPageReported(page);
1100
1101 list_del(&page->lru);
1102 __ClearPageBuddy(page);
1103 set_page_private(page, 0);
1104 zone->free_area[order].nr_free--;
1105 }
1106
1107 /*
1108 * If this is not the largest possible page, check if the buddy
1109 * of the next-highest order is free. If it is, it's possible
1110 * that pages are being freed that will coalesce soon. In case
1111 * that is happening, add the free page to the tail of the list
1112 * so it's less likely to be used soon and more likely to be merged
1113 * as a higher order page.
1114 */
1115 static inline bool
1116 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
1117 struct page *page, unsigned int order)
1118 {
1119 struct page *higher_page, *higher_buddy;
1120 unsigned long combined_pfn;
1121
1122 if (order >= MAX_ORDER - 2)
1123 return false;
1124
1125 combined_pfn = buddy_pfn & pfn;
1126 higher_page = page + (combined_pfn - pfn);
1127 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
1128 higher_buddy = higher_page + (buddy_pfn - combined_pfn);
1129
1130 return page_is_buddy(higher_page, higher_buddy, order + 1);
1131 }
1132
1133 /*
1134 * Freeing function for a buddy system allocator.
1135 *
1136 * The concept of a buddy system is to maintain a direct-mapped table
1137 * (containing bit values) for memory blocks of various "orders".
1138 * The bottom level table contains the map for the smallest allocatable
1139 * units of memory (here, pages), and each level above it describes
1140 * pairs of units from the levels below, hence, "buddies".
1141 * At a high level, all that happens here is marking the table entry
1142 * at the bottom level available, and propagating the changes upward
1143 * as necessary, plus some accounting needed to play nicely with other
1144 * parts of the VM system.
1145 * At each level, we keep a list of pages, which are heads of contiguous
1146 * runs of (1 << order) free pages and are marked with PageBuddy.
1147 * Page's order is recorded in page_private(page) field.
1148 * So when we are allocating or freeing one, we can derive the state of the
1149 * other. That is, if we allocate a small block, and both were
1150 * free, the remainder of the region must be split into blocks.
1151 * If a block is freed, and its buddy is also free, then this
1152 * triggers coalescing into a block of larger size.
1153 *
1154 * -- nyc
1155 */
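
/*
 * Worked example of one merge step in __free_one_page() below
 * (__find_buddy_pfn() is pfn ^ (1 << order), see mm/internal.h): freeing pfn
 * 0x1008 at order 3 gives buddy_pfn 0x1000; if that buddy is free at order 3,
 * the pair merges into combined_pfn = buddy_pfn & pfn = 0x1000 at order 4,
 * and the loop retries one level up.
 */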
1156
1157 static inline void __free_one_page(struct page *page,
1158 unsigned long pfn,
1159 struct zone *zone, unsigned int order,
1160 int migratetype, fpi_t fpi_flags)
1161 {
1162 struct capture_control *capc = task_capc(zone);
1163 unsigned long buddy_pfn;
1164 unsigned long combined_pfn;
1165 unsigned int max_order;
1166 struct page *buddy;
1167 bool to_tail;
1168 bool bypass = false;
1169
1170 trace_android_vh_free_one_page_bypass(page, zone, order,
1171 migratetype, (int)fpi_flags, &bypass);
1172
1173 if (bypass)
1174 return;
1175
1176 max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1177
1178 VM_BUG_ON(!zone_is_initialized(zone));
1179 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1180
1181 VM_BUG_ON(migratetype == -1);
1182 if (likely(!is_migrate_isolate(migratetype)))
1183 __mod_zone_freepage_state(zone, 1 << order, migratetype);
1184
1185 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1186 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1187
1188 continue_merging:
1189 while (order < max_order) {
1190 if (compaction_capture(capc, page, order, migratetype)) {
1191 __mod_zone_freepage_state(zone, -(1 << order),
1192 migratetype);
1193 return;
1194 }
1195 buddy_pfn = __find_buddy_pfn(pfn, order);
1196 buddy = page + (buddy_pfn - pfn);
1197
1198 if (!page_is_buddy(page, buddy, order))
1199 goto done_merging;
1200 /*
1201 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1202 * merge with it and move up one order.
1203 */
1204 if (page_is_guard(buddy))
1205 clear_page_guard(zone, buddy, order, migratetype);
1206 else
1207 del_page_from_free_list(buddy, zone, order);
1208 combined_pfn = buddy_pfn & pfn;
1209 page = page + (combined_pfn - pfn);
1210 pfn = combined_pfn;
1211 order++;
1212 }
1213 if (order < MAX_ORDER - 1) {
1214 /* If we are here, it means order is >= pageblock_order.
1215 * We want to prevent merge between freepages on isolate
1216 * pageblock and normal pageblock. Without this, pageblock
1217 * isolation could cause incorrect freepage or CMA accounting.
1218 *
1219 * We don't want to hit this code for the more frequent
1220 * low-order merging.
1221 */
1222 if (unlikely(has_isolate_pageblock(zone))) {
1223 int buddy_mt;
1224
1225 buddy_pfn = __find_buddy_pfn(pfn, order);
1226 buddy = page + (buddy_pfn - pfn);
1227 buddy_mt = get_pageblock_migratetype(buddy);
1228
1229 if (migratetype != buddy_mt
1230 && (is_migrate_isolate(migratetype) ||
1231 is_migrate_isolate(buddy_mt)))
1232 goto done_merging;
1233 }
1234 max_order = order + 1;
1235 goto continue_merging;
1236 }
1237
1238 done_merging:
1239 set_buddy_order(page, order);
1240
1241 if (fpi_flags & FPI_TO_TAIL)
1242 to_tail = true;
1243 else if (is_shuffle_order(order))
1244 to_tail = shuffle_pick_tail();
1245 else
1246 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1247
1248 if (to_tail)
1249 add_to_free_list_tail(page, zone, order, migratetype);
1250 else
1251 add_to_free_list(page, zone, order, migratetype);
1252
1253 /* Notify page reporting subsystem of freed page */
1254 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1255 page_reporting_notify_free(order);
1256 }
1257
1258 /*
1259 * A bad page could be due to a number of fields. Instead of multiple branches,
1260 * try and check multiple fields with one check. The caller must do a detailed
1261 * check if necessary.
1262 */
1263 static inline bool page_expected_state(struct page *page,
1264 unsigned long check_flags)
1265 {
1266 if (unlikely(atomic_read(&page->_mapcount) != -1))
1267 return false;
1268
1269 if (unlikely((unsigned long)page->mapping |
1270 page_ref_count(page) |
1271 #ifdef CONFIG_MEMCG
1272 page->memcg_data |
1273 #endif
1274 (page->flags & check_flags)))
1275 return false;
1276
1277 return true;
1278 }
1279
1280 static const char *page_bad_reason(struct page *page, unsigned long flags)
1281 {
1282 const char *bad_reason = NULL;
1283
1284 if (unlikely(atomic_read(&page->_mapcount) != -1))
1285 bad_reason = "nonzero mapcount";
1286 if (unlikely(page->mapping != NULL))
1287 bad_reason = "non-NULL mapping";
1288 if (unlikely(page_ref_count(page) != 0))
1289 bad_reason = "nonzero _refcount";
1290 if (unlikely(page->flags & flags)) {
1291 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1292 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1293 else
1294 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1295 }
1296 #ifdef CONFIG_MEMCG
1297 if (unlikely(page->memcg_data))
1298 bad_reason = "page still charged to cgroup";
1299 #endif
1300 return bad_reason;
1301 }
1302
1303 static void check_free_page_bad(struct page *page)
1304 {
1305 bad_page(page,
1306 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1307 }
1308
1309 static inline int check_free_page(struct page *page)
1310 {
1311 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1312 return 0;
1313
1314 /* Something has gone sideways, find it */
1315 check_free_page_bad(page);
1316 return 1;
1317 }
1318
1319 static int free_tail_pages_check(struct page *head_page, struct page *page)
1320 {
1321 int ret = 1;
1322
1323 /*
1324 * We rely on page->lru.next never having bit 0 set, unless the page
1325 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1326 */
1327 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1328
1329 if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1330 ret = 0;
1331 goto out;
1332 }
1333 switch (page - head_page) {
1334 case 1:
1335 /* the first tail page: ->mapping may be compound_mapcount() */
1336 if (unlikely(compound_mapcount(page))) {
1337 bad_page(page, "nonzero compound_mapcount");
1338 goto out;
1339 }
1340 break;
1341 case 2:
1342 /*
1343 * the second tail page: ->mapping is
1344 * deferred_list.next -- ignore value.
1345 */
1346 break;
1347 default:
1348 if (page->mapping != TAIL_MAPPING) {
1349 bad_page(page, "corrupted mapping in tail page");
1350 goto out;
1351 }
1352 break;
1353 }
1354 if (unlikely(!PageTail(page))) {
1355 bad_page(page, "PageTail not set");
1356 goto out;
1357 }
1358 if (unlikely(compound_head(page) != head_page)) {
1359 bad_page(page, "compound_head not consistent");
1360 goto out;
1361 }
1362 ret = 0;
1363 out:
1364 page->mapping = NULL;
1365 clear_compound_head(page);
1366 return ret;
1367 }
1368
1369 /*
1370 * Skip KASAN memory poisoning when either:
1371 *
1372 * 1. Deferred memory initialization has not yet completed,
1373 * see the explanation below.
1374 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
1375 * see the comment next to it.
1376 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
1377 * see the comment next to it.
1378 *
1379 * Poisoning pages during deferred memory init will greatly lengthen the
1380 * process and cause problems on large memory systems, as the deferred page
1381 * initialization is done with interrupts disabled.
1382 *
1383 * Assuming that there will be no reference to those newly initialized
1384 * pages before they are ever allocated, this should have no effect on
1385 * KASAN memory tracking as the poison will be properly inserted at page
1386 * allocation time. The only corner case is when pages are allocated by
1387 * on-demand allocation and then freed again before the deferred pages
1388 * initialization is done, but this is not likely to happen.
1389 */
1390 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
1391 {
1392 return deferred_pages_enabled() ||
1393 (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
1394 (fpi_flags & FPI_SKIP_KASAN_POISON)) ||
1395 PageSkipKASanPoison(page);
1396 }
1397
1398 static void kernel_init_free_pages(struct page *page, int numpages)
1399 {
1400 int i;
1401
1402 /* s390's use of memset() could override KASAN redzones. */
1403 kasan_disable_current();
1404 for (i = 0; i < numpages; i++) {
1405 u8 tag = page_kasan_tag(page + i);
1406 page_kasan_tag_reset(page + i);
1407 clear_highpage(page + i);
1408 page_kasan_tag_set(page + i, tag);
1409 }
1410 kasan_enable_current();
1411 }
1412
1413 static __always_inline bool free_pages_prepare(struct page *page,
1414 unsigned int order, bool check_free, fpi_t fpi_flags)
1415 {
1416 int bad = 0;
1417 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
1418 bool init = want_init_on_free();
1419
1420 VM_BUG_ON_PAGE(PageTail(page), page);
1421
1422 trace_mm_page_free(page, order);
1423
1424 if (unlikely(PageHWPoison(page)) && !order) {
1425 /*
1426 * Do not let hwpoison pages hit pcplists/buddy
1427 * Untie memcg state and reset page's owner
1428 */
1429 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1430 __memcg_kmem_uncharge_page(page, order);
1431 reset_page_owner(page, order);
1432 free_page_pinner(page, order);
1433 return false;
1434 }
1435
1436 /*
1437 * Check tail pages before head page information is cleared to
1438 * avoid checking PageCompound for order-0 pages.
1439 */
1440 if (unlikely(order)) {
1441 bool compound = PageCompound(page);
1442 int i;
1443
1444 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1445
1446 if (compound) {
1447 ClearPageDoubleMap(page);
1448 ClearPageHasHWPoisoned(page);
1449 }
1450 for (i = 1; i < (1 << order); i++) {
1451 if (compound)
1452 bad += free_tail_pages_check(page, page + i);
1453 if (unlikely(check_free_page(page + i))) {
1454 bad++;
1455 continue;
1456 }
1457 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1458 }
1459 }
1460 if (PageMappingFlags(page))
1461 page->mapping = NULL;
1462 if (memcg_kmem_enabled() && PageMemcgKmem(page))
1463 __memcg_kmem_uncharge_page(page, order);
1464 if (check_free)
1465 bad += check_free_page(page);
1466 if (bad)
1467 return false;
1468
1469 page_cpupid_reset_last(page);
1470 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1471 reset_page_owner(page, order);
1472 free_page_pinner(page, order);
1473
1474 if (!PageHighMem(page)) {
1475 debug_check_no_locks_freed(page_address(page),
1476 PAGE_SIZE << order);
1477 debug_check_no_obj_freed(page_address(page),
1478 PAGE_SIZE << order);
1479 }
1480
1481 kernel_poison_pages(page, 1 << order);
1482
1483 /*
1484 * As memory initialization might be integrated into KASAN,
1485 * KASAN poisoning and memory initialization code must be
1486 * kept together to avoid discrepancies in behavior.
1487 *
1488 * With hardware tag-based KASAN, memory tags must be set before the
1489 * page becomes unavailable via debug_pagealloc or arch_free_page.
1490 */
1491 if (!skip_kasan_poison) {
1492 kasan_poison_pages(page, order, init);
1493
1494 /* Memory is already initialized if KASAN did it internally. */
1495 if (kasan_has_integrated_init())
1496 init = false;
1497 }
1498 if (init)
1499 kernel_init_free_pages(page, 1 << order);
1500
1501 /*
1502 * arch_free_page() can make the page's contents inaccessible. s390
1503 * does this. So nothing which can access the page's contents should
1504 * happen after this.
1505 */
1506 arch_free_page(page, order);
1507
1508 debug_pagealloc_unmap_pages(page, 1 << order);
1509
1510 return true;
1511 }
1512
1513 #ifdef CONFIG_DEBUG_VM
1514 /*
1515 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1516 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1517 * moved from pcp lists to free lists.
1518 */
1519 static bool free_pcp_prepare(struct page *page, unsigned int order)
1520 {
1521 return free_pages_prepare(page, order, true, FPI_NONE);
1522 }
1523
1524 static bool bulkfree_pcp_prepare(struct page *page)
1525 {
1526 if (debug_pagealloc_enabled_static())
1527 return check_free_page(page);
1528 else
1529 return false;
1530 }
1531 #else
1532 /*
1533 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1534 * moving from pcp lists to free list in order to reduce overhead. With
1535 * debug_pagealloc enabled, they are checked also immediately when being freed
1536 * to the pcp lists.
1537 */
1538 static bool free_pcp_prepare(struct page *page, unsigned int order)
1539 {
1540 if (debug_pagealloc_enabled_static())
1541 return free_pages_prepare(page, order, true, FPI_NONE);
1542 else
1543 return free_pages_prepare(page, order, false, FPI_NONE);
1544 }
1545
1546 static bool bulkfree_pcp_prepare(struct page *page)
1547 {
1548 return check_free_page(page);
1549 }
1550 #endif /* CONFIG_DEBUG_VM */
1551
1552 static inline void prefetch_buddy(struct page *page)
1553 {
1554 unsigned long pfn = page_to_pfn(page);
1555 unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1556 struct page *buddy = page + (buddy_pfn - pfn);
1557
1558 prefetch(buddy);
1559 }
1560
1561 /*
1562 * Frees a number of pages from the PCP lists
1563 * Assumes all pages on list are in same zone, and of same order.
1564 * count is the number of pages to free.
1565 *
1566 * If the zone was previously in an "all pages pinned" state then look to
1567 * see if this freeing clears that state.
1568 *
1569 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1570 * pinned" detection logic.
1571 */
1572 static void free_pcppages_bulk(struct zone *zone, int count,
1573 struct per_cpu_pages *pcp)
1574 {
1575 int pindex = 0;
1576 int batch_free = 0;
1577 int nr_freed = 0;
1578 unsigned int order;
1579 int prefetch_nr = READ_ONCE(pcp->batch);
1580 bool isolated_pageblocks;
1581 struct page *page, *tmp;
1582 LIST_HEAD(head);
1583
1584 /*
1585 * Ensure a proper count is passed; otherwise we would get stuck in the
1586 * while (list_empty(list)) loop below.
1587 */
1588 count = min(pcp->count, count);
1589 while (count > 0) {
1590 struct list_head *list;
1591
1592 /*
1593 * Remove pages from lists in a round-robin fashion. A
1594 * batch_free count is maintained that is incremented when an
1595 * empty list is encountered. This is so more pages are freed
1596 * off fuller lists instead of spinning excessively around empty
1597 * lists
1598 */
1599 do {
1600 batch_free++;
1601 if (++pindex == NR_PCP_LISTS)
1602 pindex = 0;
1603 list = &pcp->lists[pindex];
1604 } while (list_empty(list));
1605
1606 /* This is the only non-empty list. Free them all. */
1607 if (batch_free == NR_PCP_LISTS)
1608 batch_free = count;
1609
1610 order = pindex_to_order(pindex);
1611 BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
1612 do {
1613 page = list_last_entry(list, struct page, lru);
1614 /* must delete to avoid corrupting pcp list */
1615 list_del(&page->lru);
1616 nr_freed += 1 << order;
1617 count -= 1 << order;
1618
1619 if (bulkfree_pcp_prepare(page))
1620 continue;
1621
1622 /* Encode order with the migratetype */
1623 page->index <<= NR_PCP_ORDER_WIDTH;
1624 page->index |= order;
1625
1626 list_add_tail(&page->lru, &head);
1627
1628 /*
1629 * We are going to put the page back to the global
1630 * pool, prefetch its buddy to speed up later access
1631 * under zone->lock. It is believed the overhead of
1632 * an additional test and calculating buddy_pfn here
1633 * can be offset by reduced memory latency later. To
1634 * avoid excessive prefetching due to large count, only
1635 * prefetch buddy for the first pcp->batch nr of pages.
1636 */
1637 if (prefetch_nr) {
1638 prefetch_buddy(page);
1639 prefetch_nr--;
1640 }
1641 } while (count > 0 && --batch_free && !list_empty(list));
1642 }
1643 pcp->count -= nr_freed;
1644
1645 /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
1646 spin_lock(&zone->lock);
1647 isolated_pageblocks = has_isolate_pageblock(zone);
1648
1649 /*
1650 * Use safe version since after __free_one_page(),
1651 * page->lru.next will not point to original list.
1652 */
1653 list_for_each_entry_safe(page, tmp, &head, lru) {
1654 int mt = get_pcppage_migratetype(page);
1655
1656 /* mt has been encoded with the order (see above) */
1657 order = mt & NR_PCP_ORDER_MASK;
1658 mt >>= NR_PCP_ORDER_WIDTH;
1659
1660 /* MIGRATE_ISOLATE page should not go to pcplists */
1661 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1662 /* Pageblock could have been isolated meanwhile */
1663 if (unlikely(isolated_pageblocks))
1664 mt = get_pageblock_migratetype(page);
1665
1666 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
1667 trace_mm_page_pcpu_drain(page, order, mt);
1668 }
1669 spin_unlock(&zone->lock);
1670 }
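
/*
 * Sketch of the order/migratetype encoding used above (purely illustrative):
 * when a page is queued for the bulk free, its cached migratetype mt and its
 * order are packed as (mt << NR_PCP_ORDER_WIDTH) | order in page->index, and
 * unpacked on the other side with order = val & NR_PCP_ORDER_MASK and
 * mt = val >> NR_PCP_ORDER_WIDTH.
 */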
1671
1672 static void free_one_page(struct zone *zone,
1673 struct page *page, unsigned long pfn,
1674 unsigned int order,
1675 int migratetype, fpi_t fpi_flags)
1676 {
1677 unsigned long flags;
1678
1679 spin_lock_irqsave(&zone->lock, flags);
1680 if (unlikely(has_isolate_pageblock(zone) ||
1681 is_migrate_isolate(migratetype))) {
1682 migratetype = get_pfnblock_migratetype(page, pfn);
1683 }
1684 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1685 spin_unlock_irqrestore(&zone->lock, flags);
1686 }
1687
1688 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1689 unsigned long zone, int nid)
1690 {
1691 mm_zero_struct_page(page);
1692 set_page_links(page, zone, nid, pfn);
1693 init_page_count(page);
1694 page_mapcount_reset(page);
1695 page_cpupid_reset_last(page);
1696 page_kasan_tag_reset(page);
1697
1698 INIT_LIST_HEAD(&page->lru);
1699 #ifdef WANT_PAGE_VIRTUAL
1700 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1701 if (!is_highmem_idx(zone))
1702 set_page_address(page, __va(pfn << PAGE_SHIFT));
1703 #endif
1704 }
1705
1706 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1707 static void __meminit init_reserved_page(unsigned long pfn)
1708 {
1709 pg_data_t *pgdat;
1710 int nid, zid;
1711
1712 if (!early_page_uninitialised(pfn))
1713 return;
1714
1715 nid = early_pfn_to_nid(pfn);
1716 pgdat = NODE_DATA(nid);
1717
1718 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1719 struct zone *zone = &pgdat->node_zones[zid];
1720
1721 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1722 break;
1723 }
1724 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1725 }
1726 #else
1727 static inline void init_reserved_page(unsigned long pfn)
1728 {
1729 }
1730 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1731
1732 /*
1733 * Initialised pages do not have PageReserved set. This function is
1734 * called for each range allocated by the bootmem allocator and
1735 * marks the pages PageReserved. The remaining valid pages are later
1736 * sent to the buddy page allocator.
1737 */
1738 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1739 {
1740 unsigned long start_pfn = PFN_DOWN(start);
1741 unsigned long end_pfn = PFN_UP(end);
1742
1743 for (; start_pfn < end_pfn; start_pfn++) {
1744 if (pfn_valid(start_pfn)) {
1745 struct page *page = pfn_to_page(start_pfn);
1746
1747 init_reserved_page(start_pfn);
1748
1749 /* Avoid false-positive PageTail() */
1750 INIT_LIST_HEAD(&page->lru);
1751
1752 /*
1753 * no need for atomic set_bit because the struct
1754 * page is not visible yet so nobody should
1755 * access it yet.
1756 */
1757 __SetPageReserved(page);
1758 }
1759 }
1760 }
1761
1762 static void __free_pages_ok(struct page *page, unsigned int order,
1763 fpi_t fpi_flags)
1764 {
1765 unsigned long flags;
1766 int migratetype;
1767 unsigned long pfn = page_to_pfn(page);
1768 struct zone *zone = page_zone(page);
1769 bool skip_free_unref_page = false;
1770
1771 if (!free_pages_prepare(page, order, true, fpi_flags))
1772 return;
1773
1774 migratetype = get_pfnblock_migratetype(page, pfn);
1775 trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
1776 if (skip_free_unref_page)
1777 return;
1778
1779 spin_lock_irqsave(&zone->lock, flags);
1780 if (unlikely(has_isolate_pageblock(zone) ||
1781 is_migrate_isolate(migratetype))) {
1782 migratetype = get_pfnblock_migratetype(page, pfn);
1783 }
1784 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1785 spin_unlock_irqrestore(&zone->lock, flags);
1786
1787 __count_vm_events(PGFREE, 1 << order);
1788 }
1789
1790 void __free_pages_core(struct page *page, unsigned int order)
1791 {
1792 unsigned int nr_pages = 1 << order;
1793 struct page *p = page;
1794 unsigned int loop;
1795
1796 /*
1797 * When initializing the memmap, __init_single_page() sets the refcount
1798 * of all pages to 1 ("allocated"/"not free"). We have to set the
1799 * refcount of all involved pages to 0.
1800 */
1801 prefetchw(p);
1802 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1803 prefetchw(p + 1);
1804 __ClearPageReserved(p);
1805 set_page_count(p, 0);
1806 }
1807 __ClearPageReserved(p);
1808 set_page_count(p, 0);
1809
1810 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1811
1812 /*
1813 * Bypass PCP and place fresh pages right to the tail, primarily
1814 * relevant for memory onlining.
1815 */
1816 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1817 }
1818
1819 #ifdef CONFIG_NUMA
1820
1821 /*
1822 * During memory init memblocks map pfns to nids. The search is expensive and
1823 * this caches recent lookups. The implementation of __early_pfn_to_nid
1824 * treats start/end as pfns.
1825 */
1826 struct mminit_pfnnid_cache {
1827 unsigned long last_start;
1828 unsigned long last_end;
1829 int last_nid;
1830 };
1831
1832 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1833
1834 /*
1835 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1836 */
1837 static int __meminit __early_pfn_to_nid(unsigned long pfn,
1838 struct mminit_pfnnid_cache *state)
1839 {
1840 unsigned long start_pfn, end_pfn;
1841 int nid;
1842
1843 if (state->last_start <= pfn && pfn < state->last_end)
1844 return state->last_nid;
1845
1846 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1847 if (nid != NUMA_NO_NODE) {
1848 state->last_start = start_pfn;
1849 state->last_end = end_pfn;
1850 state->last_nid = nid;
1851 }
1852
1853 return nid;
1854 }
1855
1856 int __meminit early_pfn_to_nid(unsigned long pfn)
1857 {
1858 static DEFINE_SPINLOCK(early_pfn_lock);
1859 int nid;
1860
1861 spin_lock(&early_pfn_lock);
1862 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1863 if (nid < 0)
1864 nid = first_online_node;
1865 spin_unlock(&early_pfn_lock);
1866
1867 return nid;
1868 }
1869 #endif /* CONFIG_NUMA */
1870
1871 void __init memblock_free_pages(struct page *page, unsigned long pfn,
1872 unsigned int order)
1873 {
1874 if (early_page_uninitialised(pfn))
1875 return;
1876 __free_pages_core(page, order);
1877 }
1878
1879 /*
1880 * Check that the whole (or subset of) a pageblock given by the interval of
1881 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1882 * with compaction's migration or free scanner.
1883 *
1884 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1885 *
1886 * It's possible on some configurations to have a setup like node0 node1 node0
1887 * i.e. it's possible that all pages within a zone's range of pages do not
1888 * belong to a single zone. We assume that a border between node0 and node1
1889 * can occur within a single pageblock, but not a node0 node1 node0
1890 * interleaving within a single pageblock. It is therefore sufficient to check
1891 * the first and last page of a pageblock and avoid checking each individual
1892 * page in a pageblock.
1893 */
1894 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1895 unsigned long end_pfn, struct zone *zone)
1896 {
1897 struct page *start_page;
1898 struct page *end_page;
1899
1900 /* end_pfn is one past the range we are checking */
1901 end_pfn--;
1902
1903 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1904 return NULL;
1905
1906 start_page = pfn_to_online_page(start_pfn);
1907 if (!start_page)
1908 return NULL;
1909
1910 if (page_zone(start_page) != zone)
1911 return NULL;
1912
1913 end_page = pfn_to_page(end_pfn);
1914
1915 /* This gives a shorter code than deriving page_zone(end_page) */
1916 if (page_zone_id(start_page) != page_zone_id(end_page))
1917 return NULL;
1918
1919 return start_page;
1920 }
1921
1922 void set_zone_contiguous(struct zone *zone)
1923 {
1924 unsigned long block_start_pfn = zone->zone_start_pfn;
1925 unsigned long block_end_pfn;
1926
1927 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1928 for (; block_start_pfn < zone_end_pfn(zone);
1929 block_start_pfn = block_end_pfn,
1930 block_end_pfn += pageblock_nr_pages) {
1931
1932 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1933
1934 if (!__pageblock_pfn_to_page(block_start_pfn,
1935 block_end_pfn, zone))
1936 return;
1937 cond_resched();
1938 }
1939
1940 /* We confirm that there is no hole */
1941 zone->contiguous = true;
1942 }
1943
1944 void clear_zone_contiguous(struct zone *zone)
1945 {
1946 zone->contiguous = false;
1947 }
1948
1949 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1950 static void __init deferred_free_range(unsigned long pfn,
1951 unsigned long nr_pages)
1952 {
1953 struct page *page;
1954 unsigned long i;
1955
1956 if (!nr_pages)
1957 return;
1958
1959 page = pfn_to_page(pfn);
1960
1961 /* Free a large naturally-aligned chunk if possible */
1962 if (nr_pages == pageblock_nr_pages &&
1963 (pfn & (pageblock_nr_pages - 1)) == 0) {
1964 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1965 __free_pages_core(page, pageblock_order);
1966 return;
1967 }
1968
1969 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1970 if ((pfn & (pageblock_nr_pages - 1)) == 0)
1971 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1972 __free_pages_core(page, 0);
1973 }
1974 }
1975
1976 /* Completion tracking for deferred_init_memmap() threads */
1977 static atomic_t pgdat_init_n_undone __initdata;
1978 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1979
1980 static inline void __init pgdat_init_report_one_done(void)
1981 {
1982 if (atomic_dec_and_test(&pgdat_init_n_undone))
1983 complete(&pgdat_init_all_done_comp);
1984 }
1985
1986 /*
1987 * Returns true if page needs to be initialized or freed to buddy allocator.
1988 *
1989 * First we check if pfn is valid on architectures where it is possible to have
1990 * holes within pageblock_nr_pages. On systems where it is not possible, this
1991 * function is optimized out.
1992 *
1993 * Then, we check if the current large page is valid by checking only the
1994 * validity of its head pfn.
1995 */
1996 static inline bool __init deferred_pfn_valid(unsigned long pfn)
1997 {
1998 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1999 return false;
2000 return true;
2001 }
2002
2003 /*
2004 * Free pages to buddy allocator. Try to free aligned pages in
2005 * pageblock_nr_pages sizes.
2006 */
2007 static void __init deferred_free_pages(unsigned long pfn,
2008 unsigned long end_pfn)
2009 {
2010 unsigned long nr_pgmask = pageblock_nr_pages - 1;
2011 unsigned long nr_free = 0;
2012
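	/*
	 * nr_free tracks the current run of contiguous, initialised pfns.
	 * The run is flushed to deferred_free_range() whenever a hole is
	 * hit, and restarted on each pageblock boundary so that naturally
	 * aligned pageblocks can be freed as a single high-order chunk.
	 */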
2013 for (; pfn < end_pfn; pfn++) {
2014 if (!deferred_pfn_valid(pfn)) {
2015 deferred_free_range(pfn - nr_free, nr_free);
2016 nr_free = 0;
2017 } else if (!(pfn & nr_pgmask)) {
2018 deferred_free_range(pfn - nr_free, nr_free);
2019 nr_free = 1;
2020 } else {
2021 nr_free++;
2022 }
2023 }
2024 /* Free the last block of pages to allocator */
2025 deferred_free_range(pfn - nr_free, nr_free);
2026 }
2027
2028 /*
2029 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
2030 * by performing it only once every pageblock_nr_pages.
2031 * Return number of pages initialized.
2032 */
2033 static unsigned long __init deferred_init_pages(struct zone *zone,
2034 unsigned long pfn,
2035 unsigned long end_pfn)
2036 {
2037 unsigned long nr_pgmask = pageblock_nr_pages - 1;
2038 int nid = zone_to_nid(zone);
2039 unsigned long nr_pages = 0;
2040 int zid = zone_idx(zone);
2041 struct page *page = NULL;
2042
2043 for (; pfn < end_pfn; pfn++) {
2044 if (!deferred_pfn_valid(pfn)) {
2045 page = NULL;
2046 continue;
2047 } else if (!page || !(pfn & nr_pgmask)) {
2048 page = pfn_to_page(pfn);
2049 } else {
2050 page++;
2051 }
2052 __init_single_page(page, pfn, zid, nid);
2053 nr_pages++;
2054 }
2055 return nr_pages;
2056 }
2057
2058 /*
2059 * This function is meant to pre-load the iterator for the zone init.
2060 * Specifically it walks through the ranges until we are caught up to the
2061 * first_init_pfn value and exits there. If we never encounter the value we
2062 * return false indicating there are no valid ranges left.
2063 */
2064 static bool __init
2065 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2066 unsigned long *spfn, unsigned long *epfn,
2067 unsigned long first_init_pfn)
2068 {
2069 u64 j;
2070
2071 /*
2072 * Start out by walking through the ranges in this zone that have
2073 * already been initialized. We don't need to do anything with them
2074 * so we just need to flush them out of the system.
2075 */
2076 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2077 if (*epfn <= first_init_pfn)
2078 continue;
2079 if (*spfn < first_init_pfn)
2080 *spfn = first_init_pfn;
2081 *i = j;
2082 return true;
2083 }
2084
2085 return false;
2086 }
2087
2088 /*
2089 * Initialize and free pages. We do it in two loops: first we initialize
2090 * struct page, then free to buddy allocator, because while we are
2091 * freeing pages we can access pages that are ahead (computing buddy
2092 * page in __free_one_page()).
2093 *
2094 * In order to try and keep some memory in the cache we have the loop
2095 * broken along max page order boundaries. This way we will not cause
2096 * any issues with the buddy page computation.
2097 */
2098 static unsigned long __init
2099 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2100 unsigned long *end_pfn)
2101 {
2102 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2103 unsigned long spfn = *start_pfn, epfn = *end_pfn;
2104 unsigned long nr_pages = 0;
2105 u64 j = *i;
2106
2107 /* First we loop through and initialize the page values */
2108 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2109 unsigned long t;
2110
2111 if (mo_pfn <= *start_pfn)
2112 break;
2113
2114 t = min(mo_pfn, *end_pfn);
2115 nr_pages += deferred_init_pages(zone, *start_pfn, t);
2116
2117 if (mo_pfn < *end_pfn) {
2118 *start_pfn = mo_pfn;
2119 break;
2120 }
2121 }
2122
2123 /* Reset values and now loop through freeing pages as needed */
2124 swap(j, *i);
2125
2126 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2127 unsigned long t;
2128
2129 if (mo_pfn <= spfn)
2130 break;
2131
2132 t = min(mo_pfn, epfn);
2133 deferred_free_pages(spfn, t);
2134
2135 if (mo_pfn <= epfn)
2136 break;
2137 }
2138
2139 return nr_pages;
2140 }
2141
2142 static void __init
2143 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2144 void *arg)
2145 {
2146 unsigned long spfn, epfn;
2147 struct zone *zone = arg;
2148 u64 i;
2149
2150 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2151
2152 /*
2153 * Initialize and free pages in MAX_ORDER sized increments so that we
2154 * can avoid introducing any issues with the buddy allocator.
2155 */
2156 while (spfn < end_pfn) {
2157 deferred_init_maxorder(&i, zone, &spfn, &epfn);
2158 cond_resched();
2159 }
2160 }
2161
2162 /* An arch may override for more concurrency. */
2163 __weak int __init
2164 deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2165 {
2166 return 1;
2167 }
2168
2169 /* Initialise remaining memory on a node */
2170 static int __init deferred_init_memmap(void *data)
2171 {
2172 pg_data_t *pgdat = data;
2173 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2174 unsigned long spfn = 0, epfn = 0;
2175 unsigned long first_init_pfn, flags;
2176 unsigned long start = jiffies;
2177 struct zone *zone;
2178 int zid, max_threads;
2179 u64 i;
2180
2181 /* Bind memory initialisation thread to a local node if possible */
2182 if (!cpumask_empty(cpumask))
2183 set_cpus_allowed_ptr(current, cpumask);
2184
2185 pgdat_resize_lock(pgdat, &flags);
2186 first_init_pfn = pgdat->first_deferred_pfn;
2187 if (first_init_pfn == ULONG_MAX) {
2188 pgdat_resize_unlock(pgdat, &flags);
2189 pgdat_init_report_one_done();
2190 return 0;
2191 }
2192
2193 /* Sanity check boundaries */
2194 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2195 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2196 pgdat->first_deferred_pfn = ULONG_MAX;
2197
2198 /*
2199 * Once we unlock here, the zone cannot be grown anymore, thus if an
2200 * interrupt thread must allocate this early in boot, the zone must be
2201 * pre-grown prior to the start of deferred page initialization.
2202 */
2203 pgdat_resize_unlock(pgdat, &flags);
2204
2205 /* Only the highest zone is deferred so find it */
2206 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2207 zone = pgdat->node_zones + zid;
2208 if (first_init_pfn < zone_end_pfn(zone))
2209 break;
2210 }
2211
2212 /* If the zone is empty somebody else may have cleared out the zone */
2213 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2214 first_init_pfn))
2215 goto zone_empty;
2216
2217 max_threads = deferred_page_init_max_threads(cpumask);
2218
2219 while (spfn < epfn) {
2220 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2221 struct padata_mt_job job = {
2222 .thread_fn = deferred_init_memmap_chunk,
2223 .fn_arg = zone,
2224 .start = spfn,
2225 .size = epfn_align - spfn,
2226 .align = PAGES_PER_SECTION,
2227 .min_chunk = PAGES_PER_SECTION,
2228 .max_threads = max_threads,
2229 };
2230
2231 padata_do_multithreaded(&job);
2232 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2233 epfn_align);
2234 }
2235 zone_empty:
2236 /* Sanity check that the next zone really is unpopulated */
2237 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2238
2239 pr_info("node %d deferred pages initialised in %ums\n",
2240 pgdat->node_id, jiffies_to_msecs(jiffies - start));
2241
2242 pgdat_init_report_one_done();
2243 return 0;
2244 }
2245
2246 /*
2247 * If this zone has deferred pages, try to grow it by initializing enough
2248 * deferred pages to satisfy the allocation specified by order, rounded up to
2249 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
2250 * of SECTION_SIZE bytes by initializing struct pages in increments of
2251 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2252 *
2253 * Return true when zone was grown, otherwise return false. We return true even
2254 * when we grow less than requested, to let the caller decide if there are
2255 * enough pages to satisfy the allocation.
2256 *
2257 * Note: We use noinline because this function is needed only during boot, and
2258 * it is called from a __ref function _deferred_grow_zone. This way we are
2259 * making sure that it is not inlined into permanent text section.
2260 */
2261 static noinline bool __init
2262 deferred_grow_zone(struct zone *zone, unsigned int order)
2263 {
2264 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2265 pg_data_t *pgdat = zone->zone_pgdat;
2266 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2267 unsigned long spfn, epfn, flags;
2268 unsigned long nr_pages = 0;
2269 u64 i;
2270
2271 /* Only the last zone may have deferred pages */
2272 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2273 return false;
2274
2275 pgdat_resize_lock(pgdat, &flags);
2276
2277 /*
2278 * If someone grew this zone while we were waiting for spinlock, return
2279 * true, as there might be enough pages already.
2280 */
2281 if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2282 pgdat_resize_unlock(pgdat, &flags);
2283 return true;
2284 }
2285
2286 /* If the zone is empty somebody else may have cleared out the zone */
2287 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2288 first_deferred_pfn)) {
2289 pgdat->first_deferred_pfn = ULONG_MAX;
2290 pgdat_resize_unlock(pgdat, &flags);
2291 /* Retry only once. */
2292 return first_deferred_pfn != ULONG_MAX;
2293 }
2294
2295 /*
2296 * Initialize and free pages in MAX_ORDER sized increments so
2297 * that we can avoid introducing any issues with the buddy
2298 * allocator.
2299 */
2300 while (spfn < epfn) {
2301 /* update our first deferred PFN for this section */
2302 first_deferred_pfn = spfn;
2303
2304 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2305 touch_nmi_watchdog();
2306
2307 /* We should only stop along section boundaries */
2308 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2309 continue;
2310
2311 /* If our quota has been met we can stop here */
2312 if (nr_pages >= nr_pages_needed)
2313 break;
2314 }
2315
2316 pgdat->first_deferred_pfn = spfn;
2317 pgdat_resize_unlock(pgdat, &flags);
2318
2319 return nr_pages > 0;
2320 }
2321
2322 /*
2323 * deferred_grow_zone() is __init, but it is called from
2324 * get_page_from_freelist() during early boot until deferred_pages permanently
2325 * disables this call. This is why we have the __ref wrapper, to avoid a
2326 * section mismatch warning and to ensure that the function body gets unloaded.
2327 */
2328 static bool __ref
2329 _deferred_grow_zone(struct zone *zone, unsigned int order)
2330 {
2331 return deferred_grow_zone(zone, order);
2332 }
2333
2334 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2335
2336 void __init page_alloc_init_late(void)
2337 {
2338 struct zone *zone;
2339 int nid;
2340
2341 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2342
2343 /* There will be num_node_state(N_MEMORY) threads */
2344 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2345 for_each_node_state(nid, N_MEMORY) {
2346 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2347 }
2348
2349 /* Block until all are initialised */
2350 wait_for_completion(&pgdat_init_all_done_comp);
2351
2352 /*
2353 * We initialized the rest of the deferred pages. Permanently disable
2354 * on-demand struct page initialization.
2355 */
2356 static_branch_disable(&deferred_pages);
2357
2358 /* Reinit limits that are based on free pages after the kernel is up */
2359 files_maxfiles_init();
2360 #endif
2361
2362 buffer_init();
2363
2364 /* Discard memblock private memory */
2365 memblock_discard();
2366
2367 for_each_node_state(nid, N_MEMORY)
2368 shuffle_free_memory(NODE_DATA(nid));
2369
2370 for_each_populated_zone(zone)
2371 set_zone_contiguous(zone);
2372 }
2373
2374 #ifdef CONFIG_CMA
2375 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2376 void __init init_cma_reserved_pageblock(struct page *page)
2377 {
2378 unsigned i = pageblock_nr_pages;
2379 struct page *p = page;
2380
2381 do {
2382 __ClearPageReserved(p);
2383 set_page_count(p, 0);
2384 } while (++p, --i);
2385
2386 set_pageblock_migratetype(page, MIGRATE_CMA);
2387
2388 if (pageblock_order >= MAX_ORDER) {
2389 i = pageblock_nr_pages;
2390 p = page;
2391 do {
2392 set_page_refcounted(p);
2393 __free_pages(p, MAX_ORDER - 1);
2394 p += MAX_ORDER_NR_PAGES;
2395 } while (i -= MAX_ORDER_NR_PAGES);
2396 } else {
2397 set_page_refcounted(page);
2398 __free_pages(page, pageblock_order);
2399 }
2400
2401 adjust_managed_page_count(page, pageblock_nr_pages);
2402 page_zone(page)->cma_pages += pageblock_nr_pages;
2403 }
2404 #endif
2405
2406 /*
2407 * The order of subdivision here is critical for the IO subsystem.
2408 * Please do not alter this order without good reasons and regression
2409 * testing. Specifically, as large blocks of memory are subdivided,
2410 * the order in which smaller blocks are delivered depends on the order
2411 * they're subdivided in this function. This is the primary factor
2412 * influencing the order in which pages are delivered to the IO
2413 * subsystem according to empirical testing, and this is also justified
2414 * by considering the behavior of a buddy system containing a single
2415 * large block of memory acted on by a series of small allocations.
2416 * This behavior is a critical factor in sglist merging's success.
2417 *
2418 * -- nyc
2419 */
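/*
 * For example, satisfying an order-0 request from an order-3 block splits
 * off and re-frees the upper halves at orders 2, 1 and 0 in turn (each one
 * either goes back on its free list or becomes a guard page); the lowest
 * order-0 page is the one __rmqueue_smallest() hands back to its caller.
 */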
2420 static inline void expand(struct zone *zone, struct page *page,
2421 int low, int high, int migratetype)
2422 {
2423 unsigned long size = 1 << high;
2424
2425 while (high > low) {
2426 high--;
2427 size >>= 1;
2428 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2429
2430 /*
2431 * Mark as guard pages (or a single page) so they can be
2432 * merged back into the allocator when the buddy is freed.
2433 * The corresponding page table entries are not touched;
2434 * the pages stay not present in the virtual address space.
2435 */
2436 if (set_page_guard(zone, &page[size], high, migratetype))
2437 continue;
2438
2439 add_to_free_list(&page[size], zone, high, migratetype);
2440 set_buddy_order(&page[size], high);
2441 }
2442 }
2443
2444 static void check_new_page_bad(struct page *page)
2445 {
2446 if (unlikely(page->flags & __PG_HWPOISON)) {
2447 /* Don't complain about hwpoisoned pages */
2448 page_mapcount_reset(page); /* remove PageBuddy */
2449 return;
2450 }
2451
2452 bad_page(page,
2453 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2454 }
2455
2456 /*
2457 * This page is about to be returned from the page allocator
2458 */
2459 static inline int check_new_page(struct page *page)
2460 {
2461 if (likely(page_expected_state(page,
2462 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2463 return 0;
2464
2465 check_new_page_bad(page);
2466 return 1;
2467 }
2468
2469 #ifdef CONFIG_DEBUG_VM
2470 /*
2471 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2472 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2473 * also checked when pcp lists are refilled from the free lists.
2474 */
2475 static inline bool check_pcp_refill(struct page *page)
2476 {
2477 if (debug_pagealloc_enabled_static())
2478 return check_new_page(page);
2479 else
2480 return false;
2481 }
2482
2483 static inline bool check_new_pcp(struct page *page)
2484 {
2485 return check_new_page(page);
2486 }
2487 #else
2488 /*
2489 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2490 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2491 * enabled, they are also checked when being allocated from the pcp lists.
2492 */
2493 static inline bool check_pcp_refill(struct page *page)
2494 {
2495 return check_new_page(page);
2496 }
2497 static inline bool check_new_pcp(struct page *page)
2498 {
2499 if (debug_pagealloc_enabled_static())
2500 return check_new_page(page);
2501 else
2502 return false;
2503 }
2504 #endif /* CONFIG_DEBUG_VM */
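
/*
 * Summary of the two variants above: with CONFIG_DEBUG_VM, pages are
 * checked when allocated from the pcp lists and, if debug_pagealloc is
 * enabled, also when the pcp lists are refilled; without CONFIG_DEBUG_VM
 * it is the other way around. Either way a page is normally checked once
 * on its way out of the allocator.
 */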
2505
2506 static bool check_new_pages(struct page *page, unsigned int order)
2507 {
2508 int i;
2509 for (i = 0; i < (1 << order); i++) {
2510 struct page *p = page + i;
2511
2512 if (unlikely(check_new_page(p)))
2513 return true;
2514 }
2515
2516 return false;
2517 }
2518
2519 static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
2520 {
2521 /* Don't skip if a software KASAN mode is enabled. */
2522 if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
2523 IS_ENABLED(CONFIG_KASAN_SW_TAGS))
2524 return false;
2525
2526 /* Skip, if hardware tag-based KASAN is not enabled. */
2527 if (!kasan_hw_tags_enabled())
2528 return true;
2529
2530 /*
2531 * With hardware tag-based KASAN enabled, skip if either:
2532 *
2533 * 1. Memory tags have already been cleared via tag_clear_highpage().
2534 * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON.
2535 */
2536 return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON);
2537 }
2538
2539 static inline bool should_skip_init(gfp_t flags)
2540 {
2541 /* Don't skip, if hardware tag-based KASAN is not enabled. */
2542 if (!kasan_hw_tags_enabled())
2543 return false;
2544
2545 /* For hardware tag-based KASAN, skip if requested. */
2546 return (flags & __GFP_SKIP_ZERO);
2547 }
2548
2549 inline void post_alloc_hook(struct page *page, unsigned int order,
2550 gfp_t gfp_flags)
2551 {
2552 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
2553 !should_skip_init(gfp_flags);
2554 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
2555
2556 set_page_private(page, 0);
2557 set_page_refcounted(page);
2558
2559 arch_alloc_page(page, order);
2560 debug_pagealloc_map_pages(page, 1 << order);
2561
2562 /*
2563 * Page unpoisoning must happen before memory initialization.
2564 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2565 * allocations and the page unpoisoning code will complain.
2566 */
2567 kernel_unpoison_pages(page, 1 << order);
2568
2569 /*
2570 * As memory initialization might be integrated into KASAN,
2571 * KASAN unpoisoning and memory initializion code must be
2572 * kept together to avoid discrepancies in behavior.
2573 */
2574
2575 /*
2576 * Zero the memory tags if requested (which can only happen when the
2577 * memory should be initialized as well).
2578 */
2579 if (init_tags) {
2580 int i;
2581
2582 /* Initialize both memory and tags. */
2583 for (i = 0; i != 1 << order; ++i)
2584 tag_clear_highpage(page + i);
2585
2586 /* Note that memory is already initialized by the loop above. */
2587 init = false;
2588 }
2589 if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) {
2590 /* Unpoison shadow memory or set memory tags. */
2591 kasan_unpoison_pages(page, order, init);
2592
2593 /* Note that memory is already initialized by KASAN. */
2594 if (kasan_has_integrated_init())
2595 init = false;
2596 }
2597 /* If memory is still not initialized, do it now. */
2598 if (init)
2599 kernel_init_free_pages(page, 1 << order);
2600 /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
2601 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON))
2602 SetPageSkipKASanPoison(page);
2603
2604 set_page_owner(page, order, gfp_flags);
2605 }
2606
2607 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2608 unsigned int alloc_flags)
2609 {
2610 post_alloc_hook(page, order, gfp_flags);
2611
2612 if (order && (gfp_flags & __GFP_COMP))
2613 prep_compound_page(page, order);
2614
2615 /*
2616 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2617 * allocate the page. The expectation is that the caller is taking
2618 * steps that will free more memory. The caller should avoid the page
2619 * being used for !PFMEMALLOC purposes.
2620 */
2621 if (alloc_flags & ALLOC_NO_WATERMARKS)
2622 set_page_pfmemalloc(page);
2623 else
2624 clear_page_pfmemalloc(page);
2625 trace_android_vh_test_clear_look_around_ref(page);
2626 }
2627
2628 /*
2629 * Go through the free lists for the given migratetype and remove
2630 * the smallest available page from the freelists
2631 */
2632 static __always_inline
2633 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2634 int migratetype)
2635 {
2636 unsigned int current_order;
2637 struct free_area *area;
2638 struct page *page;
2639
2640 /* Find a page of the appropriate size in the preferred list */
2641 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2642 area = &(zone->free_area[current_order]);
2643 page = get_page_from_free_area(area, migratetype);
2644 if (!page)
2645 continue;
2646 del_page_from_free_list(page, zone, current_order);
2647 expand(zone, page, order, current_order, migratetype);
2648 set_pcppage_migratetype(page, migratetype);
2649 return page;
2650 }
2651
2652 return NULL;
2653 }
2654
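/*
 * Illustrative sketch only (example_rmqueue_one() is not a kernel API):
 * pulling a single order-0 page of a given migratetype straight off the
 * buddy freelists. As rmqueue_bulk() below does, the caller must hold
 * the zone lock and account for the page leaving NR_FREE_PAGES.
 */
static inline struct page *example_rmqueue_one(struct zone *zone,
					       int migratetype)
{
	struct page *page;

	lockdep_assert_held(&zone->lock);

	page = __rmqueue_smallest(zone, 0, migratetype);
	if (page)
		__mod_zone_page_state(zone, NR_FREE_PAGES, -1);

	return page;
}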
2655
2656 /*
2657 * This array describes the order lists are fallen back to when
2658 * the free lists for the desirable migrate type are depleted
2659 */
2660 static int fallbacks[MIGRATE_TYPES][3] = {
2661 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2662 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2663 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
2664 #ifdef CONFIG_CMA
2665 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
2666 #endif
2667 #ifdef CONFIG_MEMORY_ISOLATION
2668 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
2669 #endif
2670 };
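
/*
 * Read a row left to right: e.g. an UNMOVABLE allocation first falls back
 * to RECLAIMABLE, then to MOVABLE. MIGRATE_TYPES terminates each row and
 * is what find_suitable_fallback() below stops on.
 */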
2671
2672 #ifdef CONFIG_CMA
2673 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2674 unsigned int order)
2675 {
2676 return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2677 }
2678 #else
2679 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2680 unsigned int order) { return NULL; }
2681 #endif
2682
2683 /*
2684 * Move the free pages in a range to the freelist tail of the requested type.
2685 * Note that start_pfn and end_pfn are not aligned on a pageblock
2686 * boundary. If alignment is required, use move_freepages_block()
2687 */
2688 static int move_freepages(struct zone *zone,
2689 unsigned long start_pfn, unsigned long end_pfn,
2690 int migratetype, int *num_movable)
2691 {
2692 struct page *page;
2693 unsigned long pfn;
2694 unsigned int order;
2695 int pages_moved = 0;
2696
2697 for (pfn = start_pfn; pfn <= end_pfn;) {
2698 page = pfn_to_page(pfn);
2699 if (!PageBuddy(page)) {
2700 /*
2701 * We assume that pages that could be isolated for
2702 * migration are movable. But we don't actually try
2703 * isolating, as that would be expensive.
2704 */
2705 if (num_movable &&
2706 (PageLRU(page) || __PageMovable(page)))
2707 (*num_movable)++;
2708 pfn++;
2709 continue;
2710 }
2711
2712 /* Make sure we are not inadvertently changing nodes */
2713 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2714 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2715
2716 order = buddy_order(page);
2717 move_to_free_list(page, zone, order, migratetype);
2718 pfn += 1 << order;
2719 pages_moved += 1 << order;
2720 }
2721
2722 return pages_moved;
2723 }
2724
2725 int move_freepages_block(struct zone *zone, struct page *page,
2726 int migratetype, int *num_movable)
2727 {
2728 unsigned long start_pfn, end_pfn, pfn;
2729
2730 if (num_movable)
2731 *num_movable = 0;
2732
2733 pfn = page_to_pfn(page);
2734 start_pfn = pfn & ~(pageblock_nr_pages - 1);
2735 end_pfn = start_pfn + pageblock_nr_pages - 1;
2736
2737 /* Do not cross zone boundaries */
2738 if (!zone_spans_pfn(zone, start_pfn))
2739 start_pfn = pfn;
2740 if (!zone_spans_pfn(zone, end_pfn))
2741 return 0;
2742
2743 return move_freepages(zone, start_pfn, end_pfn, migratetype,
2744 num_movable);
2745 }
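
/*
 * Minimal usage sketch (illustrative only; example_claim_block() is not a
 * kernel API): this mirrors how callers below, such as
 * reserve_highatomic_pageblock(), retype a pageblock and pull its free
 * pages onto the new freelist. The zone lock must be held.
 */
static inline void example_claim_block(struct zone *zone, struct page *page,
				       int migratetype)
{
	lockdep_assert_held(&zone->lock);

	/* Future frees in this block will land on the new migratetype. */
	set_pageblock_migratetype(page, migratetype);

	/* Move the pages that are already free over as well. */
	move_freepages_block(zone, page, migratetype, NULL);
}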
2746
2747 static void change_pageblock_range(struct page *pageblock_page,
2748 int start_order, int migratetype)
2749 {
2750 int nr_pageblocks = 1 << (start_order - pageblock_order);
2751
2752 while (nr_pageblocks--) {
2753 set_pageblock_migratetype(pageblock_page, migratetype);
2754 pageblock_page += pageblock_nr_pages;
2755 }
2756 }
2757
2758 /*
2759 * When we are falling back to another migratetype during allocation, try to
2760 * steal extra free pages from the same pageblocks to satisfy further
2761 * allocations, instead of polluting multiple pageblocks.
2762 *
2763 * If we are stealing a relatively large buddy page, it is likely there will
2764 * be more free pages in the pageblock, so try to steal them all. For
2765 * reclaimable and unmovable allocations, we steal regardless of page size,
2766 * as fragmentation caused by those allocations polluting movable pageblocks
2767 * is worse than movable allocations stealing from unmovable and reclaimable
2768 * pageblocks.
2769 */
2770 static bool can_steal_fallback(unsigned int order, int start_mt)
2771 {
2772 /*
2773 * This order check is intentionally kept even though a more
2774 * relaxed check follows below. The reason is that we can steal
2775 * the whole pageblock when this condition is met, whereas the
2776 * check below does not guarantee it and is just a heuristic
2777 * that could be changed at any time.
2778 */
2779 if (order >= pageblock_order)
2780 return true;
2781
2782 if (order >= pageblock_order / 2 ||
2783 start_mt == MIGRATE_RECLAIMABLE ||
2784 start_mt == MIGRATE_UNMOVABLE ||
2785 page_group_by_mobility_disabled)
2786 return true;
2787
2788 return false;
2789 }
2790
2791 static inline bool boost_watermark(struct zone *zone)
2792 {
2793 unsigned long max_boost;
2794
2795 if (!watermark_boost_factor)
2796 return false;
2797 /*
2798 * Don't bother in zones that are unlikely to produce results.
2799 * On small machines, including kdump capture kernels running
2800 * in a small area, boosting the watermark can cause an out of
2801 * memory situation immediately.
2802 */
2803 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2804 return false;
2805
2806 max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2807 watermark_boost_factor, 10000);
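	/*
	 * e.g. a watermark_boost_factor of 15000 caps the total boost at
	 * 150% of the high watermark.
	 */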
2808
2809 /*
2810 * The high watermark may be uninitialised if fragmentation occurs
2811 * very early in boot, so do not boost in that case. We do not fall
2812 * through and boost by pageblock_nr_pages because failing
2813 * allocations that early means reclaim is not going to help,
2814 * and it may even be impossible to reclaim the boosted
2815 * watermark, resulting in a hang.
2816 */
2817 if (!max_boost)
2818 return false;
2819
2820 max_boost = max(pageblock_nr_pages, max_boost);
2821
2822 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2823 max_boost);
2824
2825 return true;
2826 }
2827
2828 /*
2829 * This function implements actual steal behaviour. If order is large enough,
2830 * we can steal whole pageblock. If not, we first move freepages in this
2831 * pageblock to our migratetype and determine how many already-allocated pages
2832 * are there in the pageblock with a compatible migratetype. If at least half
2833 * of pages are free or compatible, we can change migratetype of the pageblock
2834 * itself, so pages freed in the future will be put on the correct free list.
2835 */
2836 static void steal_suitable_fallback(struct zone *zone, struct page *page,
2837 unsigned int alloc_flags, int start_type, bool whole_block)
2838 {
2839 unsigned int current_order = buddy_order(page);
2840 int free_pages, movable_pages, alike_pages;
2841 int old_block_type;
2842
2843 old_block_type = get_pageblock_migratetype(page);
2844
2845 /*
2846 * This can happen due to races and we want to prevent broken
2847 * highatomic accounting.
2848 */
2849 if (is_migrate_highatomic(old_block_type))
2850 goto single_page;
2851
2852 /* Take ownership for orders >= pageblock_order */
2853 if (current_order >= pageblock_order) {
2854 change_pageblock_range(page, current_order, start_type);
2855 goto single_page;
2856 }
2857
2858 /*
2859 * Boost watermarks to increase reclaim pressure to reduce the
2860 * likelihood of future fallbacks. Wake kswapd now as the node
2861 * may be balanced overall and kswapd will not wake naturally.
2862 */
2863 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2864 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2865
2866 /* We are not allowed to try stealing from the whole block */
2867 if (!whole_block)
2868 goto single_page;
2869
2870 free_pages = move_freepages_block(zone, page, start_type,
2871 &movable_pages);
2872 /*
2873 * Determine how many pages are compatible with our allocation.
2874 * For movable allocation, it's the number of movable pages which
2875 * we just obtained. For other types it's a bit more tricky.
2876 */
2877 if (start_type == MIGRATE_MOVABLE) {
2878 alike_pages = movable_pages;
2879 } else {
2880 /*
2881 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2882 * to MOVABLE pageblock, consider all non-movable pages as
2883 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2884 * vice versa, be conservative since we can't distinguish the
2885 * exact migratetype of non-movable pages.
2886 */
2887 if (old_block_type == MIGRATE_MOVABLE)
2888 alike_pages = pageblock_nr_pages
2889 - (free_pages + movable_pages);
2890 else
2891 alike_pages = 0;
2892 }
2893
2894 /* moving whole block can fail due to zone boundary conditions */
2895 if (!free_pages)
2896 goto single_page;
2897
2898 /*
2899 * If a sufficient number of pages in the block are either free or of
2900 * comparable migratability as our allocation, claim the whole block.
2901 */
2902 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2903 page_group_by_mobility_disabled)
2904 set_pageblock_migratetype(page, start_type);
2905
2906 return;
2907
2908 single_page:
2909 move_to_free_list(page, zone, current_order, start_type);
2910 }
2911
2912 /*
2913 * Check whether there is a suitable fallback freepage with requested order.
2914 * If only_stealable is true, this function returns fallback_mt only if
2915 * we can steal other freepages altogether. This would help to reduce
2916 * fragmentation due to mixed migratetype pages in one pageblock.
2917 */
2918 int find_suitable_fallback(struct free_area *area, unsigned int order,
2919 int migratetype, bool only_stealable, bool *can_steal)
2920 {
2921 int i;
2922 int fallback_mt;
2923
2924 if (area->nr_free == 0)
2925 return -1;
2926
2927 *can_steal = false;
2928 for (i = 0;; i++) {
2929 fallback_mt = fallbacks[migratetype][i];
2930 if (fallback_mt == MIGRATE_TYPES)
2931 break;
2932
2933 if (free_area_empty(area, fallback_mt))
2934 continue;
2935
2936 if (can_steal_fallback(order, migratetype))
2937 *can_steal = true;
2938
2939 if (!only_stealable)
2940 return fallback_mt;
2941
2942 if (*can_steal)
2943 return fallback_mt;
2944 }
2945
2946 return -1;
2947 }
2948
2949 /*
2950 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2951 * there are no empty page blocks that contain a page with a suitable order
2952 */
2953 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2954 unsigned int alloc_order)
2955 {
2956 int mt;
2957 unsigned long max_managed, flags;
2958
2959 /*
2960 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2961 * Check is race-prone but harmless.
2962 */
2963 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2964 if (zone->nr_reserved_highatomic >= max_managed)
2965 return;
2966
2967 spin_lock_irqsave(&zone->lock, flags);
2968
2969 /* Recheck the nr_reserved_highatomic limit under the lock */
2970 if (zone->nr_reserved_highatomic >= max_managed)
2971 goto out_unlock;
2972
2973 /* Yoink! */
2974 mt = get_pageblock_migratetype(page);
2975 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2976 && !is_migrate_cma(mt)) {
2977 zone->nr_reserved_highatomic += pageblock_nr_pages;
2978 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2979 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2980 }
2981
2982 out_unlock:
2983 spin_unlock_irqrestore(&zone->lock, flags);
2984 }
2985
2986 /*
2987 * Used when an allocation is about to fail under memory pressure. This
2988 * potentially hurts the reliability of high-order allocations when under
2989 * intense memory pressure but failed atomic allocations should be easier
2990 * to recover from than an OOM.
2991 *
2992 * If @force is true, try to unreserve a pageblock even though highatomic
2993 * pageblock is exhausted.
2994 */
2995 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2996 bool force)
2997 {
2998 struct zonelist *zonelist = ac->zonelist;
2999 unsigned long flags;
3000 struct zoneref *z;
3001 struct zone *zone;
3002 struct page *page;
3003 int order;
3004 bool ret;
3005 bool skip_unreserve_highatomic = false;
3006
3007 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
3008 ac->nodemask) {
3009 /*
3010 * Preserve at least one pageblock unless memory pressure
3011 * is really high.
3012 */
3013 if (!force && zone->nr_reserved_highatomic <=
3014 pageblock_nr_pages)
3015 continue;
3016
3017 trace_android_vh_unreserve_highatomic_bypass(force, zone,
3018 &skip_unreserve_highatomic);
3019 if (skip_unreserve_highatomic)
3020 continue;
3021
3022 spin_lock_irqsave(&zone->lock, flags);
3023 for (order = 0; order < MAX_ORDER; order++) {
3024 struct free_area *area = &(zone->free_area[order]);
3025
3026 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
3027 if (!page)
3028 continue;
3029
3030 /*
3031 * In the page freeing path, migratetype changes are racy, so
3032 * we can encounter several free pages in a pageblock
3033 * in this loop although we changed the pageblock type
3034 * from highatomic to ac->migratetype. So we should
3035 * adjust the count only once.
3036 */
3037 if (is_migrate_highatomic_page(page)) {
3038 /*
3039 * It should never happen but changes to
3040 * locking could inadvertently allow a per-cpu
3041 * drain to add pages to MIGRATE_HIGHATOMIC
3042 * while unreserving so be safe and watch for
3043 * underflows.
3044 */
3045 zone->nr_reserved_highatomic -= min(
3046 pageblock_nr_pages,
3047 zone->nr_reserved_highatomic);
3048 }
3049
3050 /*
3051 * Convert to ac->migratetype and avoid the normal
3052 * pageblock stealing heuristics. Minimally, the caller
3053 * is doing the work and needs the pages. More
3054 * importantly, if the block was always converted to
3055 * MIGRATE_UNMOVABLE or another type then the number
3056 * of pageblocks that cannot be completely freed
3057 * may increase.
3058 */
3059 set_pageblock_migratetype(page, ac->migratetype);
3060 ret = move_freepages_block(zone, page, ac->migratetype,
3061 NULL);
3062 if (ret) {
3063 spin_unlock_irqrestore(&zone->lock, flags);
3064 return ret;
3065 }
3066 }
3067 spin_unlock_irqrestore(&zone->lock, flags);
3068 }
3069
3070 return false;
3071 }
3072
3073 /*
3074 * Try finding a free buddy page on the fallback list and put it on the free
3075 * list of requested migratetype, possibly along with other pages from the same
3076 * block, depending on fragmentation avoidance heuristics. Returns true if
3077 * fallback was found so that __rmqueue_smallest() can grab it.
3078 *
3079 * The use of signed ints for order and current_order is a deliberate
3080 * deviation from the rest of this file, to make the for loop
3081 * condition simpler.
3082 */
3083 static __always_inline bool
3084 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
3085 unsigned int alloc_flags)
3086 {
3087 struct free_area *area;
3088 int current_order;
3089 int min_order = order;
3090 struct page *page;
3091 int fallback_mt;
3092 bool can_steal;
3093
3094 /*
3095 * Do not steal pages from freelists belonging to other pageblocks
3096 * i.e. orders < pageblock_order. If there are no local zones free,
3097 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
3098 */
3099 if (alloc_flags & ALLOC_NOFRAGMENT)
3100 min_order = pageblock_order;
3101
3102 /*
3103 * Find the largest available free page in the other list. This roughly
3104 * approximates finding the pageblock with the most free pages, which
3105 * would be too costly to do exactly.
3106 */
3107 for (current_order = MAX_ORDER - 1; current_order >= min_order;
3108 --current_order) {
3109 area = &(zone->free_area[current_order]);
3110 fallback_mt = find_suitable_fallback(area, current_order,
3111 start_migratetype, false, &can_steal);
3112 if (fallback_mt == -1)
3113 continue;
3114
3115 /*
3116 * We cannot steal all free pages from the pageblock and the
3117 * requested migratetype is movable. In that case it's better to
3118 * steal and split the smallest available page instead of the
3119 * largest available page, because even if the next movable
3120 * allocation falls back into a different pageblock than this
3121 * one, it won't cause permanent fragmentation.
3122 */
3123 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
3124 && current_order > order)
3125 goto find_smallest;
3126
3127 goto do_steal;
3128 }
3129
3130 return false;
3131
3132 find_smallest:
3133 for (current_order = order; current_order < MAX_ORDER;
3134 current_order++) {
3135 area = &(zone->free_area[current_order]);
3136 fallback_mt = find_suitable_fallback(area, current_order,
3137 start_migratetype, false, &can_steal);
3138 if (fallback_mt != -1)
3139 break;
3140 }
3141
3142 /*
3143 * This should not happen - we already found a suitable fallback
3144 * when looking for the largest page.
3145 */
3146 VM_BUG_ON(current_order == MAX_ORDER);
3147
3148 do_steal:
3149 page = get_page_from_free_area(area, fallback_mt);
3150
3151 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
3152 can_steal);
3153
3154 trace_mm_page_alloc_extfrag(page, order, current_order,
3155 start_migratetype, fallback_mt);
3156
3157 return true;
3158
3159 }
3160
3161 /*
3162 * Do the hard work of removing an element from the buddy allocator.
3163 * Call me with the zone->lock already held.
3164 */
3165 static __always_inline struct page *
3166 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
3167 unsigned int alloc_flags)
3168 {
3169 struct page *page = NULL;
3170
3171 trace_android_vh_rmqueue_smallest_bypass(&page, zone, order, migratetype);
3172 if (page)
3173 return page;
3174
3175 retry:
3176 page = __rmqueue_smallest(zone, order, migratetype);
3177
3178 /*
3179 * let a normal GFP_MOVABLE allocation have a chance to try MIGRATE_CMA
3180 */
3181 if (unlikely(!page) && (migratetype == MIGRATE_MOVABLE)) {
3182 bool try_cma = false;
3183 trace_android_vh_rmqueue_cma_fallback(zone, order, &page);
3184 trace_android_vh_try_cma_fallback(zone, order, &try_cma);
3185 if (try_cma)
3186 page = __rmqueue_cma_fallback(zone, order);
3187 }
3188
3189 if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
3190 alloc_flags))
3191 goto retry;
3192
3193 if (page)
3194 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3195 return page;
3196 }
3197
3198 #ifdef CONFIG_CMA
3199 static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
3200 int migratetype,
3201 unsigned int alloc_flags)
3202 {
3203 struct page *page = __rmqueue_cma_fallback(zone, order);
3204
3205 if (page)
3206 trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
3207 return page;
3208 }
3209 #else
3210 static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
3211 int migratetype,
3212 unsigned int alloc_flags)
3213 {
3214 return NULL;
3215 }
3216 #endif
3217
3218 /*
3219 * Obtain a specified number of elements from the buddy allocator, all under
3220 * a single hold of the lock, for efficiency. Add them to the supplied list.
3221 * Returns the number of new pages which were placed at *list.
3222 */
3223 static int rmqueue_bulk(struct zone *zone, unsigned int order,
3224 unsigned long count, struct list_head *list,
3225 int migratetype, unsigned int alloc_flags)
3226 {
3227 int i, allocated = 0;
3228
3229 /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
3230 spin_lock(&zone->lock);
3231 for (i = 0; i < count; ++i) {
3232 struct page *page;
3233
3234 trace_android_rvh_rmqueue_bulk(NULL);
3235 if (is_migrate_cma(migratetype))
3236 page = __rmqueue_cma(zone, order, migratetype,
3237 alloc_flags);
3238 else
3239 page = __rmqueue(zone, order, migratetype, alloc_flags);
3240
3241 if (unlikely(page == NULL))
3242 break;
3243
3244 if (unlikely(check_pcp_refill(page)))
3245 continue;
3246
3247 /*
3248 * Split buddy pages returned by expand() are received here in
3249 * physical page order. The page is added to the tail of the
3250 * caller's list. From the caller's perspective, the linked list
3251 * is ordered by page number under some conditions. This is
3252 * useful for IO devices that can forward from the head of the
3253 * list, and thus also in physical page order, and for IO
3254 * devices that can merge IO requests if the physical pages
3255 * are ordered properly.
3256 */
3257 list_add_tail(&page->lru, list);
3258 allocated++;
3259 if (is_migrate_cma(get_pcppage_migratetype(page)))
3260 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
3261 -(1 << order));
3262 }
3263
3264 /*
3265 * i pages were removed from the buddy list even if some leak due
3266 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
3267 * on i. Do not confuse with 'allocated' which is the number of
3268 * pages added to the pcp list.
3269 */
3270 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
3271 spin_unlock(&zone->lock);
3272 return allocated;
3273 }
3274
3275 /*
3276 * Return the pcp list that corresponds to the migrate type if that list isn't
3277 * empty.
3278 * If the list is empty return NULL.
3279 */
3280 static struct list_head *get_populated_pcp_list(struct zone *zone,
3281 unsigned int order, struct per_cpu_pages *pcp,
3282 int migratetype, unsigned int alloc_flags)
3283 {
3284 struct list_head *list = &pcp->lists[order_to_pindex(migratetype, order)];
3285
3286 if (list_empty(list)) {
3287 int batch = READ_ONCE(pcp->batch);
3288 int alloced;
3289
3290 trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
3291 if (!list_empty(list))
3292 return list;
3293
3294 /*
3295 * Scale batch relative to order if batch implies
3296 * free pages can be stored on the PCP. Batch can
3297 * be 1 for small zones or for boot pagesets which
3298 * should never store free pages as the pages may
3299 * belong to arbitrary zones.
3300 */
3301 if (batch > 1)
3302 batch = max(batch >> order, 2);
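		/*
		 * e.g. a base batch of 64 refills 16 order-2 pages at a
		 * time, but never fewer than 2.
		 */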
3303 alloced = rmqueue_bulk(zone, order, batch, list, migratetype, alloc_flags);
3304
3305 pcp->count += alloced << order;
3306 if (list_empty(list))
3307 list = NULL;
3308 }
3309 return list;
3310 }
3311
3312 #ifdef CONFIG_NUMA
3313 /*
3314 * Called from the vmstat counter updater to drain pagesets of this
3315 * currently executing processor on remote nodes after they have
3316 * expired.
3317 */
3318 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3319 {
3320 int to_drain, batch;
3321
3322 batch = READ_ONCE(pcp->batch);
3323 to_drain = min(pcp->count, batch);
3324 if (to_drain > 0) {
3325 unsigned long flags;
3326 struct per_cpu_pages_ext *pcp_ext = pcp_to_pcpext(pcp);
3327
3328 /*
3329 * free_pcppages_bulk expects IRQs disabled for zone->lock
3330 * so even though pcp->lock is not intended to be IRQ-safe,
3331 * it's needed in this context.
3332 */
3333 spin_lock_irqsave(&pcp_ext->lock, flags);
3334 free_pcppages_bulk(zone, to_drain, pcp);
3335 spin_unlock_irqrestore(&pcp_ext->lock, flags);
3336 }
3337 }
3338 #endif
3339
3340 /*
3341 * Drain pcplists of the indicated processor and zone.
3342 */
3343 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3344 {
3345 struct per_cpu_pages *pcp;
3346 struct per_cpu_pages_ext *pcp_ext;
3347
3348 pcp_ext = per_cpu_ptr(zone_per_cpu_pageset(zone), cpu);
3349 pcp = &pcp_ext->pcp;
3350 if (pcp->count) {
3351 unsigned long flags;
3352
3353 /* See drain_zone_pages on why this is disabling IRQs */
3354 spin_lock_irqsave(&pcp_ext->lock, flags);
3355 free_pcppages_bulk(zone, pcp->count, pcp);
3356 spin_unlock_irqrestore(&pcp_ext->lock, flags);
3357 }
3358 }
3359
3360 /*
3361 * Drain pcplists of all zones on the indicated processor.
3362 */
3363 static void drain_pages(unsigned int cpu)
3364 {
3365 struct zone *zone;
3366
3367 for_each_populated_zone(zone) {
3368 drain_pages_zone(cpu, zone);
3369 }
3370 }
3371
3372 /*
3373 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3374 */
3375 void drain_local_pages(struct zone *zone)
3376 {
3377 int cpu = smp_processor_id();
3378
3379 if (zone)
3380 drain_pages_zone(cpu, zone);
3381 else
3382 drain_pages(cpu);
3383 }
3384
3385 /*
3386 * The implementation of drain_all_pages(), exposing an extra parameter to
3387 * drain on all cpus.
3388 *
3389 * drain_all_pages() is optimized to only execute on cpus where pcplists are
3390 * not empty. The check for non-emptiness can however race with a free to
3391 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3392 * that need the guarantee that every CPU has drained can disable the
3393 * optimizing racy check.
3394 */
3395 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3396 {
3397 int cpu;
3398
3399 /*
3400 * Allocate in the BSS so we won't require allocation in
3401 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
3402 */
3403 static cpumask_t cpus_with_pcps;
3404
3405 /*
3406 * Do not drain if one is already in progress unless it's specific to
3407 * a zone. Such callers are primarily CMA and memory hotplug and need
3408 * the drain to be complete when the call returns.
3409 */
3410 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3411 if (!zone)
3412 return;
3413 mutex_lock(&pcpu_drain_mutex);
3414 }
3415
3416 /*
3417 * We don't care about racing with CPU hotplug event
3418 * as offline notification will cause the notified
3419 * cpu to drain that CPU pcps and on_each_cpu_mask
3420 * disables preemption as part of its processing
3421 */
3422 for_each_online_cpu(cpu) {
3423 struct per_cpu_pages *pcp;
3424 struct zone *z;
3425 bool has_pcps = false;
3426
3427 if (force_all_cpus) {
3428 /*
3429 * The pcp.count check is racy, some callers need a
3430 * guarantee that no cpu is missed.
3431 */
3432 has_pcps = true;
3433 } else if (zone) {
3434 pcp = &per_cpu_ptr(zone_per_cpu_pageset(zone), cpu)->pcp;
3435 if (pcp->count)
3436 has_pcps = true;
3437 } else {
3438 for_each_populated_zone(z) {
3439 pcp = &per_cpu_ptr(zone_per_cpu_pageset(z), cpu)->pcp;
3440 if (pcp->count) {
3441 has_pcps = true;
3442 break;
3443 }
3444 }
3445 }
3446
3447 if (has_pcps)
3448 cpumask_set_cpu(cpu, &cpus_with_pcps);
3449 else
3450 cpumask_clear_cpu(cpu, &cpus_with_pcps);
3451 }
3452
3453 for_each_cpu(cpu, &cpus_with_pcps) {
3454 if (zone)
3455 drain_pages_zone(cpu, zone);
3456 else
3457 drain_pages(cpu);
3458 }
3459
3460 mutex_unlock(&pcpu_drain_mutex);
3461 }
3462
3463 /*
3464 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3465 *
3466 * When zone parameter is non-NULL, spill just the single zone's pages.
3467 */
3468 void drain_all_pages(struct zone *zone)
3469 {
3470 __drain_all_pages(zone, false);
3471 }
3472
3473 #ifdef CONFIG_HIBERNATION
3474
3475 /*
3476 * Touch the watchdog for every WD_PAGE_COUNT pages.
3477 */
3478 #define WD_PAGE_COUNT (128*1024)
3479
3480 void mark_free_pages(struct zone *zone)
3481 {
3482 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3483 unsigned long flags;
3484 unsigned int order, t;
3485 struct page *page;
3486
3487 if (zone_is_empty(zone))
3488 return;
3489
3490 spin_lock_irqsave(&zone->lock, flags);
3491
3492 max_zone_pfn = zone_end_pfn(zone);
3493 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3494 if (pfn_valid(pfn)) {
3495 page = pfn_to_page(pfn);
3496
3497 if (!--page_count) {
3498 touch_nmi_watchdog();
3499 page_count = WD_PAGE_COUNT;
3500 }
3501
3502 if (page_zone(page) != zone)
3503 continue;
3504
3505 if (!swsusp_page_is_forbidden(page))
3506 swsusp_unset_page_free(page);
3507 }
3508
3509 for_each_migratetype_order(order, t) {
3510 list_for_each_entry(page,
3511 &zone->free_area[order].free_list[t], lru) {
3512 unsigned long i;
3513
3514 pfn = page_to_pfn(page);
3515 for (i = 0; i < (1UL << order); i++) {
3516 if (!--page_count) {
3517 touch_nmi_watchdog();
3518 page_count = WD_PAGE_COUNT;
3519 }
3520 swsusp_set_page_free(pfn_to_page(pfn + i));
3521 }
3522 }
3523 }
3524 spin_unlock_irqrestore(&zone->lock, flags);
3525 }
3526 #endif /* CONFIG_HIBERNATION */
3527
3528 static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
3529 unsigned int order)
3530 {
3531 int migratetype;
3532
3533 if (!free_pcp_prepare(page, order))
3534 return false;
3535
3536 migratetype = get_pfnblock_migratetype(page, pfn);
3537 set_pcppage_migratetype(page, migratetype);
3538 return true;
3539 }
3540
3541 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch)
3542 {
3543 int min_nr_free, max_nr_free;
3544
3545 /* Check for PCP disabled or boot pageset */
3546 if (unlikely(high < batch))
3547 return 1;
3548
3549 /* Leave at least pcp->batch pages on the list */
3550 min_nr_free = batch;
3551 max_nr_free = high - batch;
3552
3553 /*
3554 * Double the number of pages freed each time there is subsequent
3555 * freeing of pages without any allocation.
3556 */
3557 batch <<= pcp->free_factor;
3558 if (batch < max_nr_free)
3559 pcp->free_factor++;
3560 batch = clamp(batch, min_nr_free, max_nr_free);
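/*
 * e.g. with high = 512 and batch = 64, back-to-back frees flush
 * 64, 128 and 256 pages, then cap at high - batch = 448.
 */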
3561
3562 return batch;
3563 }
3564
3565 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
3566 {
3567 int high = READ_ONCE(pcp->high);
3568
3569 if (unlikely(!high))
3570 return 0;
3571
3572 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
3573 return high;
3574
3575 /*
3576 * If reclaim is active, limit the number of pages that can be
3577 * stored on pcp lists
3578 */
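/* e.g. batch = 64 caps the pcp list at 256 pages while reclaim is active */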
3579 return min(READ_ONCE(pcp->batch) << 2, high);
3580 }
3581
3582 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
3583 struct page *page, unsigned long pfn,
3584 int migratetype, unsigned int order)
3585 {
3586 int high;
3587 int pindex;
3588
3589 __count_vm_event(PGFREE);
3590 pindex = order_to_pindex(migratetype, order);
3591 list_add(&page->lru, &pcp->lists[pindex]);
3592 pcp->count += 1 << order;
3593 high = nr_pcp_high(pcp, zone);
3594 if (pcp->count >= high) {
3595 int batch = READ_ONCE(pcp->batch);
3596
3597 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
3598 }
3599 }
3600
3601 /*
3602 * Free a pcp page
3603 */
3604 void free_unref_page(struct page *page, unsigned int order)
3605 {
3606 unsigned long flags;
3607 unsigned long __maybe_unused UP_flags;
3608 struct per_cpu_pages *pcp;
3609 struct zone *zone;
3610 unsigned long pfn = page_to_pfn(page);
3611 int migratetype, pcpmigratetype;
3612 bool pcp_skip_cma_pages = false;
3613 bool skip_free_unref_page = false;
3614
3615 if (!free_unref_page_prepare(page, pfn, order))
3616 return;
3617
3618 migratetype = get_pcppage_migratetype(page);
3619 trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
3620 if (skip_free_unref_page)
3621 return;
3622
3623 /*
3624 * We only track unmovable, reclaimable, movable, and CMA on pcp lists.
3625 * Place ISOLATE pages on the isolated list because they are being
3626 * offlined, but treat HIGHATOMIC and CMA as movable pages so we can
3627 * get those areas back if necessary. Otherwise, we may have to free
3628 * excessively into the page allocator.
3629 */
3630 migratetype = pcpmigratetype = get_pcppage_migratetype(page);
3631 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
3632 trace_android_vh_pcplist_add_cma_pages_bypass(migratetype,
3633 &pcp_skip_cma_pages);
3634 if (unlikely(is_migrate_isolate(migratetype)) ||
3635 pcp_skip_cma_pages) {
3636 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
3637 return;
3638 }
3639 pcpmigratetype = MIGRATE_MOVABLE;
3640 }
3641
3642 zone = page_zone(page);
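/*
 * Try the pcp list first; if the trylock fails (e.g. a parallel drain
 * holds the lock), fall back to freeing directly to the buddy allocator.
 */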
3643 pcp_trylock_prepare(UP_flags);
3644 pcp = pcp_spin_trylock_irqsave(zone_per_cpu_pageset(zone), flags);
3645 if (pcp) {
3646 free_unref_page_commit(zone, pcp, page, pfn, pcpmigratetype, order);
3647 pcp_spin_unlock_irqrestore(pcp, flags);
3648 } else {
3649 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
3650 }
3651 pcp_trylock_finish(UP_flags);
3652 }
3653
3654 /*
3655 * Free a list of 0-order pages
3656 */
3657 void free_unref_page_list(struct list_head *list)
3658 {
3659 struct page *page, *next;
3660 struct per_cpu_pages *pcp = NULL;
3661 struct zone *locked_zone = NULL;
3662 unsigned long flags, pfn;
3663 int batch_count = 0;
3664 int migratetype;
3665 bool pcp_skip_cma_pages = false;
3666
3667 /* Prepare pages for freeing */
3668 list_for_each_entry_safe(page, next, list, lru) {
3669 pfn = page_to_pfn(page);
3670 if (!free_unref_page_prepare(page, pfn, 0)) {
3671 list_del(&page->lru);
3672 continue;
3673 }
3674
3675 /*
3676 * Free isolated pages directly to the allocator, see
3677 * comment in free_unref_page.
3678 */
3679 migratetype = get_pcppage_migratetype(page);
3680 trace_android_vh_pcplist_add_cma_pages_bypass(migratetype,
3681 &pcp_skip_cma_pages);
3682 if (unlikely(is_migrate_isolate(migratetype)) ||
3683 pcp_skip_cma_pages) {
3684 list_del(&page->lru);
3685 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
3686 continue;
3687 }
3688
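/* Stash the pfn in page->private so the second pass need not recompute it */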
3689 set_page_private(page, pfn);
3690 }
3691
3692 list_for_each_entry_safe(page, next, list, lru) {
3693 struct zone *zone = page_zone(page);
3694
3695 /* Different zone, different pcp lock. */
3696 if (zone != locked_zone) {
3697 if (pcp)
3698 pcp_spin_unlock_irqrestore(pcp, flags);
3699
3700 locked_zone = zone;
3701 pcp = pcp_spin_lock_irqsave(zone_per_cpu_pageset(locked_zone), flags);
3702 }
3703
3704 pfn = page_private(page);
3705 set_page_private(page, 0);
3706
3707 /*
3708 * Non-isolated types over MIGRATE_PCPTYPES get added
3709 * to the MIGRATE_MOVABLE pcp list.
3710 */
3711 migratetype = get_pcppage_migratetype(page);
3712 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
3713 migratetype = MIGRATE_MOVABLE;
3714
3715 trace_mm_page_free_batched(page);
3716 free_unref_page_commit(zone, pcp, page, pfn, migratetype, 0);
3717
3718 /*
3719 * Guard against excessive IRQ disabled times when we get
3720 * a large list of pages to free.
3721 */
3722 if (++batch_count == SWAP_CLUSTER_MAX) {
3723 pcp_spin_unlock_irqrestore(pcp, flags);
3724 batch_count = 0;
3725 pcp = pcp_spin_lock_irqsave(zone_per_cpu_pageset(locked_zone), flags);
3726 }
3727 }
3728
3729 if (pcp)
3730 pcp_spin_unlock_irqrestore(pcp, flags);
3731 }
3732
3733 /*
3734 * split_page takes a non-compound higher-order page, and splits it into
3735 * n (1<<order) sub-pages: page[0..n-1]
3736 * Each sub-page must be freed individually.
3737 *
3738 * Note: this is probably too low level an operation for use in drivers.
3739 * Please consult with lkml before using this in your driver.
3740 */
3741 void split_page(struct page *page, unsigned int order)
3742 {
3743 int i;
3744
3745 VM_BUG_ON_PAGE(PageCompound(page), page);
3746 VM_BUG_ON_PAGE(!page_count(page), page);
3747
3748 for (i = 1; i < (1 << order); i++)
3749 set_page_refcounted(page + i);
3750 split_page_owner(page, 1 << order);
3751 split_page_memcg(page, 1 << order);
3752 }
3753 EXPORT_SYMBOL_GPL(split_page);
3754
3755 int __isolate_free_page(struct page *page, unsigned int order)
3756 {
3757 unsigned long watermark;
3758 struct zone *zone;
3759 int mt;
3760
3761 BUG_ON(!PageBuddy(page));
3762
3763 zone = page_zone(page);
3764 mt = get_pageblock_migratetype(page);
3765
3766 if (!is_migrate_isolate(mt)) {
3767 /*
3768 * Obey watermarks as if the page was being allocated. We can
3769 * emulate a high-order watermark check with a raised order-0
3770 * watermark, because we already know our high-order page
3771 * exists.
3772 */
3773 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3774 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3775 return 0;
3776
3777 __mod_zone_freepage_state(zone, -(1UL << order), mt);
3778 }
3779
3780 /* Remove page from free list */
3781
3782 del_page_from_free_list(page, zone, order);
3783
3784 /*
3785 * Set the pageblock if the isolated page is at least half of a
3786 * pageblock
3787 */
3788 if (order >= pageblock_order - 1) {
3789 struct page *endpage = page + (1 << order) - 1;
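/*
 * Convert each pageblock spanned by the isolated range to MOVABLE,
 * but leave ISOLATE, CMA and HIGHATOMIC blocks untouched.
 */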
3790 for (; page < endpage; page += pageblock_nr_pages) {
3791 int mt = get_pageblock_migratetype(page);
3792 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3793 && !is_migrate_highatomic(mt))
3794 set_pageblock_migratetype(page,
3795 MIGRATE_MOVABLE);
3796 }
3797 }
3798 
3800 return 1UL << order;
3801 }
3802
3803 /**
3804 * __putback_isolated_page - Return a now-isolated page back where we got it
3805 * @page: Page that was isolated
3806 * @order: Order of the isolated page
3807 * @mt: The page's pageblock's migratetype
3808 *
3809 * This function is meant to return a page pulled from the free lists via
3810 * __isolate_free_page back to the free lists they were pulled from.
3811 */
3812 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3813 {
3814 struct zone *zone = page_zone(page);
3815
3816 /* zone lock should be held when this function is called */
3817 lockdep_assert_held(&zone->lock);
3818
3819 /* Return isolated page to tail of freelist. */
3820 __free_one_page(page, page_to_pfn(page), zone, order, mt,
3821 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3822 }
3823
3824 /*
3825 * Update NUMA hit/miss statistics
3826 *
3827 * Must be called with interrupts disabled.
3828 */
3829 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
3830 long nr_account)
3831 {
3832 #ifdef CONFIG_NUMA
3833 enum numa_stat_item local_stat = NUMA_LOCAL;
3834
3835 /* skip numa counters update if numa stats is disabled */
3836 if (!static_branch_likely(&vm_numa_stat_key))
3837 return;
3838
3839 if (zone_to_nid(z) != numa_node_id())
3840 local_stat = NUMA_OTHER;
3841
3842 if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3843 __count_numa_events(z, NUMA_HIT, nr_account);
3844 else {
3845 __count_numa_events(z, NUMA_MISS, nr_account);
3846 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
3847 }
3848 __count_numa_events(z, local_stat, nr_account);
3849 #endif
3850 }
3851
3852 #ifdef CONFIG_CMA
3853 /*
3854 * A GFP_MOVABLE allocation could drain UNMOVABLE & RECLAIMABLE page blocks
3855 * with the help of CMA, which can make GFP_KERNEL allocations fail. Check
3856 * zone_watermark_ok again without ALLOC_CMA to decide whether to use CMA first.
3857 */
3858 static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
3859 {
3860 unsigned long watermark;
3861 bool cma_first = false;
3862
3863 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3864 /* check if GFP_MOVABLE passed the previous zone_watermark_ok only with the help of CMA */
3865 if (zone_watermark_ok(zone, order, watermark, 0, alloc_flags & (~ALLOC_CMA))) {
3866 /*
3867 * Balance movable allocations between regular and CMA areas by
3868 * allocating from CMA when over half of the zone's free memory
3869 * is in the CMA area.
3870 */
3871 cma_first = (zone_page_state(zone, NR_FREE_CMA_PAGES) >
3872 zone_page_state(zone, NR_FREE_PAGES) / 2);
3873 } else {
3874 /*
3875 * The watermark check failed, meaning UNMOVABLE & RECLAIMABLE pages
3876 * are running short; use CMA first so that they stay around their
3877 * corresponding watermark.
3878 */
3879 cma_first = true;
3880 }
3881 return cma_first;
3882 }
3883 #else
3884 static bool use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
3885 {
3886 return false;
3887 }
3888 #endif
3889
3890 static __always_inline
3891 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
3892 unsigned int order, unsigned int alloc_flags,
3893 int migratetype)
3894 {
3895 struct page *page;
3896 unsigned long flags;
3897
3898 do {
3899 page = NULL;
3900 spin_lock_irqsave(&zone->lock, flags);
3901 /*
3902 * An order-0 request can reach here when the pcplist is skipped
3903 * due to a non-CMA allocation context. The HIGHATOMIC area is
3904 * reserved for high-order atomic allocations, so order-0
3905 * requests should skip it.
3906 */
3907 if (order > 0 && alloc_flags & ALLOC_HARDER) {
3908 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3909 if (page)
3910 trace_mm_page_alloc_zone_locked(page, order, migratetype);
3911 }
3912 if (!page) {
3913 /*
3914 * Balance movable allocations between regular and CMA areas by
3915 * allocating from CMA based on re-checking zone_watermark_ok to
3916 * see whether the latest check only passed with the help of CMA
3917 */
3918 if (alloc_flags & ALLOC_CMA) {
3919 bool use_cma_first_check = false;
3920 bool try_cma;
3921
3922 trace_android_vh_use_cma_first_check(&use_cma_first_check);
3923 try_cma = use_cma_first_check ?
3924 use_cma_first(zone, order, alloc_flags) :
3925 migratetype == MIGRATE_MOVABLE;
3926 if (try_cma)
3927 page = __rmqueue_cma(zone, order, migratetype,
3928 alloc_flags);
3929 }
3930 if (!page)
3931 page = __rmqueue(zone, order, migratetype,
3932 alloc_flags);
3933 }
3934 if (!page) {
3935 spin_unlock_irqrestore(&zone->lock, flags);
3936 return NULL;
3937 }
3938 __mod_zone_freepage_state(zone, -(1 << order),
3939 get_pcppage_migratetype(page));
3940 spin_unlock_irqrestore(&zone->lock, flags);
3941 } while (check_new_pages(page, order));
3942
3943 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3944 zone_statistics(preferred_zone, zone, 1);
3945
3946 return page;
3947 }
3948
3949 /* Remove page from the per-cpu list, caller must protect the list */
3950 static inline
3951 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
3952 int migratetype,
3953 unsigned int alloc_flags,
3954 struct per_cpu_pages *pcp,
3955 gfp_t gfp_flags)
3956 {
3957 struct page *page = NULL;
3958 struct list_head *list = NULL;
3959
3960 do {
3961 /* First try to get CMA pages */
3962 if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA)
3963 list = get_populated_pcp_list(zone, order, pcp, get_cma_migrate_type(),
3964 alloc_flags);
3965 if (list == NULL) {
3966 /*
3967 * Either CMA is not suitable or there are no
3968 * free CMA pages.
3969 */
3970 list = get_populated_pcp_list(zone, order, pcp, migratetype, alloc_flags);
3971 if (unlikely(list == NULL) || unlikely(list_empty(list)))
3972 return NULL;
3973 }
3974
3975 page = list_first_entry(list, struct page, lru);
3976 list_del(&page->lru);
3977 pcp->count -= 1 << order;
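/* pcp->count is tracked in base pages, hence the shift by order */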
3978 } while (check_new_pcp(page));
3979
3980 return page;
3981 }
3982
3983 /* Lock and remove page from the per-cpu list */
3984 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3985 struct zone *zone, unsigned int order,
3986 gfp_t gfp_flags, int migratetype,
3987 unsigned int alloc_flags)
3988 {
3989 struct per_cpu_pages *pcp;
3990 struct page *page;
3991 unsigned long flags;
3992 unsigned long __maybe_unused UP_flags;
3993
3994 /*
3995 * spin_trylock may fail due to a parallel drain. In the future, the
3996 * trylock will also protect against IRQ reentrancy.
3997 */
3998 pcp_trylock_prepare(UP_flags);
3999 pcp = pcp_spin_trylock_irqsave(zone_per_cpu_pageset(zone), flags);
4000 if (!pcp) {
4001 pcp_trylock_finish(UP_flags);
4002 return NULL;
4003 }
4004
4005 /*
4006 * On allocation, reduce the number of pages that are batch freed.
4007 * See nr_pcp_free() where free_factor is increased for subsequent
4008 * frees.
4009 */
4010 pcp->free_factor >>= 1;
4011 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, gfp_flags);
4012 pcp_spin_unlock_irqrestore(pcp, flags);
4013 pcp_trylock_finish(UP_flags);
4014 if (page) {
4015 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
4016 zone_statistics(preferred_zone, zone, 1);
4017 }
4018 return page;
4019 }
4020
4021 /*
4022 * Allocate a page from the given zone. Use pcplists for orders allowed by pcp_allowed_order().
4023 */
4024 static inline
4025 struct page *rmqueue(struct zone *preferred_zone,
4026 struct zone *zone, unsigned int order,
4027 gfp_t gfp_flags, unsigned int alloc_flags,
4028 int migratetype)
4029 {
4030 struct page *page;
4031
4032 if (likely(pcp_allowed_order(order))) {
4033 page = rmqueue_pcplist(preferred_zone, zone, order,
4034 gfp_flags, migratetype, alloc_flags);
4035 if (likely(page))
4036 goto out;
4037 }
4038
4039 /*
4040 * We most definitely don't want callers attempting to
4041 * allocate greater than order-1 page units with __GFP_NOFAIL.
4042 */
4043 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
4044 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
4045 migratetype);
4046 trace_android_vh_rmqueue(preferred_zone, zone, order,
4047 gfp_flags, alloc_flags, migratetype);
4048
4049 out:
4050 /* Separate test+clear to avoid unnecessary atomics */
4051 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
4052 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
4053 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
4054 }
4055
4056 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
4057 return page;
4058 }
4059
4060 #ifdef CONFIG_FAIL_PAGE_ALLOC
4061
4062 static struct {
4063 struct fault_attr attr;
4064
4065 bool ignore_gfp_highmem;
4066 bool ignore_gfp_reclaim;
4067 u32 min_order;
4068 } fail_page_alloc = {
4069 .attr = FAULT_ATTR_INITIALIZER,
4070 .ignore_gfp_reclaim = true,
4071 .ignore_gfp_highmem = true,
4072 .min_order = 1,
4073 };
4074
4075 static int __init setup_fail_page_alloc(char *str)
4076 {
4077 return setup_fault_attr(&fail_page_alloc.attr, str);
4078 }
4079 __setup("fail_page_alloc=", setup_fail_page_alloc);
4080
4081 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
4082 {
4083 if (order < fail_page_alloc.min_order)
4084 return false;
4085 if (gfp_mask & __GFP_NOFAIL)
4086 return false;
4087 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
4088 return false;
4089 if (fail_page_alloc.ignore_gfp_reclaim &&
4090 (gfp_mask & __GFP_DIRECT_RECLAIM))
4091 return false;
4092
4093 return should_fail(&fail_page_alloc.attr, 1 << order);
4094 }
4095
4096 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
4097
4098 static int __init fail_page_alloc_debugfs(void)
4099 {
4100 umode_t mode = S_IFREG | 0600;
4101 struct dentry *dir;
4102
4103 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
4104 &fail_page_alloc.attr);
4105
4106 debugfs_create_bool("ignore-gfp-wait", mode, dir,
4107 &fail_page_alloc.ignore_gfp_reclaim);
4108 debugfs_create_bool("ignore-gfp-highmem", mode, dir,
4109 &fail_page_alloc.ignore_gfp_highmem);
4110 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
4111
4112 return 0;
4113 }
4114
4115 late_initcall(fail_page_alloc_debugfs);
4116
4117 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
4118
4119 #else /* CONFIG_FAIL_PAGE_ALLOC */
4120
4121 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
4122 {
4123 return false;
4124 }
4125
4126 #endif /* CONFIG_FAIL_PAGE_ALLOC */
4127
4128 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
4129 {
4130 return __should_fail_alloc_page(gfp_mask, order);
4131 }
4132 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
4133
4134 static inline long __zone_watermark_unusable_free(struct zone *z,
4135 unsigned int order, unsigned int alloc_flags)
4136 {
4137 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
4138 long unusable_free = (1 << order) - 1;
4139
4140 /*
4141 * If the caller does not have rights to ALLOC_HARDER then subtract
4142 * the high-atomic reserves. This will over-estimate the size of the
4143 * atomic reserve but it avoids a search.
4144 */
4145 if (likely(!alloc_harder))
4146 unusable_free += z->nr_reserved_highatomic;
4147
4148 #ifdef CONFIG_CMA
4149 /* If allocation can't use CMA areas don't use free CMA pages */
4150 if (!(alloc_flags & ALLOC_CMA))
4151 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
4152 #endif
4153
4154 return unusable_free;
4155 }
4156
4157 /*
4158 * Return true if free base pages are above 'mark'. For high-order checks it
4159 * will return true if the order-0 watermark is reached and there is at least
4160 * one free page of a suitable size. Checking now avoids taking the zone lock
4161 * to check in the allocation paths if no pages are free.
4162 */
4163 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4164 int highest_zoneidx, unsigned int alloc_flags,
4165 long free_pages)
4166 {
4167 long min = mark;
4168 int o;
4169 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
4170
4171 /* free_pages may go negative - that's OK */
4172 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
4173
4174 if (alloc_flags & ALLOC_HIGH)
4175 min -= min / 2;
4176
4177 if (unlikely(alloc_harder)) {
4178 /*
4179 * OOM victims can try even harder than normal ALLOC_HARDER
4180 * users on the grounds that it's definitely going to be in
4181 * the exit path shortly and free memory. Any allocation it
4182 * makes during the free path will be small and short-lived.
4183 */
4184 if (alloc_flags & ALLOC_OOM)
4185 min -= min / 2;
4186 else
4187 min -= min / 4;
4188 }
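/*
 * e.g. mark = 1024: ALLOC_HIGH lowers the effective minimum to 512;
 * on top of that, ALLOC_HARDER lowers it to 384 and ALLOC_OOM to 256.
 */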
4189
4190 /*
4191 * Check watermarks for an order-0 allocation request. If these
4192 * are not met, then a high-order request also cannot go ahead
4193 * even if a suitable page happened to be free.
4194 */
4195 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
4196 return false;
4197
4198 /* If this is an order-0 request then the watermark is fine */
4199 if (!order)
4200 return true;
4201
4202 /* For a high-order request, check at least one suitable page is free */
4203 for (o = order; o < MAX_ORDER; o++) {
4204 struct free_area *area = &z->free_area[o];
4205 int mt;
4206
4207 if (!area->nr_free)
4208 continue;
4209
4210 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
4211 #ifdef CONFIG_CMA
4212 /*
4213 * Note that this check is needed only
4214 * when MIGRATE_CMA < MIGRATE_PCPTYPES.
4215 */
4216 if (mt == MIGRATE_CMA)
4217 continue;
4218 #endif
4219 if (!free_area_empty(area, mt))
4220 return true;
4221 }
4222
4223 #ifdef CONFIG_CMA
4224 if ((alloc_flags & ALLOC_CMA) &&
4225 !free_area_empty(area, MIGRATE_CMA)) {
4226 return true;
4227 }
4228 #endif
4229 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
4230 return true;
4231 }
4232 return false;
4233 }
4234
4235 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
4236 int highest_zoneidx, unsigned int alloc_flags)
4237 {
4238 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4239 zone_page_state(z, NR_FREE_PAGES));
4240 }
4241
4242 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
4243 unsigned long mark, int highest_zoneidx,
4244 unsigned int alloc_flags, gfp_t gfp_mask)
4245 {
4246 long free_pages;
4247
4248 free_pages = zone_page_state(z, NR_FREE_PAGES);
4249
4250 /*
4251 * Fast check for order-0 only. If this fails then the reserves
4252 * need to be calculated.
4253 */
4254 if (!order) {
4255 long usable_free;
4256 long reserved;
4257
4258 usable_free = free_pages;
4259 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
4260
4261 /* reserved may over estimate high-atomic reserves. */
4262 usable_free -= min(usable_free, reserved);
4263 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
4264 return true;
4265 }
4266
4267 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
4268 free_pages))
4269 return true;
4270 /*
4271 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
4272 * when checking the min watermark. The min watermark is the
4273 * point where boosting is ignored so that kswapd is woken up
4274 * when below the low watermark.
4275 */
4276 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
4277 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
4278 mark = z->_watermark[WMARK_MIN];
4279 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
4280 alloc_flags, free_pages);
4281 }
4282
4283 return false;
4284 }
4285
4286 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
4287 unsigned long mark, int highest_zoneidx)
4288 {
4289 long free_pages = zone_page_state(z, NR_FREE_PAGES);
4290
4291 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
4292 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
4293
4294 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
4295 free_pages);
4296 }
4297
4298 #ifdef CONFIG_NUMA
4299 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4300 {
4301 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
4302 node_reclaim_distance;
4303 }
4304 #else /* CONFIG_NUMA */
4305 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4306 {
4307 return true;
4308 }
4309 #endif /* CONFIG_NUMA */
4310
4311 /*
4312 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
4313 * fragmentation is subtle. If the preferred zone was HIGHMEM then
4314 * premature use of a lower zone may cause lowmem pressure problems that
4315 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
4316 * probably too small. It only makes sense to spread allocations to avoid
4317 * fragmentation between the Normal and DMA32 zones.
4318 */
4319 static inline unsigned int
4320 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
4321 {
4322 unsigned int alloc_flags;
4323
4324 /*
4325 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4326 * to save a branch.
4327 */
4328 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
4329
4330 #ifdef CONFIG_ZONE_DMA32
4331 if (!zone)
4332 return alloc_flags;
4333
4334 if (zone_idx(zone) != ZONE_NORMAL)
4335 return alloc_flags;
4336
4337 /*
4338 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
4339 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
4340 * on UMA that if Normal is populated then so is DMA32.
4341 */
4342 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
4343 if (nr_online_nodes > 1 && !populated_zone(--zone))
4344 return alloc_flags;
4345
4346 alloc_flags |= ALLOC_NOFRAGMENT;
4347 #endif /* CONFIG_ZONE_DMA32 */
4348 return alloc_flags;
4349 }
4350
4351 /* Must be called after current_gfp_context() which can change gfp_mask */
4352 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
4353 unsigned int alloc_flags)
4354 {
4355 #ifdef CONFIG_CMA
4356 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE && gfp_mask & __GFP_CMA)
4357 alloc_flags |= ALLOC_CMA;
4358 trace_android_vh_alloc_flags_cma_adjust(gfp_mask, &alloc_flags);
4359 #endif
4360 return alloc_flags;
4361 }
4362
4363 /*
4364 * get_page_from_freelist goes through the zonelist trying to allocate
4365 * a page.
4366 */
4367 static struct page *
4368 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
4369 const struct alloc_context *ac)
4370 {
4371 struct zoneref *z;
4372 struct zone *zone;
4373 struct pglist_data *last_pgdat_dirty_limit = NULL;
4374 bool no_fallback;
4375
4376 retry:
4377 /*
4378 * Scan zonelist, looking for a zone with enough free.
4379 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
4380 */
4381 no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
4382 z = ac->preferred_zoneref;
4383 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
4384 ac->nodemask) {
4385 struct page *page;
4386 unsigned long mark;
4387
4388 if (cpusets_enabled() &&
4389 (alloc_flags & ALLOC_CPUSET) &&
4390 !__cpuset_zone_allowed(zone, gfp_mask))
4391 continue;
4392 /*
4393 * When allocating a page cache page for writing, we
4394 * want to get it from a node that is within its dirty
4395 * limit, such that no single node holds more than its
4396 * proportional share of globally allowed dirty pages.
4397 * The dirty limits take into account the node's
4398 * lowmem reserves and high watermark so that kswapd
4399 * should be able to balance it without having to
4400 * write pages from its LRU list.
4401 *
4402 * XXX: For now, allow allocations to potentially
4403 * exceed the per-node dirty limit in the slowpath
4404 * (spread_dirty_pages unset) before going into reclaim,
4405 * which is important when on a NUMA setup the allowed
4406 * nodes are together not big enough to reach the
4407 * global limit. The proper fix for these situations
4408 * will require awareness of nodes in the
4409 * dirty-throttling and the flusher threads.
4410 */
4411 if (ac->spread_dirty_pages) {
4412 if (last_pgdat_dirty_limit == zone->zone_pgdat)
4413 continue;
4414
4415 if (!node_dirty_ok(zone->zone_pgdat)) {
4416 last_pgdat_dirty_limit = zone->zone_pgdat;
4417 continue;
4418 }
4419 }
4420
4421 if (no_fallback && nr_online_nodes > 1 &&
4422 zone != ac->preferred_zoneref->zone) {
4423 int local_nid;
4424
4425 /*
4426 * If moving to a remote node, retry but allow
4427 * fragmenting fallbacks. Locality is more important
4428 * than fragmentation avoidance.
4429 */
4430 local_nid = zone_to_nid(ac->preferred_zoneref->zone);
4431 if (zone_to_nid(zone) != local_nid) {
4432 alloc_flags &= ~ALLOC_NOFRAGMENT;
4433 goto retry;
4434 }
4435 }
4436
4437 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
4438 if (!zone_watermark_fast(zone, order, mark,
4439 ac->highest_zoneidx, alloc_flags,
4440 gfp_mask)) {
4441 int ret;
4442
4443 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4444 /*
4445 * Watermark failed for this zone, but see if we can
4446 * grow this zone if it contains deferred pages.
4447 */
4448 if (static_branch_unlikely(&deferred_pages)) {
4449 if (_deferred_grow_zone(zone, order))
4450 goto try_this_zone;
4451 }
4452 #endif
4453 /* Checked here to keep the fast path fast */
4454 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
4455 if (alloc_flags & ALLOC_NO_WATERMARKS)
4456 goto try_this_zone;
4457
4458 if (!node_reclaim_enabled() ||
4459 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
4460 continue;
4461
4462 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
4463 switch (ret) {
4464 case NODE_RECLAIM_NOSCAN:
4465 /* did not scan */
4466 continue;
4467 case NODE_RECLAIM_FULL:
4468 /* scanned but unreclaimable */
4469 continue;
4470 default:
4471 /* did we reclaim enough */
4472 if (zone_watermark_ok(zone, order, mark,
4473 ac->highest_zoneidx, alloc_flags))
4474 goto try_this_zone;
4475
4476 continue;
4477 }
4478 }
4479
4480 try_this_zone:
4481 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
4482 gfp_mask, alloc_flags, ac->migratetype);
4483 if (page) {
4484 prep_new_page(page, order, gfp_mask, alloc_flags);
4485
4486 /*
4487 * If this is a high-order atomic allocation then check
4488 * if the pageblock should be reserved for the future
4489 */
4490 if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4491 reserve_highatomic_pageblock(page, zone, order);
4492
4493 return page;
4494 } else {
4495 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4496 /* Try again if zone has deferred pages */
4497 if (static_branch_unlikely(&deferred_pages)) {
4498 if (_deferred_grow_zone(zone, order))
4499 goto try_this_zone;
4500 }
4501 #endif
4502 }
4503 }
4504
4505 /*
4506 * It's possible on a UMA machine to get through all zones that are
4507 * fragmented. If avoiding fragmentation, reset and try again.
4508 */
4509 if (no_fallback) {
4510 alloc_flags &= ~ALLOC_NOFRAGMENT;
4511 goto retry;
4512 }
4513
4514 return NULL;
4515 }
4516
4517 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4518 {
4519 unsigned int filter = SHOW_MEM_FILTER_NODES;
4520
4521 /*
4522 * This documents exceptions given to allocations in certain
4523 * contexts that are allowed to allocate outside current's set
4524 * of allowed nodes.
4525 */
4526 if (!(gfp_mask & __GFP_NOMEMALLOC))
4527 if (tsk_is_oom_victim(current) ||
4528 (current->flags & (PF_MEMALLOC | PF_EXITING)))
4529 filter &= ~SHOW_MEM_FILTER_NODES;
4530 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4531 filter &= ~SHOW_MEM_FILTER_NODES;
4532
4533 show_mem(filter, nodemask);
4534 }
4535
4536 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4537 {
4538 struct va_format vaf;
4539 va_list args;
4540 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
4541
4542 if ((gfp_mask & __GFP_NOWARN) ||
4543 !__ratelimit(&nopage_rs) ||
4544 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4545 return;
4546
4547 va_start(args, fmt);
4548 vaf.fmt = fmt;
4549 vaf.va = &args;
4550 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4551 current->comm, &vaf, gfp_mask, &gfp_mask,
4552 nodemask_pr_args(nodemask));
4553 va_end(args);
4554
4555 cpuset_print_current_mems_allowed();
4556 pr_cont("\n");
4557 dump_stack();
4558 warn_alloc_show_mem(gfp_mask, nodemask);
4559 }
4560
4561 static inline struct page *
4562 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4563 unsigned int alloc_flags,
4564 const struct alloc_context *ac)
4565 {
4566 struct page *page;
4567
4568 page = get_page_from_freelist(gfp_mask, order,
4569 alloc_flags|ALLOC_CPUSET, ac);
4570 /*
4571 * fallback to ignore cpuset restriction if our nodes
4572 * are depleted
4573 */
4574 if (!page)
4575 page = get_page_from_freelist(gfp_mask, order,
4576 alloc_flags, ac);
4577
4578 return page;
4579 }
4580
4581 static inline struct page *
4582 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4583 const struct alloc_context *ac, unsigned long *did_some_progress)
4584 {
4585 struct oom_control oc = {
4586 .zonelist = ac->zonelist,
4587 .nodemask = ac->nodemask,
4588 .memcg = NULL,
4589 .gfp_mask = gfp_mask,
4590 .order = order,
4591 };
4592 struct page *page;
4593
4594 *did_some_progress = 0;
4595
4596 /*
4597 * Acquire the oom lock. If that fails, somebody else is
4598 * making progress for us.
4599 */
4600 if (!mutex_trylock(&oom_lock)) {
4601 *did_some_progress = 1;
4602 schedule_timeout_uninterruptible(1);
4603 return NULL;
4604 }
4605
4606 /*
4607 * Go through the zonelist yet one more time, keeping a very high
4608 * watermark here; this is only to catch a parallel oom killing, and we
4609 * must fail if we're still under heavy pressure. But make sure that
4610 * this reclaim attempt shall not depend on a __GFP_DIRECT_RECLAIM &&
4611 * !__GFP_NORETRY allocation, which will never fail due to oom_lock already being held.
4612 */
4613 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4614 ~__GFP_DIRECT_RECLAIM, order,
4615 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4616 if (page)
4617 goto out;
4618
4619 /* Coredumps can quickly deplete all memory reserves */
4620 if (current->flags & PF_DUMPCORE)
4621 goto out;
4622 /* The OOM killer will not help higher order allocs */
4623 if (order > PAGE_ALLOC_COSTLY_ORDER)
4624 goto out;
4625 /*
4626 * We have already exhausted all our reclaim opportunities without any
4627 * success so it is time to admit defeat. We will skip the OOM killer
4628 * because it is very likely that the caller has a more reasonable
4629 * fallback than shooting a random task.
4630 *
4631 * The OOM killer may not free memory on a specific node.
4632 */
4633 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4634 goto out;
4635 /* The OOM killer does not needlessly kill tasks for lowmem */
4636 if (ac->highest_zoneidx < ZONE_NORMAL)
4637 goto out;
4638 if (pm_suspended_storage())
4639 goto out;
4640 /*
4641 * XXX: GFP_NOFS allocations should rather fail than rely on
4642 * other request to make a forward progress.
4643 * We are in an unfortunate situation where out_of_memory cannot
4644 * do much for this context but let's try it to at least get
4645 * access to memory reserved if the current task is killed (see
4646 * out_of_memory). Once filesystems are ready to handle allocation
4647 * failures more gracefully we should just bail out here.
4648 */
4649
4650 /* Exhausted what can be done so it's blame time */
4651 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4652 *did_some_progress = 1;
4653
4654 /*
4655 * Help non-failing allocations by giving them access to memory
4656 * reserves
4657 */
4658 if (gfp_mask & __GFP_NOFAIL)
4659 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4660 ALLOC_NO_WATERMARKS, ac);
4661 }
4662 out:
4663 mutex_unlock(&oom_lock);
4664 return page;
4665 }
4666
4667 /*
4668 * Maximum number of compaction retries, with progress, before the OOM
4669 * killer is considered the only way to move forward.
4670 */
4671 #define MAX_COMPACT_RETRIES 16
4672
4673 #ifdef CONFIG_COMPACTION
4674 /* Try memory compaction for high-order allocations before reclaim */
4675 static struct page *
4676 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4677 unsigned int alloc_flags, const struct alloc_context *ac,
4678 enum compact_priority prio, enum compact_result *compact_result)
4679 {
4680 struct page *page = NULL;
4681 unsigned long pflags;
4682 unsigned int noreclaim_flag;
4683
4684 if (!order)
4685 return NULL;
4686
4687 psi_memstall_enter(&pflags);
4688 noreclaim_flag = memalloc_noreclaim_save();
4689
4690 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4691 prio, &page);
4692
4693 memalloc_noreclaim_restore(noreclaim_flag);
4694 psi_memstall_leave(&pflags);
4695
4696 if (*compact_result == COMPACT_SKIPPED)
4697 return NULL;
4698 /*
4699 * At least in one zone compaction wasn't deferred or skipped, so let's
4700 * count a compaction stall
4701 */
4702 count_vm_event(COMPACTSTALL);
4703
4704 /* Prep a captured page if available */
4705 if (page)
4706 prep_new_page(page, order, gfp_mask, alloc_flags);
4707
4708 /* Try to get a page from the freelist if available */
4709 if (!page)
4710 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4711
4712 if (page) {
4713 struct zone *zone = page_zone(page);
4714
4715 zone->compact_blockskip_flush = false;
4716 compaction_defer_reset(zone, order, true);
4717 count_vm_event(COMPACTSUCCESS);
4718 return page;
4719 }
4720
4721 /*
4722 * It's bad if a compaction run occurs and fails. The most likely reason
4723 * is that pages exist, but not enough to satisfy watermarks.
4724 */
4725 count_vm_event(COMPACTFAIL);
4726
4727 cond_resched();
4728
4729 return NULL;
4730 }
4731
4732 static inline bool
4733 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4734 enum compact_result compact_result,
4735 enum compact_priority *compact_priority,
4736 int *compaction_retries)
4737 {
4738 int max_retries = MAX_COMPACT_RETRIES;
4739 int min_priority;
4740 bool ret = false;
4741 int retries = *compaction_retries;
4742 enum compact_priority priority = *compact_priority;
4743
4744 if (!order)
4745 return false;
4746
4747 if (fatal_signal_pending(current))
4748 return false;
4749
4750 if (compaction_made_progress(compact_result))
4751 (*compaction_retries)++;
4752
4753 /*
4754 * compaction considers all the zones as desperately out of memory,
4755 * so it doesn't really make much sense to retry except when the
4756 * failure could be caused by insufficient priority
4757 */
4758 if (compaction_failed(compact_result))
4759 goto check_priority;
4760
4761 /*
4762 * compaction was skipped because there are not enough order-0 pages
4763 * to work with, so we retry only if it looks like reclaim can help.
4764 */
4765 if (compaction_needs_reclaim(compact_result)) {
4766 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4767 goto out;
4768 }
4769
4770 /*
4771 * make sure compaction wasn't deferred and didn't bail out early
4772 * due to lock contention before we declare that we should give up.
4773 * But the next retry should use a higher priority if allowed, so
4774 * we don't just keep bailing out endlessly.
4775 */
4776 if (compaction_withdrawn(compact_result)) {
4777 goto check_priority;
4778 }
4779
4780 /*
4781 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4782 * costly ones because they are de facto nofail and invoke the OOM
4783 * killer to move on, while costly requests can fail and their users
4784 * are ready to cope with that. Allowing 1/4 of the retries is rather
4785 * arbitrary, but we would need much more detailed feedback from
4786 * compaction to make a better decision.
4787 */
4788 if (order > PAGE_ALLOC_COSTLY_ORDER)
4789 max_retries /= 4;
4790 if (*compaction_retries <= max_retries) {
4791 ret = true;
4792 goto out;
4793 }
4794
4795 /*
4796 * Make sure there are attempts at the highest priority if we exhausted
4797 * all retries or failed at the lower priorities.
4798 */
4799 check_priority:
4800 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4801 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4802
4803 if (*compact_priority > min_priority) {
4804 (*compact_priority)--;
4805 *compaction_retries = 0;
4806 ret = true;
4807 }
4808 out:
4809 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4810 return ret;
4811 }
4812 #else
4813 static inline struct page *
4814 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4815 unsigned int alloc_flags, const struct alloc_context *ac,
4816 enum compact_priority prio, enum compact_result *compact_result)
4817 {
4818 *compact_result = COMPACT_SKIPPED;
4819 return NULL;
4820 }
4821
4822 static inline bool
4823 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4824 enum compact_result compact_result,
4825 enum compact_priority *compact_priority,
4826 int *compaction_retries)
4827 {
4828 struct zone *zone;
4829 struct zoneref *z;
4830
4831 if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4832 return false;
4833
4834 /*
4835 * There are setups with compaction disabled which would prefer to loop
4836 * inside the allocator rather than hit the oom killer prematurely.
4837 * Let's give them a good hope and keep retrying while the order-0
4838 * watermarks are OK.
4839 */
4840 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4841 ac->highest_zoneidx, ac->nodemask) {
4842 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4843 ac->highest_zoneidx, alloc_flags))
4844 return true;
4845 }
4846 return false;
4847 }
4848 #endif /* CONFIG_COMPACTION */
4849
4850 #ifdef CONFIG_LOCKDEP
4851 static struct lockdep_map __fs_reclaim_map =
4852 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4853
4854 static bool __need_reclaim(gfp_t gfp_mask)
4855 {
4856 /* no reclaim without waiting on it */
4857 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4858 return false;
4859
4860 /* this guy won't enter reclaim */
4861 if (current->flags & PF_MEMALLOC)
4862 return false;
4863
4864 if (gfp_mask & __GFP_NOLOCKDEP)
4865 return false;
4866
4867 return true;
4868 }
4869
4870 void __fs_reclaim_acquire(unsigned long ip)
4871 {
4872 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
4873 }
4874
4875 void __fs_reclaim_release(unsigned long ip)
4876 {
4877 lock_release(&__fs_reclaim_map, ip);
4878 }
4879
4880 void fs_reclaim_acquire(gfp_t gfp_mask)
4881 {
4882 gfp_mask = current_gfp_context(gfp_mask);
4883
4884 if (__need_reclaim(gfp_mask)) {
4885 if (gfp_mask & __GFP_FS)
4886 __fs_reclaim_acquire(_RET_IP_);
4887
4888 #ifdef CONFIG_MMU_NOTIFIER
4889 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4890 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4891 #endif
4892
4893 }
4894 }
4895 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4896
4897 void fs_reclaim_release(gfp_t gfp_mask)
4898 {
4899 gfp_mask = current_gfp_context(gfp_mask);
4900
4901 if (__need_reclaim(gfp_mask)) {
4902 if (gfp_mask & __GFP_FS)
4903 __fs_reclaim_release(_RET_IP_);
4904 }
4905 }
4906 EXPORT_SYMBOL_GPL(fs_reclaim_release);
4907 #endif
4908
4909 /*
4910 * Zonelists may change due to hotplug during allocation. Detect when zonelists
4911 * have been rebuilt so the allocation can be retried. The reader side does not lock and
4912 * retries the allocation if zonelist changes. Writer side is protected by the
4913 * embedded spin_lock.
4914 */
4915 static DEFINE_SEQLOCK(zonelist_update_seq);
4916
4917 static unsigned int zonelist_iter_begin(void)
4918 {
4919 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4920 return read_seqbegin(&zonelist_update_seq);
4921
4922 return 0;
4923 }
4924
4925 static unsigned int check_retry_zonelist(unsigned int seq)
4926 {
4927 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
4928 return read_seqretry(&zonelist_update_seq, seq);
4929
4930 return seq;
4931 }
4932
4933 /* Perform direct synchronous page reclaim */
4934 static unsigned long
4935 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4936 const struct alloc_context *ac)
4937 {
4938 unsigned int noreclaim_flag;
4939 unsigned long progress;
4940
4941 cond_resched();
4942
4943 /* We now go into synchronous reclaim */
4944 cpuset_memory_pressure_bump();
4945 fs_reclaim_acquire(gfp_mask);
4946 noreclaim_flag = memalloc_noreclaim_save();
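/* PF_MEMALLOC is now set, so reclaim cannot recurse into itself */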
4947
4948 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4949 ac->nodemask);
4950
4951 memalloc_noreclaim_restore(noreclaim_flag);
4952 fs_reclaim_release(gfp_mask);
4953
4954 cond_resched();
4955
4956 return progress;
4957 }
4958
4959 /* The really slow allocator path where we enter direct reclaim */
4960 static inline struct page *
4961 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4962 unsigned int alloc_flags, const struct alloc_context *ac,
4963 unsigned long *did_some_progress)
4964 {
4965 struct page *page = NULL;
4966 unsigned long pflags;
4967 bool drained = false;
4968 bool skip_pcp_drain = false;
4969
4970 psi_memstall_enter(&pflags);
4971 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4972 if (unlikely(!(*did_some_progress)))
4973 goto out;
4974
4975 retry:
4976 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4977
4978 /*
4979 * If an allocation failed after direct reclaim, it could be because
4980 * pages are pinned on the per-cpu lists or in high alloc reserves.
4981 * Shrink them and try again
4982 */
4983 if (!page && !drained) {
4984 unreserve_highatomic_pageblock(ac, false);
4985 trace_android_vh_drain_all_pages_bypass(gfp_mask, order,
4986 alloc_flags, ac->migratetype, *did_some_progress, &skip_pcp_drain);
4987 if (!skip_pcp_drain)
4988 drain_all_pages(NULL);
4989 drained = true;
4990 goto retry;
4991 }
4992 out:
4993 psi_memstall_leave(&pflags);
4994
4995 return page;
4996 }
4997
4998 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4999 const struct alloc_context *ac)
5000 {
5001 struct zoneref *z;
5002 struct zone *zone;
5003 pg_data_t *last_pgdat = NULL;
5004 enum zone_type highest_zoneidx = ac->highest_zoneidx;
5005
5006 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
5007 ac->nodemask) {
5008 if (last_pgdat != zone->zone_pgdat)
5009 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
5010 last_pgdat = zone->zone_pgdat;
5011 }
5012 }
5013
5014 static inline unsigned int
5015 gfp_to_alloc_flags(gfp_t gfp_mask)
5016 {
5017 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
5018
5019 /*
5020 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
5021 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
5022 * to save two branches.
5023 */
5024 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
5025 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
5026
5027 /*
5028 * The caller may dip into page reserves a bit more if the caller
5029 * cannot run direct reclaim, or if the caller has realtime scheduling
5030 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
5031 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
5032 */
5033 alloc_flags |= (__force int)
5034 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
5035
5036 if (gfp_mask & __GFP_ATOMIC) {
5037 /*
5038 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
5039 * if it can't schedule.
5040 */
5041 if (!(gfp_mask & __GFP_NOMEMALLOC))
5042 alloc_flags |= ALLOC_HARDER;
5043 /*
5044 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
5045 * comment for __cpuset_node_allowed().
5046 */
5047 alloc_flags &= ~ALLOC_CPUSET;
5048 } else if (unlikely(rt_task(current)) && in_task())
5049 alloc_flags |= ALLOC_HARDER;
5050
5051 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
5052
5053 return alloc_flags;
5054 }
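/*
 * Illustrative sketch only (not part of the allocator): walking a typical
 * GFP_ATOMIC request through gfp_to_alloc_flags() above. At the time of
 * writing GFP_ATOMIC is __GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM,
 * so assuming no __GFP_NOMEMALLOC and ignoring the CMA adjustment:
 *
 *	alloc_flags  = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 *	alloc_flags |= ALLOC_HIGH | ALLOC_KSWAPD;	/* from the gfp bits */
 *	alloc_flags |= ALLOC_HARDER;			/* __GFP_ATOMIC set */
 *	alloc_flags &= ~ALLOC_CPUSET;			/* ignore cpuset mems */
 *
 * i.e. atomic allocations may dip further into the reserves and ignore
 * cpuset restrictions rather than fail.
 */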
5055
5056 static bool oom_reserves_allowed(struct task_struct *tsk)
5057 {
5058 if (!tsk_is_oom_victim(tsk))
5059 return false;
5060
5061 /*
5062 * !MMU doesn't have oom reaper so give access to memory reserves
5063 * only to the thread with TIF_MEMDIE set
5064 */
5065 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
5066 return false;
5067
5068 return true;
5069 }
5070
5071 /*
5072 * Distinguish requests which really need access to full memory
5073 * reserves from oom victims which can live with a portion of it
5074 */
5075 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
5076 {
5077 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
5078 return 0;
5079 if (gfp_mask & __GFP_MEMALLOC)
5080 return ALLOC_NO_WATERMARKS;
5081 if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
5082 return ALLOC_NO_WATERMARKS;
5083 if (!in_interrupt()) {
5084 if (current->flags & PF_MEMALLOC)
5085 return ALLOC_NO_WATERMARKS;
5086 else if (oom_reserves_allowed(current))
5087 return ALLOC_OOM;
5088 }
5089
5090 return 0;
5091 }
5092
5093 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
5094 {
5095 return !!__gfp_pfmemalloc_flags(gfp_mask);
5096 }
5097
5098 /*
5099  * Checks whether it makes sense to retry the reclaim to make forward progress
5100 * for the given allocation request.
5101 *
5102 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
5103 * without success, or when we couldn't even meet the watermark if we
5104 * reclaimed all remaining pages on the LRU lists.
5105 *
5106 * Returns true if a retry is viable or false to enter the oom path.
5107 */
5108 static inline bool
5109 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
5110 struct alloc_context *ac, int alloc_flags,
5111 bool did_some_progress, int *no_progress_loops)
5112 {
5113 struct zone *zone;
5114 struct zoneref *z;
5115 bool ret = false;
5116
5117 /*
5118 	 * Costly allocations might have made progress, but this doesn't mean
5119 	 * their order will become available due to high fragmentation, so
5120 	 * always increment the no-progress counter for them
5121 */
5122 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
5123 *no_progress_loops = 0;
5124 else
5125 (*no_progress_loops)++;
5126
5127 /*
5128 * Make sure we converge to OOM if we cannot make any progress
5129 	 * several times in a row.
5130 */
5131 if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
5132 /* Before OOM, exhaust highatomic_reserve */
5133 return unreserve_highatomic_pageblock(ac, true);
5134 }
5135
5136 /*
5137 * Keep reclaiming pages while there is a chance this will lead
5138 * somewhere. If none of the target zones can satisfy our allocation
5139 * request even if all reclaimable pages are considered then we are
5140 * screwed and have to go OOM.
5141 */
5142 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
5143 ac->highest_zoneidx, ac->nodemask) {
5144 unsigned long available;
5145 unsigned long reclaimable;
5146 unsigned long min_wmark = min_wmark_pages(zone);
5147 bool wmark;
5148
5149 available = reclaimable = zone_reclaimable_pages(zone);
5150 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
5151
5152 /*
5153 * Would the allocation succeed if we reclaimed all
5154 * reclaimable pages?
5155 */
5156 wmark = __zone_watermark_ok(zone, order, min_wmark,
5157 ac->highest_zoneidx, alloc_flags, available);
5158 trace_reclaim_retry_zone(z, order, reclaimable,
5159 available, min_wmark, *no_progress_loops, wmark);
5160 if (wmark) {
5161 /*
5162 * If we didn't make any progress and have a lot of
5163 * dirty + writeback pages then we should wait for
5164 * an IO to complete to slow down the reclaim and
5165 	 * prevent premature OOM
5166 */
5167 if (!did_some_progress) {
5168 unsigned long write_pending;
5169
5170 write_pending = zone_page_state_snapshot(zone,
5171 NR_ZONE_WRITE_PENDING);
5172
5173 if (2 * write_pending > reclaimable) {
5174 congestion_wait(BLK_RW_ASYNC, HZ/10);
5175 return true;
5176 }
5177 }
5178
5179 ret = true;
5180 goto out;
5181 }
5182 }
5183
5184 out:
5185 /*
5186 * Memory allocation/reclaim might be called from a WQ context and the
5187 * current implementation of the WQ concurrency control doesn't
5188 * recognize that a particular WQ is congested if the worker thread is
5189 * looping without ever sleeping. Therefore we have to do a short sleep
5190 * here rather than calling cond_resched().
5191 */
5192 if (current->flags & PF_WQ_WORKER)
5193 schedule_timeout_uninterruptible(1);
5194 else
5195 cond_resched();
5196 return ret;
5197 }
5198
5199 static inline bool
5200 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
5201 {
5202 /*
5203 * It's possible that cpuset's mems_allowed and the nodemask from
5204 * mempolicy don't intersect. This should be normally dealt with by
5205 * policy_nodemask(), but it's possible to race with cpuset update in
5206 * such a way the check therein was true, and then it became false
5207 * before we got our cpuset_mems_cookie here.
5208 * This assumes that for all allocations, ac->nodemask can come only
5209 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
5210 * when it does not intersect with the cpuset restrictions) or the
5211 * caller can deal with a violated nodemask.
5212 */
5213 if (cpusets_enabled() && ac->nodemask &&
5214 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
5215 ac->nodemask = NULL;
5216 return true;
5217 }
5218
5219 /*
5220 * When updating a task's mems_allowed or mempolicy nodemask, it is
5221 * possible to race with parallel threads in such a way that our
5222 * allocation can fail while the mask is being updated. If we are about
5223 * to fail, check if the cpuset changed during allocation and if so,
5224 * retry.
5225 */
5226 if (read_mems_allowed_retry(cpuset_mems_cookie))
5227 return true;
5228
5229 return false;
5230 }
5231
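/*
 * High-level shape of the slow path below: recompute alloc_flags, wake
 * kswapd, retry the freelist, try async compaction for costly/non-movable
 * high-order requests, then loop over direct reclaim and direct compaction
 * with retries governed by should_reclaim_retry()/should_compact_retry(),
 * before finally falling back to the OOM killer or __GFP_NOFAIL looping.
 */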
5232 static inline struct page *
5233 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
5234 struct alloc_context *ac)
5235 {
5236 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
5237 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
5238 struct page *page = NULL;
5239 unsigned int alloc_flags;
5240 unsigned long did_some_progress;
5241 enum compact_priority compact_priority;
5242 enum compact_result compact_result;
5243 int compaction_retries;
5244 int no_progress_loops;
5245 unsigned int cpuset_mems_cookie;
5246 unsigned int zonelist_iter_cookie;
5247 int reserve_flags;
5248 unsigned long alloc_start = jiffies;
5249 bool should_alloc_retry = false;
5250 /*
5251 * We also sanity check to catch abuse of atomic reserves being used by
5252 * callers that are not in atomic context.
5253 */
5254 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
5255 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
5256 gfp_mask &= ~__GFP_ATOMIC;
5257
5258 restart:
5259 compaction_retries = 0;
5260 no_progress_loops = 0;
5261 compact_priority = DEF_COMPACT_PRIORITY;
5262 cpuset_mems_cookie = read_mems_allowed_begin();
5263 zonelist_iter_cookie = zonelist_iter_begin();
5264
5265 /*
5266 * The fast path uses conservative alloc_flags to succeed only until
5267 * kswapd needs to be woken up, and to avoid the cost of setting up
5268 * alloc_flags precisely. So we do that now.
5269 */
5270 alloc_flags = gfp_to_alloc_flags(gfp_mask);
5271
5272 /*
5273 * We need to recalculate the starting point for the zonelist iterator
5274 * because we might have used different nodemask in the fast path, or
5275 * there was a cpuset modification and we are retrying - otherwise we
5276 * could end up iterating over non-eligible zones endlessly.
5277 */
5278 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5279 ac->highest_zoneidx, ac->nodemask);
5280 if (!ac->preferred_zoneref->zone)
5281 goto nopage;
5282
5283 if (alloc_flags & ALLOC_KSWAPD)
5284 wake_all_kswapds(order, gfp_mask, ac);
5285
5286 /*
5287 * The adjusted alloc_flags might result in immediate success, so try
5288 * that first
5289 */
5290 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5291 if (page)
5292 goto got_pg;
5293
5294 /*
5295 * For costly allocations, try direct compaction first, as it's likely
5296 * that we have enough base pages and don't need to reclaim. For non-
5297 * movable high-order allocations, do that as well, as compaction will
5298 	 * try to prevent permanent fragmentation by migrating from blocks of the
5299 * same migratetype.
5300 * Don't try this for allocations that are allowed to ignore
5301 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
5302 */
5303 if (can_direct_reclaim &&
5304 (costly_order ||
5305 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
5306 && !gfp_pfmemalloc_allowed(gfp_mask)) {
5307 page = __alloc_pages_direct_compact(gfp_mask, order,
5308 alloc_flags, ac,
5309 INIT_COMPACT_PRIORITY,
5310 &compact_result);
5311 if (page)
5312 goto got_pg;
5313
5314 /*
5315 * Checks for costly allocations with __GFP_NORETRY, which
5316 * includes some THP page fault allocations
5317 */
5318 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
5319 /*
5320 * If allocating entire pageblock(s) and compaction
5321 * failed because all zones are below low watermarks
5322 * or is prohibited because it recently failed at this
5323 * order, fail immediately unless the allocator has
5324 * requested compaction and reclaim retry.
5325 *
5326 * Reclaim is
5327 * - potentially very expensive because zones are far
5328 * below their low watermarks or this is part of very
5329 * bursty high order allocations,
5330 * - not guaranteed to help because isolate_freepages()
5331 * may not iterate over freed pages as part of its
5332 * linear scan, and
5333 * - unlikely to make entire pageblocks free on its
5334 * own.
5335 */
5336 if (compact_result == COMPACT_SKIPPED ||
5337 compact_result == COMPACT_DEFERRED)
5338 goto nopage;
5339
5340 /*
5341 * Looks like reclaim/compaction is worth trying, but
5342 * sync compaction could be very expensive, so keep
5343 * using async compaction.
5344 */
5345 compact_priority = INIT_COMPACT_PRIORITY;
5346 }
5347 }
5348
5349 retry:
5350 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
5351 if (alloc_flags & ALLOC_KSWAPD)
5352 wake_all_kswapds(order, gfp_mask, ac);
5353
5354 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
5355 if (reserve_flags)
5356 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
5357
5358 /*
5359 * Reset the nodemask and zonelist iterators if memory policies can be
5360 	 * ignored. These allocations are high priority and system-oriented
5361 	 * rather than user-oriented.
5362 */
5363 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
5364 ac->nodemask = NULL;
5365 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5366 ac->highest_zoneidx, ac->nodemask);
5367 }
5368
5369 /* Attempt with potentially adjusted zonelist and alloc_flags */
5370 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
5371 if (page)
5372 goto got_pg;
5373
5374 /* Caller is not willing to reclaim, we can't balance anything */
5375 if (!can_direct_reclaim)
5376 goto nopage;
5377
5378 /* Avoid recursion of direct reclaim */
5379 if (current->flags & PF_MEMALLOC)
5380 goto nopage;
5381
5382 trace_android_vh_alloc_pages_reclaim_bypass(gfp_mask, order,
5383 alloc_flags, ac->migratetype, &page);
5384
5385 if (page)
5386 goto got_pg;
5387
5388 trace_android_vh_should_alloc_pages_retry(gfp_mask, order, &alloc_flags,
5389 ac->migratetype, ac->preferred_zoneref->zone, &page, &should_alloc_retry);
5390 if (should_alloc_retry)
5391 goto retry;
5392
5393 /* Try direct reclaim and then allocating */
5394 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
5395 &did_some_progress);
5396 if (page)
5397 goto got_pg;
5398
5399 /* Try direct compaction and then allocating */
5400 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
5401 compact_priority, &compact_result);
5402 if (page)
5403 goto got_pg;
5404
5405 /* Do not loop if specifically requested */
5406 if (gfp_mask & __GFP_NORETRY)
5407 goto nopage;
5408
5409 /*
5410 * Do not retry costly high order allocations unless they are
5411 * __GFP_RETRY_MAYFAIL
5412 */
5413 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
5414 goto nopage;
5415
5416 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
5417 did_some_progress > 0, &no_progress_loops))
5418 goto retry;
5419
5420 /*
5421 	 * It doesn't make any sense to retry the compaction if the order-0
5422 	 * reclaim is not able to make any progress because the current
5423 	 * implementation of the compaction depends on a sufficient amount
5424 	 * of free memory (see __compaction_suitable)
5425 */
5426 if (did_some_progress > 0 &&
5427 should_compact_retry(ac, order, alloc_flags,
5428 compact_result, &compact_priority,
5429 &compaction_retries))
5430 goto retry;
5431
5432
5433 /*
5434 * Deal with possible cpuset update races or zonelist updates to avoid
5435 	 * an unnecessary OOM kill.
5436 */
5437 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5438 check_retry_zonelist(zonelist_iter_cookie))
5439 goto restart;
5440
5441 /* Reclaim has failed us, start killing things */
5442 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
5443 if (page)
5444 goto got_pg;
5445
5446 /* Avoid allocations with no watermarks from looping endlessly */
5447 if (tsk_is_oom_victim(current) &&
5448 (alloc_flags & ALLOC_OOM ||
5449 (gfp_mask & __GFP_NOMEMALLOC)))
5450 goto nopage;
5451
5452 /* Retry as long as the OOM killer is making progress */
5453 if (did_some_progress) {
5454 no_progress_loops = 0;
5455 goto retry;
5456 }
5457
5458 nopage:
5459 /*
5460 * Deal with possible cpuset update races or zonelist updates to avoid
5461 	 * an unnecessary OOM kill.
5462 */
5463 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
5464 check_retry_zonelist(zonelist_iter_cookie))
5465 goto restart;
5466
5467 /*
5468 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
5469 * we always retry
5470 */
5471 if (gfp_mask & __GFP_NOFAIL) {
5472 /*
5473 * All existing users of the __GFP_NOFAIL are blockable, so warn
5474 * of any new users that actually require GFP_NOWAIT
5475 */
5476 if (WARN_ON_ONCE(!can_direct_reclaim))
5477 goto fail;
5478
5479 /*
5480 * PF_MEMALLOC request from this context is rather bizarre
5481 * because we cannot reclaim anything and only can loop waiting
5482 * for somebody to do a work for us
5483 */
5484 WARN_ON_ONCE(current->flags & PF_MEMALLOC);
5485
5486 /*
5487 		 * Non-failing costly orders are a hard requirement which we
5488 		 * are not well prepared for, so warn about these users
5489 		 * so that we can identify them and convert them to something
5490 		 * else.
5491 */
5492 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
5493
5494 /*
5495 * Help non-failing allocations by giving them access to memory
5496 * reserves but do not use ALLOC_NO_WATERMARKS because this
5497 * could deplete whole memory reserves which would just make
5498 * the situation worse
5499 */
5500 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5501 if (page)
5502 goto got_pg;
5503
5504 cond_resched();
5505 goto retry;
5506 }
5507 fail:
5508 trace_android_vh_alloc_pages_failure_bypass(gfp_mask, order,
5509 alloc_flags, ac->migratetype, &page);
5510 if (page)
5511 goto got_pg;
5512
5513 warn_alloc(gfp_mask, ac->nodemask,
5514 "page allocation failure: order:%u", order);
5515 got_pg:
5516 trace_android_vh_alloc_pages_slowpath(gfp_mask, order, alloc_start);
5517 return page;
5518 }
5519
5520 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5521 int preferred_nid, nodemask_t *nodemask,
5522 struct alloc_context *ac, gfp_t *alloc_gfp,
5523 unsigned int *alloc_flags)
5524 {
5525 ac->highest_zoneidx = gfp_zone(gfp_mask);
5526 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5527 ac->nodemask = nodemask;
5528 ac->migratetype = gfp_migratetype(gfp_mask);
5529
5530 if (cpusets_enabled()) {
5531 *alloc_gfp |= __GFP_HARDWALL;
5532 /*
5533 		 * When we are in interrupt context, the cpuset of the
5534 		 * current task is irrelevant, so any node is ok.
5535 */
5536 if (in_task() && !ac->nodemask)
5537 ac->nodemask = &cpuset_current_mems_allowed;
5538 else
5539 *alloc_flags |= ALLOC_CPUSET;
5540 }
5541
5542 fs_reclaim_acquire(gfp_mask);
5543 fs_reclaim_release(gfp_mask);
5544
5545 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5546
5547 if (should_fail_alloc_page(gfp_mask, order))
5548 return false;
5549
5550 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5551
5552 /* Dirty zone balancing only done in the fast path */
5553 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5554
5555 /*
5556 * The preferred zone is used for statistics but crucially it is
5557 * also used as the starting point for the zonelist iterator. It
5558 * may get reset for allocations that ignore memory policies.
5559 */
5560 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5561 ac->highest_zoneidx, ac->nodemask);
5562
5563 return true;
5564 }
5565
5566 /*
5567 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5568 * @gfp: GFP flags for the allocation
5569 * @preferred_nid: The preferred NUMA node ID to allocate from
5570 * @nodemask: Set of nodes to allocate from, may be NULL
5571 * @nr_pages: The number of pages desired on the list or array
5572 * @page_list: Optional list to store the allocated pages
5573 * @page_array: Optional array to store the pages
5574 *
5575 * This is a batched version of the page allocator that attempts to
5576 * allocate nr_pages quickly. Pages are added to page_list if page_list
5577 * is not NULL, otherwise it is assumed that the page_array is valid.
5578 *
5579 * For lists, nr_pages is the number of pages that should be allocated.
5580 *
5581 * For arrays, only NULL elements are populated with pages and nr_pages
5582 * is the maximum number of pages that will be stored in the array.
5583 *
5584 * Returns the number of pages on the list or array.
5585 */
5586 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5587 nodemask_t *nodemask, int nr_pages,
5588 struct list_head *page_list,
5589 struct page **page_array)
5590 {
5591 struct page *page;
5592 unsigned long flags;
5593 unsigned long __maybe_unused UP_flags;
5594 struct zone *zone;
5595 struct zoneref *z;
5596 struct per_cpu_pages *pcp;
5597 struct alloc_context ac;
5598 gfp_t alloc_gfp;
5599 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5600 int nr_populated = 0, nr_account = 0;
5601
5602 /*
5603 * Skip populated array elements to determine if any pages need
5604 * to be allocated before disabling IRQs.
5605 */
5606 while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5607 nr_populated++;
5608
5609 /* No pages requested? */
5610 if (unlikely(nr_pages <= 0))
5611 goto out;
5612
5613 /* Already populated array? */
5614 if (unlikely(page_array && nr_pages - nr_populated == 0))
5615 goto out;
5616
5617 /* Bulk allocator does not support memcg accounting. */
5618 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT))
5619 goto failed;
5620
5621 /* Use the single page allocator for one page. */
5622 if (nr_pages - nr_populated == 1)
5623 goto failed;
5624
5625 #ifdef CONFIG_PAGE_OWNER
5626 /*
5627 * PAGE_OWNER may recurse into the allocator to allocate space to
5628 * save the stack with pagesets.lock held. Releasing/reacquiring
5629 * removes much of the performance benefit of bulk allocation so
5630 	 * force the caller to allocate one page at a time, as that has
5631 	 * similar performance to adding the complexity to the bulk allocator.
5632 */
5633 if (static_branch_unlikely(&page_owner_inited))
5634 goto failed;
5635 #endif
5636
5637 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5638 gfp &= gfp_allowed_mask;
5639 alloc_gfp = gfp;
5640 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5641 goto out;
5642 gfp = alloc_gfp;
5643
5644 /* Find an allowed local zone that meets the low watermark. */
5645 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5646 unsigned long mark;
5647
5648 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5649 !__cpuset_zone_allowed(zone, gfp)) {
5650 continue;
5651 }
5652
5653 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5654 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5655 goto failed;
5656 }
5657
5658 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5659 if (zone_watermark_fast(zone, 0, mark,
5660 zonelist_zone_idx(ac.preferred_zoneref),
5661 alloc_flags, gfp)) {
5662 break;
5663 }
5664 }
5665
5666 /*
5667 	 * If there are no allowed local zones that meet the watermarks then
5668 * try to allocate a single page and reclaim if necessary.
5669 */
5670 if (unlikely(!zone))
5671 goto failed;
5672
5673 /* Is a parallel drain in progress? */
5674 pcp_trylock_prepare(UP_flags);
5675 pcp = pcp_spin_trylock_irqsave(zone_per_cpu_pageset(zone), flags);
5676 if (!pcp)
5677 goto failed_irq;
5678
5679 /* Attempt the batch allocation */
5680 while (nr_populated < nr_pages) {
5681
5682 /* Skip existing pages */
5683 if (page_array && page_array[nr_populated]) {
5684 nr_populated++;
5685 continue;
5686 }
5687
5688 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
5689 pcp, alloc_gfp);
5690 if (unlikely(!page)) {
5691 /* Try and allocate at least one page */
5692 if (!nr_account) {
5693 pcp_spin_unlock_irqrestore(pcp, flags);
5694 goto failed_irq;
5695 }
5696 break;
5697 }
5698 nr_account++;
5699
5700 prep_new_page(page, 0, gfp, 0);
5701 if (page_list)
5702 list_add(&page->lru, page_list);
5703 else
5704 page_array[nr_populated] = page;
5705 nr_populated++;
5706 }
5707
5708 pcp_spin_unlock_irqrestore(pcp, flags);
5709 pcp_trylock_finish(UP_flags);
5710
5711 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
5712 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
5713
5714 out:
5715 return nr_populated;
5716
5717 failed_irq:
5718 pcp_trylock_finish(UP_flags);
5719
5720 failed:
5721 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5722 if (page) {
5723 if (page_list)
5724 list_add(&page->lru, page_list);
5725 else
5726 page_array[nr_populated] = page;
5727 nr_populated++;
5728 }
5729
5730 goto out;
5731 }
5732 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
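/*
 * Minimal usage sketch for __alloc_pages_bulk() above (illustrative only,
 * not a real caller; the array variant is shown and the names are made up):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long nr;
 *
 *	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
 *				ARRAY_SIZE(pages), NULL, pages);
 *
 * nr entries of pages[] are now populated; the caller is expected to cope
 * with a short count, e.g. by falling back to alloc_pages() per page.
 */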
5733
5734 /*
5735 * This is the 'heart' of the zoned buddy allocator.
5736 */
5737 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5738 nodemask_t *nodemask)
5739 {
5740 struct page *page;
5741 unsigned int alloc_flags = ALLOC_WMARK_LOW;
5742 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5743 struct alloc_context ac = { };
5744
5745 trace_android_vh_alloc_pages_entry(&gfp, order, preferred_nid, nodemask);
5746 /*
5747 * There are several places where we assume that the order value is sane
5748 	 * so bail out early if the request is out of bounds.
5749 */
5750 if (unlikely(order >= MAX_ORDER)) {
5751 WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5752 return NULL;
5753 }
5754
5755 gfp &= gfp_allowed_mask;
5756 /*
5757 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5758 * resp. GFP_NOIO which has to be inherited for all allocation requests
5759 * from a particular context which has been marked by
5760 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
5761 * movable zones are not used during allocation.
5762 */
5763 gfp = current_gfp_context(gfp);
5764 alloc_gfp = gfp;
5765 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5766 &alloc_gfp, &alloc_flags))
5767 return NULL;
5768
5769 /*
5770 * Forbid the first pass from falling back to types that fragment
5771 * memory until all local zones are considered.
5772 */
5773 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5774
5775 /* First allocation attempt */
5776 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5777 if (likely(page))
5778 goto out;
5779
5780 alloc_gfp = gfp;
5781 ac.spread_dirty_pages = false;
5782
5783 /*
5784 * Restore the original nodemask if it was potentially replaced with
5785 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5786 */
5787 ac.nodemask = nodemask;
5788
5789 page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5790
5791 out:
5792 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5793 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5794 __free_pages(page, order);
5795 page = NULL;
5796 }
5797
5798 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5799
5800 return page;
5801 }
5802 EXPORT_SYMBOL(__alloc_pages);
5803
5804 /*
5805 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5806 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5807 * you need to access high mem.
5808 */
5809 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5810 {
5811 struct page *page;
5812
5813 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5814 if (!page)
5815 return 0;
5816 return (unsigned long) page_address(page);
5817 }
5818 EXPORT_SYMBOL(__get_free_pages);
5819
5820 unsigned long get_zeroed_page(gfp_t gfp_mask)
5821 {
5822 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5823 }
5824 EXPORT_SYMBOL(get_zeroed_page);
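/*
 * Illustrative pairing for the helpers above (hypothetical caller, sketch
 * only): __get_free_pages() returns a kernel virtual address which must be
 * handed back to free_pages() with the same order:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */
 *
 *	if (buf) {
 *		...
 *		free_pages(buf, 1);
 *	}
 */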
5825
5826 /**
5827 * __free_pages - Free pages allocated with alloc_pages().
5828 * @page: The page pointer returned from alloc_pages().
5829 * @order: The order of the allocation.
5830 *
5831 * This function can free multi-page allocations that are not compound
5832 * pages. It does not check that the @order passed in matches that of
5833 * the allocation, so it is easy to leak memory. Freeing more memory
5834 * than was allocated will probably emit a warning.
5835 *
5836 * If the last reference to this page is speculative, it will be released
5837 * by put_page() which only frees the first page of a non-compound
5838 * allocation. To prevent the remaining pages from being leaked, we free
5839 * the subsequent pages here. If you want to use the page's reference
5840 * count to decide when to free the allocation, you should allocate a
5841 * compound page, and use put_page() instead of __free_pages().
5842 *
5843 * Context: May be called in interrupt context or while holding a normal
5844 * spinlock, but not in NMI context or while holding a raw spinlock.
5845 */
5846 void __free_pages(struct page *page, unsigned int order)
5847 {
5848 /* get PageHead before we drop reference */
5849 int head = PageHead(page);
5850
5851 if (put_page_testzero(page))
5852 free_the_page(page, order);
5853 else if (!head)
5854 while (order-- > 0)
5855 free_the_page(page + (1 << order), order);
5856 }
5857 EXPORT_SYMBOL(__free_pages);
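/*
 * Sketch of the rule described in the kernel-doc above (illustrative, not
 * real callers): when the page's refcount decides the lifetime, allocate a
 * compound page and drop it with put_page(),
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *	...
 *	put_page(page);			/* frees all four pages */
 *
 * whereas a non-compound multi-page allocation must be returned with the
 * matching order:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	...
 *	__free_pages(page, 2);
 */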
5858
5859 void free_pages(unsigned long addr, unsigned int order)
5860 {
5861 if (addr != 0) {
5862 VM_BUG_ON(!virt_addr_valid((void *)addr));
5863 __free_pages(virt_to_page((void *)addr), order);
5864 }
5865 }
5866
5867 EXPORT_SYMBOL(free_pages);
5868
5869 /*
5870 * Page Fragment:
5871 * An arbitrary-length arbitrary-offset area of memory which resides
5872 * within a 0 or higher order page. Multiple fragments within that page
5873 * are individually refcounted, in the page's reference counter.
5874 *
5875 * The page_frag functions below provide a simple allocation framework for
5876 * page fragments. This is used by the network stack and network device
5877 * drivers to provide a backing region of memory for use as either an
5878 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5879 */
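/*
 * Sketch of how a hypothetical driver might use the fragment API below
 * (illustrative only; the cache would normally be per-CPU or otherwise
 * serialised by the caller, and ~0u means "no alignment constraint"):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc_align(&frag_cache, 256, GFP_ATOMIC, ~0u);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 */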
5880 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5881 gfp_t gfp_mask)
5882 {
5883 struct page *page = NULL;
5884 gfp_t gfp = gfp_mask;
5885
5886 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5887 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5888 __GFP_NOMEMALLOC;
5889 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5890 PAGE_FRAG_CACHE_MAX_ORDER);
5891 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5892 #endif
5893 if (unlikely(!page))
5894 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5895
5896 nc->va = page ? page_address(page) : NULL;
5897
5898 return page;
5899 }
5900
5901 void __page_frag_cache_drain(struct page *page, unsigned int count)
5902 {
5903 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5904
5905 if (page_ref_sub_and_test(page, count))
5906 free_the_page(page, compound_order(page));
5907 }
5908 EXPORT_SYMBOL(__page_frag_cache_drain);
5909
5910 void *page_frag_alloc_align(struct page_frag_cache *nc,
5911 unsigned int fragsz, gfp_t gfp_mask,
5912 unsigned int align_mask)
5913 {
5914 unsigned int size = PAGE_SIZE;
5915 struct page *page;
5916 int offset;
5917
5918 if (unlikely(!nc->va)) {
5919 refill:
5920 page = __page_frag_cache_refill(nc, gfp_mask);
5921 if (!page)
5922 return NULL;
5923
5924 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5925 /* if size can vary use size else just use PAGE_SIZE */
5926 size = nc->size;
5927 #endif
5928 /* Even if we own the page, we do not use atomic_set().
5929 * This would break get_page_unless_zero() users.
5930 */
5931 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5932
5933 /* reset page count bias and offset to start of new frag */
5934 nc->pfmemalloc = page_is_pfmemalloc(page);
5935 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5936 nc->offset = size;
5937 }
5938
5939 offset = nc->offset - fragsz;
5940 if (unlikely(offset < 0)) {
5941 page = virt_to_page(nc->va);
5942
5943 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5944 goto refill;
5945
5946 if (unlikely(nc->pfmemalloc)) {
5947 free_the_page(page, compound_order(page));
5948 goto refill;
5949 }
5950
5951 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5952 /* if size can vary use size else just use PAGE_SIZE */
5953 size = nc->size;
5954 #endif
5955 /* OK, page count is 0, we can safely set it */
5956 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5957
5958 /* reset page count bias and offset to start of new frag */
5959 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5960 offset = size - fragsz;
5961 if (unlikely(offset < 0)) {
5962 /*
5963 * The caller is trying to allocate a fragment
5964 * with fragsz > PAGE_SIZE but the cache isn't big
5965 * enough to satisfy the request, this may
5966 * happen in low memory conditions.
5967 * We don't release the cache page because
5968 * it could make memory pressure worse
5969 * so we simply return NULL here.
5970 */
5971 return NULL;
5972 }
5973 }
5974
5975 nc->pagecnt_bias--;
5976 offset &= align_mask;
5977 nc->offset = offset;
5978
5979 return nc->va + offset;
5980 }
5981 EXPORT_SYMBOL(page_frag_alloc_align);
5982
5983 /*
5984 * Frees a page fragment allocated out of either a compound or order 0 page.
5985 */
5986 void page_frag_free(void *addr)
5987 {
5988 struct page *page = virt_to_head_page(addr);
5989
5990 if (unlikely(put_page_testzero(page)))
5991 free_the_page(page, compound_order(page));
5992 }
5993 EXPORT_SYMBOL(page_frag_free);
5994
5995 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5996 size_t size)
5997 {
5998 if (addr) {
5999 unsigned long alloc_end = addr + (PAGE_SIZE << order);
6000 unsigned long used = addr + PAGE_ALIGN(size);
6001
6002 split_page(virt_to_page((void *)addr), order);
6003 while (used < alloc_end) {
6004 free_page(used);
6005 used += PAGE_SIZE;
6006 }
6007 }
6008 return (void *)addr;
6009 }
6010
6011 /**
6012  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
6013 * @size: the number of bytes to allocate
6014 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
6015 *
6016 * This function is similar to alloc_pages(), except that it allocates the
6017 * minimum number of pages to satisfy the request. alloc_pages() can only
6018 * allocate memory in power-of-two pages.
6019 *
6020 * This function is also limited by MAX_ORDER.
6021 *
6022 * Memory allocated by this function must be released by free_pages_exact().
6023 *
6024 * Return: pointer to the allocated area or %NULL in case of error.
6025 */
6026 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
6027 {
6028 unsigned int order = get_order(size);
6029 unsigned long addr;
6030
6031 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
6032 gfp_mask &= ~__GFP_COMP;
6033
6034 addr = __get_free_pages(gfp_mask, order);
6035 return make_alloc_exact(addr, order, size);
6036 }
6037 EXPORT_SYMBOL(alloc_pages_exact);
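/*
 * Worked example (illustrative only, assuming 4KiB pages): a 20KiB request
 * via alloc_pages_exact() grabs an order-3 block (32KiB) and immediately
 * frees the trailing 12KiB, so only five pages stay allocated:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */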
6038
6039 /**
6040 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
6041 * pages on a node.
6042 * @nid: the preferred node ID where memory should be allocated
6043 * @size: the number of bytes to allocate
6044 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
6045 *
6046 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
6047 * back.
6048 *
6049 * Return: pointer to the allocated area or %NULL in case of error.
6050 */
6051 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
6052 {
6053 unsigned int order = get_order(size);
6054 struct page *p;
6055
6056 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
6057 gfp_mask &= ~__GFP_COMP;
6058
6059 p = alloc_pages_node(nid, gfp_mask, order);
6060 if (!p)
6061 return NULL;
6062 return make_alloc_exact((unsigned long)page_address(p), order, size);
6063 }
6064
6065 /**
6066 * free_pages_exact - release memory allocated via alloc_pages_exact()
6067 * @virt: the value returned by alloc_pages_exact.
6068 * @size: size of allocation, same value as passed to alloc_pages_exact().
6069 *
6070 * Release the memory allocated by a previous call to alloc_pages_exact.
6071 */
6072 void free_pages_exact(void *virt, size_t size)
6073 {
6074 unsigned long addr = (unsigned long)virt;
6075 unsigned long end = addr + PAGE_ALIGN(size);
6076
6077 while (addr < end) {
6078 free_page(addr);
6079 addr += PAGE_SIZE;
6080 }
6081 }
6082 EXPORT_SYMBOL(free_pages_exact);
6083
6084 /**
6085 * nr_free_zone_pages - count number of pages beyond high watermark
6086 * @offset: The zone index of the highest zone
6087 *
6088 * nr_free_zone_pages() counts the number of pages which are beyond the
6089 * high watermark within all zones at or below a given zone index. For each
6090 * zone, the number of pages is calculated as:
6091 *
6092 * nr_free_zone_pages = managed_pages - high_pages
6093 *
6094 * Return: number of pages beyond high watermark.
6095 */
6096 static unsigned long nr_free_zone_pages(int offset)
6097 {
6098 struct zoneref *z;
6099 struct zone *zone;
6100
6101 /* Just pick one node, since fallback list is circular */
6102 unsigned long sum = 0;
6103
6104 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
6105
6106 for_each_zone_zonelist(zone, z, zonelist, offset) {
6107 unsigned long size = zone_managed_pages(zone);
6108 unsigned long high = high_wmark_pages(zone);
6109 if (size > high)
6110 sum += size - high;
6111 }
6112
6113 return sum;
6114 }
6115
6116 /**
6117 * nr_free_buffer_pages - count number of pages beyond high watermark
6118 *
6119 * nr_free_buffer_pages() counts the number of pages which are beyond the high
6120 * watermark within ZONE_DMA and ZONE_NORMAL.
6121 *
6122 * Return: number of pages beyond high watermark within ZONE_DMA and
6123 * ZONE_NORMAL.
6124 */
6125 unsigned long nr_free_buffer_pages(void)
6126 {
6127 return nr_free_zone_pages(gfp_zone(GFP_USER));
6128 }
6129 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
6130
6131 static inline void show_node(struct zone *zone)
6132 {
6133 if (IS_ENABLED(CONFIG_NUMA))
6134 printk("Node %d ", zone_to_nid(zone));
6135 }
6136
6137 long si_mem_available(void)
6138 {
6139 long available;
6140 unsigned long pagecache;
6141 unsigned long wmark_low = 0;
6142 unsigned long pages[NR_LRU_LISTS];
6143 unsigned long reclaimable;
6144 struct zone *zone;
6145 int lru;
6146
6147 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
6148 pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
6149
6150 for_each_zone(zone)
6151 wmark_low += low_wmark_pages(zone);
6152
6153 /*
6154 * Estimate the amount of memory available for userspace allocations,
6155 * without causing swapping.
6156 */
6157 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
6158
6159 /*
6160 * Not all the page cache can be freed, otherwise the system will
6161 * start swapping. Assume at least half of the page cache, or the
6162 * low watermark worth of cache, needs to stay.
6163 */
6164 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
6165 pagecache -= min(pagecache / 2, wmark_low);
6166 available += pagecache;
6167
6168 /*
6169 * Part of the reclaimable slab and other kernel memory consists of
6170 * items that are in use, and cannot be freed. Cap this estimate at the
6171 * low watermark.
6172 */
6173 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
6174 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
6175 available += reclaimable - min(reclaimable / 2, wmark_low);
6176
6177 if (available < 0)
6178 available = 0;
6179 return available;
6180 }
6181 EXPORT_SYMBOL_GPL(si_mem_available);
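/*
 * In short, the estimate computed by si_mem_available() above is roughly:
 *
 *	available = free - totalreserve
 *		  + pagecache   - min(pagecache / 2,   wmark_low)
 *		  + reclaimable - min(reclaimable / 2, wmark_low)
 *
 * where pagecache is the active+inactive file LRU, reclaimable is
 * reclaimable slab plus NR_KERNEL_MISC_RECLAIMABLE, and the result is
 * clamped at zero.
 */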
6182
6183 void si_meminfo(struct sysinfo *val)
6184 {
6185 val->totalram = totalram_pages();
6186 val->sharedram = global_node_page_state(NR_SHMEM);
6187 val->freeram = global_zone_page_state(NR_FREE_PAGES);
6188 val->bufferram = nr_blockdev_pages();
6189 val->totalhigh = totalhigh_pages();
6190 val->freehigh = nr_free_highpages();
6191 val->mem_unit = PAGE_SIZE;
6192 }
6193
6194 EXPORT_SYMBOL(si_meminfo);
6195
6196 #ifdef CONFIG_NUMA
6197 void si_meminfo_node(struct sysinfo *val, int nid)
6198 {
6199 int zone_type; /* needs to be signed */
6200 unsigned long managed_pages = 0;
6201 unsigned long managed_highpages = 0;
6202 unsigned long free_highpages = 0;
6203 pg_data_t *pgdat = NODE_DATA(nid);
6204
6205 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
6206 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
6207 val->totalram = managed_pages;
6208 val->sharedram = node_page_state(pgdat, NR_SHMEM);
6209 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
6210 #ifdef CONFIG_HIGHMEM
6211 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
6212 struct zone *zone = &pgdat->node_zones[zone_type];
6213
6214 if (is_highmem(zone)) {
6215 managed_highpages += zone_managed_pages(zone);
6216 free_highpages += zone_page_state(zone, NR_FREE_PAGES);
6217 }
6218 }
6219 val->totalhigh = managed_highpages;
6220 val->freehigh = free_highpages;
6221 #else
6222 val->totalhigh = managed_highpages;
6223 val->freehigh = free_highpages;
6224 #endif
6225 val->mem_unit = PAGE_SIZE;
6226 }
6227 #endif
6228
6229 /*
6230 * Determine whether the node should be displayed or not, depending on whether
6231 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
6232 */
6233 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
6234 {
6235 if (!(flags & SHOW_MEM_FILTER_NODES))
6236 return false;
6237
6238 /*
6239 * no node mask - aka implicit memory numa policy. Do not bother with
6240 * the synchronization - read_mems_allowed_begin - because we do not
6241 * have to be precise here.
6242 */
6243 if (!nodemask)
6244 nodemask = &cpuset_current_mems_allowed;
6245
6246 return !node_isset(nid, *nodemask);
6247 }
6248
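/* Convert a page count into KiB for the printk()s below. */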
6249 #define K(x) ((x) << (PAGE_SHIFT-10))
6250
6251 static void show_migration_types(unsigned char type)
6252 {
6253 static const char types[MIGRATE_TYPES] = {
6254 [MIGRATE_UNMOVABLE] = 'U',
6255 [MIGRATE_MOVABLE] = 'M',
6256 [MIGRATE_RECLAIMABLE] = 'E',
6257 [MIGRATE_HIGHATOMIC] = 'H',
6258 #ifdef CONFIG_CMA
6259 [MIGRATE_CMA] = 'C',
6260 #endif
6261 #ifdef CONFIG_MEMORY_ISOLATION
6262 [MIGRATE_ISOLATE] = 'I',
6263 #endif
6264 };
6265 char tmp[MIGRATE_TYPES + 1];
6266 char *p = tmp;
6267 int i;
6268
6269 for (i = 0; i < MIGRATE_TYPES; i++) {
6270 if (type & (1 << i))
6271 *p++ = types[i];
6272 }
6273
6274 *p = '\0';
6275 printk(KERN_CONT "(%s) ", tmp);
6276 }
6277
6278 /*
6279 * Show free area list (used inside shift_scroll-lock stuff)
6280 * We also calculate the percentage fragmentation. We do this by counting the
6281 * memory on each free list with the exception of the first item on the list.
6282 *
6283 * Bits in @filter:
6284 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
6285 * cpuset.
6286 */
6287 void show_free_areas(unsigned int filter, nodemask_t *nodemask)
6288 {
6289 unsigned long free_pcp = 0;
6290 int cpu;
6291 struct zone *zone;
6292 pg_data_t *pgdat;
6293
6294 for_each_populated_zone(zone) {
6295 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6296 continue;
6297
6298 for_each_online_cpu(cpu)
6299 free_pcp += per_cpu_ptr(zone_per_cpu_pageset(zone), cpu)->pcp.count;
6300 }
6301
6302 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
6303 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
6304 " unevictable:%lu dirty:%lu writeback:%lu\n"
6305 " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
6306 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
6307 " kernel_misc_reclaimable:%lu\n"
6308 " free:%lu free_pcp:%lu free_cma:%lu\n",
6309 global_node_page_state(NR_ACTIVE_ANON),
6310 global_node_page_state(NR_INACTIVE_ANON),
6311 global_node_page_state(NR_ISOLATED_ANON),
6312 global_node_page_state(NR_ACTIVE_FILE),
6313 global_node_page_state(NR_INACTIVE_FILE),
6314 global_node_page_state(NR_ISOLATED_FILE),
6315 global_node_page_state(NR_UNEVICTABLE),
6316 global_node_page_state(NR_FILE_DIRTY),
6317 global_node_page_state(NR_WRITEBACK),
6318 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
6319 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
6320 global_node_page_state(NR_FILE_MAPPED),
6321 global_node_page_state(NR_SHMEM),
6322 global_node_page_state(NR_PAGETABLE),
6323 global_zone_page_state(NR_BOUNCE),
6324 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
6325 global_zone_page_state(NR_FREE_PAGES),
6326 free_pcp,
6327 global_zone_page_state(NR_FREE_CMA_PAGES));
6328
6329 trace_android_vh_show_mapcount_pages(NULL);
6330 for_each_online_pgdat(pgdat) {
6331 if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
6332 continue;
6333
6334 printk("Node %d"
6335 " active_anon:%lukB"
6336 " inactive_anon:%lukB"
6337 " active_file:%lukB"
6338 " inactive_file:%lukB"
6339 " unevictable:%lukB"
6340 " isolated(anon):%lukB"
6341 " isolated(file):%lukB"
6342 " mapped:%lukB"
6343 " dirty:%lukB"
6344 " writeback:%lukB"
6345 " shmem:%lukB"
6346 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6347 " shmem_thp: %lukB"
6348 " shmem_pmdmapped: %lukB"
6349 " anon_thp: %lukB"
6350 #endif
6351 " writeback_tmp:%lukB"
6352 " kernel_stack:%lukB"
6353 #ifdef CONFIG_SHADOW_CALL_STACK
6354 " shadow_call_stack:%lukB"
6355 #endif
6356 " pagetables:%lukB"
6357 " all_unreclaimable? %s"
6358 "\n",
6359 pgdat->node_id,
6360 K(node_page_state(pgdat, NR_ACTIVE_ANON)),
6361 K(node_page_state(pgdat, NR_INACTIVE_ANON)),
6362 K(node_page_state(pgdat, NR_ACTIVE_FILE)),
6363 K(node_page_state(pgdat, NR_INACTIVE_FILE)),
6364 K(node_page_state(pgdat, NR_UNEVICTABLE)),
6365 K(node_page_state(pgdat, NR_ISOLATED_ANON)),
6366 K(node_page_state(pgdat, NR_ISOLATED_FILE)),
6367 K(node_page_state(pgdat, NR_FILE_MAPPED)),
6368 K(node_page_state(pgdat, NR_FILE_DIRTY)),
6369 K(node_page_state(pgdat, NR_WRITEBACK)),
6370 K(node_page_state(pgdat, NR_SHMEM)),
6371 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
6372 K(node_page_state(pgdat, NR_SHMEM_THPS)),
6373 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
6374 K(node_page_state(pgdat, NR_ANON_THPS)),
6375 #endif
6376 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
6377 node_page_state(pgdat, NR_KERNEL_STACK_KB),
6378 #ifdef CONFIG_SHADOW_CALL_STACK
6379 node_page_state(pgdat, NR_KERNEL_SCS_KB),
6380 #endif
6381 K(node_page_state(pgdat, NR_PAGETABLE)),
6382 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
6383 "yes" : "no");
6384 }
6385
6386 for_each_populated_zone(zone) {
6387 int i;
6388
6389 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6390 continue;
6391
6392 free_pcp = 0;
6393 for_each_online_cpu(cpu)
6394 free_pcp += per_cpu_ptr(zone_per_cpu_pageset(zone), cpu)->pcp.count;
6395
6396 show_node(zone);
6397 printk(KERN_CONT
6398 "%s"
6399 " free:%lukB"
6400 " min:%lukB"
6401 " low:%lukB"
6402 " high:%lukB"
6403 " reserved_highatomic:%luKB"
6404 " active_anon:%lukB"
6405 " inactive_anon:%lukB"
6406 " active_file:%lukB"
6407 " inactive_file:%lukB"
6408 " unevictable:%lukB"
6409 " writepending:%lukB"
6410 " present:%lukB"
6411 " managed:%lukB"
6412 " mlocked:%lukB"
6413 " bounce:%lukB"
6414 " free_pcp:%lukB"
6415 " local_pcp:%ukB"
6416 " free_cma:%lukB"
6417 "\n",
6418 zone->name,
6419 K(zone_page_state(zone, NR_FREE_PAGES)),
6420 K(min_wmark_pages(zone)),
6421 K(low_wmark_pages(zone)),
6422 K(high_wmark_pages(zone)),
6423 K(zone->nr_reserved_highatomic),
6424 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
6425 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
6426 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
6427 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
6428 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
6429 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
6430 K(zone->present_pages),
6431 K(zone_managed_pages(zone)),
6432 K(zone_page_state(zone, NR_MLOCK)),
6433 K(zone_page_state(zone, NR_BOUNCE)),
6434 K(free_pcp),
6435 K(this_cpu_read((zone_per_cpu_pageset(zone))->pcp.count)),
6436 K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
6437 printk("lowmem_reserve[]:");
6438 for (i = 0; i < MAX_NR_ZONES; i++)
6439 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
6440 printk(KERN_CONT "\n");
6441 }
6442
6443 for_each_populated_zone(zone) {
6444 unsigned int order;
6445 unsigned long nr[MAX_ORDER], flags, total = 0;
6446 unsigned char types[MAX_ORDER];
6447
6448 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
6449 continue;
6450 show_node(zone);
6451 printk(KERN_CONT "%s: ", zone->name);
6452
6453 spin_lock_irqsave(&zone->lock, flags);
6454 for (order = 0; order < MAX_ORDER; order++) {
6455 struct free_area *area = &zone->free_area[order];
6456 int type;
6457
6458 nr[order] = area->nr_free;
6459 total += nr[order] << order;
6460
6461 types[order] = 0;
6462 for (type = 0; type < MIGRATE_TYPES; type++) {
6463 if (!free_area_empty(area, type))
6464 types[order] |= 1 << type;
6465 }
6466 }
6467 spin_unlock_irqrestore(&zone->lock, flags);
6468 for (order = 0; order < MAX_ORDER; order++) {
6469 printk(KERN_CONT "%lu*%lukB ",
6470 nr[order], K(1UL) << order);
6471 if (nr[order])
6472 show_migration_types(types[order]);
6473 }
6474 printk(KERN_CONT "= %lukB\n", K(total));
6475 }
6476
6477 hugetlb_show_meminfo();
6478
6479 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
6480
6481 show_swap_cache_info();
6482 }
6483
6484 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
6485 {
6486 zoneref->zone = zone;
6487 zoneref->zone_idx = zone_idx(zone);
6488 }
6489
6490 /*
6491 * Builds allocation fallback zone lists.
6492 *
6493 * Add all populated zones of a node to the zonelist.
6494 */
6495 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
6496 {
6497 struct zone *zone;
6498 enum zone_type zone_type = MAX_NR_ZONES;
6499 int nr_zones = 0;
6500
6501 do {
6502 zone_type--;
6503 zone = pgdat->node_zones + zone_type;
6504 if (populated_zone(zone)) {
6505 zoneref_set_zone(zone, &zonerefs[nr_zones++]);
6506 check_highest_zone(zone_type);
6507 }
6508 } while (zone_type);
6509
6510 return nr_zones;
6511 }
6512
6513 #ifdef CONFIG_NUMA
6514
6515 static int __parse_numa_zonelist_order(char *s)
6516 {
6517 /*
6518 	 * We used to support different zonelist modes but they turned
6519 	 * out to be just not useful. Let's keep the warning in place
6520 	 * if somebody still uses the cmd line parameter so that we do
6521 * not fail it silently
6522 */
6523 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
6524 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
6525 return -EINVAL;
6526 }
6527 return 0;
6528 }
6529
6530 char numa_zonelist_order[] = "Node";
6531
6532 /*
6533 * sysctl handler for numa_zonelist_order
6534 */
6535 int numa_zonelist_order_handler(struct ctl_table *table, int write,
6536 void *buffer, size_t *length, loff_t *ppos)
6537 {
6538 if (write)
6539 return __parse_numa_zonelist_order(buffer);
6540 return proc_dostring(table, write, buffer, length, ppos);
6541 }
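/*
 * For reference (illustrative shell usage, not kernel code): only the
 * legacy "Node"/"Default" style spellings are still accepted, e.g.
 *
 *	sysctl vm.numa_zonelist_order=Node
 *
 * anything not starting with 'n'/'N'/'d'/'D' is rejected with the warning
 * above.
 */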
6542
6543
6544 #define MAX_NODE_LOAD (nr_online_nodes)
6545 static int node_load[MAX_NUMNODES];
6546
6547 /**
6548 * find_next_best_node - find the next node that should appear in a given node's fallback list
6549 * @node: node whose fallback list we're appending
6550 * @used_node_mask: nodemask_t of already used nodes
6551 *
6552 * We use a number of factors to determine which is the next node that should
6553 * appear on a given node's fallback list. The node should not have appeared
6554 * already in @node's fallback list, and it should be the next closest node
6555 * according to the distance array (which contains arbitrary distance values
6556 * from each node to each node in the system), and should also prefer nodes
6557 * with no CPUs, since presumably they'll have very little allocation pressure
6558 * on them otherwise.
6559 *
6560 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
6561 */
6562 int find_next_best_node(int node, nodemask_t *used_node_mask)
6563 {
6564 int n, val;
6565 int min_val = INT_MAX;
6566 int best_node = NUMA_NO_NODE;
6567
6568 /* Use the local node if we haven't already */
6569 if (!node_isset(node, *used_node_mask)) {
6570 node_set(node, *used_node_mask);
6571 return node;
6572 }
6573
6574 for_each_node_state(n, N_MEMORY) {
6575
6576 /* Don't want a node to appear more than once */
6577 if (node_isset(n, *used_node_mask))
6578 continue;
6579
6580 /* Use the distance array to find the distance */
6581 val = node_distance(node, n);
6582
6583 /* Penalize nodes under us ("prefer the next node") */
6584 val += (n < node);
6585
6586 /* Give preference to headless and unused nodes */
6587 if (!cpumask_empty(cpumask_of_node(n)))
6588 val += PENALTY_FOR_NODE_WITH_CPUS;
6589
6590 /* Slight preference for less loaded node */
6591 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6592 val += node_load[n];
6593
6594 if (val < min_val) {
6595 min_val = val;
6596 best_node = n;
6597 }
6598 }
6599
6600 if (best_node >= 0)
6601 node_set(best_node, *used_node_mask);
6602
6603 return best_node;
6604 }
6605
6606
6607 /*
6608 * Build zonelists ordered by node and zones within node.
6609 * This results in maximum locality--normal zone overflows into local
6610 * DMA zone, if any--but risks exhausting DMA zone.
6611 */
6612 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6613 unsigned nr_nodes)
6614 {
6615 struct zoneref *zonerefs;
6616 int i;
6617
6618 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6619
6620 for (i = 0; i < nr_nodes; i++) {
6621 int nr_zones;
6622
6623 pg_data_t *node = NODE_DATA(node_order[i]);
6624
6625 nr_zones = build_zonerefs_node(node, zonerefs);
6626 zonerefs += nr_zones;
6627 }
6628 zonerefs->zone = NULL;
6629 zonerefs->zone_idx = 0;
6630 }
6631
6632 /*
6633 * Build gfp_thisnode zonelists
6634 */
6635 static void build_thisnode_zonelists(pg_data_t *pgdat)
6636 {
6637 struct zoneref *zonerefs;
6638 int nr_zones;
6639
6640 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6641 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6642 zonerefs += nr_zones;
6643 zonerefs->zone = NULL;
6644 zonerefs->zone_idx = 0;
6645 }
6646
6647 /*
6648 * Build zonelists ordered by zone and nodes within zones.
6649 * This results in conserving DMA zone[s] until all Normal memory is
6650 * exhausted, but results in overflowing to remote node while memory
6651 * may still exist in local DMA zone.
6652 */
6653
6654 static void build_zonelists(pg_data_t *pgdat)
6655 {
6656 static int node_order[MAX_NUMNODES];
6657 int node, load, nr_nodes = 0;
6658 nodemask_t used_mask = NODE_MASK_NONE;
6659 int local_node, prev_node;
6660
6661 /* NUMA-aware ordering of nodes */
6662 local_node = pgdat->node_id;
6663 load = nr_online_nodes;
6664 prev_node = local_node;
6665
6666 memset(node_order, 0, sizeof(node_order));
6667 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6668 /*
6669 * We don't want to pressure a particular node.
6670 		 * So add a penalty to the first node in the same
6671 		 * distance group to make it round-robin.
6672 */
6673 if (node_distance(local_node, node) !=
6674 node_distance(local_node, prev_node))
6675 node_load[node] = load;
6676
6677 node_order[nr_nodes++] = node;
6678 prev_node = node;
6679 load--;
6680 }
6681
6682 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6683 build_thisnode_zonelists(pgdat);
6684 }
6685
6686 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6687 /*
6688 * Return node id of node used for "local" allocations.
6689 * I.e., first node id of first zone in arg node's generic zonelist.
6690 * Used for initializing percpu 'numa_mem', which is used primarily
6691 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6692 */
6693 int local_memory_node(int node)
6694 {
6695 struct zoneref *z;
6696
6697 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6698 gfp_zone(GFP_KERNEL),
6699 NULL);
6700 return zone_to_nid(z->zone);
6701 }
6702 #endif
6703
6704 static void setup_min_unmapped_ratio(void);
6705 static void setup_min_slab_ratio(void);
6706 #else /* CONFIG_NUMA */
6707
6708 static void build_zonelists(pg_data_t *pgdat)
6709 {
6710 int node, local_node;
6711 struct zoneref *zonerefs;
6712 int nr_zones;
6713
6714 local_node = pgdat->node_id;
6715
6716 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6717 nr_zones = build_zonerefs_node(pgdat, zonerefs);
6718 zonerefs += nr_zones;
6719
6720 /*
6721 * Now we build the zonelist so that it contains the zones
6722 * of all the other nodes.
6723 * We don't want to pressure a particular node, so when
6724 * building the zones for node N, we make sure that the
6725 * zones coming right after the local ones are those from
6726 	 * node N+1 (wrapping around to node 0 after the last node)
6727 */
6728 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6729 if (!node_online(node))
6730 continue;
6731 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6732 zonerefs += nr_zones;
6733 }
6734 for (node = 0; node < local_node; node++) {
6735 if (!node_online(node))
6736 continue;
6737 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6738 zonerefs += nr_zones;
6739 }
6740
6741 zonerefs->zone = NULL;
6742 zonerefs->zone_idx = 0;
6743 }
6744
6745 #endif /* CONFIG_NUMA */
6746
6747 /*
6748 * Boot pageset table. One per cpu which is going to be used for all
6749 * zones and all nodes. The parameters will be set in such a way
6750 * that an item put on a list will immediately be handed over to
6751 * the buddy list. This is safe since pageset manipulation is done
6752 * with interrupts disabled.
6753 *
6754 * The boot_pagesets must be kept even after bootup is complete for
6755 * unused processors and/or zones. They do play a role for bootstrapping
6756 * hotplugged processors.
6757 *
6758 * zoneinfo_show() and maybe other functions do
6759 * not check if the processor is online before following the pageset pointer.
6760 * Other parts of the kernel may not check if the zone is available.
6761 */
6762 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
6763 /* These effectively disable the pcplists in the boot pageset completely */
6764 #define BOOT_PAGESET_HIGH 0
6765 #define BOOT_PAGESET_BATCH 1
6766 static DEFINE_PER_CPU(struct per_cpu_pages_ext, boot_pageset);
6767 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
6768 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6769
6770 static void __build_all_zonelists(void *data)
6771 {
6772 int nid;
6773 int __maybe_unused cpu;
6774 pg_data_t *self = data;
6775 unsigned long flags;
6776
6777 /*
6778 * Explicitly disable this CPU's interrupts before taking seqlock
6779 * to prevent any IRQ handler from calling into the page allocator
6780 * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
6781 */
6782 local_irq_save(flags);
6783 /*
6784 * Explicitly disable this CPU's synchronous printk() before taking
6785 * seqlock to prevent any printk() from trying to hold port->lock, for
6786 * tty_insert_flip_string_and_push_buffer() on other CPU might be
6787 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
6788 */
6789 printk_deferred_enter();
6790 write_seqlock(&zonelist_update_seq);
6791
6792 #ifdef CONFIG_NUMA
6793 memset(node_load, 0, sizeof(node_load));
6794 #endif
6795
6796 /*
6797 	 * This node was hot-added and no memory is yet present. So just
6798 * building zonelists is fine - no need to touch other nodes.
6799 */
6800 if (self && !node_online(self->node_id)) {
6801 build_zonelists(self);
6802 } else {
6803 for_each_online_node(nid) {
6804 pg_data_t *pgdat = NODE_DATA(nid);
6805
6806 build_zonelists(pgdat);
6807 }
6808
6809 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
6810 /*
6811 * We now know the "local memory node" for each node--
6812 * i.e., the node of the first zone in the generic zonelist.
6813 * Set up numa_mem percpu variable for on-line cpus. During
6814 * boot, only the boot cpu should be on-line; we'll init the
6815 * secondary cpus' numa_mem as they come on-line. During
6816 * node/memory hotplug, we'll fixup all on-line cpus.
6817 */
6818 for_each_online_cpu(cpu)
6819 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6820 #endif
6821 }
6822
6823 write_sequnlock(&zonelist_update_seq);
6824 printk_deferred_exit();
6825 local_irq_restore(flags);
6826 }
6827
6828 static noinline void __init
6829 build_all_zonelists_init(void)
6830 {
6831 int cpu;
6832
6833 __build_all_zonelists(NULL);
6834
6835 /*
6836 * Initialize the boot_pagesets that are going to be used
6837 * for bootstrapping processors. The real pagesets for
6838 * each zone will be allocated later when the per cpu
6839 * allocator is available.
6840 *
6841 * boot_pagesets are used also for bootstrapping offline
6842 * cpus if the system is already booted because the pagesets
6843 * are needed to initialize allocators on a specific cpu too.
6844 * F.e. the percpu allocator needs the page allocator which
6845 * needs the percpu allocator in order to allocate its pagesets
6846 * (a chicken-egg dilemma).
6847 */
6848 for_each_possible_cpu(cpu)
6849 per_cpu_pages_init(&per_cpu(boot_pageset, cpu).pcp, &per_cpu(boot_zonestats, cpu));
6850
6851 mminit_verify_zonelist();
6852 cpuset_init_current_mems_allowed();
6853 }
6854
6855 /*
6856  * Rebuild zonelists. The one-time boot init path is taken only when system_state == SYSTEM_BOOTING.
6857 *
6858 * __ref due to call of __init annotated helper build_all_zonelists_init
6859 * [protected by SYSTEM_BOOTING].
6860 */
6861 void __ref build_all_zonelists(pg_data_t *pgdat)
6862 {
6863 unsigned long vm_total_pages;
6864
6865 if (system_state == SYSTEM_BOOTING) {
6866 build_all_zonelists_init();
6867 } else {
6868 __build_all_zonelists(pgdat);
6869 /* cpuset refresh routine should be here */
6870 }
6871 /* Get the number of free pages beyond high watermark in all zones. */
6872 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6873 /*
6874 * Disable grouping by mobility if the number of pages in the
6875 * system is too low to allow the mechanism to work. It would be
6876 * more accurate, but expensive to check per-zone. This check is
6877 	 * made on memory hot-add so a system can start with mobility
6878 	 * disabled and enable it later.
6879 */
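	/*
	 * Rough illustration (config dependent, added for clarity): with 4KiB
	 * pages, 2MiB pageblocks (512 pages) and roughly half a dozen migrate
	 * types, this cut-off works out to a few thousand pages, i.e. on the
	 * order of 10-12MiB of total memory.
	 */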
6880 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6881 page_group_by_mobility_disabled = 1;
6882 else
6883 page_group_by_mobility_disabled = 0;
6884
6885 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
6886 nr_online_nodes,
6887 page_group_by_mobility_disabled ? "off" : "on",
6888 vm_total_pages);
6889 #ifdef CONFIG_NUMA
6890 pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6891 #endif
6892 }
6893
6894 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6895 static bool __meminit
6896 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6897 {
6898 static struct memblock_region *r;
6899
6900 if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6901 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6902 for_each_mem_region(r) {
6903 if (*pfn < memblock_region_memory_end_pfn(r))
6904 break;
6905 }
6906 }
6907 if (*pfn >= memblock_region_memory_base_pfn(r) &&
6908 memblock_is_mirror(r)) {
6909 *pfn = memblock_region_memory_end_pfn(r);
6910 return true;
6911 }
6912 }
6913 return false;
6914 }
6915
6916 /*
6917 * Initially all pages are reserved - free ones are freed
6918 * up by memblock_free_all() once the early boot process is
6919 * done. Non-atomic initialization, single-pass.
6920 *
6921 * All aligned pageblocks are initialized to the specified migratetype
6922 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6923 * zone stats (e.g., nr_isolate_pageblock) are touched.
6924 */
6925 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6926 unsigned long start_pfn, unsigned long zone_end_pfn,
6927 enum meminit_context context,
6928 struct vmem_altmap *altmap, int migratetype)
6929 {
6930 unsigned long pfn, end_pfn = start_pfn + size;
6931 struct page *page;
6932
6933 if (highest_memmap_pfn < end_pfn - 1)
6934 highest_memmap_pfn = end_pfn - 1;
6935
6936 #ifdef CONFIG_ZONE_DEVICE
6937 /*
6938 * Honor reservation requested by the driver for this ZONE_DEVICE
6939 * memory. We limit the total number of pages to initialize to just
6940 * those that might contain the memory mapping. We will defer the
6941 * ZONE_DEVICE page initialization until after we have released
6942 * the hotplug lock.
6943 */
6944 if (zone == ZONE_DEVICE) {
6945 if (!altmap)
6946 return;
6947
6948 if (start_pfn == altmap->base_pfn)
6949 start_pfn += altmap->reserve;
6950 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6951 }
6952 #endif
6953
6954 for (pfn = start_pfn; pfn < end_pfn; ) {
6955 /*
6956 * There can be holes in boot-time mem_map[]s handed to this
6957 * function. They do not exist on hotplugged memory.
6958 */
6959 if (context == MEMINIT_EARLY) {
6960 if (overlap_memmap_init(zone, &pfn))
6961 continue;
6962 if (defer_init(nid, pfn, zone_end_pfn))
6963 break;
6964 }
6965
6966 page = pfn_to_page(pfn);
6967 __init_single_page(page, pfn, zone, nid);
6968 if (context == MEMINIT_HOTPLUG)
6969 __SetPageReserved(page);
6970
6971 /*
6972 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6973 * such that unmovable allocations won't be scattered all
6974 * over the place during system boot.
6975 */
6976 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6977 set_pageblock_migratetype(page, migratetype);
6978 cond_resched();
6979 }
6980 pfn++;
6981 }
6982 }
6983
6984 #ifdef CONFIG_ZONE_DEVICE
6985 void __ref memmap_init_zone_device(struct zone *zone,
6986 unsigned long start_pfn,
6987 unsigned long nr_pages,
6988 struct dev_pagemap *pgmap)
6989 {
6990 unsigned long pfn, end_pfn = start_pfn + nr_pages;
6991 struct pglist_data *pgdat = zone->zone_pgdat;
6992 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6993 unsigned long zone_idx = zone_idx(zone);
6994 unsigned long start = jiffies;
6995 int nid = pgdat->node_id;
6996
6997 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6998 return;
6999
7000 /*
7001 * The call to memmap_init should have already taken care
7002 * of the pages reserved for the memmap, so we can just jump to
7003 * the end of that region and start processing the device pages.
7004 */
7005 if (altmap) {
7006 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
7007 nr_pages = end_pfn - start_pfn;
7008 }
7009
7010 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
7011 struct page *page = pfn_to_page(pfn);
7012
7013 __init_single_page(page, pfn, zone_idx, nid);
7014
7015 /*
7016 * Mark page reserved as it will need to wait for onlining
7017 * phase for it to be fully associated with a zone.
7018 *
7019 * We can use the non-atomic __set_bit operation for setting
7020 * the flag as we are still initializing the pages.
7021 */
7022 __SetPageReserved(page);
7023
7024 /*
7025 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
7026 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
7027 * ever freed or placed on a driver-private list.
7028 */
7029 page->pgmap = pgmap;
7030 page->zone_device_data = NULL;
7031
7032 /*
7033 * Mark the block movable so that blocks are reserved for
7034 * movable at startup. This will force kernel allocations
7035 * to reserve their blocks rather than leaking throughout
7036 * the address space during boot when many long-lived
7037 * kernel allocations are made.
7038 *
7039 		 * Please note that the MEMINIT_HOTPLUG path doesn't clear the memmap
7040 		 * because this is done early in section_activate().
7041 */
7042 if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
7043 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
7044 cond_resched();
7045 }
7046 }
7047
7048 pr_info("%s initialised %lu pages in %ums\n", __func__,
7049 nr_pages, jiffies_to_msecs(jiffies - start));
7050 }
7051
7052 #endif
7053 static void __meminit zone_init_free_lists(struct zone *zone)
7054 {
7055 unsigned int order, t;
7056 for_each_migratetype_order(order, t) {
7057 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
7058 zone->free_area[order].nr_free = 0;
7059 }
7060 }
7061
7062 /*
7063 * Only struct pages that correspond to ranges defined by memblock.memory
7064 * are zeroed and initialized by going through __init_single_page() during
7065 * memmap_init_zone_range().
7066 *
7067 * But, there could be struct pages that correspond to holes in
7068 * memblock.memory. This can happen because of the following reasons:
7069 * - physical memory bank size is not necessarily the exact multiple of the
7070 * arbitrary section size
7071 * - early reserved memory may not be listed in memblock.memory
7072 * - memory layouts defined with memmap= kernel parameter may not align
7073 * nicely with memmap sections
7074 *
7075 * Explicitly initialize those struct pages so that:
7076 * - PG_Reserved is set
7077 * - zone and node links point to zone and node that span the page if the
7078 * hole is in the middle of a zone
7079 * - zone and node links point to adjacent zone/node if the hole falls on
7080 * the zone boundary; the pages in such holes will be prepended to the
7081 * zone/node above the hole except for the trailing pages in the last
7082 * section that will be appended to the zone/node below.
7083 */
7084 static void __init init_unavailable_range(unsigned long spfn,
7085 unsigned long epfn,
7086 int zone, int node)
7087 {
7088 unsigned long pfn;
7089 u64 pgcnt = 0;
7090
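	/*
	 * Note (added for clarity): the memmap is populated at a granularity
	 * no finer than a pageblock, so if the first pfn of a pageblock has
	 * no valid memmap the loop below skips that whole pageblock at once.
	 */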
7091 for (pfn = spfn; pfn < epfn; pfn++) {
7092 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
7093 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
7094 + pageblock_nr_pages - 1;
7095 continue;
7096 }
7097 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
7098 __SetPageReserved(pfn_to_page(pfn));
7099 pgcnt++;
7100 }
7101
7102 if (pgcnt)
7103 pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
7104 node, zone_names[zone], pgcnt);
7105 }
7106
7107 static void __init memmap_init_zone_range(struct zone *zone,
7108 unsigned long start_pfn,
7109 unsigned long end_pfn,
7110 unsigned long *hole_pfn)
7111 {
7112 unsigned long zone_start_pfn = zone->zone_start_pfn;
7113 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
7114 int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
7115
7116 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
7117 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
7118
7119 if (start_pfn >= end_pfn)
7120 return;
7121
7122 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
7123 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
7124
7125 if (*hole_pfn < start_pfn)
7126 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
7127
7128 *hole_pfn = end_pfn;
7129 }
7130
7131 static void __init memmap_init(void)
7132 {
7133 unsigned long start_pfn, end_pfn;
7134 unsigned long hole_pfn = 0;
7135 int i, j, zone_id = 0, nid;
7136
7137 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7138 struct pglist_data *node = NODE_DATA(nid);
7139
7140 for (j = 0; j < MAX_NR_ZONES; j++) {
7141 struct zone *zone = node->node_zones + j;
7142
7143 if (!populated_zone(zone))
7144 continue;
7145
7146 memmap_init_zone_range(zone, start_pfn, end_pfn,
7147 &hole_pfn);
7148 zone_id = j;
7149 }
7150 }
7151
7152 #ifdef CONFIG_SPARSEMEM
7153 /*
7154 	 * Initialize the memory map for the hole in the range [memory_end,
7155 * section_end].
7156 * Append the pages in this hole to the highest zone in the last
7157 * node.
7158 * The call to init_unavailable_range() is outside the ifdef to
7159 	 * silence the compiler warning about zone_id being set but not used;
7160 	 * for FLATMEM it is a no-op anyway.
7161 */
7162 end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
7163 if (hole_pfn < end_pfn)
7164 #endif
7165 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
7166 }
7167
7168 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
7169 phys_addr_t min_addr, int nid, bool exact_nid)
7170 {
7171 void *ptr;
7172
7173 if (exact_nid)
7174 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
7175 MEMBLOCK_ALLOC_ACCESSIBLE,
7176 nid);
7177 else
7178 ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
7179 MEMBLOCK_ALLOC_ACCESSIBLE,
7180 nid);
7181
7182 if (ptr && size > 0)
7183 page_init_poison(ptr, size);
7184
7185 return ptr;
7186 }
7187
7188 static int zone_batchsize(struct zone *zone)
7189 {
7190 #ifdef CONFIG_MMU
7191 int batch;
7192
7193 /*
7194 * The number of pages to batch allocate is either ~0.1%
7195 * of the zone or 1MB, whichever is smaller. The batch
7196 	 * size strikes a balance between allocation latency
7197 * and zone lock contention.
7198 */
7199 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
7200 batch /= 4; /* We effectively *= 4 below */
7201 if (batch < 1)
7202 batch = 1;
7203
7204 /*
7205 * Clamp the batch to a 2^n - 1 value. Having a power
7206 * of 2 value was found to be more likely to have
7207 * suboptimal cache aliasing properties in some cases.
7208 *
7209 * For example if 2 tasks are alternately allocating
7210 * batches of pages, one task can end up with a lot
7211 * of pages of one half of the possible page colors
7212 * and the other with pages of the other colors.
7213 */
7214 batch = rounddown_pow_of_two(batch + batch/2) - 1;
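	/*
	 * Worked example (illustrative, assuming 4KiB pages): for a zone with
	 * 1GiB managed (262144 pages), min(262144 >> 10, 256) = 256, divided
	 * by 4 gives 64, and rounddown_pow_of_two(64 + 32) - 1 yields a batch
	 * of 63.
	 */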
7215
7216 return batch;
7217
7218 #else
7219 /* The deferral and batching of frees should be suppressed under NOMMU
7220 * conditions.
7221 *
7222 * The problem is that NOMMU needs to be able to allocate large chunks
7223 * of contiguous memory as there's no hardware page translation to
7224 * assemble apparent contiguous memory from discontiguous pages.
7225 *
7226 * Queueing large contiguous runs of pages for batching, however,
7227 * causes the pages to actually be freed in smaller chunks. As there
7228 * can be a significant delay between the individual batches being
7229 * recycled, this leads to the once large chunks of space being
7230 * fragmented and becoming unavailable for high-order allocations.
7231 */
7232 return 0;
7233 #endif
7234 }
7235
7236 static int zone_highsize(struct zone *zone, int batch, int cpu_online)
7237 {
7238 #ifdef CONFIG_MMU
7239 int high;
7240 int nr_split_cpus;
7241 unsigned long total_pages;
7242
7243 if (!percpu_pagelist_high_fraction) {
7244 /*
7245 * By default, the high value of the pcp is based on the zone
7246 		 * low watermark so that if the pcp lists are full then background
7247 * reclaim will not be started prematurely.
7248 */
7249 total_pages = low_wmark_pages(zone);
7250 } else {
7251 /*
7252 * If percpu_pagelist_high_fraction is configured, the high
7253 * value is based on a fraction of the managed pages in the
7254 * zone.
7255 */
7256 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
7257 }
7258
7259 /*
7260 * Split the high value across all online CPUs local to the zone. Note
7261 * that early in boot that CPUs may not be online yet and that during
7262 * CPU hotplug that the cpumask is not yet updated when a CPU is being
7263 * onlined. For memory nodes that have no CPUs, split pcp->high across
7264 * all online CPUs to mitigate the risk that reclaim is triggered
7265 * prematurely due to pages stored on pcp lists.
7266 */
7267 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
7268 if (!nr_split_cpus)
7269 nr_split_cpus = num_online_cpus();
7270 high = total_pages / nr_split_cpus;
7271
7272 /*
7273 * Ensure high is at least batch*4. The multiple is based on the
7274 * historical relationship between high and batch.
7275 */
7276 high = max(high, batch << 2);
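	/*
	 * Rough illustration (assumed numbers): with a zone low watermark of
	 * 4096 pages split across 4 local CPUs, high starts at 1024 pages; if
	 * batch is 63, batch << 2 is 252, so the watermark-derived 1024 wins.
	 */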
7277
7278 return high;
7279 #else
7280 return 0;
7281 #endif
7282 }
7283
7284 /*
7285 * pcp->high and pcp->batch values are related and generally batch is lower
7286 * than high. They are also related to pcp->count such that count is lower
7287 * than high, and as soon as it reaches high, the pcplist is flushed.
7288 *
7289 * However, guaranteeing these relations at all times would require e.g. write
7290 * barriers here but also careful usage of read barriers at the read side, and
7291  * thus be prone to error and bad for performance. So the update only prevents
7292 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
7293 * can cope with those fields changing asynchronously, and fully trust only the
7294 * pcp->count field on the local CPU with interrupts disabled.
7295 *
7296 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
7297 * outside of boot time (or some other assurance that no concurrent updaters
7298 * exist).
7299 */
7300 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
7301 unsigned long batch)
7302 {
7303 WRITE_ONCE(pcp->batch, batch);
7304 WRITE_ONCE(pcp->high, high);
7305 }
7306
7307 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
7308 {
7309 struct per_cpu_pages_ext *pcp_ext = pcp_to_pcpext(pcp);
7310 int pindex;
7311
7312 memset(pcp, 0, sizeof(*pcp));
7313 memset(pzstats, 0, sizeof(*pzstats));
7314
7315 spin_lock_init(&pcp_ext->lock);
7316 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
7317 INIT_LIST_HEAD(&pcp->lists[pindex]);
7318
7319 /*
7320 * Set batch and high values safe for a boot pageset. A true percpu
7321 * pageset's initialization will update them subsequently. Here we don't
7322 * need to be as careful as pageset_update() as nobody can access the
7323 * pageset yet.
7324 */
7325 pcp->high = BOOT_PAGESET_HIGH;
7326 pcp->batch = BOOT_PAGESET_BATCH;
7327 pcp->free_factor = 0;
7328 }
7329
7330 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
7331 unsigned long batch)
7332 {
7333 struct per_cpu_pages *pcp;
7334 int cpu;
7335
7336 for_each_possible_cpu(cpu) {
7337 pcp = &per_cpu_ptr(zone_per_cpu_pageset(zone), cpu)->pcp;
7338 pageset_update(pcp, high, batch);
7339 }
7340 }
7341
7342 /*
7343 * Calculate and set new high and batch values for all per-cpu pagesets of a
7344 * zone based on the zone's size.
7345 */
7346 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
7347 {
7348 int new_high, new_batch;
7349
7350 new_batch = max(1, zone_batchsize(zone));
7351 new_high = zone_highsize(zone, new_batch, cpu_online);
7352
7353 if (zone->pageset_high == new_high &&
7354 zone->pageset_batch == new_batch)
7355 return;
7356
7357 zone->pageset_high = new_high;
7358 zone->pageset_batch = new_batch;
7359
7360 __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
7361 }
7362
7363 void __meminit setup_zone_pageset(struct zone *zone)
7364 {
7365 int cpu;
7366
7367 /* Size may be 0 on !SMP && !NUMA */
7368 if (sizeof(struct per_cpu_zonestat) > 0)
7369 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
7370
7371 zone->per_cpu_pageset = (struct per_cpu_pages __percpu *)
7372 alloc_percpu(struct per_cpu_pages_ext);
7373 for_each_possible_cpu(cpu) {
7374 struct per_cpu_pages *pcp;
7375 struct per_cpu_zonestat *pzstats;
7376
7377 pcp = &per_cpu_ptr(zone_per_cpu_pageset(zone), cpu)->pcp;
7378 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
7379 per_cpu_pages_init(pcp, pzstats);
7380 }
7381
7382 zone_set_pageset_high_and_batch(zone, 0);
7383 }
7384
7385 /*
7386 * Allocate per cpu pagesets and initialize them.
7387 * Before this call only boot pagesets were available.
7388 */
7389 void __init setup_per_cpu_pageset(void)
7390 {
7391 struct pglist_data *pgdat;
7392 struct zone *zone;
7393 int __maybe_unused cpu;
7394
7395 for_each_populated_zone(zone)
7396 setup_zone_pageset(zone);
7397
7398 #ifdef CONFIG_NUMA
7399 /*
7400 * Unpopulated zones continue using the boot pagesets.
7401 * The numa stats for these pagesets need to be reset.
7402 * Otherwise, they will end up skewing the stats of
7403 * the nodes these zones are associated with.
7404 */
7405 for_each_possible_cpu(cpu) {
7406 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
7407 memset(pzstats->vm_numa_event, 0,
7408 sizeof(pzstats->vm_numa_event));
7409 }
7410 #endif
7411
7412 for_each_online_pgdat(pgdat)
7413 pgdat->per_cpu_nodestats =
7414 alloc_percpu(struct per_cpu_nodestat);
7415 }
7416
7417 static __meminit void zone_pcp_init(struct zone *zone)
7418 {
7419 /*
7420 * per cpu subsystem is not up at this point. The following code
7421 * relies on the ability of the linker to provide the
7422 * offset of a (static) per cpu variable into the per cpu area.
7423 */
7424 zone->per_cpu_pageset = (struct per_cpu_pages __percpu *)&boot_pageset;
7425 zone->per_cpu_zonestats = &boot_zonestats;
7426 zone->pageset_high = BOOT_PAGESET_HIGH;
7427 zone->pageset_batch = BOOT_PAGESET_BATCH;
7428
7429 if (populated_zone(zone))
7430 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
7431 zone->present_pages, zone_batchsize(zone));
7432 }
7433
7434 void __meminit init_currently_empty_zone(struct zone *zone,
7435 unsigned long zone_start_pfn,
7436 unsigned long size)
7437 {
7438 struct pglist_data *pgdat = zone->zone_pgdat;
7439 int zone_idx = zone_idx(zone) + 1;
7440
7441 if (zone_idx > pgdat->nr_zones)
7442 pgdat->nr_zones = zone_idx;
7443
7444 zone->zone_start_pfn = zone_start_pfn;
7445
7446 mminit_dprintk(MMINIT_TRACE, "memmap_init",
7447 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
7448 pgdat->node_id,
7449 (unsigned long)zone_idx(zone),
7450 zone_start_pfn, (zone_start_pfn + size));
7451
7452 zone_init_free_lists(zone);
7453 zone->initialized = 1;
7454 }
7455
7456 /**
7457 * get_pfn_range_for_nid - Return the start and end page frames for a node
7458 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
7459 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
7460 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
7461 *
7462 * It returns the start and end page frame of a node based on information
7463 * provided by memblock_set_node(). If called for a node
7464 * with no available memory, a warning is printed and the start and end
7465 * PFNs will be 0.
7466 */
7467 void __init get_pfn_range_for_nid(unsigned int nid,
7468 unsigned long *start_pfn, unsigned long *end_pfn)
7469 {
7470 unsigned long this_start_pfn, this_end_pfn;
7471 int i;
7472
7473 *start_pfn = -1UL;
7474 *end_pfn = 0;
7475
7476 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
7477 *start_pfn = min(*start_pfn, this_start_pfn);
7478 *end_pfn = max(*end_pfn, this_end_pfn);
7479 }
7480
7481 if (*start_pfn == -1UL)
7482 *start_pfn = 0;
7483 }
7484
7485 /*
7486 * This finds a zone that can be used for ZONE_MOVABLE pages. The
7487  * assumption is made that zones within a node are ordered by monotonically
7488  * increasing memory addresses so that the "highest" populated zone is used.
7489 */
7490 static void __init find_usable_zone_for_movable(void)
7491 {
7492 int zone_index;
7493 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7494 if (zone_index == ZONE_MOVABLE)
7495 continue;
7496
7497 if (arch_zone_highest_possible_pfn[zone_index] >
7498 arch_zone_lowest_possible_pfn[zone_index])
7499 break;
7500 }
7501
7502 VM_BUG_ON(zone_index == -1);
7503 movable_zone = zone_index;
7504 }
7505
7506 /*
7507 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7508 * because it is sized independent of architecture. Unlike the other zones,
7509 * the starting point for ZONE_MOVABLE is not fixed. It may be different
7510 * in each node depending on the size of each node and how evenly kernelcore
7511 * is distributed. This helper function adjusts the zone ranges
7512 * provided by the architecture for a given node by using the end of the
7513 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7514  * zones within a node are in monotonically increasing memory address order.
7515 */
7516 static void __init adjust_zone_range_for_zone_movable(int nid,
7517 unsigned long zone_type,
7518 unsigned long node_start_pfn,
7519 unsigned long node_end_pfn,
7520 unsigned long *zone_start_pfn,
7521 unsigned long *zone_end_pfn)
7522 {
7523 /* Only adjust if ZONE_MOVABLE is on this node */
7524 if (zone_movable_pfn[nid]) {
7525 /* Size ZONE_MOVABLE */
7526 if (zone_type == ZONE_MOVABLE) {
7527 *zone_start_pfn = zone_movable_pfn[nid];
7528 *zone_end_pfn = min(node_end_pfn,
7529 arch_zone_highest_possible_pfn[movable_zone]);
7530
7531 /* Adjust for ZONE_MOVABLE starting within this range */
7532 } else if (!mirrored_kernelcore &&
7533 *zone_start_pfn < zone_movable_pfn[nid] &&
7534 *zone_end_pfn > zone_movable_pfn[nid]) {
7535 *zone_end_pfn = zone_movable_pfn[nid];
7536
7537 /* Check if this whole range is within ZONE_MOVABLE */
7538 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
7539 *zone_start_pfn = *zone_end_pfn;
7540 }
7541 }
7542
7543 /*
7544  * Return the number of pages a zone spans in a node, including holes:
7545 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7546 */
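/*
 * Illustrative example (not from the original source): if a node spans PFNs
 * 0x80000-0x180000 and ZONE_DMA32 covers PFNs 0-0x100000, the DMA32 span
 * reported for that node is clamped to 0x80000-0x100000, i.e. 0x80000 pages.
 */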
7547 static unsigned long __init zone_spanned_pages_in_node(int nid,
7548 unsigned long zone_type,
7549 unsigned long node_start_pfn,
7550 unsigned long node_end_pfn,
7551 unsigned long *zone_start_pfn,
7552 unsigned long *zone_end_pfn)
7553 {
7554 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7555 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7556 	/* When hot-adding a new node from cpu_up(), the node should be empty */
7557 if (!node_start_pfn && !node_end_pfn)
7558 return 0;
7559
7560 /* Get the start and end of the zone */
7561 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7562 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7563 adjust_zone_range_for_zone_movable(nid, zone_type,
7564 node_start_pfn, node_end_pfn,
7565 zone_start_pfn, zone_end_pfn);
7566
7567 /* Check that this node has pages within the zone's required range */
7568 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7569 return 0;
7570
7571 /* Move the zone boundaries inside the node if necessary */
7572 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7573 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7574
7575 /* Return the spanned pages */
7576 return *zone_end_pfn - *zone_start_pfn;
7577 }
7578
7579 /*
7580 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7581 * then all holes in the requested range will be accounted for.
7582 */
7583 unsigned long __init __absent_pages_in_range(int nid,
7584 unsigned long range_start_pfn,
7585 unsigned long range_end_pfn)
7586 {
7587 unsigned long nr_absent = range_end_pfn - range_start_pfn;
7588 unsigned long start_pfn, end_pfn;
7589 int i;
7590
7591 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7592 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7593 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7594 nr_absent -= end_pfn - start_pfn;
7595 }
7596 return nr_absent;
7597 }
7598
7599 /**
7600 * absent_pages_in_range - Return number of page frames in holes within a range
7601 * @start_pfn: The start PFN to start searching for holes
7602 * @end_pfn: The end PFN to stop searching for holes
7603 *
7604  * Return: the number of page frames in memory holes within a range.
7605 */
7606 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7607 unsigned long end_pfn)
7608 {
7609 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7610 }
7611
7612 /* Return the number of page frames in holes in a zone on a node */
7613 static unsigned long __init zone_absent_pages_in_node(int nid,
7614 unsigned long zone_type,
7615 unsigned long node_start_pfn,
7616 unsigned long node_end_pfn)
7617 {
7618 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7619 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7620 unsigned long zone_start_pfn, zone_end_pfn;
7621 unsigned long nr_absent;
7622
7623 	/* When hot-adding a new node from cpu_up(), the node should be empty */
7624 if (!node_start_pfn && !node_end_pfn)
7625 return 0;
7626
7627 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7628 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7629
7630 adjust_zone_range_for_zone_movable(nid, zone_type,
7631 node_start_pfn, node_end_pfn,
7632 &zone_start_pfn, &zone_end_pfn);
7633 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7634
7635 /*
7636 * ZONE_MOVABLE handling.
7637 	 * Pages that will end up in ZONE_MOVABLE but fall within ZONE_NORMAL's
7638 	 * range are treated as absent pages here, and vice versa.
7639 */
7640 if (mirrored_kernelcore && zone_movable_pfn[nid]) {
7641 unsigned long start_pfn, end_pfn;
7642 struct memblock_region *r;
7643
7644 for_each_mem_region(r) {
7645 start_pfn = clamp(memblock_region_memory_base_pfn(r),
7646 zone_start_pfn, zone_end_pfn);
7647 end_pfn = clamp(memblock_region_memory_end_pfn(r),
7648 zone_start_pfn, zone_end_pfn);
7649
7650 if (zone_type == ZONE_MOVABLE &&
7651 memblock_is_mirror(r))
7652 nr_absent += end_pfn - start_pfn;
7653
7654 if (zone_type == ZONE_NORMAL &&
7655 !memblock_is_mirror(r))
7656 nr_absent += end_pfn - start_pfn;
7657 }
7658 }
7659
7660 return nr_absent;
7661 }
7662
7663 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7664 unsigned long node_start_pfn,
7665 unsigned long node_end_pfn)
7666 {
7667 unsigned long realtotalpages = 0, totalpages = 0;
7668 enum zone_type i;
7669
7670 for (i = 0; i < MAX_NR_ZONES; i++) {
7671 struct zone *zone = pgdat->node_zones + i;
7672 unsigned long zone_start_pfn, zone_end_pfn;
7673 unsigned long spanned, absent;
7674 unsigned long size, real_size;
7675
7676 spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7677 node_start_pfn,
7678 node_end_pfn,
7679 &zone_start_pfn,
7680 &zone_end_pfn);
7681 absent = zone_absent_pages_in_node(pgdat->node_id, i,
7682 node_start_pfn,
7683 node_end_pfn);
7684
7685 size = spanned;
7686 real_size = size - absent;
7687
7688 if (size)
7689 zone->zone_start_pfn = zone_start_pfn;
7690 else
7691 zone->zone_start_pfn = 0;
7692 zone->spanned_pages = size;
7693 zone->present_pages = real_size;
7694 #if defined(CONFIG_MEMORY_HOTPLUG)
7695 zone->present_early_pages = real_size;
7696 #endif
7697
7698 totalpages += size;
7699 realtotalpages += real_size;
7700 }
7701
7702 pgdat->node_spanned_pages = totalpages;
7703 pgdat->node_present_pages = realtotalpages;
7704 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
7705 }
7706
7707 #ifndef CONFIG_SPARSEMEM
7708 /*
7709  * Calculate the size of the zone->blockflags bitmap, rounded up to an unsigned long.
7710  * Start by making sure zonesize is a multiple of pageblock_order by rounding
7711  * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round the
7712  * result up to the nearest unsigned long (in bits), and finally return the
7713  * size in bytes.
7714 */
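/*
 * Worked example (illustrative, assuming 4KiB pages, pageblock_order = 9 and
 * NR_PAGEBLOCK_BITS = 4): a 1GiB zone has 262144 pages = 512 pageblocks,
 * needing 512 * 4 = 2048 bits, i.e. 256 bytes of pageblock flags.
 */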
7715 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7716 {
7717 unsigned long usemapsize;
7718
7719 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7720 usemapsize = roundup(zonesize, pageblock_nr_pages);
7721 usemapsize = usemapsize >> pageblock_order;
7722 usemapsize *= NR_PAGEBLOCK_BITS;
7723 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7724
7725 return usemapsize / 8;
7726 }
7727
7728 static void __ref setup_usemap(struct zone *zone)
7729 {
7730 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7731 zone->spanned_pages);
7732 zone->pageblock_flags = NULL;
7733 if (usemapsize) {
7734 zone->pageblock_flags =
7735 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7736 zone_to_nid(zone));
7737 if (!zone->pageblock_flags)
7738 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7739 usemapsize, zone->name, zone_to_nid(zone));
7740 }
7741 }
7742 #else
7743 static inline void setup_usemap(struct zone *zone) {}
7744 #endif /* CONFIG_SPARSEMEM */
7745
7746 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7747
7748 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7749 void __init set_pageblock_order(void)
7750 {
7751 unsigned int order;
7752
7753 	/* Check that pageblock_order has not already been set up */
7754 if (pageblock_order)
7755 return;
7756
7757 if (HPAGE_SHIFT > PAGE_SHIFT)
7758 order = HUGETLB_PAGE_ORDER;
7759 else
7760 order = MAX_ORDER - 1;
7761
7762 /*
7763 * Assume the largest contiguous order of interest is a huge page.
7764 * This value may be variable depending on boot parameters on IA64 and
7765 * powerpc.
7766 */
7767 pageblock_order = order;
7768 }
7769 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7770
7771 /*
7772 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7773 * is unused as pageblock_order is set at compile-time. See
7774 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7775 * the kernel config
7776 */
7777 void __init set_pageblock_order(void)
7778 {
7779 }
7780
7781 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7782
7783 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7784 unsigned long present_pages)
7785 {
7786 unsigned long pages = spanned_pages;
7787
7788 /*
7789 * Provide a more accurate estimation if there are holes within
7790 * the zone and SPARSEMEM is in use. If there are holes within the
7791 * zone, each populated memory region may cost us one or two extra
7792 * memmap pages due to alignment because memmap pages for each
7793 	 * populated region may not be naturally aligned on a page boundary.
7794 * So the (present_pages >> 4) heuristic is a tradeoff for that.
7795 */
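	/*
	 * Rough illustration (assuming a 64-byte struct page and 4KiB pages):
	 * a 1GiB zone of 262144 pages needs about 16MiB of memmap, i.e. this
	 * helper returns roughly 4096 pages.
	 */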
7796 if (spanned_pages > present_pages + (present_pages >> 4) &&
7797 IS_ENABLED(CONFIG_SPARSEMEM))
7798 pages = present_pages;
7799
7800 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7801 }
7802
7803 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
7804 static void pgdat_init_split_queue(struct pglist_data *pgdat)
7805 {
7806 struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7807
7808 spin_lock_init(&ds_queue->split_queue_lock);
7809 INIT_LIST_HEAD(&ds_queue->split_queue);
7810 ds_queue->split_queue_len = 0;
7811 }
7812 #else
7813 static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7814 #endif
7815
7816 #ifdef CONFIG_COMPACTION
7817 static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7818 {
7819 init_waitqueue_head(&pgdat->kcompactd_wait);
7820 }
7821 #else
7822 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7823 #endif
7824
7825 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7826 {
7827 pgdat_resize_init(pgdat);
7828
7829 pgdat_init_split_queue(pgdat);
7830 pgdat_init_kcompactd(pgdat);
7831
7832 init_waitqueue_head(&pgdat->kswapd_wait);
7833 init_waitqueue_head(&pgdat->pfmemalloc_wait);
7834
7835 pgdat_page_ext_init(pgdat);
7836 lruvec_init(&pgdat->__lruvec);
7837 }
7838
7839 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7840 unsigned long remaining_pages)
7841 {
7842 atomic_long_set(&zone->managed_pages, remaining_pages);
7843 zone_set_nid(zone, nid);
7844 zone->name = zone_names[idx];
7845 zone->zone_pgdat = NODE_DATA(nid);
7846 spin_lock_init(&zone->lock);
7847 zone_seqlock_init(zone);
7848 zone_pcp_init(zone);
7849 }
7850
7851 /*
7852 * Set up the zone data structures
7853 * - init pgdat internals
7854 * - init all zones belonging to this node
7855 *
7856 * NOTE: this function is only called during memory hotplug
7857 */
7858 #ifdef CONFIG_MEMORY_HOTPLUG
7859 void __ref free_area_init_core_hotplug(int nid)
7860 {
7861 enum zone_type z;
7862 pg_data_t *pgdat = NODE_DATA(nid);
7863
7864 pgdat_init_internals(pgdat);
7865 for (z = 0; z < MAX_NR_ZONES; z++)
7866 zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7867 }
7868 #endif
7869
7870 /*
7871 * Set up the zone data structures:
7872 * - mark all pages reserved
7873 * - mark all memory queues empty
7874 * - clear the memory bitmaps
7875 *
7876 * NOTE: pgdat should get zeroed by caller.
7877 * NOTE: this function is only called during early init.
7878 */
7879 static void __init free_area_init_core(struct pglist_data *pgdat)
7880 {
7881 enum zone_type j;
7882 int nid = pgdat->node_id;
7883
7884 pgdat_init_internals(pgdat);
7885 pgdat->per_cpu_nodestats = &boot_nodestats;
7886
7887 for (j = 0; j < MAX_NR_ZONES; j++) {
7888 struct zone *zone = pgdat->node_zones + j;
7889 unsigned long size, freesize, memmap_pages;
7890
7891 size = zone->spanned_pages;
7892 freesize = zone->present_pages;
7893
7894 /*
7895 * Adjust freesize so that it accounts for how much memory
7896 * is used by this zone for memmap. This affects the watermark
7897 * and per-cpu initialisations
7898 */
7899 memmap_pages = calc_memmap_size(size, freesize);
7900 if (!is_highmem_idx(j)) {
7901 if (freesize >= memmap_pages) {
7902 freesize -= memmap_pages;
7903 if (memmap_pages)
7904 pr_debug(" %s zone: %lu pages used for memmap\n",
7905 zone_names[j], memmap_pages);
7906 } else
7907 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n",
7908 zone_names[j], memmap_pages, freesize);
7909 }
7910
7911 /* Account for reserved pages */
7912 if (j == 0 && freesize > dma_reserve) {
7913 freesize -= dma_reserve;
7914 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
7915 }
7916
7917 if (!is_highmem_idx(j))
7918 nr_kernel_pages += freesize;
7919 /* Charge for highmem memmap if there are enough kernel pages */
7920 else if (nr_kernel_pages > memmap_pages * 2)
7921 nr_kernel_pages -= memmap_pages;
7922 nr_all_pages += freesize;
7923
7924 /*
7925 		 * Set an approximate value for lowmem here; it will be adjusted
7926 * when the bootmem allocator frees pages into the buddy system.
7927 * And all highmem pages will be managed by the buddy system.
7928 */
7929 zone_init_internals(zone, j, nid, freesize);
7930
7931 if (!size)
7932 continue;
7933
7934 set_pageblock_order();
7935 setup_usemap(zone);
7936 init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7937 }
7938 }
7939
7940 #ifdef CONFIG_FLATMEM
7941 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
7942 {
7943 unsigned long __maybe_unused start = 0;
7944 unsigned long __maybe_unused offset = 0;
7945
7946 /* Skip empty nodes */
7947 if (!pgdat->node_spanned_pages)
7948 return;
7949
7950 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7951 offset = pgdat->node_start_pfn - start;
7952 /* ia64 gets its own node_mem_map, before this, without bootmem */
7953 if (!pgdat->node_mem_map) {
7954 unsigned long size, end;
7955 struct page *map;
7956
7957 /*
7958 * The zone's endpoints aren't required to be MAX_ORDER
7959 			 * aligned, but the node_mem_map endpoints must be, in order
7960 * for the buddy allocator to function correctly.
7961 */
7962 end = pgdat_end_pfn(pgdat);
7963 end = ALIGN(end, MAX_ORDER_NR_PAGES);
7964 size = (end - start) * sizeof(struct page);
7965 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
7966 pgdat->node_id, false);
7967 if (!map)
7968 panic("Failed to allocate %ld bytes for node %d memory map\n",
7969 size, pgdat->node_id);
7970 pgdat->node_mem_map = map + offset;
7971 }
7972 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7973 __func__, pgdat->node_id, (unsigned long)pgdat,
7974 (unsigned long)pgdat->node_mem_map);
7975 #ifndef CONFIG_NUMA
7976 /*
7977 * With no DISCONTIG, the global mem_map is just set as node 0's
7978 */
7979 if (pgdat == NODE_DATA(0)) {
7980 mem_map = NODE_DATA(0)->node_mem_map;
7981 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7982 mem_map -= offset;
7983 }
7984 #endif
7985 }
7986 #else
7987 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
7988 #endif /* CONFIG_FLATMEM */
7989
7990 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7991 static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7992 {
7993 pgdat->first_deferred_pfn = ULONG_MAX;
7994 }
7995 #else
7996 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7997 #endif
7998
7999 static void __init free_area_init_node(int nid)
8000 {
8001 pg_data_t *pgdat = NODE_DATA(nid);
8002 unsigned long start_pfn = 0;
8003 unsigned long end_pfn = 0;
8004
8005 /* pg_data_t should be reset to zero when it's allocated */
8006 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
8007
8008 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8009
8010 pgdat->node_id = nid;
8011 pgdat->node_start_pfn = start_pfn;
8012 pgdat->per_cpu_nodestats = NULL;
8013
8014 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
8015 (u64)start_pfn << PAGE_SHIFT,
8016 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
8017 calculate_node_totalpages(pgdat, start_pfn, end_pfn);
8018
8019 alloc_node_mem_map(pgdat);
8020 pgdat_set_deferred_range(pgdat);
8021
8022 free_area_init_core(pgdat);
8023 }
8024
8025 void __init free_area_init_memoryless_node(int nid)
8026 {
8027 free_area_init_node(nid);
8028 }
8029
8030 #if MAX_NUMNODES > 1
8031 /*
8032 * Figure out the number of possible node ids.
8033 */
8034 void __init setup_nr_node_ids(void)
8035 {
8036 unsigned int highest;
8037
8038 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
8039 nr_node_ids = highest + 1;
8040 }
8041 #endif
8042
8043 /**
8044 * node_map_pfn_alignment - determine the maximum internode alignment
8045 *
8046 * This function should be called after node map is populated and sorted.
8047 * It calculates the maximum power of two alignment which can distinguish
8048 * all the nodes.
8049 *
8050 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
8051 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
8052  * nodes are shifted by 256MiB, the result is 256MiB. Note that if only the last node is
8053 * shifted, 1GiB is enough and this function will indicate so.
8054 *
8055 * This is used to test whether pfn -> nid mapping of the chosen memory
8056 * model has fine enough granularity to avoid incorrect mapping for the
8057 * populated node map.
8058 *
8059 * Return: the determined alignment in pfn's. 0 if there is no alignment
8060 * requirement (single node).
8061 */
8062 unsigned long __init node_map_pfn_alignment(void)
8063 {
8064 unsigned long accl_mask = 0, last_end = 0;
8065 unsigned long start, end, mask;
8066 int last_nid = NUMA_NO_NODE;
8067 int i, nid;
8068
8069 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
8070 if (!start || last_nid < 0 || last_nid == nid) {
8071 last_nid = nid;
8072 last_end = end;
8073 continue;
8074 }
8075
8076 /*
8077 * Start with a mask granular enough to pin-point to the
8078 * start pfn and tick off bits one-by-one until it becomes
8079 * too coarse to separate the current node from the last.
8080 */
8081 mask = ~((1 << __ffs(start)) - 1);
8082 while (mask && last_end <= (start & (mask << 1)))
8083 mask <<= 1;
8084
8085 /* accumulate all internode masks */
8086 accl_mask |= mask;
8087 }
8088
8089 /* convert mask to number of pages */
8090 return ~accl_mask + 1;
8091 }
8092
8093 /**
8094 * find_min_pfn_with_active_regions - Find the minimum PFN registered
8095 *
8096 * Return: the minimum PFN based on information provided via
8097 * memblock_set_node().
8098 */
8099 unsigned long __init find_min_pfn_with_active_regions(void)
8100 {
8101 return PHYS_PFN(memblock_start_of_DRAM());
8102 }
8103
8104 /*
8105 * early_calculate_totalpages()
8106 * Sum pages in active regions for movable zone.
8107 * Populate N_MEMORY for calculating usable_nodes.
8108 */
8109 static unsigned long __init early_calculate_totalpages(void)
8110 {
8111 unsigned long totalpages = 0;
8112 unsigned long start_pfn, end_pfn;
8113 int i, nid;
8114
8115 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8116 unsigned long pages = end_pfn - start_pfn;
8117
8118 totalpages += pages;
8119 if (pages)
8120 node_set_state(nid, N_MEMORY);
8121 }
8122 return totalpages;
8123 }
8124
8125 /*
8126  * Find the PFN at which the Movable zone begins in each node. Kernel memory
8127  * is spread evenly between nodes as long as the nodes have enough
8128  * memory. When they don't, some nodes will have more kernelcore than
8129  * others.
8130 */
8131 static void __init find_zone_movable_pfns_for_nodes(void)
8132 {
8133 int i, nid;
8134 unsigned long usable_startpfn;
8135 unsigned long kernelcore_node, kernelcore_remaining;
8136 	/* save the state before borrowing the nodemask */
8137 nodemask_t saved_node_state = node_states[N_MEMORY];
8138 unsigned long totalpages = early_calculate_totalpages();
8139 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
8140 struct memblock_region *r;
8141
8142 /* Need to find movable_zone earlier when movable_node is specified. */
8143 find_usable_zone_for_movable();
8144
8145 /*
8146 * If movable_node is specified, ignore kernelcore and movablecore
8147 * options.
8148 */
8149 if (movable_node_is_enabled()) {
8150 for_each_mem_region(r) {
8151 if (!memblock_is_hotpluggable(r))
8152 continue;
8153
8154 nid = memblock_get_region_node(r);
8155
8156 usable_startpfn = PFN_DOWN(r->base);
8157 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8158 min(usable_startpfn, zone_movable_pfn[nid]) :
8159 usable_startpfn;
8160 }
8161
8162 goto out2;
8163 }
8164
8165 /*
8166 * If kernelcore=mirror is specified, ignore movablecore option
8167 */
8168 if (mirrored_kernelcore) {
8169 bool mem_below_4gb_not_mirrored = false;
8170
8171 for_each_mem_region(r) {
8172 if (memblock_is_mirror(r))
8173 continue;
8174
8175 nid = memblock_get_region_node(r);
8176
8177 usable_startpfn = memblock_region_memory_base_pfn(r);
8178
8179 if (usable_startpfn < 0x100000) {
8180 mem_below_4gb_not_mirrored = true;
8181 continue;
8182 }
8183
8184 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
8185 min(usable_startpfn, zone_movable_pfn[nid]) :
8186 usable_startpfn;
8187 }
8188
8189 if (mem_below_4gb_not_mirrored)
8190 pr_warn("This configuration results in unmirrored kernel memory.\n");
8191
8192 goto out2;
8193 }
8194
8195 /*
8196 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
8197 * amount of necessary memory.
8198 */
8199 if (required_kernelcore_percent)
8200 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
8201 10000UL;
8202 if (required_movablecore_percent)
8203 required_movablecore = (totalpages * 100 * required_movablecore_percent) /
8204 10000UL;
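	/*
	 * Example (illustrative): with 4,194,304 total pages (16GiB at 4KiB
	 * pages) and kernelcore=25%, required_kernelcore becomes
	 * (4194304 * 100 * 25) / 10000 = 1,048,576 pages, i.e. 4GiB.
	 */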
8205
8206 /*
8207 	 * If movablecore= was specified, calculate the size of kernelcore
8208 	 * it corresponds to, so that memory usable for
8209 * any allocation type is evenly spread. If both kernelcore
8210 * and movablecore are specified, then the value of kernelcore
8211 * will be used for required_kernelcore if it's greater than
8212 * what movablecore would have allowed.
8213 */
8214 if (required_movablecore) {
8215 unsigned long corepages;
8216
8217 /*
8218 * Round-up so that ZONE_MOVABLE is at least as large as what
8219 * was requested by the user
8220 */
8221 required_movablecore =
8222 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
8223 required_movablecore = min(totalpages, required_movablecore);
8224 corepages = totalpages - required_movablecore;
8225
8226 required_kernelcore = max(required_kernelcore, corepages);
8227 }
8228
8229 /*
8230 * If kernelcore was not specified or kernelcore size is larger
8231 * than totalpages, there is no ZONE_MOVABLE.
8232 */
8233 if (!required_kernelcore || required_kernelcore >= totalpages)
8234 goto out;
8235
8236 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
8237 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
8238
8239 restart:
8240 /* Spread kernelcore memory as evenly as possible throughout nodes */
8241 kernelcore_node = required_kernelcore / usable_nodes;
8242 for_each_node_state(nid, N_MEMORY) {
8243 unsigned long start_pfn, end_pfn;
8244
8245 /*
8246 * Recalculate kernelcore_node if the division per node
8247 * now exceeds what is necessary to satisfy the requested
8248 * amount of memory for the kernel
8249 */
8250 if (required_kernelcore < kernelcore_node)
8251 kernelcore_node = required_kernelcore / usable_nodes;
8252
8253 /*
8254 * As the map is walked, we track how much memory is usable
8255 * by the kernel using kernelcore_remaining. When it is
8256 * 0, the rest of the node is usable by ZONE_MOVABLE
8257 */
8258 kernelcore_remaining = kernelcore_node;
8259
8260 /* Go through each range of PFNs within this node */
8261 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
8262 unsigned long size_pages;
8263
8264 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
8265 if (start_pfn >= end_pfn)
8266 continue;
8267
8268 /* Account for what is only usable for kernelcore */
8269 if (start_pfn < usable_startpfn) {
8270 unsigned long kernel_pages;
8271 kernel_pages = min(end_pfn, usable_startpfn)
8272 - start_pfn;
8273
8274 kernelcore_remaining -= min(kernel_pages,
8275 kernelcore_remaining);
8276 required_kernelcore -= min(kernel_pages,
8277 required_kernelcore);
8278
8279 /* Continue if range is now fully accounted */
8280 if (end_pfn <= usable_startpfn) {
8281
8282 /*
8283 * Push zone_movable_pfn to the end so
8284 * that if we have to rebalance
8285 * kernelcore across nodes, we will
8286 * not double account here
8287 */
8288 zone_movable_pfn[nid] = end_pfn;
8289 continue;
8290 }
8291 start_pfn = usable_startpfn;
8292 }
8293
8294 /*
8295 * The usable PFN range for ZONE_MOVABLE is from
8296 * start_pfn->end_pfn. Calculate size_pages as the
8297 * number of pages used as kernelcore
8298 */
8299 size_pages = end_pfn - start_pfn;
8300 if (size_pages > kernelcore_remaining)
8301 size_pages = kernelcore_remaining;
8302 zone_movable_pfn[nid] = start_pfn + size_pages;
8303
8304 /*
8305 * Some kernelcore has been met, update counts and
8306 * break if the kernelcore for this node has been
8307 * satisfied
8308 */
8309 required_kernelcore -= min(required_kernelcore,
8310 size_pages);
8311 kernelcore_remaining -= size_pages;
8312 if (!kernelcore_remaining)
8313 break;
8314 }
8315 }
8316
8317 /*
8318 * If there is still required_kernelcore, we do another pass with one
8319 * less node in the count. This will push zone_movable_pfn[nid] further
8320 * along on the nodes that still have memory until kernelcore is
8321 * satisfied
8322 */
8323 usable_nodes--;
8324 if (usable_nodes && required_kernelcore > usable_nodes)
8325 goto restart;
8326
8327 out2:
8328 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
8329 for (nid = 0; nid < MAX_NUMNODES; nid++) {
8330 unsigned long start_pfn, end_pfn;
8331
8332 zone_movable_pfn[nid] =
8333 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
8334
8335 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
8336 if (zone_movable_pfn[nid] >= end_pfn)
8337 zone_movable_pfn[nid] = 0;
8338 }
8339
8340 out:
8341 /* restore the node_state */
8342 node_states[N_MEMORY] = saved_node_state;
8343 }
8344
8345 /* Any regular or high memory on that node? */
check_for_memory(pg_data_t * pgdat,int nid)8346 static void check_for_memory(pg_data_t *pgdat, int nid)
8347 {
8348 enum zone_type zone_type;
8349
8350 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
8351 struct zone *zone = &pgdat->node_zones[zone_type];
8352 if (populated_zone(zone)) {
8353 if (IS_ENABLED(CONFIG_HIGHMEM))
8354 node_set_state(nid, N_HIGH_MEMORY);
8355 if (zone_type <= ZONE_NORMAL)
8356 node_set_state(nid, N_NORMAL_MEMORY);
8357 break;
8358 }
8359 }
8360 }
8361
8362 /*
8363 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
8364 * such cases we allow max_zone_pfn to be sorted in descending order.
8365 */
arch_has_descending_max_zone_pfns(void)8366 bool __weak arch_has_descending_max_zone_pfns(void)
8367 {
8368 return false;
8369 }
8370
8371 /**
8372 * free_area_init - Initialise all pg_data_t and zone data
8373 * @max_zone_pfn: an array of max PFNs for each zone
8374 *
8375 * This will call free_area_init_node() for each active node in the system.
8376 * Using the page ranges provided by memblock_set_node(), the size of each
8377 * zone in each node and their holes are calculated. If the maximum PFNs
8378 * of two adjacent zones match, it is assumed that the zone is empty.
8379 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
8380 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
8381 * starts where the previous one ended. For example, ZONE_DMA32 starts
8382 * at arch_max_dma_pfn.
8383 */
free_area_init(unsigned long * max_zone_pfn)8384 void __init free_area_init(unsigned long *max_zone_pfn)
8385 {
8386 unsigned long start_pfn, end_pfn;
8387 int i, nid, zone;
8388 bool descending;
8389
8390 /* Record where the zone boundaries are */
8391 memset(arch_zone_lowest_possible_pfn, 0,
8392 sizeof(arch_zone_lowest_possible_pfn));
8393 memset(arch_zone_highest_possible_pfn, 0,
8394 sizeof(arch_zone_highest_possible_pfn));
8395
8396 start_pfn = find_min_pfn_with_active_regions();
8397 descending = arch_has_descending_max_zone_pfns();
8398
8399 for (i = 0; i < MAX_NR_ZONES; i++) {
8400 if (descending)
8401 zone = MAX_NR_ZONES - i - 1;
8402 else
8403 zone = i;
8404
8405 if (zone == ZONE_MOVABLE)
8406 continue;
8407
8408 end_pfn = max(max_zone_pfn[zone], start_pfn);
8409 arch_zone_lowest_possible_pfn[zone] = start_pfn;
8410 arch_zone_highest_possible_pfn[zone] = end_pfn;
8411
8412 start_pfn = end_pfn;
8413 }
8414
8415 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
8416 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
8417 find_zone_movable_pfns_for_nodes();
8418
8419 /* Print out the zone ranges */
8420 pr_info("Zone ranges:\n");
8421 for (i = 0; i < MAX_NR_ZONES; i++) {
8422 if (i == ZONE_MOVABLE)
8423 continue;
8424 pr_info(" %-8s ", zone_names[i]);
8425 if (arch_zone_lowest_possible_pfn[i] ==
8426 arch_zone_highest_possible_pfn[i])
8427 pr_cont("empty\n");
8428 else
8429 pr_cont("[mem %#018Lx-%#018Lx]\n",
8430 (u64)arch_zone_lowest_possible_pfn[i]
8431 << PAGE_SHIFT,
8432 ((u64)arch_zone_highest_possible_pfn[i]
8433 << PAGE_SHIFT) - 1);
8434 }
8435
8436 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
8437 pr_info("Movable zone start for each node\n");
8438 for (i = 0; i < MAX_NUMNODES; i++) {
8439 if (zone_movable_pfn[i])
8440 pr_info(" Node %d: %#018Lx\n", i,
8441 (u64)zone_movable_pfn[i] << PAGE_SHIFT);
8442 }
8443
8444 /*
8445 * Print out the early node map, and initialize the
8446 * subsection-map relative to active online memory ranges to
8447 * enable future "sub-section" extensions of the memory map.
8448 */
8449 pr_info("Early memory node ranges\n");
8450 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
8451 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
8452 (u64)start_pfn << PAGE_SHIFT,
8453 ((u64)end_pfn << PAGE_SHIFT) - 1);
8454 subsection_map_init(start_pfn, end_pfn - start_pfn);
8455 }
8456
8457 /* Initialise every node */
8458 mminit_verify_pageflags_layout();
8459 setup_nr_node_ids();
8460 for_each_online_node(nid) {
8461 pg_data_t *pgdat = NODE_DATA(nid);
8462 free_area_init_node(nid);
8463
8464 /* Any memory on that node */
8465 if (pgdat->node_present_pages)
8466 node_set_state(nid, N_MEMORY);
8467 check_for_memory(pgdat, nid);
8468 }
8469
8470 memmap_init();
8471 }
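/*
 * Illustrative sketch only (not part of this file): architecture setup
 * code typically fills a max_zone_pfns[] array and hands it to
 * free_area_init(). The zones populated and the max_pfn/MAX_DMA32_PFN
 * symbols below are assumptions for the example and vary by arch.
 *
 *	static void __init example_zone_sizes_init(void)
 *	{
 *		unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *		max_zone_pfns[ZONE_DMA32]  = min(MAX_DMA32_PFN, max_pfn);
 *		max_zone_pfns[ZONE_NORMAL] = max_pfn;
 *		free_area_init(max_zone_pfns);
 *	}
 */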
8472
cmdline_parse_core(char * p,unsigned long * core,unsigned long * percent)8473 static int __init cmdline_parse_core(char *p, unsigned long *core,
8474 unsigned long *percent)
8475 {
8476 unsigned long long coremem;
8477 char *endptr;
8478
8479 if (!p)
8480 return -EINVAL;
8481
8482 /* Value may be a percentage of total memory, otherwise bytes */
8483 coremem = simple_strtoull(p, &endptr, 0);
8484 if (*endptr == '%') {
8485 /* Paranoid check for percent values greater than 100 */
8486 WARN_ON(coremem > 100);
8487
8488 *percent = coremem;
8489 } else {
8490 coremem = memparse(p, &p);
8491 /* Paranoid check that UL is enough for the coremem value */
8492 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
8493
8494 *core = coremem >> PAGE_SHIFT;
8495 *percent = 0UL;
8496 }
8497 return 0;
8498 }
8499
8500 /*
8501 * kernelcore=size sets the amount of memory for use for allocations that
8502 * cannot be reclaimed or migrated.
8503 */
cmdline_parse_kernelcore(char * p)8504 static int __init cmdline_parse_kernelcore(char *p)
8505 {
8506 /* parse kernelcore=mirror */
8507 if (parse_option_str(p, "mirror")) {
8508 mirrored_kernelcore = true;
8509 return 0;
8510 }
8511
8512 return cmdline_parse_core(p, &required_kernelcore,
8513 &required_kernelcore_percent);
8514 }
8515
8516 /*
8517 * movablecore=size sets the amount of memory for use for allocations that
8518 * can be reclaimed or migrated.
8519 */
cmdline_parse_movablecore(char * p)8520 static int __init cmdline_parse_movablecore(char *p)
8521 {
8522 return cmdline_parse_core(p, &required_movablecore,
8523 &required_movablecore_percent);
8524 }
8525
8526 early_param("kernelcore", cmdline_parse_kernelcore);
8527 early_param("movablecore", cmdline_parse_movablecore);
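/*
 * Illustrative boot command lines (values are arbitrary examples):
 *
 *	kernelcore=512M		reserve 512 MiB for unmovable allocations
 *	kernelcore=30%		the same, as a percentage of total memory
 *	kernelcore=mirror	keep kernel allocations in mirrored memory
 *	movablecore=1G		request at least 1 GiB of ZONE_MOVABLE
 *
 * Plain sizes go through memparse() (K/M/G suffixes); percentages take
 * the '%' branch of cmdline_parse_core() above.
 */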
8528
adjust_managed_page_count(struct page * page,long count)8529 void adjust_managed_page_count(struct page *page, long count)
8530 {
8531 atomic_long_add(count, &page_zone(page)->managed_pages);
8532 totalram_pages_add(count);
8533 #ifdef CONFIG_HIGHMEM
8534 if (PageHighMem(page))
8535 totalhigh_pages_add(count);
8536 #endif
8537 }
8538 EXPORT_SYMBOL(adjust_managed_page_count);
8539
free_reserved_area(void * start,void * end,int poison,const char * s)8540 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
8541 {
8542 void *pos;
8543 unsigned long pages = 0;
8544
8545 start = (void *)PAGE_ALIGN((unsigned long)start);
8546 end = (void *)((unsigned long)end & PAGE_MASK);
8547 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
8548 struct page *page = virt_to_page(pos);
8549 void *direct_map_addr;
8550
8551 /*
8552 * 'direct_map_addr' might be different from 'pos'
8553 * because on some architectures virt_to_page()
8554 * works with aliases. Getting the direct map
8555 * address ensures that we get a _writeable_
8556 * alias for the memset().
8557 */
8558 direct_map_addr = page_address(page);
8559 /*
8560 * Perform a kasan-unchecked memset() since this memory
8561 * has not been initialized.
8562 */
8563 direct_map_addr = kasan_reset_tag(direct_map_addr);
8564 if ((unsigned int)poison <= 0xFF)
8565 memset(direct_map_addr, poison, PAGE_SIZE);
8566
8567 free_reserved_page(page);
8568 }
8569
8570 if (pages && s)
8571 pr_info("Freeing %s memory: %ldK\n",
8572 s, pages << (PAGE_SHIFT - 10));
8573
8574 return pages;
8575 }
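/*
 * Illustrative use (a sketch of how init-memory teardown paths commonly
 * call this helper; exact callers and strings vary by kernel version):
 *
 *	freed = free_reserved_area(&__init_begin, &__init_end,
 *				   POISON_FREE_INITMEM, "unused kernel");
 *
 * Passing a negative poison value (e.g. -1) skips the memset() above,
 * because (unsigned int)poison is then larger than 0xFF.
 */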
8576
mem_init_print_info(void)8577 void __init mem_init_print_info(void)
8578 {
8579 unsigned long physpages, codesize, datasize, rosize, bss_size;
8580 unsigned long init_code_size, init_data_size;
8581
8582 physpages = get_num_physpages();
8583 codesize = _etext - _stext;
8584 datasize = _edata - _sdata;
8585 rosize = __end_rodata - __start_rodata;
8586 bss_size = __bss_stop - __bss_start;
8587 init_data_size = __init_end - __init_begin;
8588 init_code_size = _einittext - _sinittext;
8589
8590 /*
8591 * Detect special cases and adjust section sizes accordingly:
8592 * 1) .init.* may be embedded into .data sections
8593 * 2) .init.text.* may be out of [__init_begin, __init_end],
8594 * please refer to arch/tile/kernel/vmlinux.lds.S.
8595 * 3) .rodata.* may be embedded into .text or .data sections.
8596 */
8597 #define adj_init_size(start, end, size, pos, adj) \
8598 do { \
8599 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
8600 size -= adj; \
8601 } while (0)
8602
8603 adj_init_size(__init_begin, __init_end, init_data_size,
8604 _sinittext, init_code_size);
8605 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
8606 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
8607 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
8608 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
8609
8610 #undef adj_init_size
8611
8612 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
8613 #ifdef CONFIG_HIGHMEM
8614 ", %luK highmem"
8615 #endif
8616 ")\n",
8617 nr_free_pages() << (PAGE_SHIFT - 10),
8618 physpages << (PAGE_SHIFT - 10),
8619 codesize >> 10, datasize >> 10, rosize >> 10,
8620 (init_data_size + init_code_size) >> 10, bss_size >> 10,
8621 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
8622 totalcma_pages << (PAGE_SHIFT - 10)
8623 #ifdef CONFIG_HIGHMEM
8624 , totalhigh_pages() << (PAGE_SHIFT - 10)
8625 #endif
8626 );
8627 }
8628
8629 /**
8630 * set_dma_reserve - set the specified number of pages reserved in the first zone
8631 * @new_dma_reserve: The number of pages to mark reserved
8632 *
8633 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
8634 * In the DMA zone, a significant percentage may be consumed by kernel image
8635 * and other unfreeable allocations which can skew the watermarks badly. This
8636 * function may optionally be used to account for unfreeable pages in the
8637 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8638 * smaller per-cpu batchsize.
8639 */
set_dma_reserve(unsigned long new_dma_reserve)8640 void __init set_dma_reserve(unsigned long new_dma_reserve)
8641 {
8642 dma_reserve = new_dma_reserve;
8643 }
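/*
 * Hypothetical example: if early arch setup knows that 'nr_unfreeable'
 * pages of the first (DMA) zone are taken by the kernel image and other
 * permanent allocations, it could call:
 *
 *	set_dma_reserve(nr_unfreeable);
 *
 * 'nr_unfreeable' is an assumed name used only for illustration.
 */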
8644
page_alloc_cpu_dead(unsigned int cpu)8645 static int page_alloc_cpu_dead(unsigned int cpu)
8646 {
8647 struct zone *zone;
8648
8649 lru_add_drain_cpu(cpu);
8650 drain_pages(cpu);
8651
8652 /*
8653 * Spill the event counters of the dead processor
8654 * into the current processor's event counters.
8655 * This artificially elevates the count of the current
8656 * processor.
8657 */
8658 vm_events_fold_cpu(cpu);
8659
8660 /*
8661 * Zero the differential counters of the dead processor
8662 * so that the vm statistics are consistent.
8663 *
8664 * This is only okay since the processor is dead and cannot
8665 * race with what we are doing.
8666 */
8667 cpu_vm_stats_fold(cpu);
8668
8669 for_each_populated_zone(zone)
8670 zone_pcp_update(zone, 0);
8671
8672 return 0;
8673 }
8674
page_alloc_cpu_online(unsigned int cpu)8675 static int page_alloc_cpu_online(unsigned int cpu)
8676 {
8677 struct zone *zone;
8678
8679 for_each_populated_zone(zone)
8680 zone_pcp_update(zone, 1);
8681 return 0;
8682 }
8683
8684 #ifdef CONFIG_NUMA
8685 int hashdist = HASHDIST_DEFAULT;
8686
set_hashdist(char * str)8687 static int __init set_hashdist(char *str)
8688 {
8689 if (!str)
8690 return 0;
8691 hashdist = simple_strtoul(str, &str, 0);
8692 return 1;
8693 }
8694 __setup("hashdist=", set_hashdist);
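/*
 * Example: booting with "hashdist=0" forces alloc_large_system_hash()
 * below to use the linear (memblock/alloc_pages_exact) path instead of
 * the NUMA-spread vmalloc path it would otherwise prefer.
 */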
8695 #endif
8696
page_alloc_init(void)8697 void __init page_alloc_init(void)
8698 {
8699 int ret;
8700
8701 #ifdef CONFIG_NUMA
8702 if (num_node_state(N_MEMORY) == 1)
8703 hashdist = 0;
8704 #endif
8705
8706 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
8707 "mm/page_alloc:pcp",
8708 page_alloc_cpu_online,
8709 page_alloc_cpu_dead);
8710 WARN_ON(ret < 0);
8711 }
8712
8713 /*
8714 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8715 * or min_free_kbytes changes.
8716 */
calculate_totalreserve_pages(void)8717 static void calculate_totalreserve_pages(void)
8718 {
8719 struct pglist_data *pgdat;
8720 unsigned long reserve_pages = 0;
8721 enum zone_type i, j;
8722
8723 for_each_online_pgdat(pgdat) {
8724
8725 pgdat->totalreserve_pages = 0;
8726
8727 for (i = 0; i < MAX_NR_ZONES; i++) {
8728 struct zone *zone = pgdat->node_zones + i;
8729 long max = 0;
8730 unsigned long managed_pages = zone_managed_pages(zone);
8731
8732 /* Find valid and maximum lowmem_reserve in the zone */
8733 for (j = i; j < MAX_NR_ZONES; j++) {
8734 if (zone->lowmem_reserve[j] > max)
8735 max = zone->lowmem_reserve[j];
8736 }
8737
8738 /* we treat the high watermark as reserved pages. */
8739 max += high_wmark_pages(zone);
8740
8741 if (max > managed_pages)
8742 max = managed_pages;
8743
8744 pgdat->totalreserve_pages += max;
8745
8746 reserve_pages += max;
8747 }
8748 }
8749 totalreserve_pages = reserve_pages;
8750 }
8751
8752 /*
8753 * setup_per_zone_lowmem_reserve - called whenever
8754 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
8755 * has a correct pages reserved value, so an adequate number of
8756 * pages are left in the zone after a successful __alloc_pages().
8757 */
setup_per_zone_lowmem_reserve(void)8758 static void setup_per_zone_lowmem_reserve(void)
8759 {
8760 struct pglist_data *pgdat;
8761 enum zone_type i, j;
8762
8763 for_each_online_pgdat(pgdat) {
8764 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8765 struct zone *zone = &pgdat->node_zones[i];
8766 int ratio = sysctl_lowmem_reserve_ratio[i];
8767 bool clear = !ratio || !zone_managed_pages(zone);
8768 unsigned long managed_pages = 0;
8769
8770 for (j = i + 1; j < MAX_NR_ZONES; j++) {
8771 struct zone *upper_zone = &pgdat->node_zones[j];
8772
8773 managed_pages += zone_managed_pages(upper_zone);
8774
8775 if (clear)
8776 zone->lowmem_reserve[j] = 0;
8777 else
8778 zone->lowmem_reserve[j] = managed_pages / ratio;
8779 }
8780 }
8781 }
8782
8783 /* update totalreserve_pages */
8784 calculate_totalreserve_pages();
8785 }
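/*
 * Worked example (numbers chosen purely for illustration): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256 and a ZONE_NORMAL above
 * it managing 1,000,000 pages, ZONE_DMA ends up with
 * lowmem_reserve[ZONE_NORMAL] = 1000000 / 256 ~= 3906 pages that a
 * ZONE_NORMAL-capable allocation must leave free in ZONE_DMA.
 */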
8786
__setup_per_zone_wmarks(void)8787 static void __setup_per_zone_wmarks(void)
8788 {
8789 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8790 unsigned long lowmem_pages = 0;
8791 struct zone *zone;
8792 unsigned long flags;
8793
8794 /* Calculate total number of !ZONE_HIGHMEM pages */
8795 for_each_zone(zone) {
8796 if (!is_highmem(zone))
8797 lowmem_pages += zone_managed_pages(zone);
8798 }
8799
8800 for_each_zone(zone) {
8801 u64 tmp;
8802
8803 spin_lock_irqsave(&zone->lock, flags);
8804 tmp = (u64)pages_min * zone_managed_pages(zone);
8805 do_div(tmp, lowmem_pages);
8806 if (is_highmem(zone)) {
8807 /*
8808 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8809 * need highmem pages, so cap pages_min to a small
8810 * value here.
8811 *
8812 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8813 * deltas control async page reclaim, and so should
8814 * not be capped for highmem.
8815 */
8816 unsigned long min_pages;
8817
8818 min_pages = zone_managed_pages(zone) / 1024;
8819 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8820 zone->_watermark[WMARK_MIN] = min_pages;
8821 } else {
8822 /*
8823 * If it's a lowmem zone, reserve a number of pages
8824 * proportionate to the zone's size.
8825 */
8826 zone->_watermark[WMARK_MIN] = tmp;
8827 }
8828
8829 /*
8830 * Set the kswapd watermarks distance according to the
8831 * scale factor in proportion to available memory, but
8832 * ensure a minimum size on small systems.
8833 */
8834 tmp = max_t(u64, tmp >> 2,
8835 mult_frac(zone_managed_pages(zone),
8836 watermark_scale_factor, 10000));
8837
8838 zone->watermark_boost = 0;
8839 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
8840 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
8841
8842 spin_unlock_irqrestore(&zone->lock, flags);
8843 }
8844
8845 /* update totalreserve_pages */
8846 calculate_totalreserve_pages();
8847 }
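/*
 * Worked example (illustrative, single lowmem zone): with 1 GiB of
 * managed memory (262144 4K pages) and min_free_kbytes = 4096,
 * pages_min = 1024 and WMARK_MIN = 1024 pages. With the default
 * watermark_scale_factor of 10, tmp = max(1024 >> 2, 262144 * 10 / 10000)
 * = 262, giving WMARK_LOW = 1286 and WMARK_HIGH = 1548 pages.
 */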
8848
8849 /**
8850 * setup_per_zone_wmarks - called when min_free_kbytes changes
8851 * or when memory is hot-{added|removed}
8852 *
8853 * Ensures that the watermark[min,low,high] values for each zone are set
8854 * correctly with respect to min_free_kbytes.
8855 */
setup_per_zone_wmarks(void)8856 void setup_per_zone_wmarks(void)
8857 {
8858 struct zone *zone;
8859 static DEFINE_SPINLOCK(lock);
8860
8861 spin_lock(&lock);
8862 __setup_per_zone_wmarks();
8863 spin_unlock(&lock);
8864
8865 /*
8866 * The watermark sizes have changed, so update the pcpu batch
8867 * and high limits or the limits may be inappropriate.
8868 */
8869 for_each_zone(zone)
8870 zone_pcp_update(zone, 0);
8871 }
8872
8873 /*
8874 * Initialise min_free_kbytes.
8875 *
8876 * For small machines we want it small (128k min). For large machines
8877 * we want it large (256MB max). But it is not linear, because network
8878 * bandwidth does not increase linearly with machine size. We use
8879 *
8880 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8881 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
8882 *
8883 * which yields
8884 *
8885 * 16MB: 512k
8886 * 32MB: 724k
8887 * 64MB: 1024k
8888 * 128MB: 1448k
8889 * 256MB: 2048k
8890 * 512MB: 2896k
8891 * 1024MB: 4096k
8892 * 2048MB: 5792k
8893 * 4096MB: 8192k
8894 * 8192MB: 11584k
8895 * 16384MB: 16384k
8896 */
calculate_min_free_kbytes(void)8897 void calculate_min_free_kbytes(void)
8898 {
8899 unsigned long lowmem_kbytes;
8900 int new_min_free_kbytes;
8901
8902 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8903 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8904
8905 if (new_min_free_kbytes > user_min_free_kbytes) {
8906 min_free_kbytes = new_min_free_kbytes;
8907 if (min_free_kbytes < 128)
8908 min_free_kbytes = 128;
8909 if (min_free_kbytes > 262144)
8910 min_free_kbytes = 262144;
8911 } else {
8912 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8913 new_min_free_kbytes, user_min_free_kbytes);
8914 }
8915 }
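/*
 * Example: with roughly 4 GiB of lowmem, lowmem_kbytes ~= 4194304, so
 * int_sqrt(4194304 * 16) = 8192 and min_free_kbytes becomes 8192 kB
 * (matching the table above), clamped to the [128, 262144] range.
 */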
8916
init_per_zone_wmark_min(void)8917 int __meminit init_per_zone_wmark_min(void)
8918 {
8919 calculate_min_free_kbytes();
8920 setup_per_zone_wmarks();
8921 refresh_zone_stat_thresholds();
8922 setup_per_zone_lowmem_reserve();
8923
8924 #ifdef CONFIG_NUMA
8925 setup_min_unmapped_ratio();
8926 setup_min_slab_ratio();
8927 #endif
8928
8929 khugepaged_min_free_kbytes_update();
8930
8931 return 0;
8932 }
postcore_initcall(init_per_zone_wmark_min)8933 postcore_initcall(init_per_zone_wmark_min)
8934
8935 /*
8936 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8937 * that we can call two helper functions whenever min_free_kbytes
8938 * changes.
8939 */
8940 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8941 void *buffer, size_t *length, loff_t *ppos)
8942 {
8943 int rc;
8944
8945 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8946 if (rc)
8947 return rc;
8948
8949 if (write) {
8950 user_min_free_kbytes = min_free_kbytes;
8951 setup_per_zone_wmarks();
8952 }
8953 return 0;
8954 }
8955
watermark_scale_factor_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)8956 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8957 void *buffer, size_t *length, loff_t *ppos)
8958 {
8959 int rc;
8960
8961 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8962 if (rc)
8963 return rc;
8964
8965 if (write)
8966 setup_per_zone_wmarks();
8967
8968 return 0;
8969 }
8970
8971 #ifdef CONFIG_NUMA
setup_min_unmapped_ratio(void)8972 static void setup_min_unmapped_ratio(void)
8973 {
8974 pg_data_t *pgdat;
8975 struct zone *zone;
8976
8977 for_each_online_pgdat(pgdat)
8978 pgdat->min_unmapped_pages = 0;
8979
8980 for_each_zone(zone)
8981 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8982 sysctl_min_unmapped_ratio) / 100;
8983 }
8984
8985
sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)8986 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8987 void *buffer, size_t *length, loff_t *ppos)
8988 {
8989 int rc;
8990
8991 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8992 if (rc)
8993 return rc;
8994
8995 setup_min_unmapped_ratio();
8996
8997 return 0;
8998 }
8999
setup_min_slab_ratio(void)9000 static void setup_min_slab_ratio(void)
9001 {
9002 pg_data_t *pgdat;
9003 struct zone *zone;
9004
9005 for_each_online_pgdat(pgdat)
9006 pgdat->min_slab_pages = 0;
9007
9008 for_each_zone(zone)
9009 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
9010 sysctl_min_slab_ratio) / 100;
9011 }
9012
sysctl_min_slab_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)9013 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
9014 void *buffer, size_t *length, loff_t *ppos)
9015 {
9016 int rc;
9017
9018 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
9019 if (rc)
9020 return rc;
9021
9022 setup_min_slab_ratio();
9023
9024 return 0;
9025 }
9026 #endif
9027
9028 /*
9029 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
9030 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
9031 * whenever sysctl_lowmem_reserve_ratio changes.
9032 *
9033 * The reserve ratio obviously has absolutely no relation with the
9034 * minimum watermarks. The lowmem reserve ratio can only make sense
9035 * as a function of the boot-time zone sizes.
9036 */
lowmem_reserve_ratio_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)9037 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
9038 void *buffer, size_t *length, loff_t *ppos)
9039 {
9040 int i;
9041
9042 proc_dointvec_minmax(table, write, buffer, length, ppos);
9043
9044 for (i = 0; i < MAX_NR_ZONES; i++) {
9045 if (sysctl_lowmem_reserve_ratio[i] < 1)
9046 sysctl_lowmem_reserve_ratio[i] = 0;
9047 }
9048
9049 setup_per_zone_lowmem_reserve();
9050 return 0;
9051 }
9052
9053 /*
9054 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
9055 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
9056 * pagelist can have before it gets flushed back to the buddy allocator.
9057 */
percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table * table,int write,void * buffer,size_t * length,loff_t * ppos)9058 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
9059 int write, void *buffer, size_t *length, loff_t *ppos)
9060 {
9061 struct zone *zone;
9062 int old_percpu_pagelist_high_fraction;
9063 int ret;
9064
9065 mutex_lock(&pcp_batch_high_lock);
9066 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
9067
9068 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
9069 if (!write || ret < 0)
9070 goto out;
9071
9072 /* Sanity checking to avoid pcp imbalance */
9073 if (percpu_pagelist_high_fraction &&
9074 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
9075 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
9076 ret = -EINVAL;
9077 goto out;
9078 }
9079
9080 /* No change? */
9081 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
9082 goto out;
9083
9084 for_each_populated_zone(zone)
9085 zone_set_pageset_high_and_batch(zone, 0);
9086 out:
9087 mutex_unlock(&pcp_batch_high_lock);
9088 return ret;
9089 }
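/*
 * Example usage via the sysctl interface:
 *
 *	# echo 8 > /proc/sys/vm/percpu_pagelist_high_fraction
 *
 * caps each zone's pcp lists at roughly 1/8 of the zone's managed pages,
 * split across the CPUs local to the zone; writing 0 restores the
 * default heuristic based on the zone's low watermark.
 */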
9090
9091 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
9092 /*
9093 * Returns the number of pages that the arch has reserved but
9094 * that are not known to alloc_large_system_hash().
9095 */
arch_reserved_kernel_pages(void)9096 static unsigned long __init arch_reserved_kernel_pages(void)
9097 {
9098 return 0;
9099 }
9100 #endif
9101
9102 /*
9103 * Adaptive scale is meant to reduce the sizes of hash tables on large-memory
9104 * machines. As memory size increases, the scale also increases, but at a
9105 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
9106 * quadruples the scale is increased by one, which means the hash table size
9107 * only doubles, instead of quadrupling as well.
9108 * Because 32-bit systems cannot have large physical memory, where this scaling
9109 * makes sense, it is disabled on such platforms.
9110 */
9111 #if __BITS_PER_LONG > 32
9112 #define ADAPT_SCALE_BASE (64ul << 30)
9113 #define ADAPT_SCALE_SHIFT 2
9114 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
9115 #endif
9116
9117 /*
9118 * allocate a large system hash table from bootmem
9119 * - it is assumed that the hash table must contain an exact power-of-2
9120 * quantity of entries
9121 * - limit is the number of hash buckets, not the total allocation size
9122 */
alloc_large_system_hash(const char * tablename,unsigned long bucketsize,unsigned long numentries,int scale,int flags,unsigned int * _hash_shift,unsigned int * _hash_mask,unsigned long low_limit,unsigned long high_limit)9123 void *__init alloc_large_system_hash(const char *tablename,
9124 unsigned long bucketsize,
9125 unsigned long numentries,
9126 int scale,
9127 int flags,
9128 unsigned int *_hash_shift,
9129 unsigned int *_hash_mask,
9130 unsigned long low_limit,
9131 unsigned long high_limit)
9132 {
9133 unsigned long long max = high_limit;
9134 unsigned long log2qty, size;
9135 void *table = NULL;
9136 gfp_t gfp_flags;
9137 bool virt;
9138 bool huge;
9139
9140 /* allow the kernel cmdline to have a say */
9141 if (!numentries) {
9142 /* round applicable memory size up to nearest megabyte */
9143 numentries = nr_kernel_pages;
9144 numentries -= arch_reserved_kernel_pages();
9145
9146 /* It isn't necessary when PAGE_SIZE >= 1MB */
9147 if (PAGE_SHIFT < 20)
9148 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
9149
9150 #if __BITS_PER_LONG > 32
9151 if (!high_limit) {
9152 unsigned long adapt;
9153
9154 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
9155 adapt <<= ADAPT_SCALE_SHIFT)
9156 scale++;
9157 }
9158 #endif
9159
9160 /* limit to 1 bucket per 2^scale bytes of low memory */
9161 if (scale > PAGE_SHIFT)
9162 numentries >>= (scale - PAGE_SHIFT);
9163 else
9164 numentries <<= (PAGE_SHIFT - scale);
9165
9166 /* Make sure we've got at least a 0-order allocation.. */
9167 if (unlikely(flags & HASH_SMALL)) {
9168 /* Makes no sense without HASH_EARLY */
9169 WARN_ON(!(flags & HASH_EARLY));
9170 if (!(numentries >> *_hash_shift)) {
9171 numentries = 1UL << *_hash_shift;
9172 BUG_ON(!numentries);
9173 }
9174 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
9175 numentries = PAGE_SIZE / bucketsize;
9176 }
9177 numentries = roundup_pow_of_two(numentries);
9178
9179 /* limit allocation size to 1/16 total memory by default */
9180 if (max == 0) {
9181 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
9182 do_div(max, bucketsize);
9183 }
9184 max = min(max, 0x80000000ULL);
9185
9186 if (numentries < low_limit)
9187 numentries = low_limit;
9188 if (numentries > max)
9189 numentries = max;
9190
9191 log2qty = ilog2(numentries);
9192
9193 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
9194 do {
9195 virt = false;
9196 size = bucketsize << log2qty;
9197 if (flags & HASH_EARLY) {
9198 if (flags & HASH_ZERO)
9199 table = memblock_alloc(size, SMP_CACHE_BYTES);
9200 else
9201 table = memblock_alloc_raw(size,
9202 SMP_CACHE_BYTES);
9203 } else if (get_order(size) >= MAX_ORDER || hashdist) {
9204 table = __vmalloc(size, gfp_flags);
9205 virt = true;
9206 huge = is_vm_area_hugepages(table);
9207 } else {
9208 /*
9209 * If bucketsize is not a power of two, we may free
9210 * some pages at the end of the hash table, which
9211 * alloc_pages_exact() does automatically.
9212 */
9213 table = alloc_pages_exact(size, gfp_flags);
9214 kmemleak_alloc(table, size, 1, gfp_flags);
9215 }
9216 } while (!table && size > PAGE_SIZE && --log2qty);
9217
9218 if (!table)
9219 panic("Failed to allocate %s hash table\n", tablename);
9220
9221 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
9222 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
9223 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
9224
9225 if (_hash_shift)
9226 *_hash_shift = log2qty;
9227 if (_hash_mask)
9228 *_hash_mask = (1 << log2qty) - 1;
9229
9230 return table;
9231 }
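/*
 * Sketch of a typical boot-time caller, shown only to illustrate the
 * parameters (details vary by kernel version): the dentry cache sizes
 * its hash table roughly like
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13,
 *					HASH_EARLY | HASH_ZERO,
 *					&d_hash_shift, NULL, 0, 0);
 *
 * i.e. one bucket per 2^13 bytes of low memory unless dhash_entries was
 * given on the command line.
 */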
9232
9233 /*
9234 * This function checks whether pageblock includes unmovable pages or not.
9235 *
9236 * A PageLRU check without isolation or the lru_lock can race, so a
9237 * MIGRATE_MOVABLE block might include unmovable pages. Likewise, a
9238 * __PageMovable check without lock_page may miss some movable non-LRU
9239 * pages under racy conditions, so this function cannot be expected to be exact.
9240 *
9241 * Returns a page without holding a reference. If the caller wants to
9242 * dereference that page (e.g., dumping), it has to make sure that it
9243 * cannot get removed (e.g., via memory unplug) concurrently.
9244 *
9245 */
has_unmovable_pages(struct zone * zone,struct page * page,int migratetype,int flags)9246 struct page *has_unmovable_pages(struct zone *zone, struct page *page,
9247 int migratetype, int flags)
9248 {
9249 unsigned long iter = 0;
9250 unsigned long pfn = page_to_pfn(page);
9251 unsigned long offset = pfn % pageblock_nr_pages;
9252
9253 if (is_migrate_cma_page(page)) {
9254 /*
9255 * CMA allocations (alloc_contig_range) really need to mark
9256 * isolate CMA pageblocks even when they are not movable in fact
9257 * so consider them movable here.
9258 */
9259 if (is_migrate_cma(migratetype))
9260 return NULL;
9261
9262 return page;
9263 }
9264
9265 for (; iter < pageblock_nr_pages - offset; iter++) {
9266 page = pfn_to_page(pfn + iter);
9267
9268 /*
9269 * Both bootmem allocations and memory holes are marked
9270 * PG_reserved and are unmovable. We can even have unmovable
9271 * allocations inside ZONE_MOVABLE, for example when
9272 * specifying "movablecore".
9273 */
9274 if (PageReserved(page))
9275 return page;
9276
9277 /*
9278 * If the zone is movable and we have ruled out all reserved
9279 * pages then it should be reasonably safe to assume the rest
9280 * is movable.
9281 */
9282 if (zone_idx(zone) == ZONE_MOVABLE)
9283 continue;
9284
9285 /*
9286 * Hugepages are not in LRU lists, but they're movable.
9287 * THPs are on the LRU, but need to be counted as the number of small pages they contain.
9288 * We need not scan over tail pages because we don't
9289 * handle each tail page individually in migration.
9290 */
9291 if (PageHuge(page) || PageTransCompound(page)) {
9292 struct page *head = compound_head(page);
9293 unsigned int skip_pages;
9294
9295 if (PageHuge(page)) {
9296 if (!hugepage_migration_supported(page_hstate(head)))
9297 return page;
9298 } else if (!PageLRU(head) && !__PageMovable(head)) {
9299 return page;
9300 }
9301
9302 skip_pages = compound_nr(head) - (page - head);
9303 iter += skip_pages - 1;
9304 continue;
9305 }
9306
9307 /*
9308 * We can't use page_count without pinning the page
9309 * because another CPU can free the compound page.
9310 * This check already skips compound tails of THP
9311 * because their page->_refcount is zero at all times.
9312 */
9313 if (!page_ref_count(page)) {
9314 if (PageBuddy(page))
9315 iter += (1 << buddy_order(page)) - 1;
9316 continue;
9317 }
9318
9319 /*
9320 * The HWPoisoned page may not be in the buddy system, and
9321 * its page_count() is not 0.
9322 */
9323 if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
9324 continue;
9325
9326 /*
9327 * We treat all PageOffline() pages as movable when offlining
9328 * to give drivers a chance to decrement their reference count
9329 * in MEM_GOING_OFFLINE in order to indicate that these pages
9330 * can be offlined as there are no direct references anymore.
9331 * For actually unmovable PageOffline() where the driver does
9332 * not support this, we will fail later when trying to actually
9333 * move these pages that still have a reference count > 0.
9334 * (false negatives in this function only)
9335 */
9336 if ((flags & MEMORY_OFFLINE) && PageOffline(page))
9337 continue;
9338
9339 if (__PageMovable(page) || PageLRU(page))
9340 continue;
9341
9342 /*
9343 * If there are RECLAIMABLE pages, we need to check
9344 * them. But for now, memory offlining itself doesn't call
9345 * shrink_node_slabs(); this still needs to be fixed.
9346 */
9347 return page;
9348 }
9349 return NULL;
9350 }
9351
9352 #ifdef CONFIG_CONTIG_ALLOC
pfn_max_align_down(unsigned long pfn)9353 static unsigned long pfn_max_align_down(unsigned long pfn)
9354 {
9355 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
9356 pageblock_nr_pages) - 1);
9357 }
9358
pfn_max_align_up(unsigned long pfn)9359 static unsigned long pfn_max_align_up(unsigned long pfn)
9360 {
9361 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
9362 pageblock_nr_pages));
9363 }
9364
9365 #if defined(CONFIG_DYNAMIC_DEBUG) || \
9366 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
9367 /* Usage: See admin-guide/dynamic-debug-howto.rst */
alloc_contig_dump_pages(struct list_head * page_list)9368 static void alloc_contig_dump_pages(struct list_head *page_list)
9369 {
9370 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
9371
9372 if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
9373 struct page *page;
9374
9375 dump_stack();
9376 list_for_each_entry(page, page_list, lru)
9377 dump_page(page, "migration failure");
9378 }
9379 }
9380 #else
alloc_contig_dump_pages(struct list_head * page_list)9381 static inline void alloc_contig_dump_pages(struct list_head *page_list)
9382 {
9383 }
9384 #endif
9385
9386 /* [start, end) must belong to a single zone. */
__alloc_contig_migrate_range(struct compact_control * cc,unsigned long start,unsigned long end)9387 static int __alloc_contig_migrate_range(struct compact_control *cc,
9388 unsigned long start, unsigned long end)
9389 {
9390 /* This function is based on compact_zone() from compaction.c. */
9391 unsigned int nr_reclaimed;
9392 unsigned long pfn = start;
9393 unsigned int tries = 0;
9394 int ret = 0;
9395 struct migration_target_control mtc = {
9396 .nid = zone_to_nid(cc->zone),
9397 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
9398 };
9399
9400 lru_cache_disable();
9401
9402 while (pfn < end || !list_empty(&cc->migratepages)) {
9403 if (fatal_signal_pending(current)) {
9404 ret = -EINTR;
9405 break;
9406 }
9407
9408 if (list_empty(&cc->migratepages)) {
9409 cc->nr_migratepages = 0;
9410 ret = isolate_migratepages_range(cc, pfn, end);
9411 if (ret && ret != -EAGAIN)
9412 break;
9413 pfn = cc->migrate_pfn;
9414 tries = 0;
9415 } else if (++tries == 5) {
9416 ret = -EBUSY;
9417 break;
9418 }
9419
9420 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
9421 &cc->migratepages);
9422 cc->nr_migratepages -= nr_reclaimed;
9423
9424 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
9425 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
9426
9427 /*
9428 * On -ENOMEM, migrate_pages() bails out right away. It is pointless
9429 * to retry on this error, so do the same here.
9430 */
9431 if (ret == -ENOMEM)
9432 break;
9433 }
9434
9435 lru_cache_enable();
9436 if (ret < 0) {
9437 if (ret == -EBUSY) {
9438 struct page *page;
9439
9440 alloc_contig_dump_pages(&cc->migratepages);
9441 list_for_each_entry(page, &cc->migratepages, lru) {
9442 /* The page will be freed by putback_movable_pages soon */
9443 if (page_count(page) == 1)
9444 continue;
9445 page_pinner_failure_detect(page);
9446 }
9447 }
9448 putback_movable_pages(&cc->migratepages);
9449 return ret;
9450 }
9451 return 0;
9452 }
9453
9454 /**
9455 * alloc_contig_range() -- tries to allocate given range of pages
9456 * @start: start PFN to allocate
9457 * @end: one-past-the-last PFN to allocate
9458 * @migratetype: migratetype of the underlying pageblocks (either
9459 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
9460 * in range must have the same migratetype and it must
9461 * be either of the two.
9462 * @gfp_mask: GFP mask to use during compaction
9463 *
9464 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
9465 * aligned. The PFN range must belong to a single zone.
9466 *
9467 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9468 * pageblocks in the range. Once isolated, the pageblocks should not
9469 * be modified by others.
9470 *
9471 * Return: zero on success or negative error code. On success all
9472 * pages which PFN is in [start, end) are allocated for the caller and
9473 * need to be freed with free_contig_range().
9474 */
alloc_contig_range(unsigned long start,unsigned long end,unsigned migratetype,gfp_t gfp_mask)9475 int alloc_contig_range(unsigned long start, unsigned long end,
9476 unsigned migratetype, gfp_t gfp_mask)
9477 {
9478 unsigned long outer_start, outer_end;
9479 unsigned int order;
9480 int ret = 0;
9481 bool skip_drain_all_pages = false;
9482
9483 struct compact_control cc = {
9484 .nr_migratepages = 0,
9485 .order = -1,
9486 .zone = page_zone(pfn_to_page(start)),
9487 .mode = MIGRATE_SYNC,
9488 .ignore_skip_hint = true,
9489 .no_set_skip_hint = true,
9490 .gfp_mask = current_gfp_context(gfp_mask),
9491 .alloc_contig = true,
9492 };
9493 INIT_LIST_HEAD(&cc.migratepages);
9494
9495 /*
9496 * What we do here is we mark all pageblocks in range as
9497 * MIGRATE_ISOLATE. Because pageblock and max order pages may
9498 * have different sizes, and due to the way the page allocator
9499 * works, we align the range to the bigger of the two so
9500 * that the page allocator won't try to merge buddies from
9501 * different pageblocks and change MIGRATE_ISOLATE to some
9502 * other migration type.
9503 *
9504 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9505 * migrate the pages from an unaligned range (i.e. the pages that
9506 * we are interested in). This will put all the pages in
9507 * range back to the page allocator as MIGRATE_ISOLATE.
9508 *
9509 * When this is done, we take the pages in range from the page
9510 * allocator, removing them from the buddy system. This way the
9511 * page allocator will never consider using them.
9512 *
9513 * This lets us mark the pageblocks back as
9514 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9515 * aligned range but not in the unaligned, original range are
9516 * put back to the page allocator so that the buddy allocator can use them.
9517 */
9518
9519 ret = start_isolate_page_range(pfn_max_align_down(start),
9520 pfn_max_align_up(end), migratetype, 0);
9521 if (ret)
9522 return ret;
9523
9524 trace_android_vh_cma_drain_all_pages_bypass(migratetype,
9525 &skip_drain_all_pages);
9526 if (!skip_drain_all_pages)
9527 drain_all_pages(cc.zone);
9528
9529 /*
9530 * In case of -EBUSY, we'd like to know which page causes the problem.
9531 * So, just fall through. test_pages_isolated() has a tracepoint
9532 * which will report the busy page.
9533 *
9534 * It is possible that busy pages could become available before
9535 * the call to test_pages_isolated, and the range will actually be
9536 * allocated. So, if we fall through be sure to clear ret so that
9537 * -EBUSY is not accidentally used or returned to caller.
9538 */
9539 ret = __alloc_contig_migrate_range(&cc, start, end);
9540 if (ret && ret != -EBUSY)
9541 goto done;
9542 ret = 0;
9543
9544 /*
9545 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
9546 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
9547 * more, all pages in [start, end) are free in the page allocator.
9548 * What we are going to do is allocate all pages from
9549 * [start, end) (that is, remove them from the page allocator).
9550 *
9551 * The only problem is that pages at the beginning and at the
9552 * end of the interesting range may not be aligned with pages that
9553 * the page allocator holds, i.e. they can be part of higher-order
9554 * pages. Because of this, we reserve the bigger range and
9555 * once this is done free the pages we are not interested in.
9556 *
9557 * We don't have to hold zone->lock here because the pages are
9558 * isolated, and thus won't get removed from the buddy allocator.
9559 */
9560
9561 order = 0;
9562 outer_start = start;
9563 while (!PageBuddy(pfn_to_page(outer_start))) {
9564 if (++order >= MAX_ORDER) {
9565 outer_start = start;
9566 break;
9567 }
9568 outer_start &= ~0UL << order;
9569 }
9570
9571 if (outer_start != start) {
9572 order = buddy_order(pfn_to_page(outer_start));
9573
9574 /*
9575 * The outer_start page could be a small-order buddy page that
9576 * doesn't include the start page. Adjust outer_start
9577 * in this case so that the failed page is reported properly
9578 * by the tracepoint in test_pages_isolated().
9579 */
9580 if (outer_start + (1UL << order) <= start)
9581 outer_start = start;
9582 }
9583
9584 /* Make sure the range is really isolated. */
9585 if (test_pages_isolated(outer_start, end, 0)) {
9586 ret = -EBUSY;
9587 goto done;
9588 }
9589
9590 /* Grab isolated pages from freelists. */
9591 outer_end = isolate_freepages_range(&cc, outer_start, end);
9592 if (!outer_end) {
9593 ret = -EBUSY;
9594 goto done;
9595 }
9596
9597 /* Free head and tail (if any) */
9598 if (start != outer_start)
9599 free_contig_range(outer_start, start - outer_start);
9600 if (end != outer_end)
9601 free_contig_range(end, outer_end - end);
9602
9603 done:
9604 undo_isolate_page_range(pfn_max_align_down(start),
9605 pfn_max_align_up(end), migratetype);
9606 return ret;
9607 }
9608 EXPORT_SYMBOL(alloc_contig_range);
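/*
 * Illustrative use (a sketch of what cma_alloc() effectively does for a
 * request of 'count' pages starting at 'pfn' inside a CMA area; 'pfn'
 * and 'count' are placeholders for this example):
 *
 *	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, GFP_KERNEL);
 *
 * and, once the buffer is no longer needed:
 *
 *	free_contig_range(pfn, count);
 */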
9609
__alloc_contig_pages(unsigned long start_pfn,unsigned long nr_pages,gfp_t gfp_mask)9610 static int __alloc_contig_pages(unsigned long start_pfn,
9611 unsigned long nr_pages, gfp_t gfp_mask)
9612 {
9613 unsigned long end_pfn = start_pfn + nr_pages;
9614
9615 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9616 gfp_mask);
9617 }
9618
pfn_range_valid_contig(struct zone * z,unsigned long start_pfn,unsigned long nr_pages)9619 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9620 unsigned long nr_pages)
9621 {
9622 unsigned long i, end_pfn = start_pfn + nr_pages;
9623 struct page *page;
9624
9625 for (i = start_pfn; i < end_pfn; i++) {
9626 page = pfn_to_online_page(i);
9627 if (!page)
9628 return false;
9629
9630 if (page_zone(page) != z)
9631 return false;
9632
9633 if (PageReserved(page))
9634 return false;
9635
9636 if (PageHuge(page))
9637 return false;
9638 }
9639 return true;
9640 }
9641
zone_spans_last_pfn(const struct zone * zone,unsigned long start_pfn,unsigned long nr_pages)9642 static bool zone_spans_last_pfn(const struct zone *zone,
9643 unsigned long start_pfn, unsigned long nr_pages)
9644 {
9645 unsigned long last_pfn = start_pfn + nr_pages - 1;
9646
9647 return zone_spans_pfn(zone, last_pfn);
9648 }
9649
9650 /**
9651 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
9652 * @nr_pages: Number of contiguous pages to allocate
9653 * @gfp_mask: GFP mask to limit search and used during compaction
9654 * @nid: Target node
9655 * @nodemask: Mask for other possible nodes
9656 *
9657 * This routine is a wrapper around alloc_contig_range(). It scans over zones
9658 * on an applicable zonelist to find a contiguous pfn range which can then be
9659 * tried for allocation with alloc_contig_range(). This routine is intended
9660 * for allocation requests which can not be fulfilled with the buddy allocator.
9661 *
9662 * The allocated memory is always aligned to a page boundary. If nr_pages is a
9663 * power of two then the alignment is guaranteed to be to the given nr_pages
9664 * (e.g. 1GB request would be aligned to 1GB).
9665 *
9666 * Allocated pages can be freed with free_contig_range() or by manually calling
9667 * __free_page() on each allocated page.
9668 *
9669 * Return: pointer to contiguous pages on success, or NULL if not successful.
9670 */
alloc_contig_pages(unsigned long nr_pages,gfp_t gfp_mask,int nid,nodemask_t * nodemask)9671 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
9672 int nid, nodemask_t *nodemask)
9673 {
9674 unsigned long ret, pfn, flags;
9675 struct zonelist *zonelist;
9676 struct zone *zone;
9677 struct zoneref *z;
9678
9679 zonelist = node_zonelist(nid, gfp_mask);
9680 for_each_zone_zonelist_nodemask(zone, z, zonelist,
9681 gfp_zone(gfp_mask), nodemask) {
9682 spin_lock_irqsave(&zone->lock, flags);
9683
9684 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
9685 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
9686 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
9687 /*
9688 * We release the zone lock here because
9689 * alloc_contig_range() will also lock the zone
9690 * at some point. If there's an allocation
9691 * spinning on this lock, it may win the race
9692 * and cause alloc_contig_range() to fail...
9693 */
9694 spin_unlock_irqrestore(&zone->lock, flags);
9695 ret = __alloc_contig_pages(pfn, nr_pages,
9696 gfp_mask);
9697 if (!ret)
9698 return pfn_to_page(pfn);
9699 spin_lock_irqsave(&zone->lock, flags);
9700 }
9701 pfn += nr_pages;
9702 }
9703 spin_unlock_irqrestore(&zone->lock, flags);
9704 }
9705 return NULL;
9706 }
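/*
 * Illustrative use (a sketch; runtime gigantic-page allocation is one
 * real user): grabbing 1 GiB of physically contiguous memory with 4K
 * pages, where 'nid' is a placeholder node id:
 *
 *	struct page *page = alloc_contig_pages(262144, GFP_KERNEL, nid, NULL);
 *
 *	if (page)
 *		free_contig_range(page_to_pfn(page), 262144);
 */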
9707 #endif /* CONFIG_CONTIG_ALLOC */
9708
free_contig_range(unsigned long pfn,unsigned long nr_pages)9709 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9710 {
9711 unsigned long count = 0;
9712
9713 for (; nr_pages--; pfn++) {
9714 struct page *page = pfn_to_page(pfn);
9715
9716 count += page_count(page) != 1;
9717 __free_page(page);
9718 }
9719 WARN(count != 0, "%lu pages are still in use!\n", count);
9720 }
9721 EXPORT_SYMBOL(free_contig_range);
9722
9723 /*
9724 * The zone indicated has a new number of managed_pages; batch sizes and percpu
9725 * page high values need to be recalculated.
9726 */
zone_pcp_update(struct zone * zone,int cpu_online)9727 void zone_pcp_update(struct zone *zone, int cpu_online)
9728 {
9729 mutex_lock(&pcp_batch_high_lock);
9730 zone_set_pageset_high_and_batch(zone, cpu_online);
9731 mutex_unlock(&pcp_batch_high_lock);
9732 }
9733
9734 /*
9735 * Effectively disable pcplists for the zone by setting the high limit to 0
9736 * and draining all cpus. A concurrent page freeing on another CPU that's about
9737 * to put the page on the pcplist will either finish before the drain and the page
9738 * will be drained, or observe the new high limit and skip the pcplist.
9739 *
9740 * Must be paired with a call to zone_pcp_enable().
9741 */
zone_pcp_disable(struct zone * zone)9742 void zone_pcp_disable(struct zone *zone)
9743 {
9744 mutex_lock(&pcp_batch_high_lock);
9745 __zone_set_pageset_high_and_batch(zone, 0, 1);
9746 __drain_all_pages(zone, true);
9747 }
9748
zone_pcp_enable(struct zone * zone)9749 void zone_pcp_enable(struct zone *zone)
9750 {
9751 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9752 mutex_unlock(&pcp_batch_high_lock);
9753 }
9754
zone_pcp_reset(struct zone * zone)9755 void zone_pcp_reset(struct zone *zone)
9756 {
9757 int cpu;
9758 struct per_cpu_zonestat *pzstats;
9759
9760 if (zone_per_cpu_pageset(zone) != &boot_pageset) {
9761 for_each_online_cpu(cpu) {
9762 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9763 drain_zonestat(zone, pzstats);
9764 }
9765 free_percpu(zone->per_cpu_pageset);
9766 free_percpu(zone->per_cpu_zonestats);
9767 zone->per_cpu_pageset = (struct per_cpu_pages __percpu *)&boot_pageset;
9768 zone->per_cpu_zonestats = &boot_zonestats;
9769 }
9770 }
9771
9772 #ifdef CONFIG_MEMORY_HOTREMOVE
9773 /*
9774 * All pages in the range must be in a single zone, must not contain holes,
9775 * must span full sections, and must be isolated before calling this function.
9776 */
__offline_isolated_pages(unsigned long start_pfn,unsigned long end_pfn)9777 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9778 {
9779 unsigned long pfn = start_pfn;
9780 struct page *page;
9781 struct zone *zone;
9782 unsigned int order;
9783 unsigned long flags;
9784
9785 offline_mem_sections(pfn, end_pfn);
9786 zone = page_zone(pfn_to_page(pfn));
9787 spin_lock_irqsave(&zone->lock, flags);
9788 while (pfn < end_pfn) {
9789 page = pfn_to_page(pfn);
9790 /*
9791 * The HWPoisoned page may not be in the buddy system, and
9792 * its page_count() is not 0.
9793 */
9794 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9795 pfn++;
9796 continue;
9797 }
9798 /*
9799 * At this point all remaining PageOffline() pages have a
9800 * reference count of 0 and can simply be skipped.
9801 */
9802 if (PageOffline(page)) {
9803 BUG_ON(page_count(page));
9804 BUG_ON(PageBuddy(page));
9805 pfn++;
9806 continue;
9807 }
9808
9809 BUG_ON(page_count(page));
9810 BUG_ON(!PageBuddy(page));
9811 order = buddy_order(page);
9812 del_page_from_free_list(page, zone, order);
9813 pfn += (1 << order);
9814 }
9815 spin_unlock_irqrestore(&zone->lock, flags);
9816 }
9817 #endif
9818
is_free_buddy_page(struct page * page)9819 bool is_free_buddy_page(struct page *page)
9820 {
9821 struct zone *zone = page_zone(page);
9822 unsigned long pfn = page_to_pfn(page);
9823 unsigned long flags;
9824 unsigned int order;
9825
9826 spin_lock_irqsave(&zone->lock, flags);
9827 for (order = 0; order < MAX_ORDER; order++) {
9828 struct page *page_head = page - (pfn & ((1 << order) - 1));
9829
9830 if (PageBuddy(page_head) && buddy_order(page_head) >= order)
9831 break;
9832 }
9833 spin_unlock_irqrestore(&zone->lock, flags);
9834
9835 return order < MAX_ORDER;
9836 }
9837
9838 #ifdef CONFIG_MEMORY_FAILURE
9839 /*
9840 * Break down a higher-order page into sub-pages, and keep our target out of
9841 * the buddy allocator.
9842 */
break_down_buddy_pages(struct zone * zone,struct page * page,struct page * target,int low,int high,int migratetype)9843 static void break_down_buddy_pages(struct zone *zone, struct page *page,
9844 struct page *target, int low, int high,
9845 int migratetype)
9846 {
9847 unsigned long size = 1 << high;
9848 struct page *current_buddy, *next_page;
9849
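/*
 * Repeatedly split the block in half: keep descending into the half
 * that contains 'target' (which stays off the free lists) and return
 * the other half to the free list at its new order, unless it is
 * turned into a guard page.
 */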
9850 while (high > low) {
9851 high--;
9852 size >>= 1;
9853
9854 if (target >= &page[size]) {
9855 next_page = page + size;
9856 current_buddy = page;
9857 } else {
9858 next_page = page;
9859 current_buddy = page + size;
9860 }
9861 page = next_page;
9862
9863 if (set_page_guard(zone, current_buddy, high, migratetype))
9864 continue;
9865
9866 if (current_buddy != target) {
9867 add_to_free_list(current_buddy, zone, high, migratetype);
9868 set_buddy_order(current_buddy, high);
9869 }
9870 }
9871 }
9872
9873 /*
9874 * Take a page that will be marked as poisoned off the buddy allocator.
9875 */
take_page_off_buddy(struct page * page)9876 bool take_page_off_buddy(struct page *page)
9877 {
9878 struct zone *zone = page_zone(page);
9879 unsigned long pfn = page_to_pfn(page);
9880 unsigned long flags;
9881 unsigned int order;
9882 bool ret = false;
9883
9884 spin_lock_irqsave(&zone->lock, flags);
9885 for (order = 0; order < MAX_ORDER; order++) {
9886 struct page *page_head = page - (pfn & ((1 << order) - 1));
9887 int page_order = buddy_order(page_head);
9888
9889 if (PageBuddy(page_head) && page_order >= order) {
9890 unsigned long pfn_head = page_to_pfn(page_head);
9891 int migratetype = get_pfnblock_migratetype(page_head,
9892 pfn_head);
9893
9894 del_page_from_free_list(page_head, zone, page_order);
9895 break_down_buddy_pages(zone, page_head, page, 0,
9896 page_order, migratetype);
9897 if (!is_migrate_isolate(migratetype))
9898 __mod_zone_freepage_state(zone, -1, migratetype);
9899 ret = true;
9900 break;
9901 }
9902 if (page_count(page_head) > 0)
9903 break;
9904 }
9905 spin_unlock_irqrestore(&zone->lock, flags);
9906 return ret;
9907 }
9908 #endif
9909
9910 #ifdef CONFIG_ZONE_DMA
has_managed_dma(void)9911 bool has_managed_dma(void)
9912 {
9913 struct pglist_data *pgdat;
9914
9915 for_each_online_pgdat(pgdat) {
9916 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
9917
9918 if (managed_zone(zone))
9919 return true;
9920 }
9921 return false;
9922 }
9923 #endif /* CONFIG_ZONE_DMA */
9924