1 /*
2 * linux/mm/page_alloc.c
3 *
4 * Manages the free list, the system allocates free pages here.
5 * Note that kmalloc() lives in slab.c
6 *
7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
8 * Swap reorganised 29.12.95, Stephen Tweedie
9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15 */
16
17 #include <linux/stddef.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/interrupt.h>
21 #include <linux/pagemap.h>
22 #include <linux/jiffies.h>
23 #include <linux/bootmem.h>
24 #include <linux/memblock.h>
25 #include <linux/compiler.h>
26 #include <linux/kernel.h>
27 #include <linux/kmemcheck.h>
28 #include <linux/module.h>
29 #include <linux/suspend.h>
30 #include <linux/pagevec.h>
31 #include <linux/blkdev.h>
32 #include <linux/slab.h>
33 #include <linux/ratelimit.h>
34 #include <linux/oom.h>
35 #include <linux/notifier.h>
36 #include <linux/topology.h>
37 #include <linux/sysctl.h>
38 #include <linux/cpu.h>
39 #include <linux/cpuset.h>
40 #include <linux/memory_hotplug.h>
41 #include <linux/nodemask.h>
42 #include <linux/vmalloc.h>
43 #include <linux/vmstat.h>
44 #include <linux/mempolicy.h>
45 #include <linux/stop_machine.h>
46 #include <linux/sort.h>
47 #include <linux/pfn.h>
48 #include <linux/backing-dev.h>
49 #include <linux/fault-inject.h>
50 #include <linux/page-isolation.h>
51 #include <linux/page_cgroup.h>
52 #include <linux/debugobjects.h>
53 #include <linux/kmemleak.h>
54 #include <linux/compaction.h>
55 #include <trace/events/kmem.h>
56 #include <linux/prefetch.h>
57 #include <linux/mm_inline.h>
58 #include <linux/migrate.h>
59 #include <linux/page-debug-flags.h>
60 #include <linux/hugetlb.h>
61 #include <linux/sched/rt.h>
62
63 #include <asm/sections.h>
64 #include <asm/tlbflush.h>
65 #include <asm/div64.h>
66 #include "internal.h"
67
68 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
69 static DEFINE_MUTEX(pcp_batch_high_lock);
70 #define MIN_PERCPU_PAGELIST_FRACTION (8)
71
72 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
73 DEFINE_PER_CPU(int, numa_node);
74 EXPORT_PER_CPU_SYMBOL(numa_node);
75 #endif
76
77 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
78 /*
79 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
80 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
81 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
82 * defined in <linux/topology.h>.
83 */
84 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
85 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
86 int _node_numa_mem_[MAX_NUMNODES];
87 #endif
88
89 /*
90 * Array of node states.
91 */
92 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
93 [N_POSSIBLE] = NODE_MASK_ALL,
94 [N_ONLINE] = { { [0] = 1UL } },
95 #ifndef CONFIG_NUMA
96 [N_NORMAL_MEMORY] = { { [0] = 1UL } },
97 #ifdef CONFIG_HIGHMEM
98 [N_HIGH_MEMORY] = { { [0] = 1UL } },
99 #endif
100 #ifdef CONFIG_MOVABLE_NODE
101 [N_MEMORY] = { { [0] = 1UL } },
102 #endif
103 [N_CPU] = { { [0] = 1UL } },
104 #endif /* NUMA */
105 };
106 EXPORT_SYMBOL(node_states);
107
108 /* Protect totalram_pages and zone->managed_pages */
109 static DEFINE_SPINLOCK(managed_page_count_lock);
110
111 unsigned long totalram_pages __read_mostly;
112 unsigned long totalreserve_pages __read_mostly;
113 unsigned long totalcma_pages __read_mostly;
114 /*
115 * When calculating the number of globally allowed dirty pages, there
116 * is a certain number of per-zone reserves that should not be
117 * considered dirtyable memory. This is the sum of those reserves
118 * over all existing zones that contribute dirtyable memory.
119 */
120 unsigned long dirty_balance_reserve __read_mostly;
121
122 int percpu_pagelist_fraction;
123 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
124
125 #ifdef CONFIG_PM_SLEEP
126 /*
127 * The following functions are used by the suspend/hibernate code to temporarily
128 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
129 * while devices are suspended. To avoid races with the suspend/hibernate code,
130 * they should always be called with pm_mutex held (gfp_allowed_mask also should
131 * only be modified with pm_mutex held, unless the suspend/hibernate code is
132 * guaranteed not to run in parallel with that modification).
133 */
134
135 static gfp_t saved_gfp_mask;
136
137 void pm_restore_gfp_mask(void)
138 {
139 WARN_ON(!mutex_is_locked(&pm_mutex));
140 if (saved_gfp_mask) {
141 gfp_allowed_mask = saved_gfp_mask;
142 saved_gfp_mask = 0;
143 }
144 }
145
146 void pm_restrict_gfp_mask(void)
147 {
148 WARN_ON(!mutex_is_locked(&pm_mutex));
149 WARN_ON(saved_gfp_mask);
150 saved_gfp_mask = gfp_allowed_mask;
151 gfp_allowed_mask &= ~GFP_IOFS;
152 }
153
154 bool pm_suspended_storage(void)
155 {
156 if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
157 return false;
158 return true;
159 }
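/*
 * Illustrative sketch of the expected calling pattern (not lifted from
 * the suspend code itself): the mask is restricted before devices are
 * suspended and restored afterwards, with pm_mutex held throughout:
 *
 *    mutex_lock(&pm_mutex);
 *    pm_restrict_gfp_mask();   masks out GFP_IOFS (__GFP_IO | __GFP_FS)
 *    ... suspend devices, sleep, resume devices ...
 *    pm_restore_gfp_mask();
 *    mutex_unlock(&pm_mutex);
 */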
160 #endif /* CONFIG_PM_SLEEP */
161
162 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
163 unsigned int pageblock_order __read_mostly;
164 #endif
165
166 static void __free_pages_ok(struct page *page, unsigned int order);
167
168 /*
169 * results with 256, 32 in the lowmem_reserve sysctl:
170 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
171 * 1G machine -> (16M dma, 784M normal, 224M high)
172 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
173 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
174 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
175 *
176 * TBD: should special case ZONE_DMA32 machines here - in those we normally
177 * don't need any ZONE_NORMAL reservation
178 */
179 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
180 #ifdef CONFIG_ZONE_DMA
181 256,
182 #endif
183 #ifdef CONFIG_ZONE_DMA32
184 256,
185 #endif
186 #ifdef CONFIG_HIGHMEM
187 32,
188 #endif
189 32,
190 };
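/*
 * Worked example of the ratios above, using the 1G machine from the
 * comment (the actual per-zone reserves are computed later in this file
 * from these ratios): with 16M DMA, 784M normal and 224M highmem, a
 * NORMAL allocation must leave roughly 784M/256 ~= 3M of ZONE_DMA
 * untouched, while a HIGHMEM allocation must leave (224M+784M)/256 ~= 4M
 * of ZONE_DMA and 224M/32 = 7M of ZONE_NORMAL untouched. A smaller
 * ratio therefore means a larger protected reserve.
 */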
191
192 EXPORT_SYMBOL(totalram_pages);
193
194 static char * const zone_names[MAX_NR_ZONES] = {
195 #ifdef CONFIG_ZONE_DMA
196 "DMA",
197 #endif
198 #ifdef CONFIG_ZONE_DMA32
199 "DMA32",
200 #endif
201 "Normal",
202 #ifdef CONFIG_HIGHMEM
203 "HighMem",
204 #endif
205 "Movable",
206 };
207
208 /*
209 * Try to keep at least this much lowmem free. Do not allow normal
210 * allocations below this point, only high priority ones. Automatically
211 * tuned according to the amount of memory in the system.
212 */
213 int min_free_kbytes = 1024;
214 int user_min_free_kbytes = -1;
215 int min_free_order_shift = 1;
216
217 /*
218 * Extra memory for the system to try freeing. Used to temporarily
219 * free memory, to make space for new workloads. Anyone can allocate
220 * down to the min watermarks controlled by min_free_kbytes above.
221 */
222 int extra_free_kbytes = 0;
223
224 static unsigned long __meminitdata nr_kernel_pages;
225 static unsigned long __meminitdata nr_all_pages;
226 static unsigned long __meminitdata dma_reserve;
227
228 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
229 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
230 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
231 static unsigned long __initdata required_kernelcore;
232 static unsigned long __initdata required_movablecore;
233 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
234
235 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
236 int movable_zone;
237 EXPORT_SYMBOL(movable_zone);
238 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
239
240 #if MAX_NUMNODES > 1
241 int nr_node_ids __read_mostly = MAX_NUMNODES;
242 int nr_online_nodes __read_mostly = 1;
243 EXPORT_SYMBOL(nr_node_ids);
244 EXPORT_SYMBOL(nr_online_nodes);
245 #endif
246
247 int page_group_by_mobility_disabled __read_mostly;
248
249 void set_pageblock_migratetype(struct page *page, int migratetype)
250 {
251 if (unlikely(page_group_by_mobility_disabled &&
252 migratetype < MIGRATE_PCPTYPES))
253 migratetype = MIGRATE_UNMOVABLE;
254
255 set_pageblock_flags_group(page, (unsigned long)migratetype,
256 PB_migrate, PB_migrate_end);
257 }
258
259 bool oom_killer_disabled __read_mostly;
260
261 #ifdef CONFIG_DEBUG_VM
262 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
263 {
264 int ret = 0;
265 unsigned seq;
266 unsigned long pfn = page_to_pfn(page);
267 unsigned long sp, start_pfn;
268
269 do {
270 seq = zone_span_seqbegin(zone);
271 start_pfn = zone->zone_start_pfn;
272 sp = zone->spanned_pages;
273 if (!zone_spans_pfn(zone, pfn))
274 ret = 1;
275 } while (zone_span_seqretry(zone, seq));
276
277 if (ret)
278 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
279 pfn, zone_to_nid(zone), zone->name,
280 start_pfn, start_pfn + sp);
281
282 return ret;
283 }
284
285 static int page_is_consistent(struct zone *zone, struct page *page)
286 {
287 if (!pfn_valid_within(page_to_pfn(page)))
288 return 0;
289 if (zone != page_zone(page))
290 return 0;
291
292 return 1;
293 }
294 /*
295 * Temporary debugging check for pages not lying within a given zone.
296 */
297 static int bad_range(struct zone *zone, struct page *page)
298 {
299 if (page_outside_zone_boundaries(zone, page))
300 return 1;
301 if (!page_is_consistent(zone, page))
302 return 1;
303
304 return 0;
305 }
306 #else
307 static inline int bad_range(struct zone *zone, struct page *page)
308 {
309 return 0;
310 }
311 #endif
312
313 static void bad_page(struct page *page, const char *reason,
314 unsigned long bad_flags)
315 {
316 static unsigned long resume;
317 static unsigned long nr_shown;
318 static unsigned long nr_unshown;
319
320 /* Don't complain about poisoned pages */
321 if (PageHWPoison(page)) {
322 page_mapcount_reset(page); /* remove PageBuddy */
323 return;
324 }
325
326 /*
327 * Allow a burst of 60 reports, then keep quiet for that minute;
328 * or allow a steady drip of one report per second.
329 */
330 if (nr_shown == 60) {
331 if (time_before(jiffies, resume)) {
332 nr_unshown++;
333 goto out;
334 }
335 if (nr_unshown) {
336 printk(KERN_ALERT
337 "BUG: Bad page state: %lu messages suppressed\n",
338 nr_unshown);
339 nr_unshown = 0;
340 }
341 nr_shown = 0;
342 }
343 if (nr_shown++ == 0)
344 resume = jiffies + 60 * HZ;
345
346 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
347 current->comm, page_to_pfn(page));
348 dump_page_badflags(page, reason, bad_flags);
349
350 print_modules();
351 dump_stack();
352 out:
353 /* Leave bad fields for debug, except PageBuddy could make trouble */
354 page_mapcount_reset(page); /* remove PageBuddy */
355 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
356 }
357
358 /*
359 * Higher-order pages are called "compound pages". They are structured thusly:
360 *
361 * The first PAGE_SIZE page is called the "head page".
362 *
363 * The remaining PAGE_SIZE pages are called "tail pages".
364 *
365 * All pages have PG_compound set. All tail pages have their ->first_page
366 * pointing at the head page.
367 *
368 * The first tail page's ->lru.next holds the address of the compound page's
369 * put_page() function. Its ->lru.prev holds the order of allocation.
370 * This usage means that zero-order pages may not be compound.
371 */
372
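/*
 * Concrete illustration of the layout above for an assumed order-2
 * (four page) compound page, as set up by prep_compound_page() below:
 *
 *    page[0]  head page (PageHead set)
 *    page[1]  first tail page: ->first_page = &page[0]; its ->lru.next
 *             holds the destructor and ->lru.prev holds the order (2)
 *    page[2]  tail page: ->first_page = &page[0]
 *    page[3]  tail page: ->first_page = &page[0]
 */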
373 static void free_compound_page(struct page *page)
374 {
375 __free_pages_ok(page, compound_order(page));
376 }
377
378 void prep_compound_page(struct page *page, unsigned int order)
379 {
380 int i;
381 int nr_pages = 1 << order;
382
383 set_compound_page_dtor(page, free_compound_page);
384 set_compound_order(page, order);
385 __SetPageHead(page);
386 for (i = 1; i < nr_pages; i++) {
387 struct page *p = page + i;
388 set_page_count(p, 0);
389 p->first_page = page;
390 /* Make sure p->first_page is always valid for PageTail() */
391 smp_wmb();
392 __SetPageTail(p);
393 }
394 }
395
396 /* update __split_huge_page_refcount if you change this function */
397 static int destroy_compound_page(struct page *page, unsigned long order)
398 {
399 int i;
400 int nr_pages = 1 << order;
401 int bad = 0;
402
403 if (unlikely(compound_order(page) != order)) {
404 bad_page(page, "wrong compound order", 0);
405 bad++;
406 }
407
408 __ClearPageHead(page);
409
410 for (i = 1; i < nr_pages; i++) {
411 struct page *p = page + i;
412
413 if (unlikely(!PageTail(p))) {
414 bad_page(page, "PageTail not set", 0);
415 bad++;
416 } else if (unlikely(p->first_page != page)) {
417 bad_page(page, "first_page not consistent", 0);
418 bad++;
419 }
420 __ClearPageTail(p);
421 }
422
423 return bad;
424 }
425
426 static inline void prep_zero_page(struct page *page, unsigned int order,
427 gfp_t gfp_flags)
428 {
429 int i;
430
431 /*
432 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
433 * and __GFP_HIGHMEM from hard or soft interrupt context.
434 */
435 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
436 for (i = 0; i < (1 << order); i++)
437 clear_highpage(page + i);
438 }
439
440 #ifdef CONFIG_DEBUG_PAGEALLOC
441 unsigned int _debug_guardpage_minorder;
442
443 static int __init debug_guardpage_minorder_setup(char *buf)
444 {
445 unsigned long res;
446
447 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
448 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
449 return 0;
450 }
451 _debug_guardpage_minorder = res;
452 printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
453 return 0;
454 }
455 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
456
457 static inline void set_page_guard_flag(struct page *page)
458 {
459 __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
460 }
461
462 static inline void clear_page_guard_flag(struct page *page)
463 {
464 __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
465 }
466 #else
467 static inline void set_page_guard_flag(struct page *page) { }
468 static inline void clear_page_guard_flag(struct page *page) { }
469 #endif
470
471 static inline void set_page_order(struct page *page, unsigned int order)
472 {
473 set_page_private(page, order);
474 __SetPageBuddy(page);
475 }
476
477 static inline void rmv_page_order(struct page *page)
478 {
479 __ClearPageBuddy(page);
480 set_page_private(page, 0);
481 }
482
483 /*
484 * This function checks whether a page is free && is the buddy
485 * we can coalesce with. We can coalesce a page and its buddy if
486 * (a) the buddy is not in a hole &&
487 * (b) the buddy is in the buddy system &&
488 * (c) a page and its buddy have the same order &&
489 * (d) a page and its buddy are in the same zone.
490 *
491 * For recording whether a page is in the buddy system, we set ->_mapcount
492 * to PAGE_BUDDY_MAPCOUNT_VALUE.
493 * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE is
494 * serialized by zone->lock.
495 *
496 * For recording page's order, we use page_private(page).
497 */
498 static inline int page_is_buddy(struct page *page, struct page *buddy,
499 unsigned int order)
500 {
501 if (!pfn_valid_within(page_to_pfn(buddy)))
502 return 0;
503
504 if (page_is_guard(buddy) && page_order(buddy) == order) {
505 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
506
507 if (page_zone_id(page) != page_zone_id(buddy))
508 return 0;
509
510 return 1;
511 }
512
513 if (PageBuddy(buddy) && page_order(buddy) == order) {
514 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
515
516 /*
517 * zone check is done late to avoid uselessly
518 * calculating zone/node ids for pages that could
519 * never merge.
520 */
521 if (page_zone_id(page) != page_zone_id(buddy))
522 return 0;
523
524 return 1;
525 }
526 return 0;
527 }
528
529 /*
530 * Freeing function for a buddy system allocator.
531 *
532 * The concept of a buddy system is to maintain a direct-mapped table
533 * (containing bit values) for memory blocks of various "orders".
534 * The bottom level table contains the map for the smallest allocatable
535 * units of memory (here, pages), and each level above it describes
536 * pairs of units from the levels below, hence, "buddies".
537 * At a high level, all that happens here is marking the table entry
538 * at the bottom level available, and propagating the changes upward
539 * as necessary, plus some accounting needed to play nicely with other
540 * parts of the VM system.
541 * At each level, we keep a list of pages, which are heads of contiguous
542 * free pages of length (1 << order) and marked with _mapcount
543 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
544 * field.
545 * So when we are allocating or freeing one, we can derive the state of the
546 * other. That is, if we allocate a small block, and both were
547 * free, the remainder of the region must be split into blocks.
548 * If a block is freed, and its buddy is also free, then this
549 * triggers coalescing into a block of larger size.
550 *
551 * -- nyc
552 */
553
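/*
 * A small worked example of the index arithmetic used below, assuming
 * __find_buddy_index() is the usual XOR of the order bit: for a page at
 * page_idx = 12 being freed at order = 2,
 *
 *    buddy_idx    = page_idx ^ (1 << order) = 12 ^ 4 = 8
 *    combined_idx = buddy_idx & page_idx    = 8 & 12 = 8
 *
 * so if the buddy at index 8 is a free order-2 page, the two merge into
 * one order-3 block starting at index 8 and the loop repeats one order
 * higher.
 */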
554 static inline void __free_one_page(struct page *page,
555 unsigned long pfn,
556 struct zone *zone, unsigned int order,
557 int migratetype)
558 {
559 unsigned long page_idx;
560 unsigned long combined_idx;
561 unsigned long uninitialized_var(buddy_idx);
562 struct page *buddy;
563 unsigned int max_order;
564
565 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
566
567 VM_BUG_ON(!zone_is_initialized(zone));
568
569 if (unlikely(PageCompound(page)))
570 if (unlikely(destroy_compound_page(page, order)))
571 return;
572
573 VM_BUG_ON(migratetype == -1);
574 if (likely(!is_migrate_isolate(migratetype)))
575 __mod_zone_freepage_state(zone, 1 << order, migratetype);
576
577 page_idx = pfn & ((1 << MAX_ORDER) - 1);
578
579 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
580 VM_BUG_ON_PAGE(bad_range(zone, page), page);
581
582 continue_merging:
583 while (order < max_order - 1) {
584 buddy_idx = __find_buddy_index(page_idx, order);
585 buddy = page + (buddy_idx - page_idx);
586 if (!page_is_buddy(page, buddy, order))
587 goto done_merging;
588 /*
589 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page,
590 * merge with it and move up one order.
591 */
592 if (page_is_guard(buddy)) {
593 clear_page_guard_flag(buddy);
594 set_page_private(buddy, 0);
595 if (!is_migrate_isolate(migratetype)) {
596 __mod_zone_freepage_state(zone, 1 << order,
597 migratetype);
598 }
599 } else {
600 list_del(&buddy->lru);
601 zone->free_area[order].nr_free--;
602 rmv_page_order(buddy);
603 }
604 combined_idx = buddy_idx & page_idx;
605 page = page + (combined_idx - page_idx);
606 page_idx = combined_idx;
607 order++;
608 }
609 if (max_order < MAX_ORDER) {
610 /* If we are here, it means order is >= pageblock_order.
611 * We want to prevent merge between freepages on isolate
612 * pageblock and normal pageblock. Without this, pageblock
613 * isolation could cause incorrect freepage or CMA accounting.
614 *
615 * We don't want to hit this code for the more frequent
616 * low-order merging.
617 */
618 if (unlikely(has_isolate_pageblock(zone))) {
619 int buddy_mt;
620
621 buddy_idx = __find_buddy_index(page_idx, order);
622 buddy = page + (buddy_idx - page_idx);
623 buddy_mt = get_pageblock_migratetype(buddy);
624
625 if (migratetype != buddy_mt
626 && (is_migrate_isolate(migratetype) ||
627 is_migrate_isolate(buddy_mt)))
628 goto done_merging;
629 }
630 max_order++;
631 goto continue_merging;
632 }
633
634 done_merging:
635 set_page_order(page, order);
636
637 /*
638 * If this is not the largest possible page, check if the buddy
639 * of the next-highest order is free. If it is, it's possible
640 * that pages are being freed that will coalesce soon. In case
641 * that is happening, add the free page to the tail of the list
642 * so it's less likely to be used soon and more likely to be merged
643 * as a higher order page
644 */
645 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
646 struct page *higher_page, *higher_buddy;
647 combined_idx = buddy_idx & page_idx;
648 higher_page = page + (combined_idx - page_idx);
649 buddy_idx = __find_buddy_index(combined_idx, order + 1);
650 higher_buddy = higher_page + (buddy_idx - combined_idx);
651 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
652 list_add_tail(&page->lru,
653 &zone->free_area[order].free_list[migratetype]);
654 goto out;
655 }
656 }
657
658 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
659 out:
660 zone->free_area[order].nr_free++;
661 }
662
663 static inline int free_pages_check(struct page *page)
664 {
665 const char *bad_reason = NULL;
666 unsigned long bad_flags = 0;
667
668 if (unlikely(page_mapcount(page)))
669 bad_reason = "nonzero mapcount";
670 if (unlikely(page->mapping != NULL))
671 bad_reason = "non-NULL mapping";
672 if (unlikely(atomic_read(&page->_count) != 0))
673 bad_reason = "nonzero _count";
674 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
675 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
676 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
677 }
678 if (unlikely(mem_cgroup_bad_page_check(page)))
679 bad_reason = "cgroup check failed";
680 if (unlikely(bad_reason)) {
681 bad_page(page, bad_reason, bad_flags);
682 return 1;
683 }
684 page_cpupid_reset_last(page);
685 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
686 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
687 return 0;
688 }
689
690 /*
691 * Frees a number of pages from the PCP lists
692 * Assumes all pages on list are in same zone, and of same order.
693 * count is the number of pages to free.
694 *
695 * If the zone was previously in an "all pages pinned" state then look to
696 * see if this freeing clears that state.
697 *
698 * And clear the zone's pages_scanned counter, to hold off the "all pages are
699 * pinned" detection logic.
700 */
701 static void free_pcppages_bulk(struct zone *zone, int count,
702 struct per_cpu_pages *pcp)
703 {
704 int migratetype = 0;
705 int batch_free = 0;
706 int to_free = count;
707 unsigned long nr_scanned;
708
709 spin_lock(&zone->lock);
710 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
711 if (nr_scanned)
712 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
713
714 while (to_free) {
715 struct page *page;
716 struct list_head *list;
717
718 /*
719 * Remove pages from lists in a round-robin fashion. A
720 * batch_free count is maintained that is incremented when an
721 * empty list is encountered. This is so more pages are freed
722 * off fuller lists instead of spinning excessively around empty
723 * lists
724 */
725 do {
726 batch_free++;
727 if (++migratetype == MIGRATE_PCPTYPES)
728 migratetype = 0;
729 list = &pcp->lists[migratetype];
730 } while (list_empty(list));
731
732 /* This is the only non-empty list. Free them all. */
733 if (batch_free == MIGRATE_PCPTYPES)
734 batch_free = to_free;
735
736 do {
737 int mt; /* migratetype of the to-be-freed page */
738
739 page = list_entry(list->prev, struct page, lru);
740 /* must delete as __free_one_page list manipulates */
741 list_del(&page->lru);
742 mt = get_freepage_migratetype(page);
743 if (unlikely(has_isolate_pageblock(zone)))
744 mt = get_pageblock_migratetype(page);
745
746 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
747 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
748 trace_mm_page_pcpu_drain(page, 0, mt);
749 } while (--to_free && --batch_free && !list_empty(list));
750 }
751 spin_unlock(&zone->lock);
752 }
753
754 static void free_one_page(struct zone *zone,
755 struct page *page, unsigned long pfn,
756 unsigned int order,
757 int migratetype)
758 {
759 unsigned long nr_scanned;
760 spin_lock(&zone->lock);
761 nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
762 if (nr_scanned)
763 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
764
765 if (unlikely(has_isolate_pageblock(zone) ||
766 is_migrate_isolate(migratetype))) {
767 migratetype = get_pfnblock_migratetype(page, pfn);
768 }
769 __free_one_page(page, pfn, zone, order, migratetype);
770 spin_unlock(&zone->lock);
771 }
772
773 static bool free_pages_prepare(struct page *page, unsigned int order)
774 {
775 int i;
776 int bad = 0;
777
778 trace_mm_page_free(page, order);
779 kmemcheck_free_shadow(page, order);
780
781 if (PageAnon(page))
782 page->mapping = NULL;
783 for (i = 0; i < (1 << order); i++)
784 bad += free_pages_check(page + i);
785 if (bad)
786 return false;
787
788 if (!PageHighMem(page)) {
789 debug_check_no_locks_freed(page_address(page),
790 PAGE_SIZE << order);
791 debug_check_no_obj_freed(page_address(page),
792 PAGE_SIZE << order);
793 }
794 arch_free_page(page, order);
795 kernel_map_pages(page, 1 << order, 0);
796
797 return true;
798 }
799
800 static void __free_pages_ok(struct page *page, unsigned int order)
801 {
802 unsigned long flags;
803 int migratetype;
804 unsigned long pfn = page_to_pfn(page);
805
806 if (!free_pages_prepare(page, order))
807 return;
808
809 migratetype = get_pfnblock_migratetype(page, pfn);
810 local_irq_save(flags);
811 __count_vm_events(PGFREE, 1 << order);
812 set_freepage_migratetype(page, migratetype);
813 free_one_page(page_zone(page), page, pfn, order, migratetype);
814 local_irq_restore(flags);
815 }
816
817 void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
818 unsigned int order)
819 {
820 unsigned int nr_pages = 1 << order;
821 struct page *p = page;
822 unsigned int loop;
823
824 prefetchw(p);
825 for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
826 prefetchw(p + 1);
827 __ClearPageReserved(p);
828 set_page_count(p, 0);
829 }
830 __ClearPageReserved(p);
831 set_page_count(p, 0);
832
833 page_zone(page)->managed_pages += nr_pages;
834 set_page_refcounted(page);
835 __free_pages(page, order);
836 }
837
838 #ifdef CONFIG_CMA
839 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
840 void __init init_cma_reserved_pageblock(struct page *page)
841 {
842 unsigned i = pageblock_nr_pages;
843 struct page *p = page;
844
845 do {
846 __ClearPageReserved(p);
847 set_page_count(p, 0);
848 } while (++p, --i);
849
850 set_pageblock_migratetype(page, MIGRATE_CMA);
851
852 if (pageblock_order >= MAX_ORDER) {
853 i = pageblock_nr_pages;
854 p = page;
855 do {
856 set_page_refcounted(p);
857 __free_pages(p, MAX_ORDER - 1);
858 p += MAX_ORDER_NR_PAGES;
859 } while (i -= MAX_ORDER_NR_PAGES);
860 } else {
861 set_page_refcounted(page);
862 __free_pages(page, pageblock_order);
863 }
864
865 adjust_managed_page_count(page, pageblock_nr_pages);
866 }
867 #endif
868
869 /*
870 * The order of subdivision here is critical for the IO subsystem.
871 * Please do not alter this order without good reasons and regression
872 * testing. Specifically, as large blocks of memory are subdivided,
873 * the order in which smaller blocks are delivered depends on the order
874 * they're subdivided in this function. This is the primary factor
875 * influencing the order in which pages are delivered to the IO
876 * subsystem according to empirical testing, and this is also justified
877 * by considering the behavior of a buddy system containing a single
878 * large block of memory acted on by a series of small allocations.
879 * This behavior is a critical factor in sglist merging's success.
880 *
881 * -- nyc
882 */
883 static inline void expand(struct zone *zone, struct page *page,
884 int low, int high, struct free_area *area,
885 int migratetype)
886 {
887 unsigned long size = 1 << high;
888
889 while (high > low) {
890 area--;
891 high--;
892 size >>= 1;
893 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
894
895 #ifdef CONFIG_DEBUG_PAGEALLOC
896 if (high < debug_guardpage_minorder()) {
897 /*
898 * Mark as guard page(s), so they can be merged back
899 * into the allocator when the buddy is freed.
900 * The corresponding page table entries are not touched;
901 * the pages stay not present in the virtual address space.
902 */
903 INIT_LIST_HEAD(&page[size].lru);
904 set_page_guard_flag(&page[size]);
905 set_page_private(&page[size], high);
906 /* Guard pages are not available for any usage */
907 __mod_zone_freepage_state(zone, -(1 << high),
908 migratetype);
909 continue;
910 }
911 #endif
912 list_add(&page[size].lru, &area->free_list[migratetype]);
913 area->nr_free++;
914 set_page_order(&page[size], high);
915 }
916 }
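/*
 * Worked example of expand() with guard pages disabled: satisfying an
 * order-2 request (low = 2) from an order-5 free block (high = 5) frees
 * the upper half of the remaining block at each step:
 *
 *    high = 4: pages [16..31] go onto the order-4 free list
 *    high = 3: pages [8..15]  go onto the order-3 free list
 *    high = 2: pages [4..7]   go onto the order-2 free list
 *
 * leaving pages [0..3] as the order-2 block handed back to the caller.
 */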
917
918 /*
919 * This page is about to be returned from the page allocator
920 */
921 static inline int check_new_page(struct page *page)
922 {
923 const char *bad_reason = NULL;
924 unsigned long bad_flags = 0;
925
926 if (unlikely(page_mapcount(page)))
927 bad_reason = "nonzero mapcount";
928 if (unlikely(page->mapping != NULL))
929 bad_reason = "non-NULL mapping";
930 if (unlikely(atomic_read(&page->_count) != 0))
931 bad_reason = "nonzero _count";
932 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
933 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
934 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
935 }
936 if (unlikely(mem_cgroup_bad_page_check(page)))
937 bad_reason = "cgroup check failed";
938 if (unlikely(bad_reason)) {
939 bad_page(page, bad_reason, bad_flags);
940 return 1;
941 }
942 return 0;
943 }
944
945 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
946 {
947 int i;
948
949 for (i = 0; i < (1 << order); i++) {
950 struct page *p = page + i;
951 if (unlikely(check_new_page(p)))
952 return 1;
953 }
954
955 set_page_private(page, 0);
956 set_page_refcounted(page);
957
958 arch_alloc_page(page, order);
959 kernel_map_pages(page, 1 << order, 1);
960
961 if (gfp_flags & __GFP_ZERO)
962 prep_zero_page(page, order, gfp_flags);
963
964 if (order && (gfp_flags & __GFP_COMP))
965 prep_compound_page(page, order);
966
967 return 0;
968 }
969
970 /*
971 * Go through the free lists for the given migratetype and remove
972 * the smallest available page from the freelists
973 */
974 static inline
975 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
976 int migratetype)
977 {
978 unsigned int current_order;
979 struct free_area *area;
980 struct page *page;
981
982 /* Find a page of the appropriate size in the preferred list */
983 for (current_order = order; current_order < MAX_ORDER; ++current_order) {
984 area = &(zone->free_area[current_order]);
985 if (list_empty(&area->free_list[migratetype]))
986 continue;
987
988 page = list_entry(area->free_list[migratetype].next,
989 struct page, lru);
990 list_del(&page->lru);
991 rmv_page_order(page);
992 area->nr_free--;
993 expand(zone, page, order, current_order, area, migratetype);
994 set_freepage_migratetype(page, migratetype);
995 return page;
996 }
997
998 return NULL;
999 }
1000
1001
1002 /*
1003 * This array describes the order in which the free lists are fallen back to
1004 * when the free lists for the desired migratetype are depleted.
1005 */
1006 static int fallbacks[MIGRATE_TYPES][4] = {
1007 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
1008 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
1009 #ifdef CONFIG_CMA
1010 [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
1011 [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
1012 #else
1013 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
1014 #endif
1015 [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
1016 #ifdef CONFIG_MEMORY_ISOLATION
1017 [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
1018 #endif
1019 };
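/*
 * Reading the table above: a MIGRATE_UNMOVABLE request whose own free
 * lists are empty is retried from MIGRATE_RECLAIMABLE, then
 * MIGRATE_MOVABLE, and stops at MIGRATE_RESERVE, which __rmqueue()
 * falls back to separately as a last resort.
 */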
1020
1021 /*
1022 * Move the free pages in a range to the free lists of the requested type.
1023 * Note that start_page and end_page are not aligned on a pageblock
1024 * boundary. If alignment is required, use move_freepages_block()
1025 */
1026 int move_freepages(struct zone *zone,
1027 struct page *start_page, struct page *end_page,
1028 int migratetype)
1029 {
1030 struct page *page;
1031 unsigned int order;
1032 int pages_moved = 0;
1033
1034 #ifndef CONFIG_HOLES_IN_ZONE
1035 /*
1036 * page_zone is not safe to call in this context when
1037 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1038 * anyway as we check zone boundaries in move_freepages_block().
1039 * Remove at a later date when no bug reports exist related to
1040 * grouping pages by mobility
1041 */
1042 VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1043 #endif
1044
1045 for (page = start_page; page <= end_page;) {
1046 if (!pfn_valid_within(page_to_pfn(page))) {
1047 page++;
1048 continue;
1049 }
1050
1051 /* Make sure we are not inadvertently changing nodes */
1052 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1053
1054 if (!PageBuddy(page)) {
1055 page++;
1056 continue;
1057 }
1058
1059 order = page_order(page);
1060 list_move(&page->lru,
1061 &zone->free_area[order].free_list[migratetype]);
1062 set_freepage_migratetype(page, migratetype);
1063 page += 1 << order;
1064 pages_moved += 1 << order;
1065 }
1066
1067 return pages_moved;
1068 }
1069
1070 int move_freepages_block(struct zone *zone, struct page *page,
1071 int migratetype)
1072 {
1073 unsigned long start_pfn, end_pfn;
1074 struct page *start_page, *end_page;
1075
1076 start_pfn = page_to_pfn(page);
1077 start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1078 start_page = pfn_to_page(start_pfn);
1079 end_page = start_page + pageblock_nr_pages - 1;
1080 end_pfn = start_pfn + pageblock_nr_pages - 1;
1081
1082 /* Do not cross zone boundaries */
1083 if (!zone_spans_pfn(zone, start_pfn))
1084 start_page = page;
1085 if (!zone_spans_pfn(zone, end_pfn))
1086 return 0;
1087
1088 return move_freepages(zone, start_page, end_page, migratetype);
1089 }
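/*
 * Example of the alignment above, assuming pageblock_nr_pages == 512:
 * a page at pfn 1234 gives start_pfn = 1234 & ~511 = 1024 and
 * end_pfn = 1535, so the whole 512-page block containing the page is
 * moved. If the block's start falls outside the zone the scan starts at
 * the passed page instead, and if its end falls outside nothing is moved.
 */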
1090
1091 static void change_pageblock_range(struct page *pageblock_page,
1092 int start_order, int migratetype)
1093 {
1094 int nr_pageblocks = 1 << (start_order - pageblock_order);
1095
1096 while (nr_pageblocks--) {
1097 set_pageblock_migratetype(pageblock_page, migratetype);
1098 pageblock_page += pageblock_nr_pages;
1099 }
1100 }
1101
1102 /*
1103 * When we are falling back to another migratetype during allocation, try to
1104 * steal extra free pages from the same pageblocks to satisfy further
1105 * allocations, instead of polluting multiple pageblocks.
1106 *
1107 * If we are stealing a relatively large buddy page, it is likely there will
1108 * be more free pages in the pageblock, so try to steal them all. For
1109 * reclaimable and unmovable allocations, we steal regardless of page size,
1110 * as fragmentation caused by those allocations polluting movable pageblocks
1111 * is worse than movable allocations stealing from unmovable and reclaimable
1112 * pageblocks.
1113 *
1114 * If we claim more than half of the pageblock, change pageblock's migratetype
1115 * as well.
1116 */
1117 static void try_to_steal_freepages(struct zone *zone, struct page *page,
1118 int start_type, int fallback_type)
1119 {
1120 unsigned int current_order = page_order(page);
1121
1122 /* Take ownership for orders >= pageblock_order */
1123 if (current_order >= pageblock_order) {
1124 change_pageblock_range(page, current_order, start_type);
1125 return;
1126 }
1127
1128 if (current_order >= pageblock_order / 2 ||
1129 start_type == MIGRATE_RECLAIMABLE ||
1130 start_type == MIGRATE_UNMOVABLE ||
1131 page_group_by_mobility_disabled) {
1132 int pages;
1133
1134 pages = move_freepages_block(zone, page, start_type);
1135
1136 /* Claim the whole block if over half of it is free */
1137 if (pages >= (1 << (pageblock_order-1)) ||
1138 page_group_by_mobility_disabled)
1139 set_pageblock_migratetype(page, start_type);
1140 }
1141 }
1142
1143 /* Remove an element from the buddy allocator from the fallback list */
1144 static inline struct page *
1145 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1146 {
1147 struct free_area *area;
1148 unsigned int current_order;
1149 struct page *page;
1150
1151 /* Find the largest possible block of pages in the other list */
1152 for (current_order = MAX_ORDER-1;
1153 current_order >= order && current_order <= MAX_ORDER-1;
1154 --current_order) {
1155 int i;
1156 for (i = 0;; i++) {
1157 int migratetype = fallbacks[start_migratetype][i];
1158 int buddy_type = start_migratetype;
1159
1160 /* MIGRATE_RESERVE handled later if necessary */
1161 if (migratetype == MIGRATE_RESERVE)
1162 break;
1163
1164 area = &(zone->free_area[current_order]);
1165 if (list_empty(&area->free_list[migratetype]))
1166 continue;
1167
1168 page = list_entry(area->free_list[migratetype].next,
1169 struct page, lru);
1170 area->nr_free--;
1171
1172 if (!is_migrate_cma(migratetype)) {
1173 try_to_steal_freepages(zone, page,
1174 start_migratetype,
1175 migratetype);
1176 } else {
1177 /*
1178 * When borrowing from MIGRATE_CMA, we need to
1179 * release the excess buddy pages to CMA
1180 * itself, and we do not try to steal extra
1181 * free pages.
1182 */
1183 buddy_type = migratetype;
1184 }
1185
1186 /* Remove the page from the freelists */
1187 list_del(&page->lru);
1188 rmv_page_order(page);
1189
1190 expand(zone, page, order, current_order, area,
1191 buddy_type);
1192
1193 /*
1194 * The freepage_migratetype may differ from pageblock's
1195 * migratetype depending on the decisions in
1196 * try_to_steal_freepages(). This is OK as long as it
1197 * does not differ for MIGRATE_CMA pageblocks. For CMA
1198 * we need to make sure unallocated pages flushed from
1199 * pcp lists are returned to the correct freelist.
1200 */
1201 set_freepage_migratetype(page, buddy_type);
1202
1203 trace_mm_page_alloc_extfrag(page, order, current_order,
1204 start_migratetype, migratetype);
1205
1206 return page;
1207 }
1208 }
1209
1210 return NULL;
1211 }
1212
1213 /*
1214 * Do the hard work of removing an element from the buddy allocator.
1215 * Call me with the zone->lock already held.
1216 */
1217 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1218 int migratetype)
1219 {
1220 struct page *page;
1221
1222 retry_reserve:
1223 page = __rmqueue_smallest(zone, order, migratetype);
1224
1225 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1226 page = __rmqueue_fallback(zone, order, migratetype);
1227
1228 /*
1229 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1230 * is used because __rmqueue_smallest is an inline function
1231 * and we want just one call site
1232 */
1233 if (!page) {
1234 migratetype = MIGRATE_RESERVE;
1235 goto retry_reserve;
1236 }
1237 }
1238
1239 trace_mm_page_alloc_zone_locked(page, order, migratetype);
1240 return page;
1241 }
1242
1243 /*
1244 * Obtain a specified number of elements from the buddy allocator, all under
1245 * a single hold of the lock, for efficiency. Add them to the supplied list.
1246 * Returns the number of new pages which were placed at *list.
1247 */
1248 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1249 unsigned long count, struct list_head *list,
1250 int migratetype, bool cold)
1251 {
1252 int i;
1253
1254 spin_lock(&zone->lock);
1255 for (i = 0; i < count; ++i) {
1256 struct page *page = __rmqueue(zone, order, migratetype);
1257 if (unlikely(page == NULL))
1258 break;
1259
1260 /*
1261 * Split buddy pages returned by expand() are received here
1262 * in physical page order. The page is added to the caller's
1263 * list and the list head then moves forward. From the caller's
1264 * perspective, the linked list is ordered by page number in
1265 * some conditions. This is useful for IO devices that can
1266 * merge IO requests if the physical pages are ordered
1267 * properly.
1268 */
1269 if (likely(!cold))
1270 list_add(&page->lru, list);
1271 else
1272 list_add_tail(&page->lru, list);
1273 list = &page->lru;
1274 if (is_migrate_cma(get_freepage_migratetype(page)))
1275 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1276 -(1 << order));
1277 }
1278 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1279 spin_unlock(&zone->lock);
1280 return i;
1281 }
1282
1283 #ifdef CONFIG_NUMA
1284 /*
1285 * Called from the vmstat counter updater to drain pagesets of this
1286 * currently executing processor on remote nodes after they have
1287 * expired.
1288 *
1289 * Note that this function must be called with the thread pinned to
1290 * a single processor.
1291 */
1292 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1293 {
1294 unsigned long flags;
1295 int to_drain, batch;
1296
1297 local_irq_save(flags);
1298 batch = ACCESS_ONCE(pcp->batch);
1299 to_drain = min(pcp->count, batch);
1300 if (to_drain > 0) {
1301 free_pcppages_bulk(zone, to_drain, pcp);
1302 pcp->count -= to_drain;
1303 }
1304 local_irq_restore(flags);
1305 }
1306 #endif
1307
1308 /*
1309 * Drain pages of the indicated processor.
1310 *
1311 * The processor must either be the current processor and the
1312 * thread pinned to the current processor or a processor that
1313 * is not online.
1314 */
1315 static void drain_pages(unsigned int cpu)
1316 {
1317 unsigned long flags;
1318 struct zone *zone;
1319
1320 for_each_populated_zone(zone) {
1321 struct per_cpu_pageset *pset;
1322 struct per_cpu_pages *pcp;
1323
1324 local_irq_save(flags);
1325 pset = per_cpu_ptr(zone->pageset, cpu);
1326
1327 pcp = &pset->pcp;
1328 if (pcp->count) {
1329 free_pcppages_bulk(zone, pcp->count, pcp);
1330 pcp->count = 0;
1331 }
1332 local_irq_restore(flags);
1333 }
1334 }
1335
1336 /*
1337 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1338 */
1339 void drain_local_pages(void *arg)
1340 {
1341 drain_pages(smp_processor_id());
1342 }
1343
1344 /*
1345 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1346 *
1347 * Note that this code is protected against sending an IPI to an offline
1348 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1349 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1350 * nothing keeps CPUs from showing up after we populated the cpumask and
1351 * before the call to on_each_cpu_mask().
1352 */
1353 void drain_all_pages(void)
1354 {
1355 int cpu;
1356 struct per_cpu_pageset *pcp;
1357 struct zone *zone;
1358
1359 /*
1360 * Allocate in the BSS so we won't require allocation in
1361 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1362 */
1363 static cpumask_t cpus_with_pcps;
1364
1365 /*
1366 * We don't care about racing with a CPU hotplug event,
1367 * as the offline notification will cause the notified
1368 * cpu to drain its pcps, and on_each_cpu_mask()
1369 * disables preemption as part of its processing.
1370 */
1371 for_each_online_cpu(cpu) {
1372 bool has_pcps = false;
1373 for_each_populated_zone(zone) {
1374 pcp = per_cpu_ptr(zone->pageset, cpu);
1375 if (pcp->pcp.count) {
1376 has_pcps = true;
1377 break;
1378 }
1379 }
1380 if (has_pcps)
1381 cpumask_set_cpu(cpu, &cpus_with_pcps);
1382 else
1383 cpumask_clear_cpu(cpu, &cpus_with_pcps);
1384 }
1385 on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
1386 }
1387
1388 #ifdef CONFIG_HIBERNATION
1389
1390 void mark_free_pages(struct zone *zone)
1391 {
1392 unsigned long pfn, max_zone_pfn;
1393 unsigned long flags;
1394 unsigned int order, t;
1395 struct list_head *curr;
1396
1397 if (zone_is_empty(zone))
1398 return;
1399
1400 spin_lock_irqsave(&zone->lock, flags);
1401
1402 max_zone_pfn = zone_end_pfn(zone);
1403 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1404 if (pfn_valid(pfn)) {
1405 struct page *page = pfn_to_page(pfn);
1406
1407 if (!swsusp_page_is_forbidden(page))
1408 swsusp_unset_page_free(page);
1409 }
1410
1411 for_each_migratetype_order(order, t) {
1412 list_for_each(curr, &zone->free_area[order].free_list[t]) {
1413 unsigned long i;
1414
1415 pfn = page_to_pfn(list_entry(curr, struct page, lru));
1416 for (i = 0; i < (1UL << order); i++)
1417 swsusp_set_page_free(pfn_to_page(pfn + i));
1418 }
1419 }
1420 spin_unlock_irqrestore(&zone->lock, flags);
1421 }
1422 #endif /* CONFIG_PM */
1423
1424 /*
1425 * Free a 0-order page
1426 * cold == true ? free a cold page : free a hot page
1427 */
1428 void free_hot_cold_page(struct page *page, bool cold)
1429 {
1430 struct zone *zone = page_zone(page);
1431 struct per_cpu_pages *pcp;
1432 unsigned long flags;
1433 unsigned long pfn = page_to_pfn(page);
1434 int migratetype;
1435
1436 if (!free_pages_prepare(page, 0))
1437 return;
1438
1439 migratetype = get_pfnblock_migratetype(page, pfn);
1440 set_freepage_migratetype(page, migratetype);
1441 local_irq_save(flags);
1442 __count_vm_event(PGFREE);
1443
1444 /*
1445 * We only track unmovable, reclaimable and movable on pcp lists.
1446 * Free ISOLATE pages back to the allocator because they are being
1447 * offlined but treat RESERVE as movable pages so we can get those
1448 * areas back if necessary. Otherwise, we may have to free
1449 * excessively into the page allocator
1450 */
1451 if (migratetype >= MIGRATE_PCPTYPES) {
1452 if (unlikely(is_migrate_isolate(migratetype))) {
1453 free_one_page(zone, page, pfn, 0, migratetype);
1454 goto out;
1455 }
1456 migratetype = MIGRATE_MOVABLE;
1457 }
1458
1459 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1460 if (!cold)
1461 list_add(&page->lru, &pcp->lists[migratetype]);
1462 else
1463 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1464 pcp->count++;
1465 if (pcp->count >= pcp->high) {
1466 unsigned long batch = ACCESS_ONCE(pcp->batch);
1467 free_pcppages_bulk(zone, batch, pcp);
1468 pcp->count -= batch;
1469 }
1470
1471 out:
1472 local_irq_restore(flags);
1473 }
1474
1475 /*
1476 * Free a list of 0-order pages
1477 */
1478 void free_hot_cold_page_list(struct list_head *list, bool cold)
1479 {
1480 struct page *page, *next;
1481
1482 list_for_each_entry_safe(page, next, list, lru) {
1483 trace_mm_page_free_batched(page, cold);
1484 free_hot_cold_page(page, cold);
1485 }
1486 }
1487
1488 /*
1489 * split_page takes a non-compound higher-order page, and splits it into
1490 * n (1<<order) sub-pages: page[0..n-1]
1491 * Each sub-page must be freed individually.
1492 *
1493 * Note: this is probably too low level an operation for use in drivers.
1494 * Please consult with lkml before using this in your driver.
1495 */
1496 void split_page(struct page *page, unsigned int order)
1497 {
1498 int i;
1499
1500 VM_BUG_ON_PAGE(PageCompound(page), page);
1501 VM_BUG_ON_PAGE(!page_count(page), page);
1502
1503 #ifdef CONFIG_KMEMCHECK
1504 /*
1505 * Split shadow pages too, because free(page[0]) would
1506 * otherwise free the whole shadow.
1507 */
1508 if (kmemcheck_page_is_tracked(page))
1509 split_page(virt_to_page(page[0].shadow), order);
1510 #endif
1511
1512 for (i = 1; i < (1 << order); i++)
1513 set_page_refcounted(page + i);
1514 }
1515 EXPORT_SYMBOL_GPL(split_page);
1516
1517 int __isolate_free_page(struct page *page, unsigned int order)
1518 {
1519 unsigned long watermark;
1520 struct zone *zone;
1521 int mt;
1522
1523 BUG_ON(!PageBuddy(page));
1524
1525 zone = page_zone(page);
1526 mt = get_pageblock_migratetype(page);
1527
1528 if (!is_migrate_isolate(mt)) {
1529 /* Obey watermarks as if the page was being allocated */
1530 watermark = low_wmark_pages(zone) + (1 << order);
1531 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1532 return 0;
1533
1534 __mod_zone_freepage_state(zone, -(1UL << order), mt);
1535 }
1536
1537 /* Remove page from free list */
1538 list_del(&page->lru);
1539 zone->free_area[order].nr_free--;
1540 rmv_page_order(page);
1541
1542 /* Set the pageblock if the isolated page is at least a pageblock */
1543 if (order >= pageblock_order - 1) {
1544 struct page *endpage = page + (1 << order) - 1;
1545 for (; page < endpage; page += pageblock_nr_pages) {
1546 int mt = get_pageblock_migratetype(page);
1547 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
1548 set_pageblock_migratetype(page,
1549 MIGRATE_MOVABLE);
1550 }
1551 }
1552
1553 return 1UL << order;
1554 }
1555
1556 /*
1557 * Similar to split_page except the page is already free. As this is only
1558 * being used for migration, the migratetype of the block also changes.
1559 * As this is called with interrupts disabled, the caller is responsible
1560 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1561 * are enabled.
1562 *
1563 * Note: this is probably too low level an operation for use in drivers.
1564 * Please consult with lkml before using this in your driver.
1565 */
1566 int split_free_page(struct page *page)
1567 {
1568 unsigned int order;
1569 int nr_pages;
1570
1571 order = page_order(page);
1572
1573 nr_pages = __isolate_free_page(page, order);
1574 if (!nr_pages)
1575 return 0;
1576
1577 /* Split into individual pages */
1578 set_page_refcounted(page);
1579 split_page(page, order);
1580 return nr_pages;
1581 }
1582
1583 /*
1584 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
1585 * we cheat by calling it from here, in the order > 0 path. Saves a branch
1586 * or two.
1587 */
1588 static inline
1589 struct page *buffered_rmqueue(struct zone *preferred_zone,
1590 struct zone *zone, unsigned int order,
1591 gfp_t gfp_flags, int migratetype)
1592 {
1593 unsigned long flags;
1594 struct page *page;
1595 bool cold = ((gfp_flags & __GFP_COLD) != 0);
1596
1597 again:
1598 if (likely(order == 0)) {
1599 struct per_cpu_pages *pcp;
1600 struct list_head *list;
1601
1602 local_irq_save(flags);
1603 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1604 list = &pcp->lists[migratetype];
1605 if (list_empty(list)) {
1606 pcp->count += rmqueue_bulk(zone, 0,
1607 pcp->batch, list,
1608 migratetype, cold);
1609 if (unlikely(list_empty(list)))
1610 goto failed;
1611 }
1612
1613 if (cold)
1614 page = list_entry(list->prev, struct page, lru);
1615 else
1616 page = list_entry(list->next, struct page, lru);
1617
1618 list_del(&page->lru);
1619 pcp->count--;
1620 } else {
1621 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1622 /*
1623 * __GFP_NOFAIL is not to be used in new code.
1624 *
1625 * All __GFP_NOFAIL callers should be fixed so that they
1626 * properly detect and handle allocation failures.
1627 *
1628 * We most definitely don't want callers attempting to
1629 * allocate greater than order-1 page units with
1630 * __GFP_NOFAIL.
1631 */
1632 WARN_ON_ONCE(order > 1);
1633 }
1634 spin_lock_irqsave(&zone->lock, flags);
1635 page = __rmqueue(zone, order, migratetype);
1636 spin_unlock(&zone->lock);
1637 if (!page)
1638 goto failed;
1639 __mod_zone_freepage_state(zone, -(1 << order),
1640 get_freepage_migratetype(page));
1641 }
1642
1643 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
1644 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
1645 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
1646 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
1647
1648 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1649 zone_statistics(preferred_zone, zone, gfp_flags);
1650 local_irq_restore(flags);
1651
1652 VM_BUG_ON_PAGE(bad_range(zone, page), page);
1653 if (prep_new_page(page, order, gfp_flags))
1654 goto again;
1655 return page;
1656
1657 failed:
1658 local_irq_restore(flags);
1659 return NULL;
1660 }
1661
1662 #ifdef CONFIG_FAIL_PAGE_ALLOC
1663
1664 static struct {
1665 struct fault_attr attr;
1666
1667 u32 ignore_gfp_highmem;
1668 u32 ignore_gfp_wait;
1669 u32 min_order;
1670 } fail_page_alloc = {
1671 .attr = FAULT_ATTR_INITIALIZER,
1672 .ignore_gfp_wait = 1,
1673 .ignore_gfp_highmem = 1,
1674 .min_order = 1,
1675 };
1676
1677 static int __init setup_fail_page_alloc(char *str)
1678 {
1679 return setup_fault_attr(&fail_page_alloc.attr, str);
1680 }
1681 __setup("fail_page_alloc=", setup_fail_page_alloc);
1682
1683 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1684 {
1685 if (order < fail_page_alloc.min_order)
1686 return false;
1687 if (gfp_mask & __GFP_NOFAIL)
1688 return false;
1689 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1690 return false;
1691 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1692 return false;
1693
1694 return should_fail(&fail_page_alloc.attr, 1 << order);
1695 }
1696
1697 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1698
1699 static int __init fail_page_alloc_debugfs(void)
1700 {
1701 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1702 struct dentry *dir;
1703
1704 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1705 &fail_page_alloc.attr);
1706 if (IS_ERR(dir))
1707 return PTR_ERR(dir);
1708
1709 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1710 &fail_page_alloc.ignore_gfp_wait))
1711 goto fail;
1712 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1713 &fail_page_alloc.ignore_gfp_highmem))
1714 goto fail;
1715 if (!debugfs_create_u32("min-order", mode, dir,
1716 &fail_page_alloc.min_order))
1717 goto fail;
1718
1719 return 0;
1720 fail:
1721 debugfs_remove_recursive(dir);
1722
1723 return -ENOMEM;
1724 }
1725
1726 late_initcall(fail_page_alloc_debugfs);
1727
1728 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1729
1730 #else /* CONFIG_FAIL_PAGE_ALLOC */
1731
1732 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1733 {
1734 return false;
1735 }
1736
1737 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1738
1739 /*
1740 * Return true if free pages are above 'mark'. This takes into account the order
1741 * of the allocation.
1742 */
1743 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
1744 unsigned long mark, int classzone_idx, int alloc_flags,
1745 long free_pages)
1746 {
1747 /* free_pages may go negative - that's OK */
1748 long min = mark;
1749 int o;
1750 long free_cma = 0;
1751
1752 free_pages -= (1 << order) - 1;
1753 if (alloc_flags & ALLOC_HIGH)
1754 min -= min / 2;
1755 if (alloc_flags & ALLOC_HARDER)
1756 min -= min / 4;
1757 #ifdef CONFIG_CMA
1758 /* If allocation can't use CMA areas don't use free CMA pages */
1759 if (!(alloc_flags & ALLOC_CMA))
1760 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
1761 #endif
1762
1763 if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
1764 return false;
1765 for (o = 0; o < order; o++) {
1766 /* At the next order, this order's pages become unavailable */
1767 free_pages -= z->free_area[o].nr_free << o;
1768
1769 /* Require fewer higher order pages to be free */
1770 min >>= min_free_order_shift;
1771
1772 if (free_pages <= min)
1773 return false;
1774 }
1775 return true;
1776 }
1777
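/*
 * Illustrative example (assumed numbers, min_free_order_shift == 1): for an
 * order-2 request with mark = 128, free_pages = 200, an empty lowmem
 * reserve and no ALLOC_HIGH/ALLOC_HARDER/CMA adjustments, the check above
 * first drops (1 << 2) - 1 = 3 pages, leaving 197 > 128 at order 0.  With
 * nr_free[0] = 40 and nr_free[1] = 20, the usable total then shrinks to
 * 197 - 40 = 157 against a halved mark of 64, and to 157 - 40 = 117
 * against 32, so the watermark is met and the function returns true.
 */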
1778 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1779 int classzone_idx, int alloc_flags)
1780 {
1781 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1782 zone_page_state(z, NR_FREE_PAGES));
1783 }
1784
1785 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
1786 unsigned long mark, int classzone_idx, int alloc_flags)
1787 {
1788 long free_pages = zone_page_state(z, NR_FREE_PAGES);
1789
1790 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1791 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1792
1793 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1794 free_pages);
1795 }
1796
1797 #ifdef CONFIG_NUMA
1798 /*
1799 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1800 * skip over zones that are not allowed by the cpuset, or that have
1801 * been recently (in the last second) found to be nearly full. See further
1802 * comments in mmzone.h. Reduces cache footprint of zonelist scans
1803 * that have to skip over a lot of full or unallowed zones.
1804 *
1805 * If the zonelist cache is present in the passed zonelist, then
1806 * returns a pointer to the allowed node mask (either the current
1807 * tasks mems_allowed, or node_states[N_MEMORY].)
1808 *
1809 * If the zonelist cache is not available for this zonelist, does
1810 * nothing and returns NULL.
1811 *
1812 * If the fullzones BITMAP in the zonelist cache is stale (more than
1813 * a second since last zap'd) then we zap it out (clear its bits.)
1814 *
1815 * We hold off even calling zlc_setup, until after we've checked the
1816 * first zone in the zonelist, on the theory that most allocations will
1817 * be satisfied from that first zone, so best to examine that zone as
1818 * quickly as we can.
1819 */
1820 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1821 {
1822 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1823 nodemask_t *allowednodes; /* zonelist_cache approximation */
1824
1825 zlc = zonelist->zlcache_ptr;
1826 if (!zlc)
1827 return NULL;
1828
1829 if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1830 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1831 zlc->last_full_zap = jiffies;
1832 }
1833
1834 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1835 &cpuset_current_mems_allowed :
1836 &node_states[N_MEMORY];
1837 return allowednodes;
1838 }
1839
1840 /*
1841 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1842 * if it is worth looking at further for free memory:
1843 * 1) Check that the zone isn't thought to be full (doesn't have its
1844 * bit set in the zonelist_cache fullzones BITMAP).
1845 * 2) Check that the zones node (obtained from the zonelist_cache
1846 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1847 * Return true (non-zero) if zone is worth looking at further, or
1848 * else return false (zero) if it is not.
1849 *
1850 * This check -ignores- the distinction between various watermarks,
1851 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1852 * found to be full for any variation of these watermarks, it will
1853 * be considered full for up to one second by all requests, unless
1854 * we are so low on memory on all allowed nodes that we are forced
1855 * into the second scan of the zonelist.
1856 *
1857 * In the second scan we ignore this zonelist cache and exactly
1858 * apply the watermarks to all zones, even if it is slower to do so.
1859 * We are low on memory in the second scan, and should leave no stone
1860 * unturned looking for a free page.
1861 */
1862 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1863 nodemask_t *allowednodes)
1864 {
1865 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1866 int i; /* index of *z in zonelist zones */
1867 int n; /* node that zone *z is on */
1868
1869 zlc = zonelist->zlcache_ptr;
1870 if (!zlc)
1871 return 1;
1872
1873 i = z - zonelist->_zonerefs;
1874 n = zlc->z_to_n[i];
1875
1876 /* This zone is worth trying if it is allowed but not full */
1877 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1878 }
1879
1880 /*
1881 * Given 'z' scanning a zonelist, set the corresponding bit in
1882 * zlc->fullzones, so that subsequent attempts to allocate a page
1883 * from that zone don't waste time re-examining it.
1884 */
1885 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1886 {
1887 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1888 int i; /* index of *z in zonelist zones */
1889
1890 zlc = zonelist->zlcache_ptr;
1891 if (!zlc)
1892 return;
1893
1894 i = z - zonelist->_zonerefs;
1895
1896 set_bit(i, zlc->fullzones);
1897 }
1898
1899 /*
1900 * clear all zones full, called after direct reclaim makes progress so that
1901 * a zone that was recently full is not skipped over for up to a second
1902 */
1903 static void zlc_clear_zones_full(struct zonelist *zonelist)
1904 {
1905 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1906
1907 zlc = zonelist->zlcache_ptr;
1908 if (!zlc)
1909 return;
1910
1911 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1912 }
1913
1914 static bool zone_local(struct zone *local_zone, struct zone *zone)
1915 {
1916 return local_zone->node == zone->node;
1917 }
1918
1919 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1920 {
1921 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
1922 RECLAIM_DISTANCE;
1923 }
1924
1925 #else /* CONFIG_NUMA */
1926
1927 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1928 {
1929 return NULL;
1930 }
1931
1932 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1933 nodemask_t *allowednodes)
1934 {
1935 return 1;
1936 }
1937
1938 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1939 {
1940 }
1941
1942 static void zlc_clear_zones_full(struct zonelist *zonelist)
1943 {
1944 }
1945
1946 static bool zone_local(struct zone *local_zone, struct zone *zone)
1947 {
1948 return true;
1949 }
1950
1951 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1952 {
1953 return true;
1954 }
1955
1956 #endif /* CONFIG_NUMA */
1957
1958 static void reset_alloc_batches(struct zone *preferred_zone)
1959 {
1960 struct zone *zone = preferred_zone->zone_pgdat->node_zones;
1961
1962 do {
1963 mod_zone_page_state(zone, NR_ALLOC_BATCH,
1964 high_wmark_pages(zone) - low_wmark_pages(zone) -
1965 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
1966 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
1967 } while (zone++ != preferred_zone);
1968 }
1969
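/*
 * Illustrative example (assumed numbers): with high_wmark_pages() = 600,
 * low_wmark_pages() = 400 and a current NR_ALLOC_BATCH of -50, the call
 * above adds 600 - 400 - (-50) = 250 pages, restoring the batch to the
 * high-low watermark delta of 200 before the zone is used again for
 * fair allocation.
 */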
1970 /*
1971 * get_page_from_freelist goes through the zonelist trying to allocate
1972 * a page.
1973 */
1974 static struct page *
1975 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1976 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1977 struct zone *preferred_zone, int classzone_idx, int migratetype)
1978 {
1979 struct zoneref *z;
1980 struct page *page = NULL;
1981 struct zone *zone;
1982 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1983 int zlc_active = 0; /* set if using zonelist_cache */
1984 int did_zlc_setup = 0; /* just call zlc_setup() one time */
1985 bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
1986 (gfp_mask & __GFP_WRITE);
1987 int nr_fair_skipped = 0;
1988 bool zonelist_rescan;
1989
1990 zonelist_scan:
1991 zonelist_rescan = false;
1992
1993 /*
1994 * Scan zonelist, looking for a zone with enough free.
1995 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
1996 */
1997 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1998 high_zoneidx, nodemask) {
1999 unsigned long mark;
2000
2001 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2002 !zlc_zone_worth_trying(zonelist, z, allowednodes))
2003 continue;
2004 if (cpusets_enabled() &&
2005 (alloc_flags & ALLOC_CPUSET) &&
2006 !cpuset_zone_allowed_softwall(zone, gfp_mask))
2007 continue;
2008 /*
2009 * Distribute pages in proportion to the individual
2010 * zone size to ensure fair page aging. The zone a
2011 * page was allocated in should have no effect on the
2012 * time the page has in memory before being reclaimed.
2013 */
2014 if (alloc_flags & ALLOC_FAIR) {
2015 if (!zone_local(preferred_zone, zone))
2016 break;
2017 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2018 nr_fair_skipped++;
2019 continue;
2020 }
2021 }
2022 /*
2023 * When allocating a page cache page for writing, we
2024 * want to get it from a zone that is within its dirty
2025 * limit, such that no single zone holds more than its
2026 * proportional share of globally allowed dirty pages.
2027 * The dirty limits take into account the zone's
2028 * lowmem reserves and high watermark so that kswapd
2029 * should be able to balance it without having to
2030 * write pages from its LRU list.
2031 *
2032 * This may look like it could increase pressure on
2033 * lower zones by failing allocations in higher zones
2034 * before they are full. But the pages that do spill
2035 * over are limited as the lower zones are protected
2036 * by this very same mechanism. It should not become
2037 * a practical burden to them.
2038 *
2039 * XXX: For now, allow allocations to potentially
2040 * exceed the per-zone dirty limit in the slowpath
2041 * (ALLOC_WMARK_LOW unset) before going into reclaim,
2042 * which is important when on a NUMA setup the allowed
2043 * zones are together not big enough to reach the
2044 * global limit. The proper fix for these situations
2045 * will require awareness of zones in the
2046 * dirty-throttling and the flusher threads.
2047 */
2048 if (consider_zone_dirty && !zone_dirty_ok(zone))
2049 continue;
2050
2051 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2052 if (!zone_watermark_ok(zone, order, mark,
2053 classzone_idx, alloc_flags)) {
2054 int ret;
2055
2056 /* Checked here to keep the fast path fast */
2057 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2058 if (alloc_flags & ALLOC_NO_WATERMARKS)
2059 goto try_this_zone;
2060
2061 if (IS_ENABLED(CONFIG_NUMA) &&
2062 !did_zlc_setup && nr_online_nodes > 1) {
2063 /*
2064 * we do zlc_setup if there are multiple nodes
2065 * and before considering the first zone allowed
2066 * by the cpuset.
2067 */
2068 allowednodes = zlc_setup(zonelist, alloc_flags);
2069 zlc_active = 1;
2070 did_zlc_setup = 1;
2071 }
2072
2073 if (zone_reclaim_mode == 0 ||
2074 !zone_allows_reclaim(preferred_zone, zone))
2075 goto this_zone_full;
2076
2077 /*
2078 * As we may have just activated ZLC, check if the first
2079 * eligible zone has failed zone_reclaim recently.
2080 */
2081 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2082 !zlc_zone_worth_trying(zonelist, z, allowednodes))
2083 continue;
2084
2085 ret = zone_reclaim(zone, gfp_mask, order);
2086 switch (ret) {
2087 case ZONE_RECLAIM_NOSCAN:
2088 /* did not scan */
2089 continue;
2090 case ZONE_RECLAIM_FULL:
2091 /* scanned but unreclaimable */
2092 continue;
2093 default:
2094 /* did we reclaim enough */
2095 if (zone_watermark_ok(zone, order, mark,
2096 classzone_idx, alloc_flags))
2097 goto try_this_zone;
2098
2099 /*
2100 * Failed to reclaim enough to meet watermark.
2101 * Only mark the zone full if checking the min
2102 * watermark or if we failed to reclaim just
2103 * 1<<order pages or else the page allocator
2104 * fastpath will prematurely mark zones full
2105 * when the watermark is between the low and
2106 * min watermarks.
2107 */
2108 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
2109 ret == ZONE_RECLAIM_SOME)
2110 goto this_zone_full;
2111
2112 continue;
2113 }
2114 }
2115
2116 try_this_zone:
2117 page = buffered_rmqueue(preferred_zone, zone, order,
2118 gfp_mask, migratetype);
2119 if (page)
2120 break;
2121 this_zone_full:
2122 if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2123 zlc_mark_zone_full(zonelist, z);
2124 }
2125
2126 if (page) {
2127 /*
2128 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
2129 * necessary to allocate the page. The expectation is
2130 * that the caller is taking steps that will free more
2131 * memory. The caller should avoid the page being used
2132 * for !PFMEMALLOC purposes.
2133 */
2134 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
2135 return page;
2136 }
2137
2138 /*
2139 * The first pass makes sure allocations are spread fairly within the
2140 * local node. However, the local node might have free pages left
2141 * after the fairness batches are exhausted, and remote zones haven't
2142 * even been considered yet. Try once more without fairness, and
2143 * include remote zones now, before entering the slowpath and waking
2144 * kswapd: prefer spilling to a remote zone over swapping locally.
2145 */
2146 if (alloc_flags & ALLOC_FAIR) {
2147 alloc_flags &= ~ALLOC_FAIR;
2148 if (nr_fair_skipped) {
2149 zonelist_rescan = true;
2150 reset_alloc_batches(preferred_zone);
2151 }
2152 if (nr_online_nodes > 1)
2153 zonelist_rescan = true;
2154 }
2155
2156 if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
2157 /* Disable zlc cache for second zonelist scan */
2158 zlc_active = 0;
2159 zonelist_rescan = true;
2160 }
2161
2162 if (zonelist_rescan)
2163 goto zonelist_scan;
2164
2165 return NULL;
2166 }
2167
2168 /*
2169 * Large machines with many possible nodes should not always dump per-node
2170 * meminfo in irq context.
2171 */
2172 static inline bool should_suppress_show_mem(void)
2173 {
2174 bool ret = false;
2175
2176 #if NODES_SHIFT > 8
2177 ret = in_interrupt();
2178 #endif
2179 return ret;
2180 }
2181
2182 static DEFINE_RATELIMIT_STATE(nopage_rs,
2183 DEFAULT_RATELIMIT_INTERVAL,
2184 DEFAULT_RATELIMIT_BURST);
2185
2186 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
2187 {
2188 unsigned int filter = SHOW_MEM_FILTER_NODES;
2189
2190 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2191 debug_guardpage_minorder() > 0)
2192 return;
2193
2194 /*
2195 * This documents exceptions given to allocations in certain
2196 * contexts that are allowed to allocate outside current's set
2197 * of allowed nodes.
2198 */
2199 if (!(gfp_mask & __GFP_NOMEMALLOC))
2200 if (test_thread_flag(TIF_MEMDIE) ||
2201 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2202 filter &= ~SHOW_MEM_FILTER_NODES;
2203 if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2204 filter &= ~SHOW_MEM_FILTER_NODES;
2205
2206 if (fmt) {
2207 struct va_format vaf;
2208 va_list args;
2209
2210 va_start(args, fmt);
2211
2212 vaf.fmt = fmt;
2213 vaf.va = &args;
2214
2215 pr_warn("%pV", &vaf);
2216
2217 va_end(args);
2218 }
2219
2220 pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
2221 current->comm, order, gfp_mask);
2222
2223 dump_stack();
2224 if (!should_suppress_show_mem())
2225 show_mem(filter);
2226 }
2227
2228 static inline int
2229 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2230 unsigned long did_some_progress,
2231 unsigned long pages_reclaimed)
2232 {
2233 /* Do not loop if specifically requested */
2234 if (gfp_mask & __GFP_NORETRY)
2235 return 0;
2236
2237 /* Always retry if specifically requested */
2238 if (gfp_mask & __GFP_NOFAIL)
2239 return 1;
2240
2241 /*
2242 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2243 * making forward progress without invoking OOM. Suspend also disables
2244 * storage devices so kswapd will not help. Bail if we are suspending.
2245 */
2246 if (!did_some_progress && pm_suspended_storage())
2247 return 0;
2248
2249 /*
2250 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2251 * means __GFP_NOFAIL, but that may not be true in other
2252 * implementations.
2253 */
2254 if (order <= PAGE_ALLOC_COSTLY_ORDER)
2255 return 1;
2256
2257 /*
2258 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2259 * specified, then we retry until we no longer reclaim any pages
2260 * (above), or we've reclaimed an order of pages at least as
2261 * large as the allocation's order. In both cases, if the
2262 * allocation still fails, we stop retrying.
2263 */
2264 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2265 return 1;
2266
2267 return 0;
2268 }
2269
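/*
 * Illustrative example (assumed numbers): an order-4 request (16 pages)
 * with __GFP_REPEAT set keeps being retried, as long as reclaim makes some
 * progress, until the caller has accumulated pages_reclaimed >= 1 << 4;
 * an order-3 or smaller request retries unconditionally because
 * order <= PAGE_ALLOC_COSTLY_ORDER is treated like __GFP_NOFAIL here.
 */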
2270 static inline struct page *
2271 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2272 struct zonelist *zonelist, enum zone_type high_zoneidx,
2273 nodemask_t *nodemask, struct zone *preferred_zone,
2274 int classzone_idx, int migratetype)
2275 {
2276 struct page *page;
2277
2278 /* Acquire the per-zone oom lock for each zone */
2279 if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
2280 schedule_timeout_uninterruptible(1);
2281 return NULL;
2282 }
2283
2284 /*
2285 * PM-freezer should be notified that there might be an OOM killer on
2286 * its way to kill and wake somebody up. This is too early and we might
2287 * end up not killing anything but false positives are acceptable.
2288 * See freeze_processes.
2289 */
2290 note_oom_kill();
2291
2292 /*
2293 * Go through the zonelist yet one more time, keep very high watermark
2294 * here, this is only to catch a parallel oom killing, we must fail if
2295 * we're still under heavy pressure.
2296 */
2297 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2298 order, zonelist, high_zoneidx,
2299 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2300 preferred_zone, classzone_idx, migratetype);
2301 if (page)
2302 goto out;
2303
2304 if (!(gfp_mask & __GFP_NOFAIL)) {
2305 /* The OOM killer will not help higher order allocs */
2306 if (order > PAGE_ALLOC_COSTLY_ORDER)
2307 goto out;
2308 /* The OOM killer does not needlessly kill tasks for lowmem */
2309 if (high_zoneidx < ZONE_NORMAL)
2310 goto out;
2311 /*
2312 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2313 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2314 * The caller should handle page allocation failure by itself if
2315 * it specifies __GFP_THISNODE.
2316 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2317 */
2318 if (gfp_mask & __GFP_THISNODE)
2319 goto out;
2320 }
2321 /* Exhausted what can be done so it's blamo time */
2322 out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2323
2324 out:
2325 oom_zonelist_unlock(zonelist, gfp_mask);
2326 return page;
2327 }
2328
2329 #ifdef CONFIG_COMPACTION
2330 /* Try memory compaction for high-order allocations before reclaim */
2331 static struct page *
2332 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2333 struct zonelist *zonelist, enum zone_type high_zoneidx,
2334 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2335 int classzone_idx, int migratetype, enum migrate_mode mode,
2336 int *contended_compaction, bool *deferred_compaction)
2337 {
2338 struct zone *last_compact_zone = NULL;
2339 unsigned long compact_result;
2340 struct page *page;
2341
2342 if (!order)
2343 return NULL;
2344
2345 current->flags |= PF_MEMALLOC;
2346 compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
2347 nodemask, mode,
2348 contended_compaction,
2349 &last_compact_zone);
2350 current->flags &= ~PF_MEMALLOC;
2351
2352 switch (compact_result) {
2353 case COMPACT_DEFERRED:
2354 *deferred_compaction = true;
2355 /* fall-through */
2356 case COMPACT_SKIPPED:
2357 return NULL;
2358 default:
2359 break;
2360 }
2361
2362 /*
2363 * At least in one zone compaction wasn't deferred or skipped, so let's
2364 * count a compaction stall
2365 */
2366 count_vm_event(COMPACTSTALL);
2367
2368 /* Page migration frees to the PCP lists but we want merging */
2369 drain_pages(get_cpu());
2370 put_cpu();
2371
2372 page = get_page_from_freelist(gfp_mask, nodemask,
2373 order, zonelist, high_zoneidx,
2374 alloc_flags & ~ALLOC_NO_WATERMARKS,
2375 preferred_zone, classzone_idx, migratetype);
2376
2377 if (page) {
2378 struct zone *zone = page_zone(page);
2379
2380 zone->compact_blockskip_flush = false;
2381 compaction_defer_reset(zone, order, true);
2382 count_vm_event(COMPACTSUCCESS);
2383 return page;
2384 }
2385
2386 /*
2387 * last_compact_zone is where try_to_compact_pages thought allocation
2388 * should succeed, so it did not defer compaction. But here we know
2389 * that it didn't succeed, so we do the defer.
2390 */
2391 if (last_compact_zone && mode != MIGRATE_ASYNC)
2392 defer_compaction(last_compact_zone, order);
2393
2394 /*
2395 * It's bad if a compaction run occurs and fails. The most likely reason
2396 * is that pages exist, but not enough to satisfy watermarks.
2397 */
2398 count_vm_event(COMPACTFAIL);
2399
2400 cond_resched();
2401
2402 return NULL;
2403 }
2404 #else
2405 static inline struct page *
2406 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2407 struct zonelist *zonelist, enum zone_type high_zoneidx,
2408 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2409 int classzone_idx, int migratetype, enum migrate_mode mode,
2410 int *contended_compaction, bool *deferred_compaction)
2411 {
2412 return NULL;
2413 }
2414 #endif /* CONFIG_COMPACTION */
2415
2416 /* Perform direct synchronous page reclaim */
2417 static int
2418 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2419 nodemask_t *nodemask)
2420 {
2421 struct reclaim_state reclaim_state;
2422 int progress;
2423
2424 cond_resched();
2425
2426 /* We now go into synchronous reclaim */
2427 cpuset_memory_pressure_bump();
2428 current->flags |= PF_MEMALLOC;
2429 lockdep_set_current_reclaim_state(gfp_mask);
2430 reclaim_state.reclaimed_slab = 0;
2431 current->reclaim_state = &reclaim_state;
2432
2433 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2434
2435 current->reclaim_state = NULL;
2436 lockdep_clear_current_reclaim_state();
2437 current->flags &= ~PF_MEMALLOC;
2438
2439 cond_resched();
2440
2441 return progress;
2442 }
2443
2444 /* The really slow allocator path where we enter direct reclaim */
2445 static inline struct page *
2446 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2447 struct zonelist *zonelist, enum zone_type high_zoneidx,
2448 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2449 int classzone_idx, int migratetype, unsigned long *did_some_progress)
2450 {
2451 struct page *page = NULL;
2452 bool drained = false;
2453
2454 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2455 nodemask);
2456 if (unlikely(!(*did_some_progress)))
2457 return NULL;
2458
2459 /* After successful reclaim, reconsider all zones for allocation */
2460 if (IS_ENABLED(CONFIG_NUMA))
2461 zlc_clear_zones_full(zonelist);
2462
2463 retry:
2464 page = get_page_from_freelist(gfp_mask, nodemask, order,
2465 zonelist, high_zoneidx,
2466 alloc_flags & ~ALLOC_NO_WATERMARKS,
2467 preferred_zone, classzone_idx,
2468 migratetype);
2469
2470 /*
2471 * If an allocation failed after direct reclaim, it could be because
2472 * pages are pinned on the per-cpu lists. Drain them and try again
2473 */
2474 if (!page && !drained) {
2475 drain_all_pages();
2476 drained = true;
2477 goto retry;
2478 }
2479
2480 return page;
2481 }
2482
2483 /*
2484 * This is called in the allocator slow-path if the allocation request is of
2485 * sufficient urgency to ignore watermarks and take other desperate measures
2486 */
2487 static inline struct page *
2488 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2489 struct zonelist *zonelist, enum zone_type high_zoneidx,
2490 nodemask_t *nodemask, struct zone *preferred_zone,
2491 int classzone_idx, int migratetype)
2492 {
2493 struct page *page;
2494
2495 do {
2496 page = get_page_from_freelist(gfp_mask, nodemask, order,
2497 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2498 preferred_zone, classzone_idx, migratetype);
2499
2500 if (!page && gfp_mask & __GFP_NOFAIL)
2501 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2502 } while (!page && (gfp_mask & __GFP_NOFAIL));
2503
2504 return page;
2505 }
2506
2507 static void wake_all_kswapds(unsigned int order,
2508 struct zonelist *zonelist,
2509 enum zone_type high_zoneidx,
2510 struct zone *preferred_zone,
2511 nodemask_t *nodemask)
2512 {
2513 struct zoneref *z;
2514 struct zone *zone;
2515
2516 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2517 high_zoneidx, nodemask)
2518 wakeup_kswapd(zone, order, zone_idx(preferred_zone));
2519 }
2520
2521 static inline int
2522 gfp_to_alloc_flags(gfp_t gfp_mask)
2523 {
2524 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2525 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2526
2527 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2528 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2529
2530 /*
2531 * The caller may dip into page reserves a bit more if the caller
2532 * cannot run direct reclaim, or if the caller has realtime scheduling
2533 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
2534 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2535 */
2536 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2537
2538 if (atomic) {
2539 /*
2540 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2541 * if it can't schedule.
2542 */
2543 if (!(gfp_mask & __GFP_NOMEMALLOC))
2544 alloc_flags |= ALLOC_HARDER;
2545 /*
2546 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2547 * comment for __cpuset_node_allowed_softwall().
2548 */
2549 alloc_flags &= ~ALLOC_CPUSET;
2550 } else if (unlikely(rt_task(current)) && !in_interrupt())
2551 alloc_flags |= ALLOC_HARDER;
2552
2553 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2554 if (gfp_mask & __GFP_MEMALLOC)
2555 alloc_flags |= ALLOC_NO_WATERMARKS;
2556 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2557 alloc_flags |= ALLOC_NO_WATERMARKS;
2558 else if (!in_interrupt() &&
2559 ((current->flags & PF_MEMALLOC) ||
2560 unlikely(test_thread_flag(TIF_MEMDIE))))
2561 alloc_flags |= ALLOC_NO_WATERMARKS;
2562 }
2563 #ifdef CONFIG_CMA
2564 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2565 alloc_flags |= ALLOC_CMA;
2566 #endif
2567 return alloc_flags;
2568 }
2569
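/*
 * Illustrative mapping (assumed, derived from the logic above): a plain
 * GFP_KERNEL allocation from a normal, non-realtime task ends up with
 * ALLOC_WMARK_MIN | ALLOC_CPUSET, while GFP_ATOMIC (__GFP_HIGH set,
 * __GFP_WAIT clear) additionally gets ALLOC_HIGH and ALLOC_HARDER and
 * drops ALLOC_CPUSET, so it can dip further into the reserves.
 * __GFP_MEMALLOC callers get ALLOC_NO_WATERMARKS on top.
 */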
2570 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2571 {
2572 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2573 }
2574
2575 static inline struct page *
2576 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2577 struct zonelist *zonelist, enum zone_type high_zoneidx,
2578 nodemask_t *nodemask, struct zone *preferred_zone,
2579 int classzone_idx, int migratetype)
2580 {
2581 const gfp_t wait = gfp_mask & __GFP_WAIT;
2582 struct page *page = NULL;
2583 int alloc_flags;
2584 unsigned long pages_reclaimed = 0;
2585 unsigned long did_some_progress;
2586 enum migrate_mode migration_mode = MIGRATE_ASYNC;
2587 bool deferred_compaction = false;
2588 int contended_compaction = COMPACT_CONTENDED_NONE;
2589
2590 /*
2591 * In the slowpath, we sanity check order to avoid ever trying to
2592 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2593 * be using allocators in order of preference for an area that is
2594 * too large.
2595 */
2596 if (order >= MAX_ORDER) {
2597 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2598 return NULL;
2599 }
2600
2601 /*
2602 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2603 * __GFP_NOWARN set) should not cause reclaim since the subsystem
2604 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2605 * using a larger set of nodes after it has established that the
2606 * allowed per node queues are empty and that nodes are
2607 * over allocated.
2608 */
2609 if (IS_ENABLED(CONFIG_NUMA) &&
2610 (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2611 goto nopage;
2612
2613 restart:
2614 if (!(gfp_mask & __GFP_NO_KSWAPD))
2615 wake_all_kswapds(order, zonelist, high_zoneidx,
2616 preferred_zone, nodemask);
2617
2618 /*
2619 * OK, we're below the kswapd watermark and have kicked background
2620 * reclaim. Now things get more complex, so set up alloc_flags according
2621 * to how we want to proceed.
2622 */
2623 alloc_flags = gfp_to_alloc_flags(gfp_mask);
2624
2625 /*
2626 * Find the true preferred zone if the allocation is unconstrained by
2627 * cpusets.
2628 */
2629 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
2630 struct zoneref *preferred_zoneref;
2631 preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
2632 NULL, &preferred_zone);
2633 classzone_idx = zonelist_zone_idx(preferred_zoneref);
2634 }
2635
2636 rebalance:
2637 /* This is the last chance, in general, before the goto nopage. */
2638 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2639 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2640 preferred_zone, classzone_idx, migratetype);
2641 if (page)
2642 goto got_pg;
2643
2644 /* Allocate without watermarks if the context allows */
2645 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2646 /*
2647 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2648 * the allocation is high priority and these types of
2649 * allocations are system rather than user oriented
2650 */
2651 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2652
2653 page = __alloc_pages_high_priority(gfp_mask, order,
2654 zonelist, high_zoneidx, nodemask,
2655 preferred_zone, classzone_idx, migratetype);
2656 if (page) {
2657 goto got_pg;
2658 }
2659 }
2660
2661 /* Atomic allocations - we can't balance anything */
2662 if (!wait) {
2663 /*
2664 * All existing users of the deprecated __GFP_NOFAIL are
2665 * blockable, so warn of any new users that actually allow this
2666 * type of allocation to fail.
2667 */
2668 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
2669 goto nopage;
2670 }
2671
2672 /* Avoid recursion of direct reclaim */
2673 if (current->flags & PF_MEMALLOC)
2674 goto nopage;
2675
2676 /* Avoid allocations with no watermarks from looping endlessly */
2677 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2678 goto nopage;
2679
2680 /*
2681 * Try direct compaction. The first pass is asynchronous. Subsequent
2682 * attempts after direct reclaim are synchronous
2683 */
2684 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2685 high_zoneidx, nodemask, alloc_flags,
2686 preferred_zone,
2687 classzone_idx, migratetype,
2688 migration_mode, &contended_compaction,
2689 &deferred_compaction);
2690 if (page)
2691 goto got_pg;
2692
2693 /* Checks for THP-specific high-order allocations */
2694 if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
2695 /*
2696 * If compaction is deferred for high-order allocations, it is
2697 * because sync compaction recently failed. If this is the case
2698 * and the caller requested a THP allocation, we do not want
2699 * to heavily disrupt the system, so we fail the allocation
2700 * instead of entering direct reclaim.
2701 */
2702 if (deferred_compaction)
2703 goto nopage;
2704
2705 /*
2706 * In all zones where compaction was attempted (and not
2707 * deferred or skipped), lock contention has been detected.
2708 * For THP allocation we do not want to disrupt the others
2709 * so we fallback to base pages instead.
2710 */
2711 if (contended_compaction == COMPACT_CONTENDED_LOCK)
2712 goto nopage;
2713
2714 /*
2715 * If compaction was aborted due to need_resched(), we do not
2716 * want to further increase allocation latency, unless it is
2717 * khugepaged trying to collapse.
2718 */
2719 if (contended_compaction == COMPACT_CONTENDED_SCHED
2720 && !(current->flags & PF_KTHREAD))
2721 goto nopage;
2722 }
2723
2724 /*
2725 * It can become very expensive to allocate transparent hugepages at
2726 * fault, so use asynchronous memory compaction for THP unless it is
2727 * khugepaged trying to collapse.
2728 */
2729 if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
2730 (current->flags & PF_KTHREAD))
2731 migration_mode = MIGRATE_SYNC_LIGHT;
2732
2733 /* Try direct reclaim and then allocating */
2734 page = __alloc_pages_direct_reclaim(gfp_mask, order,
2735 zonelist, high_zoneidx,
2736 nodemask,
2737 alloc_flags, preferred_zone,
2738 classzone_idx, migratetype,
2739 &did_some_progress);
2740 if (page)
2741 goto got_pg;
2742
2743 /*
2744 * If we failed to make any progress reclaiming, then we are
2745 * running out of options and have to consider going OOM
2746 */
2747 if (!did_some_progress) {
2748 if (oom_gfp_allowed(gfp_mask)) {
2749 if (oom_killer_disabled)
2750 goto nopage;
2751 /* Coredumps can quickly deplete all memory reserves */
2752 if ((current->flags & PF_DUMPCORE) &&
2753 !(gfp_mask & __GFP_NOFAIL))
2754 goto nopage;
2755 page = __alloc_pages_may_oom(gfp_mask, order,
2756 zonelist, high_zoneidx,
2757 nodemask, preferred_zone,
2758 classzone_idx, migratetype);
2759 if (page)
2760 goto got_pg;
2761
2762 if (!(gfp_mask & __GFP_NOFAIL)) {
2763 /*
2764 * The oom killer is not called for high-order
2765 * allocations that may fail, so if no progress
2766 * is being made, there are no other options and
2767 * retrying is unlikely to help.
2768 */
2769 if (order > PAGE_ALLOC_COSTLY_ORDER)
2770 goto nopage;
2771 /*
2772 * The oom killer is not called for lowmem
2773 * allocations to prevent needlessly killing
2774 * innocent tasks.
2775 */
2776 if (high_zoneidx < ZONE_NORMAL)
2777 goto nopage;
2778 }
2779
2780 goto restart;
2781 }
2782 }
2783
2784 /* Check if we should retry the allocation */
2785 pages_reclaimed += did_some_progress;
2786 if (should_alloc_retry(gfp_mask, order, did_some_progress,
2787 pages_reclaimed)) {
2788 /* Wait for some write requests to complete then retry */
2789 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2790 goto rebalance;
2791 } else {
2792 /*
2793 * High-order allocations do not necessarily loop after
2794 * direct reclaim and reclaim/compaction depends on compaction
2795 * being called after reclaim so call directly if necessary
2796 */
2797 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2798 high_zoneidx, nodemask, alloc_flags,
2799 preferred_zone,
2800 classzone_idx, migratetype,
2801 migration_mode, &contended_compaction,
2802 &deferred_compaction);
2803 if (page)
2804 goto got_pg;
2805 }
2806
2807 nopage:
2808 warn_alloc_failed(gfp_mask, order, NULL);
2809 return page;
2810 got_pg:
2811 if (kmemcheck_enabled)
2812 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2813
2814 return page;
2815 }
2816
2817 /*
2818 * This is the 'heart' of the zoned buddy allocator.
2819 */
2820 struct page *
2821 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2822 struct zonelist *zonelist, nodemask_t *nodemask)
2823 {
2824 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2825 struct zone *preferred_zone;
2826 struct zoneref *preferred_zoneref;
2827 struct page *page = NULL;
2828 int migratetype = gfpflags_to_migratetype(gfp_mask);
2829 unsigned int cpuset_mems_cookie;
2830 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
2831 int classzone_idx;
2832
2833 gfp_mask &= gfp_allowed_mask;
2834
2835 lockdep_trace_alloc(gfp_mask);
2836
2837 might_sleep_if(gfp_mask & __GFP_WAIT);
2838
2839 if (should_fail_alloc_page(gfp_mask, order))
2840 return NULL;
2841
2842 /*
2843 * Check the zones suitable for the gfp_mask contain at least one
2844 * valid zone. It's possible to have an empty zonelist as a result
2845 * of GFP_THISNODE and a memoryless node
2846 */
2847 if (unlikely(!zonelist->_zonerefs->zone))
2848 return NULL;
2849
2850 if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
2851 alloc_flags |= ALLOC_CMA;
2852
2853 retry_cpuset:
2854 cpuset_mems_cookie = read_mems_allowed_begin();
2855
2856 /* The preferred zone is used for statistics later */
2857 preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
2858 nodemask ? : &cpuset_current_mems_allowed,
2859 &preferred_zone);
2860 if (!preferred_zone)
2861 goto out;
2862 classzone_idx = zonelist_zone_idx(preferred_zoneref);
2863
2864 /* First allocation attempt */
2865 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2866 zonelist, high_zoneidx, alloc_flags,
2867 preferred_zone, classzone_idx, migratetype);
2868 if (unlikely(!page)) {
2869 /*
2870 * Runtime PM, block IO and its error handling path
2871 * can deadlock because I/O on the device might not
2872 * complete.
2873 */
2874 gfp_mask = memalloc_noio_flags(gfp_mask);
2875 page = __alloc_pages_slowpath(gfp_mask, order,
2876 zonelist, high_zoneidx, nodemask,
2877 preferred_zone, classzone_idx, migratetype);
2878 }
2879
2880 trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2881
2882 out:
2883 /*
2884 * When updating a task's mems_allowed, it is possible to race with
2885 * parallel threads in such a way that an allocation can fail while
2886 * the mask is being updated. If a page allocation is about to fail,
2887 * check if the cpuset changed during allocation and if so, retry.
2888 */
2889 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2890 goto retry_cpuset;
2891
2892 return page;
2893 }
2894 EXPORT_SYMBOL(__alloc_pages_nodemask);
2895
2896 /*
2897 * Common helper functions.
2898 */
2899 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2900 {
2901 struct page *page;
2902
2903 /*
2904 * __get_free_pages() returns a direct-mapped kernel virtual address,
2905 * which cannot represent a highmem page
2906 */
2907 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2908
2909 page = alloc_pages(gfp_mask, order);
2910 if (!page)
2911 return 0;
2912 return (unsigned long) page_address(page);
2913 }
2914 EXPORT_SYMBOL(__get_free_pages);
2915
2916 unsigned long get_zeroed_page(gfp_t gfp_mask)
2917 {
2918 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2919 }
2920 EXPORT_SYMBOL(get_zeroed_page);
2921
2922 void __free_pages(struct page *page, unsigned int order)
2923 {
2924 if (put_page_testzero(page)) {
2925 if (order == 0)
2926 free_hot_cold_page(page, false);
2927 else
2928 __free_pages_ok(page, order);
2929 }
2930 }
2931
2932 EXPORT_SYMBOL(__free_pages);
2933
2934 void free_pages(unsigned long addr, unsigned int order)
2935 {
2936 if (addr != 0) {
2937 VM_BUG_ON(!virt_addr_valid((void *)addr));
2938 __free_pages(virt_to_page((void *)addr), order);
2939 }
2940 }
2941
2942 EXPORT_SYMBOL(free_pages);
2943
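/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a driver needing a small, physically contiguous, direct-mapped buffer
 * might do
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 2);
 *
 * where order 2 yields four contiguous pages.  get_zeroed_page(GFP_KERNEL)
 * is the usual single-page variant when the caller relies on the memory
 * starting out zeroed.
 */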
2944 /*
2945 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
2946 * of the current memory cgroup.
2947 *
2948 * It should be used when the caller would like to use kmalloc, but since the
2949 * allocation is large, it has to fall back to the page allocator.
2950 */
2951 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
2952 {
2953 struct page *page;
2954 struct mem_cgroup *memcg = NULL;
2955
2956 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2957 return NULL;
2958 page = alloc_pages(gfp_mask, order);
2959 memcg_kmem_commit_charge(page, memcg, order);
2960 return page;
2961 }
2962
2963 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
2964 {
2965 struct page *page;
2966 struct mem_cgroup *memcg = NULL;
2967
2968 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2969 return NULL;
2970 page = alloc_pages_node(nid, gfp_mask, order);
2971 memcg_kmem_commit_charge(page, memcg, order);
2972 return page;
2973 }
2974
2975 /*
2976 * __free_kmem_pages and free_kmem_pages will free pages allocated with
2977 * alloc_kmem_pages.
2978 */
2979 void __free_kmem_pages(struct page *page, unsigned int order)
2980 {
2981 memcg_kmem_uncharge_pages(page, order);
2982 __free_pages(page, order);
2983 }
2984
2985 void free_kmem_pages(unsigned long addr, unsigned int order)
2986 {
2987 if (addr != 0) {
2988 VM_BUG_ON(!virt_addr_valid((void *)addr));
2989 __free_kmem_pages(virt_to_page((void *)addr), order);
2990 }
2991 }
2992
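/*
 * Illustrative usage sketch (hypothetical caller): a kmalloc-style path
 * that must satisfy a large, memcg-charged request directly from the page
 * allocator could use
 *
 *	struct page *p = alloc_kmem_pages(GFP_KERNEL, get_order(size));
 *	void *buf = p ? page_address(p) : NULL;
 *	...
 *	free_kmem_pages((unsigned long)buf, get_order(size));
 *
 * so the pages are charged to and uncharged from the current memory
 * cgroup's kmem counter around their lifetime.
 */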
2993 static void *make_alloc_exact(unsigned long addr, unsigned int order,
2994 size_t size)
2995 {
2996 if (addr) {
2997 unsigned long alloc_end = addr + (PAGE_SIZE << order);
2998 unsigned long used = addr + PAGE_ALIGN(size);
2999
3000 split_page(virt_to_page((void *)addr), order);
3001 while (used < alloc_end) {
3002 free_page(used);
3003 used += PAGE_SIZE;
3004 }
3005 }
3006 return (void *)addr;
3007 }
3008
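/*
 * Illustrative example (assumed numbers): for size = 5 * PAGE_SIZE the
 * caller passes order = 3 (eight pages).  make_alloc_exact() splits the
 * order-3 block into individual pages and frees the tail at
 * addr + 5 * PAGE_SIZE, addr + 6 * PAGE_SIZE and addr + 7 * PAGE_SIZE,
 * leaving exactly the five pages that were asked for.
 */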
3009 /**
3010 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3011 * @size: the number of bytes to allocate
3012 * @gfp_mask: GFP flags for the allocation
3013 *
3014 * This function is similar to alloc_pages(), except that it allocates the
3015 * minimum number of pages to satisfy the request. alloc_pages() can only
3016 * allocate memory in power-of-two pages.
3017 *
3018 * This function is also limited by MAX_ORDER.
3019 *
3020 * Memory allocated by this function must be released by free_pages_exact().
3021 */
3022 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3023 {
3024 unsigned int order = get_order(size);
3025 unsigned long addr;
3026
3027 addr = __get_free_pages(gfp_mask, order);
3028 return make_alloc_exact(addr, order, size);
3029 }
3030 EXPORT_SYMBOL(alloc_pages_exact);
3031
3032 /**
3033 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3034 * pages on a node.
3035 * @nid: the preferred node ID where memory should be allocated
3036 * @size: the number of bytes to allocate
3037 * @gfp_mask: GFP flags for the allocation
3038 *
3039 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3040 * back.
3041 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
3042 * but is not exact.
3043 */
3044 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3045 {
3046 unsigned int order = get_order(size);
3047 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3048 if (!p)
3049 return NULL;
3050 return make_alloc_exact((unsigned long)page_address(p), order, size);
3051 }
3052
3053 /**
3054 * free_pages_exact - release memory allocated via alloc_pages_exact()
3055 * @virt: the value returned by alloc_pages_exact.
3056 * @size: size of allocation, same value as passed to alloc_pages_exact().
3057 *
3058 * Release the memory allocated by a previous call to alloc_pages_exact.
3059 */
3060 void free_pages_exact(void *virt, size_t size)
3061 {
3062 unsigned long addr = (unsigned long)virt;
3063 unsigned long end = addr + PAGE_ALIGN(size);
3064
3065 while (addr < end) {
3066 free_page(addr);
3067 addr += PAGE_SIZE;
3068 }
3069 }
3070 EXPORT_SYMBOL(free_pages_exact);
3071
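/*
 * Illustrative usage sketch (hypothetical caller, 4KiB pages assumed):
 * allocating an exact 40KiB physically contiguous region and releasing it:
 *
 *	void *buf = alloc_pages_exact(40 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 40 * 1024);
 *
 * Unlike alloc_pages(GFP_KERNEL, get_order(40 * 1024)), this hands back the
 * six unused tail pages of the underlying order-4 block up front.
 */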
3072 /**
3073 * nr_free_zone_pages - count number of pages beyond high watermark
3074 * @offset: The zone index of the highest zone
3075 *
3076 * nr_free_zone_pages() counts the number of pages which are beyond the
3077 * high watermark within all zones at or below a given zone index. For each
3078 * zone, the number of pages is calculated as:
3079 * managed_pages - high_pages
3080 */
3081 static unsigned long nr_free_zone_pages(int offset)
3082 {
3083 struct zoneref *z;
3084 struct zone *zone;
3085
3086 /* Just pick one node, since fallback list is circular */
3087 unsigned long sum = 0;
3088
3089 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3090
3091 for_each_zone_zonelist(zone, z, zonelist, offset) {
3092 unsigned long size = zone->managed_pages;
3093 unsigned long high = high_wmark_pages(zone);
3094 if (size > high)
3095 sum += size - high;
3096 }
3097
3098 return sum;
3099 }
3100
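/*
 * Illustrative example (assumed numbers): with ZONE_DMA at 4000 managed
 * pages and a high watermark of 500, and ZONE_NORMAL at 200000 managed
 * pages and a high watermark of 3000, nr_free_zone_pages(gfp_zone(GFP_KERNEL))
 * returns (4000 - 500) + (200000 - 3000) = 200500 pages.
 */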
3101 /**
3102 * nr_free_buffer_pages - count number of pages beyond high watermark
3103 *
3104 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3105 * watermark within ZONE_DMA and ZONE_NORMAL.
3106 */
3107 unsigned long nr_free_buffer_pages(void)
3108 {
3109 return nr_free_zone_pages(gfp_zone(GFP_USER));
3110 }
3111 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3112
3113 /**
3114 * nr_free_pagecache_pages - count number of pages beyond high watermark
3115 *
3116 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3117 * high watermark within all zones.
3118 */
3119 unsigned long nr_free_pagecache_pages(void)
3120 {
3121 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
3122 }
3123
3124 static inline void show_node(struct zone *zone)
3125 {
3126 if (IS_ENABLED(CONFIG_NUMA))
3127 printk("Node %d ", zone_to_nid(zone));
3128 }
3129
3130 void si_meminfo(struct sysinfo *val)
3131 {
3132 val->totalram = totalram_pages;
3133 val->sharedram = global_page_state(NR_SHMEM);
3134 val->freeram = global_page_state(NR_FREE_PAGES);
3135 val->bufferram = nr_blockdev_pages();
3136 val->totalhigh = totalhigh_pages;
3137 val->freehigh = nr_free_highpages();
3138 val->mem_unit = PAGE_SIZE;
3139 }
3140
3141 EXPORT_SYMBOL(si_meminfo);
3142
3143 #ifdef CONFIG_NUMA
3144 void si_meminfo_node(struct sysinfo *val, int nid)
3145 {
3146 int zone_type; /* needs to be signed */
3147 unsigned long managed_pages = 0;
3148 pg_data_t *pgdat = NODE_DATA(nid);
3149
3150 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3151 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3152 val->totalram = managed_pages;
3153 val->sharedram = node_page_state(nid, NR_SHMEM);
3154 val->freeram = node_page_state(nid, NR_FREE_PAGES);
3155 #ifdef CONFIG_HIGHMEM
3156 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
3157 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3158 NR_FREE_PAGES);
3159 #else
3160 val->totalhigh = 0;
3161 val->freehigh = 0;
3162 #endif
3163 val->mem_unit = PAGE_SIZE;
3164 }
3165 #endif
3166
3167 /*
3168 * Determine whether the node should be displayed or not, depending on whether
3169 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
3170 */
3171 bool skip_free_areas_node(unsigned int flags, int nid)
3172 {
3173 bool ret = false;
3174 unsigned int cpuset_mems_cookie;
3175
3176 if (!(flags & SHOW_MEM_FILTER_NODES))
3177 goto out;
3178
3179 do {
3180 cpuset_mems_cookie = read_mems_allowed_begin();
3181 ret = !node_isset(nid, cpuset_current_mems_allowed);
3182 } while (read_mems_allowed_retry(cpuset_mems_cookie));
3183 out:
3184 return ret;
3185 }
3186
3187 #define K(x) ((x) << (PAGE_SHIFT-10))
3188
3189 static void show_migration_types(unsigned char type)
3190 {
3191 static const char types[MIGRATE_TYPES] = {
3192 [MIGRATE_UNMOVABLE] = 'U',
3193 [MIGRATE_RECLAIMABLE] = 'E',
3194 [MIGRATE_MOVABLE] = 'M',
3195 [MIGRATE_RESERVE] = 'R',
3196 #ifdef CONFIG_CMA
3197 [MIGRATE_CMA] = 'C',
3198 #endif
3199 #ifdef CONFIG_MEMORY_ISOLATION
3200 [MIGRATE_ISOLATE] = 'I',
3201 #endif
3202 };
3203 char tmp[MIGRATE_TYPES + 1];
3204 char *p = tmp;
3205 int i;
3206
3207 for (i = 0; i < MIGRATE_TYPES; i++) {
3208 if (type & (1 << i))
3209 *p++ = types[i];
3210 }
3211
3212 *p = '\0';
3213 printk("(%s) ", tmp);
3214 }
3215
3216 /*
3217 * Show free area list (used inside shift_scroll-lock stuff)
3218 * We also calculate the percentage fragmentation. We do this by counting the
3219 * memory on each free list with the exception of the first item on the list.
3220 * Suppresses nodes that are not allowed by current's cpuset if
3221 * SHOW_MEM_FILTER_NODES is passed.
3222 */
3223 void show_free_areas(unsigned int filter)
3224 {
3225 int cpu;
3226 struct zone *zone;
3227
3228 for_each_populated_zone(zone) {
3229 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3230 continue;
3231 show_node(zone);
3232 printk("%s per-cpu:\n", zone->name);
3233
3234 for_each_online_cpu(cpu) {
3235 struct per_cpu_pageset *pageset;
3236
3237 pageset = per_cpu_ptr(zone->pageset, cpu);
3238
3239 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
3240 cpu, pageset->pcp.high,
3241 pageset->pcp.batch, pageset->pcp.count);
3242 }
3243 }
3244
3245 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3246 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
3247 " unevictable:%lu"
3248 " dirty:%lu writeback:%lu unstable:%lu\n"
3249 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
3250 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3251 " free_cma:%lu\n",
3252 global_page_state(NR_ACTIVE_ANON),
3253 global_page_state(NR_INACTIVE_ANON),
3254 global_page_state(NR_ISOLATED_ANON),
3255 global_page_state(NR_ACTIVE_FILE),
3256 global_page_state(NR_INACTIVE_FILE),
3257 global_page_state(NR_ISOLATED_FILE),
3258 global_page_state(NR_UNEVICTABLE),
3259 global_page_state(NR_FILE_DIRTY),
3260 global_page_state(NR_WRITEBACK),
3261 global_page_state(NR_UNSTABLE_NFS),
3262 global_page_state(NR_FREE_PAGES),
3263 global_page_state(NR_SLAB_RECLAIMABLE),
3264 global_page_state(NR_SLAB_UNRECLAIMABLE),
3265 global_page_state(NR_FILE_MAPPED),
3266 global_page_state(NR_SHMEM),
3267 global_page_state(NR_PAGETABLE),
3268 global_page_state(NR_BOUNCE),
3269 global_page_state(NR_FREE_CMA_PAGES));
3270
3271 for_each_populated_zone(zone) {
3272 int i;
3273
3274 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3275 continue;
3276 show_node(zone);
3277 printk("%s"
3278 " free:%lukB"
3279 " min:%lukB"
3280 " low:%lukB"
3281 " high:%lukB"
3282 " active_anon:%lukB"
3283 " inactive_anon:%lukB"
3284 " active_file:%lukB"
3285 " inactive_file:%lukB"
3286 " unevictable:%lukB"
3287 " isolated(anon):%lukB"
3288 " isolated(file):%lukB"
3289 " present:%lukB"
3290 " managed:%lukB"
3291 " mlocked:%lukB"
3292 " dirty:%lukB"
3293 " writeback:%lukB"
3294 " mapped:%lukB"
3295 " shmem:%lukB"
3296 " slab_reclaimable:%lukB"
3297 " slab_unreclaimable:%lukB"
3298 " kernel_stack:%lukB"
3299 " pagetables:%lukB"
3300 " unstable:%lukB"
3301 " bounce:%lukB"
3302 " free_cma:%lukB"
3303 " writeback_tmp:%lukB"
3304 " pages_scanned:%lu"
3305 " all_unreclaimable? %s"
3306 "\n",
3307 zone->name,
3308 K(zone_page_state(zone, NR_FREE_PAGES)),
3309 K(min_wmark_pages(zone)),
3310 K(low_wmark_pages(zone)),
3311 K(high_wmark_pages(zone)),
3312 K(zone_page_state(zone, NR_ACTIVE_ANON)),
3313 K(zone_page_state(zone, NR_INACTIVE_ANON)),
3314 K(zone_page_state(zone, NR_ACTIVE_FILE)),
3315 K(zone_page_state(zone, NR_INACTIVE_FILE)),
3316 K(zone_page_state(zone, NR_UNEVICTABLE)),
3317 K(zone_page_state(zone, NR_ISOLATED_ANON)),
3318 K(zone_page_state(zone, NR_ISOLATED_FILE)),
3319 K(zone->present_pages),
3320 K(zone->managed_pages),
3321 K(zone_page_state(zone, NR_MLOCK)),
3322 K(zone_page_state(zone, NR_FILE_DIRTY)),
3323 K(zone_page_state(zone, NR_WRITEBACK)),
3324 K(zone_page_state(zone, NR_FILE_MAPPED)),
3325 K(zone_page_state(zone, NR_SHMEM)),
3326 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3327 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3328 zone_page_state(zone, NR_KERNEL_STACK) *
3329 THREAD_SIZE / 1024,
3330 K(zone_page_state(zone, NR_PAGETABLE)),
3331 K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3332 K(zone_page_state(zone, NR_BOUNCE)),
3333 K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3334 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3335 K(zone_page_state(zone, NR_PAGES_SCANNED)),
3336 (!zone_reclaimable(zone) ? "yes" : "no")
3337 );
3338 printk("lowmem_reserve[]:");
3339 for (i = 0; i < MAX_NR_ZONES; i++)
3340 printk(" %ld", zone->lowmem_reserve[i]);
3341 printk("\n");
3342 }
3343
3344 for_each_populated_zone(zone) {
3345 unsigned int order;
3346 unsigned long nr[MAX_ORDER], flags, total = 0;
3347 unsigned char types[MAX_ORDER];
3348
3349 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3350 continue;
3351 show_node(zone);
3352 printk("%s: ", zone->name);
3353
3354 spin_lock_irqsave(&zone->lock, flags);
3355 for (order = 0; order < MAX_ORDER; order++) {
3356 struct free_area *area = &zone->free_area[order];
3357 int type;
3358
3359 nr[order] = area->nr_free;
3360 total += nr[order] << order;
3361
3362 types[order] = 0;
3363 for (type = 0; type < MIGRATE_TYPES; type++) {
3364 if (!list_empty(&area->free_list[type]))
3365 types[order] |= 1 << type;
3366 }
3367 }
3368 spin_unlock_irqrestore(&zone->lock, flags);
3369 for (order = 0; order < MAX_ORDER; order++) {
3370 printk("%lu*%lukB ", nr[order], K(1UL) << order);
3371 if (nr[order])
3372 show_migration_types(types[order]);
3373 }
3374 printk("= %lukB\n", K(total));
3375 }
3376
3377 hugetlb_show_meminfo();
3378
3379 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3380
3381 show_swap_cache_info();
3382 }
3383
3384 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3385 {
3386 zoneref->zone = zone;
3387 zoneref->zone_idx = zone_idx(zone);
3388 }
3389
3390 /*
3391 * Builds allocation fallback zone lists.
3392 *
3393 * Add all populated zones of a node to the zonelist.
3394 */
3395 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3396 int nr_zones)
3397 {
3398 struct zone *zone;
3399 enum zone_type zone_type = MAX_NR_ZONES;
3400
3401 do {
3402 zone_type--;
3403 zone = pgdat->node_zones + zone_type;
3404 if (populated_zone(zone)) {
3405 zoneref_set_zone(zone,
3406 &zonelist->_zonerefs[nr_zones++]);
3407 check_highest_zone(zone_type);
3408 }
3409 } while (zone_type);
3410
3411 return nr_zones;
3412 }
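/*
 * Worked example (illustrative only): on a hypothetical 64-bit node with
 * populated ZONE_DMA, ZONE_DMA32 and ZONE_NORMAL, the descending walk
 * above appends:
 *
 *   _zonerefs[0] = Normal	(tried first)
 *   _zonerefs[1] = DMA32
 *   _zonerefs[2] = DMA		(last resort)
 *
 * so allocations prefer the highest usable zone and only fall back
 * towards the scarcer low-memory zones.
 */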
3413
3414
3415 /*
3416 * zonelist_order:
3417 * 0 = automatic detection of better ordering.
3418 * 1 = order by ([node] distance, -zonetype)
3419 * 2 = order by (-zonetype, [node] distance)
3420 *
3421 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3422 * the same zonelist. So only NUMA can configure this param.
3423 */
3424 #define ZONELIST_ORDER_DEFAULT 0
3425 #define ZONELIST_ORDER_NODE 1
3426 #define ZONELIST_ORDER_ZONE 2
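/*
 * Worked example (illustrative only) for a hypothetical two-node machine
 * where node 0 has DMA and Normal zones and node 1 has only Normal, as
 * seen from node 0:
 *
 *   Node order: Normal(0), DMA(0), Normal(1)
 *   Zone order: Normal(0), Normal(1), DMA(0)
 *
 * Node ordering maximises locality at the risk of exhausting node 0's
 * DMA zone; zone ordering preserves DMA until all Normal memory, local
 * or remote, has been used.
 */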
3427
3428 /* zonelist order in the kernel.
3429 * set_zonelist_order() will set this to NODE or ZONE.
3430 */
3431 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3432 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3433
3434
3435 #ifdef CONFIG_NUMA
3436 /* The value the user specified, via boot parameter or sysctl */
3437 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3438 /* string for sysctl */
3439 #define NUMA_ZONELIST_ORDER_LEN 16
3440 char numa_zonelist_order[16] = "default";
3441
3442 /*
3443  * Interface to configure zonelist ordering.
3444  * Command line option "numa_zonelist_order"
3445  *  = "[dD]efault" - default, automatic configuration.
3446  *  = "[nN]ode"    - order by node locality, then by zone within node
3447  *  = "[zZ]one"    - order by zone, then by locality within zone
3448 */
3449
3450 static int __parse_numa_zonelist_order(char *s)
3451 {
3452 if (*s == 'd' || *s == 'D') {
3453 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3454 } else if (*s == 'n' || *s == 'N') {
3455 user_zonelist_order = ZONELIST_ORDER_NODE;
3456 } else if (*s == 'z' || *s == 'Z') {
3457 user_zonelist_order = ZONELIST_ORDER_ZONE;
3458 } else {
3459 printk(KERN_WARNING
3460 "Ignoring invalid numa_zonelist_order value: "
3461 "%s\n", s);
3462 return -EINVAL;
3463 }
3464 return 0;
3465 }
3466
3467 static __init int setup_numa_zonelist_order(char *s)
3468 {
3469 int ret;
3470
3471 if (!s)
3472 return 0;
3473
3474 ret = __parse_numa_zonelist_order(s);
3475 if (ret == 0)
3476 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3477
3478 return ret;
3479 }
3480 early_param("numa_zonelist_order", setup_numa_zonelist_order);
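/*
 * Usage sketch (illustrative): the ordering can be selected at boot with
 * e.g. "numa_zonelist_order=zone" on the kernel command line, or changed
 * at runtime through the sysctl handled below, e.g.
 * "echo node > /proc/sys/vm/numa_zonelist_order".
 */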
3481
3482 /*
3483 * sysctl handler for numa_zonelist_order
3484 */
3485 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3486 void __user *buffer, size_t *length,
3487 loff_t *ppos)
3488 {
3489 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3490 int ret;
3491 static DEFINE_MUTEX(zl_order_mutex);
3492
3493 mutex_lock(&zl_order_mutex);
3494 if (write) {
3495 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3496 ret = -EINVAL;
3497 goto out;
3498 }
3499 strcpy(saved_string, (char *)table->data);
3500 }
3501 ret = proc_dostring(table, write, buffer, length, ppos);
3502 if (ret)
3503 goto out;
3504 if (write) {
3505 int oldval = user_zonelist_order;
3506
3507 ret = __parse_numa_zonelist_order((char *)table->data);
3508 if (ret) {
3509 /*
3510 * bogus value. restore saved string
3511 */
3512 strncpy((char *)table->data, saved_string,
3513 NUMA_ZONELIST_ORDER_LEN);
3514 user_zonelist_order = oldval;
3515 } else if (oldval != user_zonelist_order) {
3516 mutex_lock(&zonelists_mutex);
3517 build_all_zonelists(NULL, NULL);
3518 mutex_unlock(&zonelists_mutex);
3519 }
3520 }
3521 out:
3522 mutex_unlock(&zl_order_mutex);
3523 return ret;
3524 }
3525
3526
3527 #define MAX_NODE_LOAD (nr_online_nodes)
3528 static int node_load[MAX_NUMNODES];
3529
3530 /**
3531 * find_next_best_node - find the next node that should appear in a given node's fallback list
3532 * @node: node whose fallback list we're appending
3533 * @used_node_mask: nodemask_t of already used nodes
3534 *
3535 * We use a number of factors to determine which is the next node that should
3536 * appear on a given node's fallback list. The node should not have appeared
3537 * already in @node's fallback list, and it should be the next closest node
3538 * according to the distance array (which contains arbitrary distance values
3539 * from each node to each node in the system), and should also prefer nodes
3540 * with no CPUs, since presumably they'll have very little allocation pressure
3541 * on them otherwise.
3542 * It returns -1 if no node is found.
3543 */
3544 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3545 {
3546 int n, val;
3547 int min_val = INT_MAX;
3548 int best_node = NUMA_NO_NODE;
3549 const struct cpumask *tmp = cpumask_of_node(0);
3550
3551 /* Use the local node if we haven't already */
3552 if (!node_isset(node, *used_node_mask)) {
3553 node_set(node, *used_node_mask);
3554 return node;
3555 }
3556
3557 for_each_node_state(n, N_MEMORY) {
3558
3559 /* Don't want a node to appear more than once */
3560 if (node_isset(n, *used_node_mask))
3561 continue;
3562
3563 /* Use the distance array to find the distance */
3564 val = node_distance(node, n);
3565
3566 /* Penalize nodes under us ("prefer the next node") */
3567 val += (n < node);
3568
3569 /* Give preference to headless and unused nodes */
3570 tmp = cpumask_of_node(n);
3571 if (!cpumask_empty(tmp))
3572 val += PENALTY_FOR_NODE_WITH_CPUS;
3573
3574 /* Slight preference for less loaded node */
3575 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3576 val += node_load[n];
3577
3578 if (val < min_val) {
3579 min_val = val;
3580 best_node = n;
3581 }
3582 }
3583
3584 if (best_node >= 0)
3585 node_set(best_node, *used_node_mask);
3586
3587 return best_node;
3588 }
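/*
 * Worked example (illustrative only): when building node 0's fallback
 * list with candidates 1 and 2, node_distance(0, 1) == 20,
 * node_distance(0, 2) == 40 and both nodes having CPUs, the scores are
 *
 *   val(1) = (20 + 0 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD*MAX_NUMNODES + node_load[1]
 *   val(2) = (40 + 0 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD*MAX_NUMNODES + node_load[2]
 *
 * The multiplication scales the distance-based part so that node_load
 * only breaks ties between otherwise equally attractive nodes; the
 * closer node 1 wins regardless of load.
 */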
3589
3590
3591 /*
3592 * Build zonelists ordered by node and zones within node.
3593 * This results in maximum locality--normal zone overflows into local
3594 * DMA zone, if any--but risks exhausting DMA zone.
3595 */
3596 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3597 {
3598 int j;
3599 struct zonelist *zonelist;
3600
3601 zonelist = &pgdat->node_zonelists[0];
3602 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3603 ;
3604 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3605 zonelist->_zonerefs[j].zone = NULL;
3606 zonelist->_zonerefs[j].zone_idx = 0;
3607 }
3608
3609 /*
3610 * Build gfp_thisnode zonelists
3611 */
3612 static void build_thisnode_zonelists(pg_data_t *pgdat)
3613 {
3614 int j;
3615 struct zonelist *zonelist;
3616
3617 zonelist = &pgdat->node_zonelists[1];
3618 j = build_zonelists_node(pgdat, zonelist, 0);
3619 zonelist->_zonerefs[j].zone = NULL;
3620 zonelist->_zonerefs[j].zone_idx = 0;
3621 }
3622
3623 /*
3624 * Build zonelists ordered by zone and nodes within zones.
3625 * This results in conserving DMA zone[s] until all Normal memory is
3626 * exhausted, but results in overflowing to remote node while memory
3627 * may still exist in local DMA zone.
3628 */
3629 static int node_order[MAX_NUMNODES];
3630
3631 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3632 {
3633 int pos, j, node;
3634 int zone_type; /* needs to be signed */
3635 struct zone *z;
3636 struct zonelist *zonelist;
3637
3638 zonelist = &pgdat->node_zonelists[0];
3639 pos = 0;
3640 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3641 for (j = 0; j < nr_nodes; j++) {
3642 node = node_order[j];
3643 z = &NODE_DATA(node)->node_zones[zone_type];
3644 if (populated_zone(z)) {
3645 zoneref_set_zone(z,
3646 &zonelist->_zonerefs[pos++]);
3647 check_highest_zone(zone_type);
3648 }
3649 }
3650 }
3651 zonelist->_zonerefs[pos].zone = NULL;
3652 zonelist->_zonerefs[pos].zone_idx = 0;
3653 }
3654
3655 #if defined(CONFIG_64BIT)
3656 /*
3657 * Devices that require DMA32/DMA are relatively rare and do not justify a
3658 * penalty to every machine in case the specialised case applies. Default
3659 * to Node-ordering on 64-bit NUMA machines
3660 */
3661 static int default_zonelist_order(void)
3662 {
3663 return ZONELIST_ORDER_NODE;
3664 }
3665 #else
3666 /*
3667 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
3668 * by the kernel. If processes running on node 0 deplete the low memory zone
3669  * then reclaim will occur more frequently, increasing stalls and making it
3670  * easier to OOM if a large percentage of the zone is under writeback or
3671 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
3672 * Hence, default to zone ordering on 32-bit.
3673 */
3674 static int default_zonelist_order(void)
3675 {
3676 return ZONELIST_ORDER_ZONE;
3677 }
3678 #endif /* CONFIG_64BIT */
3679
3680 static void set_zonelist_order(void)
3681 {
3682 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3683 current_zonelist_order = default_zonelist_order();
3684 else
3685 current_zonelist_order = user_zonelist_order;
3686 }
3687
3688 static void build_zonelists(pg_data_t *pgdat)
3689 {
3690 int j, node, load;
3691 enum zone_type i;
3692 nodemask_t used_mask;
3693 int local_node, prev_node;
3694 struct zonelist *zonelist;
3695 unsigned int order = current_zonelist_order;
3696
3697 /* initialize zonelists */
3698 for (i = 0; i < MAX_ZONELISTS; i++) {
3699 zonelist = pgdat->node_zonelists + i;
3700 zonelist->_zonerefs[0].zone = NULL;
3701 zonelist->_zonerefs[0].zone_idx = 0;
3702 }
3703
3704 /* NUMA-aware ordering of nodes */
3705 local_node = pgdat->node_id;
3706 load = nr_online_nodes;
3707 prev_node = local_node;
3708 nodes_clear(used_mask);
3709
3710 memset(node_order, 0, sizeof(node_order));
3711 j = 0;
3712
3713 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3714 /*
3715 * We don't want to pressure a particular node.
3716  * So we add a penalty to the first node in the same
3717  * distance group to make it round-robin.
3718 */
3719 if (node_distance(local_node, node) !=
3720 node_distance(local_node, prev_node))
3721 node_load[node] = load;
3722
3723 prev_node = node;
3724 load--;
3725 if (order == ZONELIST_ORDER_NODE)
3726 build_zonelists_in_node_order(pgdat, node);
3727 else
3728 node_order[j++] = node; /* remember order */
3729 }
3730
3731 if (order == ZONELIST_ORDER_ZONE) {
3732 /* calculate node order -- i.e., DMA last! */
3733 build_zonelists_in_zone_order(pgdat, j);
3734 }
3735
3736 build_thisnode_zonelists(pgdat);
3737 }
3738
3739 /* Construct the zonelist performance cache - see further mmzone.h */
3740 static void build_zonelist_cache(pg_data_t *pgdat)
3741 {
3742 struct zonelist *zonelist;
3743 struct zonelist_cache *zlc;
3744 struct zoneref *z;
3745
3746 zonelist = &pgdat->node_zonelists[0];
3747 zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3748 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3749 for (z = zonelist->_zonerefs; z->zone; z++)
3750 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3751 }
3752
3753 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3754 /*
3755 * Return node id of node used for "local" allocations.
3756 * I.e., first node id of first zone in arg node's generic zonelist.
3757 * Used for initializing percpu 'numa_mem', which is used primarily
3758 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3759 */
3760 int local_memory_node(int node)
3761 {
3762 struct zone *zone;
3763
3764 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3765 gfp_zone(GFP_KERNEL),
3766 NULL,
3767 &zone);
3768 return zone->node;
3769 }
3770 #endif
3771
3772 #else /* CONFIG_NUMA */
3773
3774 static void set_zonelist_order(void)
3775 {
3776 current_zonelist_order = ZONELIST_ORDER_ZONE;
3777 }
3778
3779 static void build_zonelists(pg_data_t *pgdat)
3780 {
3781 int node, local_node;
3782 enum zone_type j;
3783 struct zonelist *zonelist;
3784
3785 local_node = pgdat->node_id;
3786
3787 zonelist = &pgdat->node_zonelists[0];
3788 j = build_zonelists_node(pgdat, zonelist, 0);
3789
3790 /*
3791 * Now we build the zonelist so that it contains the zones
3792 * of all the other nodes.
3793 * We don't want to pressure a particular node, so when
3794 * building the zones for node N, we make sure that the
3795 * zones coming right after the local ones are those from
3796 * node N+1 (modulo N)
3797 */
3798 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3799 if (!node_online(node))
3800 continue;
3801 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3802 }
3803 for (node = 0; node < local_node; node++) {
3804 if (!node_online(node))
3805 continue;
3806 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3807 }
3808
3809 zonelist->_zonerefs[j].zone = NULL;
3810 zonelist->_zonerefs[j].zone_idx = 0;
3811 }
3812
3813 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3814 static void build_zonelist_cache(pg_data_t *pgdat)
3815 {
3816 pgdat->node_zonelists[0].zlcache_ptr = NULL;
3817 }
3818
3819 #endif /* CONFIG_NUMA */
3820
3821 /*
3822 * Boot pageset table. One per cpu which is going to be used for all
3823 * zones and all nodes. The parameters will be set in such a way
3824 * that an item put on a list will immediately be handed over to
3825 * the buddy list. This is safe since pageset manipulation is done
3826 * with interrupts disabled.
3827 *
3828 * The boot_pagesets must be kept even after bootup is complete for
3829 * unused processors and/or zones. They do play a role for bootstrapping
3830 * hotplugged processors.
3831 *
3832 * zoneinfo_show() and maybe other functions do
3833 * not check if the processor is online before following the pageset pointer.
3834 * Other parts of the kernel may not check if the zone is available.
3835 */
3836 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3837 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3838 static void setup_zone_pageset(struct zone *zone);
3839
3840 /*
3841 * Global mutex to protect against size modification of zonelists
3842 * as well as to serialize pageset setup for the new populated zone.
3843 */
3844 DEFINE_MUTEX(zonelists_mutex);
3845
3846 /* The return value is an int, just to satisfy stop_machine() */
3847 static int __build_all_zonelists(void *data)
3848 {
3849 int nid;
3850 int cpu;
3851 pg_data_t *self = data;
3852
3853 #ifdef CONFIG_NUMA
3854 memset(node_load, 0, sizeof(node_load));
3855 #endif
3856
3857 if (self && !node_online(self->node_id)) {
3858 build_zonelists(self);
3859 build_zonelist_cache(self);
3860 }
3861
3862 for_each_online_node(nid) {
3863 pg_data_t *pgdat = NODE_DATA(nid);
3864
3865 build_zonelists(pgdat);
3866 build_zonelist_cache(pgdat);
3867 }
3868
3869 /*
3870 * Initialize the boot_pagesets that are going to be used
3871 * for bootstrapping processors. The real pagesets for
3872 * each zone will be allocated later when the per cpu
3873 * allocator is available.
3874 *
3875 * boot_pagesets are used also for bootstrapping offline
3876 * cpus if the system is already booted because the pagesets
3877 * are needed to initialize allocators on a specific cpu too.
3878 * F.e. the percpu allocator needs the page allocator which
3879 * needs the percpu allocator in order to allocate its pagesets
3880 * (a chicken-egg dilemma).
3881 */
3882 for_each_possible_cpu(cpu) {
3883 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3884
3885 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3886 /*
3887 * We now know the "local memory node" for each node--
3888 * i.e., the node of the first zone in the generic zonelist.
3889 * Set up numa_mem percpu variable for on-line cpus. During
3890 * boot, only the boot cpu should be on-line; we'll init the
3891 * secondary cpus' numa_mem as they come on-line. During
3892 * node/memory hotplug, we'll fixup all on-line cpus.
3893 */
3894 if (cpu_online(cpu))
3895 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3896 #endif
3897 }
3898
3899 return 0;
3900 }
3901
3902 /*
3903 * Called with zonelists_mutex held always
3904 * unless system_state == SYSTEM_BOOTING.
3905 */
3906 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3907 {
3908 set_zonelist_order();
3909
3910 if (system_state == SYSTEM_BOOTING) {
3911 __build_all_zonelists(NULL);
3912 mminit_verify_zonelist();
3913 cpuset_init_current_mems_allowed();
3914 } else {
3915 #ifdef CONFIG_MEMORY_HOTPLUG
3916 if (zone)
3917 setup_zone_pageset(zone);
3918 #endif
3919 /* we have to stop all cpus to guarantee there is no user
3920 of zonelist */
3921 stop_machine(__build_all_zonelists, pgdat, NULL);
3922 /* cpuset refresh routine should be here */
3923 }
3924 vm_total_pages = nr_free_pagecache_pages();
3925 /*
3926 * Disable grouping by mobility if the number of pages in the
3927 * system is too low to allow the mechanism to work. It would be
3928 * more accurate, but expensive to check per-zone. This check is
3929 * made on memory-hotadd so a system can start with mobility
3930 * disabled and enable it later
3931 */
3932 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3933 page_group_by_mobility_disabled = 1;
3934 else
3935 page_group_by_mobility_disabled = 0;
3936
3937 printk("Built %i zonelists in %s order, mobility grouping %s. "
3938 "Total pages: %ld\n",
3939 nr_online_nodes,
3940 zonelist_order_name[current_zonelist_order],
3941 page_group_by_mobility_disabled ? "off" : "on",
3942 vm_total_pages);
3943 #ifdef CONFIG_NUMA
3944 printk("Policy zone: %s\n", zone_names[policy_zone]);
3945 #endif
3946 }
3947
3948 /*
3949 * Helper functions to size the waitqueue hash table.
3950 * Essentially these want to choose hash table sizes sufficiently
3951 * large so that collisions trying to wait on pages are rare.
3952 * But in fact, the number of active page waitqueues on typical
3953 * systems is ridiculously low, less than 200. So this is even
3954 * conservative, even though it seems large.
3955 *
3956 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3957 * waitqueues, i.e. the size of the waitq table given the number of pages.
3958 */
3959 #define PAGES_PER_WAITQUEUE 256
3960
3961 #ifndef CONFIG_MEMORY_HOTPLUG
3962 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3963 {
3964 unsigned long size = 1;
3965
3966 pages /= PAGES_PER_WAITQUEUE;
3967
3968 while (size < pages)
3969 size <<= 1;
3970
3971 /*
3972 * Once we have dozens or even hundreds of threads sleeping
3973 * on IO we've got bigger problems than wait queue collision.
3974 * Limit the size of the wait table to a reasonable size.
3975 */
3976 size = min(size, 4096UL);
3977
3978 return max(size, 4UL);
3979 }
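/*
 * Worked example (illustrative, assuming 4KiB pages): a 1GiB zone spans
 * 262144 pages; 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of
 * two, so the table gets 1024 entries. A 16GiB zone would want 16384
 * entries but is clamped to the 4096 maximum above.
 */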
3980 #else
3981 /*
3982 * A zone's size might be changed by hot-add, so it is not possible to determine
3983 * a suitable size for its wait_table. So we use the maximum size now.
3984 *
3985  * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
3986 *
3987 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3988 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3989 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3990 *
3991 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3992 * or more by the traditional way. (See above). It equals:
3993 *
3994 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3995 * ia64(16K page size) : = ( 8G + 4M)byte.
3996 * powerpc (64K page size) : = (32G +16M)byte.
3997 */
3998 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3999 {
4000 return 4096UL;
4001 }
4002 #endif
4003
4004 /*
4005 * This is an integer logarithm so that shifts can be used later
4006 * to extract the more random high bits from the multiplicative
4007 * hash function before the remainder is taken.
4008 */
4009 static inline unsigned long wait_table_bits(unsigned long size)
4010 {
4011 return ffz(~size);
4012 }
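/*
 * For example, a table of 4096 entries gives ffz(~4096) == 12, i.e. the
 * base-2 logarithm of the (always power-of-two) table size.
 */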
4013
4014 /*
4015 * Check if a pageblock contains reserved pages
4016 */
4017 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4018 {
4019 unsigned long pfn;
4020
4021 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4022 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4023 return 1;
4024 }
4025 return 0;
4026 }
4027
4028 /*
4029 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
4030 * of blocks reserved is based on min_wmark_pages(zone). The memory within
4031 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4032 * higher will lead to a bigger reserve which will get freed as contiguous
4033 * blocks as reclaim kicks in
4034 */
4035 static void setup_zone_migrate_reserve(struct zone *zone)
4036 {
4037 unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
4038 struct page *page;
4039 unsigned long block_migratetype;
4040 int reserve;
4041 int old_reserve;
4042
4043 /*
4044 * Get the start pfn, end pfn and the number of blocks to reserve
4045 * We have to be careful to be aligned to pageblock_nr_pages to
4046 * make sure that we always check pfn_valid for the first page in
4047 * the block.
4048 */
4049 start_pfn = zone->zone_start_pfn;
4050 end_pfn = zone_end_pfn(zone);
4051 start_pfn = roundup(start_pfn, pageblock_nr_pages);
4052 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
4053 pageblock_order;
4054
4055 /*
4056 * Reserve blocks are generally in place to help high-order atomic
4057 * allocations that are short-lived. A min_free_kbytes value that
4058 * would result in more than 2 reserve blocks for atomic allocations
4059 * is assumed to be in place to help anti-fragmentation for the
4060 * future allocation of hugepages at runtime.
4061 */
4062 reserve = min(2, reserve);
4063 old_reserve = zone->nr_migrate_reserve_block;
4064
4065 /* When memory hot-add, we almost always need to do nothing */
4066 if (reserve == old_reserve)
4067 return;
4068 zone->nr_migrate_reserve_block = reserve;
4069
4070 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4071 if (!pfn_valid(pfn))
4072 continue;
4073 page = pfn_to_page(pfn);
4074
4075 /* Watch out for overlapping nodes */
4076 if (page_to_nid(page) != zone_to_nid(zone))
4077 continue;
4078
4079 block_migratetype = get_pageblock_migratetype(page);
4080
4081 /* Only test what is necessary when the reserves are not met */
4082 if (reserve > 0) {
4083 /*
4084 * Blocks with reserved pages will never free, skip
4085 * them.
4086 */
4087 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4088 if (pageblock_is_reserved(pfn, block_end_pfn))
4089 continue;
4090
4091 /* If this block is reserved, account for it */
4092 if (block_migratetype == MIGRATE_RESERVE) {
4093 reserve--;
4094 continue;
4095 }
4096
4097 /* Suitable for reserving if this block is movable */
4098 if (block_migratetype == MIGRATE_MOVABLE) {
4099 set_pageblock_migratetype(page,
4100 MIGRATE_RESERVE);
4101 move_freepages_block(zone, page,
4102 MIGRATE_RESERVE);
4103 reserve--;
4104 continue;
4105 }
4106 } else if (!old_reserve) {
4107 /*
4108 * At boot time we don't need to scan the whole zone
4109 * for turning off MIGRATE_RESERVE.
4110 */
4111 break;
4112 }
4113
4114 /*
4115 * If the reserve is met and this is a previous reserved block,
4116 * take it back
4117 */
4118 if (block_migratetype == MIGRATE_RESERVE) {
4119 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4120 move_freepages_block(zone, page, MIGRATE_MOVABLE);
4121 }
4122 }
4123 }
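/*
 * Worked example (illustrative, assuming 4KiB pages and pageblock_order
 * == 9, i.e. 512 pages per pageblock): with min_wmark_pages(zone) ==
 * 1441, roundup(1441, 512) == 1536 and 1536 >> 9 == 3 blocks, which the
 * min(2, reserve) clamp above reduces to 2 MIGRATE_RESERVE pageblocks.
 */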
4124
4125 /*
4126 * Initially all pages are reserved - free ones are freed
4127 * up by free_all_bootmem() once the early boot process is
4128 * done. Non-atomic initialization, single-pass.
4129 */
4130 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4131 unsigned long start_pfn, enum memmap_context context)
4132 {
4133 struct page *page;
4134 unsigned long end_pfn = start_pfn + size;
4135 unsigned long pfn;
4136 struct zone *z;
4137
4138 if (highest_memmap_pfn < end_pfn - 1)
4139 highest_memmap_pfn = end_pfn - 1;
4140
4141 z = &NODE_DATA(nid)->node_zones[zone];
4142 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4143 /*
4144 * There can be holes in boot-time mem_map[]s
4145 * handed to this function. They do not
4146 * exist on hotplugged memory.
4147 */
4148 if (context == MEMMAP_EARLY) {
4149 if (!early_pfn_valid(pfn))
4150 continue;
4151 if (!early_pfn_in_nid(pfn, nid))
4152 continue;
4153 }
4154 page = pfn_to_page(pfn);
4155 set_page_links(page, zone, nid, pfn);
4156 mminit_verify_page_links(page, zone, nid, pfn);
4157 init_page_count(page);
4158 page_mapcount_reset(page);
4159 page_cpupid_reset_last(page);
4160 SetPageReserved(page);
4161 /*
4162 * Mark the block movable so that blocks are reserved for
4163 * movable at startup. This will force kernel allocations
4164 * to reserve their blocks rather than leaking throughout
4165 * the address space during boot when many long-lived
4166 * kernel allocations are made. Later some blocks near
4167 * the start are marked MIGRATE_RESERVE by
4168 * setup_zone_migrate_reserve()
4169 *
4170  * The bitmap is created for the zone's valid pfn range, but the memmap
4171  * may be created for invalid pages (for alignment), so check here
4172  * that we do not call set_pageblock_migratetype() against a pfn
4173  * outside the zone.
4174 */
4175 if ((z->zone_start_pfn <= pfn)
4176 && (pfn < zone_end_pfn(z))
4177 && !(pfn & (pageblock_nr_pages - 1)))
4178 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4179
4180 INIT_LIST_HEAD(&page->lru);
4181 #ifdef WANT_PAGE_VIRTUAL
4182 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
4183 if (!is_highmem_idx(zone))
4184 set_page_address(page, __va(pfn << PAGE_SHIFT));
4185 #endif
4186 }
4187 }
4188
4189 static void __meminit zone_init_free_lists(struct zone *zone)
4190 {
4191 unsigned int order, t;
4192 for_each_migratetype_order(order, t) {
4193 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
4194 zone->free_area[order].nr_free = 0;
4195 }
4196 }
4197
4198 #ifndef __HAVE_ARCH_MEMMAP_INIT
4199 #define memmap_init(size, nid, zone, start_pfn) \
4200 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4201 #endif
4202
4203 static int zone_batchsize(struct zone *zone)
4204 {
4205 #ifdef CONFIG_MMU
4206 int batch;
4207
4208 /*
4209 * The per-cpu-pages pools are set to around 1000th of the
4210 * size of the zone. But no more than 1/2 of a meg.
4211 *
4212 * OK, so we don't know how big the cache is. So guess.
4213 */
4214 batch = zone->managed_pages / 1024;
4215 if (batch * PAGE_SIZE > 512 * 1024)
4216 batch = (512 * 1024) / PAGE_SIZE;
4217 batch /= 4; /* We effectively *= 4 below */
4218 if (batch < 1)
4219 batch = 1;
4220
4221 /*
4222 * Clamp the batch to a 2^n - 1 value. Having a power
4223 * of 2 value was found to be more likely to have
4224 * suboptimal cache aliasing properties in some cases.
4225 *
4226 * For example if 2 tasks are alternately allocating
4227 * batches of pages, one task can end up with a lot
4228 * of pages of one half of the possible page colors
4229 * and the other with pages of the other colors.
4230 */
4231 batch = rounddown_pow_of_two(batch + batch/2) - 1;
4232
4233 return batch;
4234
4235 #else
4236 /* The deferral and batching of frees should be suppressed under NOMMU
4237 * conditions.
4238 *
4239 * The problem is that NOMMU needs to be able to allocate large chunks
4240 * of contiguous memory as there's no hardware page translation to
4241 * assemble apparent contiguous memory from discontiguous pages.
4242 *
4243 * Queueing large contiguous runs of pages for batching, however,
4244 * causes the pages to actually be freed in smaller chunks. As there
4245 * can be a significant delay between the individual batches being
4246 * recycled, this leads to the once large chunks of space being
4247 * fragmented and becoming unavailable for high-order allocations.
4248 */
4249 return 0;
4250 #endif
4251 }
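/*
 * Worked example (illustrative, assuming 4KiB pages): a zone with
 * 262144 managed pages (1GiB) starts with 262144 / 1024 = 256; that is
 * more than 512KiB worth of pages, so it is capped to 128, quartered to
 * 32, and finally rounded to a 2^n - 1 value:
 * rounddown_pow_of_two(32 + 16) - 1 = 31.
 */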
4252
4253 /*
4254 * pcp->high and pcp->batch values are related and dependent on one another:
4255  * ->batch must never be higher than ->high.
4256 * The following function updates them in a safe manner without read side
4257 * locking.
4258 *
4259 * Any new users of pcp->batch and pcp->high should ensure they can cope with
4260  * those fields changing asynchronously (according to the above rule).
4261 *
4262 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4263 * outside of boot time (or some other assurance that no concurrent updaters
4264 * exist).
4265 */
4266 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4267 unsigned long batch)
4268 {
4269 /* start with a fail safe value for batch */
4270 pcp->batch = 1;
4271 smp_wmb();
4272
4273 /* Update high, then batch, in order */
4274 pcp->high = high;
4275 smp_wmb();
4276
4277 pcp->batch = batch;
4278 }
4279
4280 /* a companion to pageset_set_high() */
4281 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4282 {
4283 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4284 }
4285
4286 static void pageset_init(struct per_cpu_pageset *p)
4287 {
4288 struct per_cpu_pages *pcp;
4289 int migratetype;
4290
4291 memset(p, 0, sizeof(*p));
4292
4293 pcp = &p->pcp;
4294 pcp->count = 0;
4295 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4296 INIT_LIST_HEAD(&pcp->lists[migratetype]);
4297 }
4298
4299 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4300 {
4301 pageset_init(p);
4302 pageset_set_batch(p, batch);
4303 }
4304
4305 /*
4306 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
4307 * to the value high for the pageset p.
4308 */
4309 static void pageset_set_high(struct per_cpu_pageset *p,
4310 unsigned long high)
4311 {
4312 unsigned long batch = max(1UL, high / 4);
4313 if ((high / 4) > (PAGE_SHIFT * 8))
4314 batch = PAGE_SHIFT * 8;
4315
4316 pageset_update(&p->pcp, high, batch);
4317 }
4318
4319 static void pageset_set_high_and_batch(struct zone *zone,
4320 struct per_cpu_pageset *pcp)
4321 {
4322 if (percpu_pagelist_fraction)
4323 pageset_set_high(pcp,
4324 (zone->managed_pages /
4325 percpu_pagelist_fraction));
4326 else
4327 pageset_set_batch(pcp, zone_batchsize(zone));
4328 }
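/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for a zone
 * with 262144 managed pages and percpu_pagelist_fraction == 8,
 * pageset_set_high() gets high = 262144 / 8 = 32768 and caps batch at
 * PAGE_SHIFT * 8 = 96. With the fraction unset, zone_batchsize() (31 in
 * the example above) is used instead, giving high = 6 * 31 = 186.
 */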
4329
4330 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4331 {
4332 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4333
4334 pageset_init(pcp);
4335 pageset_set_high_and_batch(zone, pcp);
4336 }
4337
4338 static void __meminit setup_zone_pageset(struct zone *zone)
4339 {
4340 int cpu;
4341 zone->pageset = alloc_percpu(struct per_cpu_pageset);
4342 for_each_possible_cpu(cpu)
4343 zone_pageset_init(zone, cpu);
4344 }
4345
4346 /*
4347 * Allocate per cpu pagesets and initialize them.
4348 * Before this call only boot pagesets were available.
4349 */
4350 void __init setup_per_cpu_pageset(void)
4351 {
4352 struct zone *zone;
4353
4354 for_each_populated_zone(zone)
4355 setup_zone_pageset(zone);
4356 }
4357
4358 static noinline __init_refok
4359 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4360 {
4361 int i;
4362 size_t alloc_size;
4363
4364 /*
4365 * The per-page waitqueue mechanism uses hashed waitqueues
4366 * per zone.
4367 */
4368 zone->wait_table_hash_nr_entries =
4369 wait_table_hash_nr_entries(zone_size_pages);
4370 zone->wait_table_bits =
4371 wait_table_bits(zone->wait_table_hash_nr_entries);
4372 alloc_size = zone->wait_table_hash_nr_entries
4373 * sizeof(wait_queue_head_t);
4374
4375 if (!slab_is_available()) {
4376 zone->wait_table = (wait_queue_head_t *)
4377 memblock_virt_alloc_node_nopanic(
4378 alloc_size, zone->zone_pgdat->node_id);
4379 } else {
4380 /*
4381 * This case means that a zone whose size was 0 gets new memory
4382 * via memory hot-add.
4383 * But it may be the case that a new node was hot-added. In
4384 * this case vmalloc() will not be able to use this new node's
4385 * memory - this wait_table must be initialized to use this new
4386 * node itself as well.
4387 * To use this new node's memory, further consideration will be
4388 * necessary.
4389 */
4390 zone->wait_table = vmalloc(alloc_size);
4391 }
4392 if (!zone->wait_table)
4393 return -ENOMEM;
4394
4395 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4396 init_waitqueue_head(zone->wait_table + i);
4397
4398 return 0;
4399 }
4400
4401 static __meminit void zone_pcp_init(struct zone *zone)
4402 {
4403 /*
4404 * per cpu subsystem is not up at this point. The following code
4405 * relies on the ability of the linker to provide the
4406 * offset of a (static) per cpu variable into the per cpu area.
4407 */
4408 zone->pageset = &boot_pageset;
4409
4410 if (populated_zone(zone))
4411 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
4412 zone->name, zone->present_pages,
4413 zone_batchsize(zone));
4414 }
4415
4416 int __meminit init_currently_empty_zone(struct zone *zone,
4417 unsigned long zone_start_pfn,
4418 unsigned long size,
4419 enum memmap_context context)
4420 {
4421 struct pglist_data *pgdat = zone->zone_pgdat;
4422 int ret;
4423 ret = zone_wait_table_init(zone, size);
4424 if (ret)
4425 return ret;
4426 pgdat->nr_zones = zone_idx(zone) + 1;
4427
4428 zone->zone_start_pfn = zone_start_pfn;
4429
4430 mminit_dprintk(MMINIT_TRACE, "memmap_init",
4431 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4432 pgdat->node_id,
4433 (unsigned long)zone_idx(zone),
4434 zone_start_pfn, (zone_start_pfn + size));
4435
4436 zone_init_free_lists(zone);
4437
4438 return 0;
4439 }
4440
4441 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4442 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4443 /*
4444 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4445 */
4446 int __meminit __early_pfn_to_nid(unsigned long pfn)
4447 {
4448 unsigned long start_pfn, end_pfn;
4449 int nid;
4450 /*
4451 * NOTE: The following SMP-unsafe globals are only used early in boot
4452 * when the kernel is running single-threaded.
4453 */
4454 static unsigned long __meminitdata last_start_pfn, last_end_pfn;
4455 static int __meminitdata last_nid;
4456
4457 if (last_start_pfn <= pfn && pfn < last_end_pfn)
4458 return last_nid;
4459
4460 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4461 if (nid != -1) {
4462 last_start_pfn = start_pfn;
4463 last_end_pfn = end_pfn;
4464 last_nid = nid;
4465 }
4466
4467 return nid;
4468 }
4469 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4470
4471 int __meminit early_pfn_to_nid(unsigned long pfn)
4472 {
4473 int nid;
4474
4475 nid = __early_pfn_to_nid(pfn);
4476 if (nid >= 0)
4477 return nid;
4478 /* just returns 0 */
4479 return 0;
4480 }
4481
4482 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
4483 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4484 {
4485 int nid;
4486
4487 nid = __early_pfn_to_nid(pfn);
4488 if (nid >= 0 && nid != node)
4489 return false;
4490 return true;
4491 }
4492 #endif
4493
4494 /**
4495 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
4496 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4497 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4498 *
4499 * If an architecture guarantees that all ranges registered contain no holes
4500  * and may be freed, this function may be used instead of calling
4501 * memblock_free_early_nid() manually.
4502 */
4503 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4504 {
4505 unsigned long start_pfn, end_pfn;
4506 int i, this_nid;
4507
4508 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4509 start_pfn = min(start_pfn, max_low_pfn);
4510 end_pfn = min(end_pfn, max_low_pfn);
4511
4512 if (start_pfn < end_pfn)
4513 memblock_free_early_nid(PFN_PHYS(start_pfn),
4514 (end_pfn - start_pfn) << PAGE_SHIFT,
4515 this_nid);
4516 }
4517 }
4518
4519 /**
4520 * sparse_memory_present_with_active_regions - Call memory_present for each active range
4521 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4522 *
4523 * If an architecture guarantees that all ranges registered contain no holes and may
4524 * be freed, this function may be used instead of calling memory_present() manually.
4525 */
4526 void __init sparse_memory_present_with_active_regions(int nid)
4527 {
4528 unsigned long start_pfn, end_pfn;
4529 int i, this_nid;
4530
4531 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4532 memory_present(this_nid, start_pfn, end_pfn);
4533 }
4534
4535 /**
4536 * get_pfn_range_for_nid - Return the start and end page frames for a node
4537 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4538 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4539 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4540 *
4541 * It returns the start and end page frame of a node based on information
4542 * provided by memblock_set_node(). If called for a node
4543 * with no available memory, a warning is printed and the start and end
4544 * PFNs will be 0.
4545 */
4546 void __meminit get_pfn_range_for_nid(unsigned int nid,
4547 unsigned long *start_pfn, unsigned long *end_pfn)
4548 {
4549 unsigned long this_start_pfn, this_end_pfn;
4550 int i;
4551
4552 *start_pfn = -1UL;
4553 *end_pfn = 0;
4554
4555 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4556 *start_pfn = min(*start_pfn, this_start_pfn);
4557 *end_pfn = max(*end_pfn, this_end_pfn);
4558 }
4559
4560 if (*start_pfn == -1UL)
4561 *start_pfn = 0;
4562 }
4563
4564 /*
4565 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4566  * assumption is made that zones within a node are ordered in monotonically
4567 * increasing memory addresses so that the "highest" populated zone is used
4568 */
4569 static void __init find_usable_zone_for_movable(void)
4570 {
4571 int zone_index;
4572 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4573 if (zone_index == ZONE_MOVABLE)
4574 continue;
4575
4576 if (arch_zone_highest_possible_pfn[zone_index] >
4577 arch_zone_lowest_possible_pfn[zone_index])
4578 break;
4579 }
4580
4581 VM_BUG_ON(zone_index == -1);
4582 movable_zone = zone_index;
4583 }
4584
4585 /*
4586 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4587 * because it is sized independent of architecture. Unlike the other zones,
4588 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4589 * in each node depending on the size of each node and how evenly kernelcore
4590 * is distributed. This helper function adjusts the zone ranges
4591 * provided by the architecture for a given node by using the end of the
4592 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4593  * zones within a node are in order of monotonically increasing memory addresses.
4594 */
4595 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4596 unsigned long zone_type,
4597 unsigned long node_start_pfn,
4598 unsigned long node_end_pfn,
4599 unsigned long *zone_start_pfn,
4600 unsigned long *zone_end_pfn)
4601 {
4602 /* Only adjust if ZONE_MOVABLE is on this node */
4603 if (zone_movable_pfn[nid]) {
4604 /* Size ZONE_MOVABLE */
4605 if (zone_type == ZONE_MOVABLE) {
4606 *zone_start_pfn = zone_movable_pfn[nid];
4607 *zone_end_pfn = min(node_end_pfn,
4608 arch_zone_highest_possible_pfn[movable_zone]);
4609
4610 /* Adjust for ZONE_MOVABLE starting within this range */
4611 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4612 *zone_end_pfn > zone_movable_pfn[nid]) {
4613 *zone_end_pfn = zone_movable_pfn[nid];
4614
4615 /* Check if this whole range is within ZONE_MOVABLE */
4616 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4617 *zone_start_pfn = *zone_end_pfn;
4618 }
4619 }
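/*
 * Worked example (illustrative only): a node spanning pfns [0x100000,
 * 0x200000) with a hypothetical zone_movable_pfn[nid] of 0x180000.
 * ZONE_MOVABLE is sized as [0x180000, 0x200000) (assuming the node end
 * is below the highest usable zone boundary); a zone straddling 0x180000
 * has its end clipped to 0x180000; and a zone lying entirely above
 * 0x180000 collapses to an empty range.
 */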
4620
4621 /*
4622 * Return the number of pages a zone spans in a node, including holes
4623 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4624 */
4625 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4626 unsigned long zone_type,
4627 unsigned long node_start_pfn,
4628 unsigned long node_end_pfn,
4629 unsigned long *ignored)
4630 {
4631 unsigned long zone_start_pfn, zone_end_pfn;
4632
4633 /* Get the start and end of the zone */
4634 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4635 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4636 adjust_zone_range_for_zone_movable(nid, zone_type,
4637 node_start_pfn, node_end_pfn,
4638 &zone_start_pfn, &zone_end_pfn);
4639
4640 /* Check that this node has pages within the zone's required range */
4641 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4642 return 0;
4643
4644 /* Move the zone boundaries inside the node if necessary */
4645 zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4646 zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4647
4648 /* Return the spanned pages */
4649 return zone_end_pfn - zone_start_pfn;
4650 }
4651
4652 /*
4653 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4654 * then all holes in the requested range will be accounted for.
4655 */
4656 unsigned long __meminit __absent_pages_in_range(int nid,
4657 unsigned long range_start_pfn,
4658 unsigned long range_end_pfn)
4659 {
4660 unsigned long nr_absent = range_end_pfn - range_start_pfn;
4661 unsigned long start_pfn, end_pfn;
4662 int i;
4663
4664 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4665 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4666 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4667 nr_absent -= end_pfn - start_pfn;
4668 }
4669 return nr_absent;
4670 }
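/*
 * Worked example (illustrative only): for the range [0, 1000) with
 * memblock regions [0, 400) and [600, 900) on this node, nr_absent
 * starts at 1000 and is reduced by 400 and then 300, leaving 300 pfns
 * of holes.
 */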
4671
4672 /**
4673 * absent_pages_in_range - Return number of page frames in holes within a range
4674 * @start_pfn: The start PFN to start searching for holes
4675 * @end_pfn: The end PFN to stop searching for holes
4676 *
4677  * It returns the number of page frames in memory holes within a range.
4678 */
4679 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4680 unsigned long end_pfn)
4681 {
4682 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4683 }
4684
4685 /* Return the number of page frames in holes in a zone on a node */
4686 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4687 unsigned long zone_type,
4688 unsigned long node_start_pfn,
4689 unsigned long node_end_pfn,
4690 unsigned long *ignored)
4691 {
4692 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4693 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4694 unsigned long zone_start_pfn, zone_end_pfn;
4695
4696 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4697 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4698
4699 adjust_zone_range_for_zone_movable(nid, zone_type,
4700 node_start_pfn, node_end_pfn,
4701 &zone_start_pfn, &zone_end_pfn);
4702 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4703 }
4704
4705 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4706 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4707 unsigned long zone_type,
4708 unsigned long node_start_pfn,
4709 unsigned long node_end_pfn,
4710 unsigned long *zones_size)
4711 {
4712 return zones_size[zone_type];
4713 }
4714
4715 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4716 unsigned long zone_type,
4717 unsigned long node_start_pfn,
4718 unsigned long node_end_pfn,
4719 unsigned long *zholes_size)
4720 {
4721 if (!zholes_size)
4722 return 0;
4723
4724 return zholes_size[zone_type];
4725 }
4726
4727 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4728
4729 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4730 unsigned long node_start_pfn,
4731 unsigned long node_end_pfn,
4732 unsigned long *zones_size,
4733 unsigned long *zholes_size)
4734 {
4735 unsigned long realtotalpages, totalpages = 0;
4736 enum zone_type i;
4737
4738 for (i = 0; i < MAX_NR_ZONES; i++)
4739 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4740 node_start_pfn,
4741 node_end_pfn,
4742 zones_size);
4743 pgdat->node_spanned_pages = totalpages;
4744
4745 realtotalpages = totalpages;
4746 for (i = 0; i < MAX_NR_ZONES; i++)
4747 realtotalpages -=
4748 zone_absent_pages_in_node(pgdat->node_id, i,
4749 node_start_pfn, node_end_pfn,
4750 zholes_size);
4751 pgdat->node_present_pages = realtotalpages;
4752 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4753 realtotalpages);
4754 }
4755
4756 #ifndef CONFIG_SPARSEMEM
4757 /*
4758 * Calculate the size of the zone->blockflags rounded to an unsigned long
4759  * Start by making sure zonesize is a multiple of pageblock_order by rounding
4760  * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round the
4761  * result up to the nearest long (in bits), and return it in bytes.
4763 */
4764 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4765 {
4766 unsigned long usemapsize;
4767
4768 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4769 usemapsize = roundup(zonesize, pageblock_nr_pages);
4770 usemapsize = usemapsize >> pageblock_order;
4771 usemapsize *= NR_PAGEBLOCK_BITS;
4772 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4773
4774 return usemapsize / 8;
4775 }
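/*
 * Worked example (illustrative, assuming 512 pages per pageblock and
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned zone of 262144 pages
 * covers 512 pageblocks and needs 512 * 4 = 2048 bits; 2048 is already
 * a multiple of 8 * sizeof(unsigned long), so the usemap is
 * 2048 / 8 = 256 bytes.
 */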
4776
4777 static void __init setup_usemap(struct pglist_data *pgdat,
4778 struct zone *zone,
4779 unsigned long zone_start_pfn,
4780 unsigned long zonesize)
4781 {
4782 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4783 zone->pageblock_flags = NULL;
4784 if (usemapsize)
4785 zone->pageblock_flags =
4786 memblock_virt_alloc_node_nopanic(usemapsize,
4787 pgdat->node_id);
4788 }
4789 #else
4790 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4791 unsigned long zone_start_pfn, unsigned long zonesize) {}
4792 #endif /* CONFIG_SPARSEMEM */
4793
4794 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4795
4796 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4797 void __paginginit set_pageblock_order(void)
4798 {
4799 unsigned int order;
4800
4801 /* Check that pageblock_nr_pages has not already been setup */
4802 if (pageblock_order)
4803 return;
4804
4805 if (HPAGE_SHIFT > PAGE_SHIFT)
4806 order = HUGETLB_PAGE_ORDER;
4807 else
4808 order = MAX_ORDER - 1;
4809
4810 /*
4811 * Assume the largest contiguous order of interest is a huge page.
4812 * This value may be variable depending on boot parameters on IA64 and
4813 * powerpc.
4814 */
4815 pageblock_order = order;
4816 }
4817 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4818
4819 /*
4820 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4821 * is unused as pageblock_order is set at compile-time. See
4822 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4823 * the kernel config
4824 */
4825 void __paginginit set_pageblock_order(void)
4826 {
4827 }
4828
4829 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4830
4831 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4832 unsigned long present_pages)
4833 {
4834 unsigned long pages = spanned_pages;
4835
4836 /*
4837 * Provide a more accurate estimation if there are holes within
4838 * the zone and SPARSEMEM is in use. If there are holes within the
4839 * zone, each populated memory region may cost us one or two extra
4840 * memmap pages due to alignment because memmap pages for each
4841  * populated regions may not be naturally aligned on a page boundary.
4842 * So the (present_pages >> 4) heuristic is a tradeoff for that.
4843 */
4844 if (spanned_pages > present_pages + (present_pages >> 4) &&
4845 IS_ENABLED(CONFIG_SPARSEMEM))
4846 pages = present_pages;
4847
4848 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4849 }
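/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4KiB
 * pages): a zone spanning 262144 pages needs 262144 * 64 bytes = 16MiB
 * of memmap, i.e. PAGE_ALIGN(16MiB) >> PAGE_SHIFT = 4096 memmap pages.
 */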
4850
4851 /*
4852 * Set up the zone data structures:
4853 * - mark all pages reserved
4854 * - mark all memory queues empty
4855 * - clear the memory bitmaps
4856 *
4857 * NOTE: pgdat should get zeroed by caller.
4858 */
4859 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4860 unsigned long node_start_pfn, unsigned long node_end_pfn,
4861 unsigned long *zones_size, unsigned long *zholes_size)
4862 {
4863 enum zone_type j;
4864 int nid = pgdat->node_id;
4865 unsigned long zone_start_pfn = pgdat->node_start_pfn;
4866 int ret;
4867
4868 pgdat_resize_init(pgdat);
4869 #ifdef CONFIG_NUMA_BALANCING
4870 spin_lock_init(&pgdat->numabalancing_migrate_lock);
4871 pgdat->numabalancing_migrate_nr_pages = 0;
4872 pgdat->numabalancing_migrate_next_window = jiffies;
4873 #endif
4874 init_waitqueue_head(&pgdat->kswapd_wait);
4875 init_waitqueue_head(&pgdat->pfmemalloc_wait);
4876 pgdat_page_cgroup_init(pgdat);
4877
4878 for (j = 0; j < MAX_NR_ZONES; j++) {
4879 struct zone *zone = pgdat->node_zones + j;
4880 unsigned long size, realsize, freesize, memmap_pages;
4881
4882 size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
4883 node_end_pfn, zones_size);
4884 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
4885 node_start_pfn,
4886 node_end_pfn,
4887 zholes_size);
4888
4889 /*
4890 * Adjust freesize so that it accounts for how much memory
4891 * is used by this zone for memmap. This affects the watermark
4892 * and per-cpu initialisations
4893 */
4894 memmap_pages = calc_memmap_size(size, realsize);
4895 if (freesize >= memmap_pages) {
4896 freesize -= memmap_pages;
4897 if (memmap_pages)
4898 printk(KERN_DEBUG
4899 " %s zone: %lu pages used for memmap\n",
4900 zone_names[j], memmap_pages);
4901 } else
4902 printk(KERN_WARNING
4903 " %s zone: %lu pages exceeds freesize %lu\n",
4904 zone_names[j], memmap_pages, freesize);
4905
4906 /* Account for reserved pages */
4907 if (j == 0 && freesize > dma_reserve) {
4908 freesize -= dma_reserve;
4909 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
4910 zone_names[0], dma_reserve);
4911 }
4912
4913 if (!is_highmem_idx(j))
4914 nr_kernel_pages += freesize;
4915 /* Charge for highmem memmap if there are enough kernel pages */
4916 else if (nr_kernel_pages > memmap_pages * 2)
4917 nr_kernel_pages -= memmap_pages;
4918 nr_all_pages += freesize;
4919
4920 zone->spanned_pages = size;
4921 zone->present_pages = realsize;
4922 /*
4923  * Set an approximate value for lowmem here; it will be adjusted
4924  * when the bootmem allocator frees pages into the buddy system.
4925  * All highmem pages will be managed by the buddy system.
4926 */
4927 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
4928 #ifdef CONFIG_NUMA
4929 zone->node = nid;
4930 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
4931 / 100;
4932 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
4933 #endif
4934 zone->name = zone_names[j];
4935 spin_lock_init(&zone->lock);
4936 spin_lock_init(&zone->lru_lock);
4937 zone_seqlock_init(zone);
4938 zone->zone_pgdat = pgdat;
4939 zone_pcp_init(zone);
4940
4941 /* For bootup, initialized properly in watermark setup */
4942 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
4943
4944 lruvec_init(&zone->lruvec);
4945 if (!size)
4946 continue;
4947
4948 set_pageblock_order();
4949 setup_usemap(pgdat, zone, zone_start_pfn, size);
4950 ret = init_currently_empty_zone(zone, zone_start_pfn,
4951 size, MEMMAP_EARLY);
4952 BUG_ON(ret);
4953 memmap_init(size, nid, j, zone_start_pfn);
4954 zone_start_pfn += size;
4955 }
4956 }
4957
4958 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4959 {
4960 /* Skip empty nodes */
4961 if (!pgdat->node_spanned_pages)
4962 return;
4963
4964 #ifdef CONFIG_FLAT_NODE_MEM_MAP
4965 /* ia64 gets its own node_mem_map, before this, without bootmem */
4966 if (!pgdat->node_mem_map) {
4967 unsigned long size, start, end;
4968 struct page *map;
4969
4970 /*
4971 * The zone's endpoints aren't required to be MAX_ORDER
4972 * aligned but the node_mem_map endpoints must be in order
4973 * for the buddy allocator to function correctly.
4974 */
4975 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4976 end = pgdat_end_pfn(pgdat);
4977 end = ALIGN(end, MAX_ORDER_NR_PAGES);
4978 size = (end - start) * sizeof(struct page);
4979 map = alloc_remap(pgdat->node_id, size);
4980 if (!map)
4981 map = memblock_virt_alloc_node_nopanic(size,
4982 pgdat->node_id);
4983 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4984 }
4985 #ifndef CONFIG_NEED_MULTIPLE_NODES
4986 /*
4987 * With no DISCONTIG, the global mem_map is just set as node 0's
4988 */
4989 if (pgdat == NODE_DATA(0)) {
4990 mem_map = NODE_DATA(0)->node_mem_map;
4991 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4992 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4993 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4994 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4995 }
4996 #endif
4997 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
4998 }
4999
5000 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5001 unsigned long node_start_pfn, unsigned long *zholes_size)
5002 {
5003 pg_data_t *pgdat = NODE_DATA(nid);
5004 unsigned long start_pfn = 0;
5005 unsigned long end_pfn = 0;
5006
5007 /* pg_data_t should be reset to zero when it's allocated */
5008 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
5009
5010 pgdat->node_id = nid;
5011 pgdat->node_start_pfn = node_start_pfn;
5012 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5013 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5014 printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
5015 (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
5016 #endif
5017 calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5018 zones_size, zholes_size);
5019
5020 alloc_node_mem_map(pgdat);
5021 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5022 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5023 nid, (unsigned long)pgdat,
5024 (unsigned long)pgdat->node_mem_map);
5025 #endif
5026
5027 free_area_init_core(pgdat, start_pfn, end_pfn,
5028 zones_size, zholes_size);
5029 }
5030
5031 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5032
5033 #if MAX_NUMNODES > 1
5034 /*
5035 * Figure out the number of possible node ids.
5036 */
5037 void __init setup_nr_node_ids(void)
5038 {
5039 unsigned int node;
5040 unsigned int highest = 0;
5041
5042 for_each_node_mask(node, node_possible_map)
5043 highest = node;
5044 nr_node_ids = highest + 1;
5045 }
5046 #endif
5047
5048 /**
5049 * node_map_pfn_alignment - determine the maximum internode alignment
5050 *
5051 * This function should be called after node map is populated and sorted.
5052 * It calculates the maximum power of two alignment which can distinguish
5053 * all the nodes.
5054 *
5055 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5056 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
5057 * nodes are shifted by 256MiB, 256MiB is returned. Note that if only the
5058 * last node is shifted, 1GiB is enough and this function will indicate so.
5059 *
5060 * This is used to test whether pfn -> nid mapping of the chosen memory
5061 * model has fine enough granularity to avoid incorrect mapping for the
5062 * populated node map.
5063 *
5064 * Returns the determined alignment in pfn's. 0 if there is no alignment
5065 * requirement (single node).
5066 */
5067 unsigned long __init node_map_pfn_alignment(void)
5068 {
5069 unsigned long accl_mask = 0, last_end = 0;
5070 unsigned long start, end, mask;
5071 int last_nid = -1;
5072 int i, nid;
5073
5074 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5075 if (!start || last_nid < 0 || last_nid == nid) {
5076 last_nid = nid;
5077 last_end = end;
5078 continue;
5079 }
5080
5081 /*
5082 * Start with a mask granular enough to pin-point to the
5083 * start pfn and tick off bits one-by-one until it becomes
5084 * too coarse to separate the current node from the last.
5085 */
5086 mask = ~((1 << __ffs(start)) - 1);
5087 while (mask && last_end <= (start & (mask << 1)))
5088 mask <<= 1;
5089
5090 /* accumulate all internode masks */
5091 accl_mask |= mask;
5092 }
5093
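/*
 * Worked example (illustrative, not from the original source): with 4KiB
 * pages, if the accumulated mask is ~0xffff (the low 16 pfn bits never
 * mattered for telling nodes apart), then ~accl_mask + 1 == 0x10000 pfns,
 * i.e. 256MiB of required alignment.
 */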
5094 /* convert mask to number of pages */
5095 return ~accl_mask + 1;
5096 }
5097
5098 /* Find the lowest pfn for a node */
5099 static unsigned long __init find_min_pfn_for_node(int nid)
5100 {
5101 unsigned long min_pfn = ULONG_MAX;
5102 unsigned long start_pfn;
5103 int i;
5104
5105 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5106 min_pfn = min(min_pfn, start_pfn);
5107
5108 if (min_pfn == ULONG_MAX) {
5109 printk(KERN_WARNING
5110 "Could not find start_pfn for node %d\n", nid);
5111 return 0;
5112 }
5113
5114 return min_pfn;
5115 }
5116
5117 /**
5118 * find_min_pfn_with_active_regions - Find the minimum PFN registered
5119 *
5120 * It returns the minimum PFN based on information provided via
5121 * memblock_set_node().
5122 */
5123 unsigned long __init find_min_pfn_with_active_regions(void)
5124 {
5125 return find_min_pfn_for_node(MAX_NUMNODES);
5126 }
5127
5128 /*
5129 * early_calculate_totalpages()
5130 * Sum pages in active regions for movable zone.
5131 * Populate N_MEMORY for calculating usable_nodes.
5132 */
5133 static unsigned long __init early_calculate_totalpages(void)
5134 {
5135 unsigned long totalpages = 0;
5136 unsigned long start_pfn, end_pfn;
5137 int i, nid;
5138
5139 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5140 unsigned long pages = end_pfn - start_pfn;
5141
5142 totalpages += pages;
5143 if (pages)
5144 node_set_state(nid, N_MEMORY);
5145 }
5146 return totalpages;
5147 }
5148
5149 /*
5150 * Find the PFN at which the Movable zone begins in each node. Kernel memory
5151 * is spread evenly between nodes as long as the nodes have enough
5152 * memory. When they don't, some nodes will have more kernelcore than
5153 * others.
5154 */
5155 static void __init find_zone_movable_pfns_for_nodes(void)
5156 {
5157 int i, nid;
5158 unsigned long usable_startpfn;
5159 unsigned long kernelcore_node, kernelcore_remaining;
5160 /* save the state before borrowing the nodemask */
5161 nodemask_t saved_node_state = node_states[N_MEMORY];
5162 unsigned long totalpages = early_calculate_totalpages();
5163 int usable_nodes = nodes_weight(node_states[N_MEMORY]);
5164 struct memblock_region *r;
5165
5166 /* Need to find movable_zone earlier when movable_node is specified. */
5167 find_usable_zone_for_movable();
5168
5169 /*
5170 * If movable_node is specified, ignore kernelcore and movablecore
5171 * options.
5172 */
5173 if (movable_node_is_enabled()) {
5174 for_each_memblock(memory, r) {
5175 if (!memblock_is_hotpluggable(r))
5176 continue;
5177
5178 nid = r->nid;
5179
5180 usable_startpfn = PFN_DOWN(r->base);
5181 zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5182 min(usable_startpfn, zone_movable_pfn[nid]) :
5183 usable_startpfn;
5184 }
5185
5186 goto out2;
5187 }
5188
5189 /*
5190 * If movablecore=nn[KMG] was specified, calculate the corresponding
5191 * amount of kernelcore so that memory usable for
5192 * any allocation type is evenly spread. If both kernelcore
5193 * and movablecore are specified, then the value of kernelcore
5194 * will be used for required_kernelcore if it's greater than
5195 * what movablecore would have allowed.
5196 */
5197 if (required_movablecore) {
5198 unsigned long corepages;
5199
5200 /*
5201 * Round-up so that ZONE_MOVABLE is at least as large as what
5202 * was requested by the user
5203 */
5204 required_movablecore =
5205 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
5206 corepages = totalpages - required_movablecore;
5207
5208 required_kernelcore = max(required_kernelcore, corepages);
5209 }
5210
5211 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
5212 if (!required_kernelcore)
5213 goto out;
5214
5215 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
5216 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5217
5218 restart:
5219 /* Spread kernelcore memory as evenly as possible throughout nodes */
5220 kernelcore_node = required_kernelcore / usable_nodes;
5221 for_each_node_state(nid, N_MEMORY) {
5222 unsigned long start_pfn, end_pfn;
5223
5224 /*
5225 * Recalculate kernelcore_node if the division per node
5226 * now exceeds what is necessary to satisfy the requested
5227 * amount of memory for the kernel
5228 */
5229 if (required_kernelcore < kernelcore_node)
5230 kernelcore_node = required_kernelcore / usable_nodes;
5231
5232 /*
5233 * As the map is walked, we track how much memory is usable
5234 * by the kernel using kernelcore_remaining. When it is
5235 * 0, the rest of the node is usable by ZONE_MOVABLE
5236 */
5237 kernelcore_remaining = kernelcore_node;
5238
5239 /* Go through each range of PFNs within this node */
5240 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5241 unsigned long size_pages;
5242
5243 start_pfn = max(start_pfn, zone_movable_pfn[nid]);
5244 if (start_pfn >= end_pfn)
5245 continue;
5246
5247 /* Account for what is only usable for kernelcore */
5248 if (start_pfn < usable_startpfn) {
5249 unsigned long kernel_pages;
5250 kernel_pages = min(end_pfn, usable_startpfn)
5251 - start_pfn;
5252
5253 kernelcore_remaining -= min(kernel_pages,
5254 kernelcore_remaining);
5255 required_kernelcore -= min(kernel_pages,
5256 required_kernelcore);
5257
5258 /* Continue if range is now fully accounted */
5259 if (end_pfn <= usable_startpfn) {
5260
5261 /*
5262 * Push zone_movable_pfn to the end so
5263 * that if we have to rebalance
5264 * kernelcore across nodes, we will
5265 * not double account here
5266 */
5267 zone_movable_pfn[nid] = end_pfn;
5268 continue;
5269 }
5270 start_pfn = usable_startpfn;
5271 }
5272
5273 /*
5274 * The usable PFN range for ZONE_MOVABLE is from
5275 * start_pfn->end_pfn. Calculate size_pages as the
5276 * number of pages used as kernelcore
5277 */
5278 size_pages = end_pfn - start_pfn;
5279 if (size_pages > kernelcore_remaining)
5280 size_pages = kernelcore_remaining;
5281 zone_movable_pfn[nid] = start_pfn + size_pages;
5282
5283 /*
5284 * Some kernelcore has been met, update counts and
5285 * break if the kernelcore for this node has been
5286 * satisfied
5287 */
5288 required_kernelcore -= min(required_kernelcore,
5289 size_pages);
5290 kernelcore_remaining -= size_pages;
5291 if (!kernelcore_remaining)
5292 break;
5293 }
5294 }
5295
5296 /*
5297 * If there is still required_kernelcore, we do another pass with one
5298 * less node in the count. This will push zone_movable_pfn[nid] further
5299 * along on the nodes that still have memory until kernelcore is
5300 * satisfied
5301 */
5302 usable_nodes--;
5303 if (usable_nodes && required_kernelcore > usable_nodes)
5304 goto restart;
5305
5306 out2:
5307 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5308 for (nid = 0; nid < MAX_NUMNODES; nid++)
5309 zone_movable_pfn[nid] =
5310 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5311
5312 out:
5313 /* restore the node_state */
5314 node_states[N_MEMORY] = saved_node_state;
5315 }
5316
5317 /* Any regular or high memory on that node ? */
5318 static void check_for_memory(pg_data_t *pgdat, int nid)
5319 {
5320 enum zone_type zone_type;
5321
5322 if (N_MEMORY == N_NORMAL_MEMORY)
5323 return;
5324
5325 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5326 struct zone *zone = &pgdat->node_zones[zone_type];
5327 if (populated_zone(zone)) {
5328 node_set_state(nid, N_HIGH_MEMORY);
5329 if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5330 zone_type <= ZONE_NORMAL)
5331 node_set_state(nid, N_NORMAL_MEMORY);
5332 break;
5333 }
5334 }
5335 }
5336
5337 /**
5338 * free_area_init_nodes - Initialise all pg_data_t and zone data
5339 * @max_zone_pfn: an array of max PFNs for each zone
5340 *
5341 * This will call free_area_init_node() for each active node in the system.
5342 * Using the page ranges provided by memblock_set_node(), the size of each
5343 * zone in each node and their holes is calculated. If the maximum PFN
5344 * between two adjacent zones match, it is assumed that the zone is empty.
5345 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5346 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5347 * starts where the previous one ended. For example, ZONE_DMA32 starts
5348 * at arch_max_dma_pfn.
5349 */
5350 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5351 {
5352 unsigned long start_pfn, end_pfn;
5353 int i, nid;
5354
5355 /* Record where the zone boundaries are */
5356 memset(arch_zone_lowest_possible_pfn, 0,
5357 sizeof(arch_zone_lowest_possible_pfn));
5358 memset(arch_zone_highest_possible_pfn, 0,
5359 sizeof(arch_zone_highest_possible_pfn));
5360
5361 start_pfn = find_min_pfn_with_active_regions();
5362
5363 for (i = 0; i < MAX_NR_ZONES; i++) {
5364 if (i == ZONE_MOVABLE)
5365 continue;
5366
5367 end_pfn = max(max_zone_pfn[i], start_pfn);
5368 arch_zone_lowest_possible_pfn[i] = start_pfn;
5369 arch_zone_highest_possible_pfn[i] = end_pfn;
5370
5371 start_pfn = end_pfn;
5372 }
5373 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5374 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5375
5376 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5377 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5378 find_zone_movable_pfns_for_nodes();
5379
5380 /* Print out the zone ranges */
5381 printk("Zone ranges:\n");
5382 for (i = 0; i < MAX_NR_ZONES; i++) {
5383 if (i == ZONE_MOVABLE)
5384 continue;
5385 printk(KERN_CONT " %-8s ", zone_names[i]);
5386 if (arch_zone_lowest_possible_pfn[i] ==
5387 arch_zone_highest_possible_pfn[i])
5388 printk(KERN_CONT "empty\n");
5389 else
5390 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
5391 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
5392 (arch_zone_highest_possible_pfn[i]
5393 << PAGE_SHIFT) - 1);
5394 }
5395
5396 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
5397 printk("Movable zone start for each node\n");
5398 for (i = 0; i < MAX_NUMNODES; i++) {
5399 if (zone_movable_pfn[i])
5400 printk(" Node %d: %#010lx\n", i,
5401 zone_movable_pfn[i] << PAGE_SHIFT);
5402 }
5403
5404 /* Print out the early node map */
5405 printk("Early memory node ranges\n");
5406 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5407 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
5408 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
5409
5410 /* Initialise every node */
5411 mminit_verify_pageflags_layout();
5412 setup_nr_node_ids();
5413 for_each_online_node(nid) {
5414 pg_data_t *pgdat = NODE_DATA(nid);
5415 free_area_init_node(nid, NULL,
5416 find_min_pfn_for_node(nid), NULL);
5417
5418 /* Any memory on that node */
5419 if (pgdat->node_present_pages)
5420 node_set_state(nid, N_MEMORY);
5421 check_for_memory(pgdat, nid);
5422 }
5423 }
5424
5425 static int __init cmdline_parse_core(char *p, unsigned long *core)
5426 {
5427 unsigned long long coremem;
5428 if (!p)
5429 return -EINVAL;
5430
5431 coremem = memparse(p, &p);
5432 *core = coremem >> PAGE_SHIFT;
5433
5434 /* Paranoid check that UL is enough for the coremem value */
5435 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5436
5437 return 0;
5438 }
5439
5440 /*
5441 * kernelcore=size sets the amount of memory for use for allocations that
5442 * cannot be reclaimed or migrated.
5443 */
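/*
 * Usage sketch (illustrative): booting with "kernelcore=4G" keeps roughly
 * 4GiB, spread across the nodes that have memory, outside ZONE_MOVABLE so
 * it stays available for unmovable kernel allocations; the remainder of
 * each node is handed to ZONE_MOVABLE.
 */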
5444 static int __init cmdline_parse_kernelcore(char *p)
5445 {
5446 return cmdline_parse_core(p, &required_kernelcore);
5447 }
5448
5449 /*
5450 * movablecore=size sets the amount of memory for use for allocations that
5451 * can be reclaimed or migrated.
5452 */
5453 static int __init cmdline_parse_movablecore(char *p)
5454 {
5455 return cmdline_parse_core(p, &required_movablecore);
5456 }
5457
5458 early_param("kernelcore", cmdline_parse_kernelcore);
5459 early_param("movablecore", cmdline_parse_movablecore);
5460
5461 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5462
5463 void adjust_managed_page_count(struct page *page, long count)
5464 {
5465 spin_lock(&managed_page_count_lock);
5466 page_zone(page)->managed_pages += count;
5467 totalram_pages += count;
5468 #ifdef CONFIG_HIGHMEM
5469 if (PageHighMem(page))
5470 totalhigh_pages += count;
5471 #endif
5472 spin_unlock(&managed_page_count_lock);
5473 }
5474 EXPORT_SYMBOL(adjust_managed_page_count);
5475
5476 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5477 {
5478 void *pos;
5479 unsigned long pages = 0;
5480
5481 start = (void *)PAGE_ALIGN((unsigned long)start);
5482 end = (void *)((unsigned long)end & PAGE_MASK);
5483 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5484 if ((unsigned int)poison <= 0xFF)
5485 memset(pos, poison, PAGE_SIZE);
5486 free_reserved_page(virt_to_page(pos));
5487 }
5488
5489 if (pages && s)
5490 pr_info("Freeing %s memory: %ldK\n",
5491 s, pages << (PAGE_SHIFT - 10));
5492
5493 return pages;
5494 }
5495 EXPORT_SYMBOL(free_reserved_area);
5496
5497 #ifdef CONFIG_HIGHMEM
5498 void free_highmem_page(struct page *page)
5499 {
5500 __free_reserved_page(page);
5501 totalram_pages++;
5502 page_zone(page)->managed_pages++;
5503 totalhigh_pages++;
5504 }
5505 #endif
5506
5507
5508 void __init mem_init_print_info(const char *str)
5509 {
5510 unsigned long physpages, codesize, datasize, rosize, bss_size;
5511 unsigned long init_code_size, init_data_size;
5512
5513 physpages = get_num_physpages();
5514 codesize = _etext - _stext;
5515 datasize = _edata - _sdata;
5516 rosize = __end_rodata - __start_rodata;
5517 bss_size = __bss_stop - __bss_start;
5518 init_data_size = __init_end - __init_begin;
5519 init_code_size = _einittext - _sinittext;
5520
5521 /*
5522 * Detect special cases and adjust section sizes accordingly:
5523 * 1) .init.* may be embedded into .data sections
5524 * 2) .init.text.* may be out of [__init_begin, __init_end],
5525 * please refer to arch/tile/kernel/vmlinux.lds.S.
5526 * 3) .rodata.* may be embedded into .text or .data sections.
5527 */
5528 #define adj_init_size(start, end, size, pos, adj) \
5529 do { \
5530 if (start <= pos && pos < end && size > adj) \
5531 size -= adj; \
5532 } while (0)
5533
5534 adj_init_size(__init_begin, __init_end, init_data_size,
5535 _sinittext, init_code_size);
5536 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5537 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5538 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5539 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5540
5541 #undef adj_init_size
5542
5543 printk("Memory: %luK/%luK available "
5544 "(%luK kernel code, %luK rwdata, %luK rodata, "
5545 "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
5546 #ifdef CONFIG_HIGHMEM
5547 ", %luK highmem"
5548 #endif
5549 "%s%s)\n",
5550 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5551 codesize >> 10, datasize >> 10, rosize >> 10,
5552 (init_data_size + init_code_size) >> 10, bss_size >> 10,
5553 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5554 totalcma_pages << (PAGE_SHIFT-10),
5555 #ifdef CONFIG_HIGHMEM
5556 totalhigh_pages << (PAGE_SHIFT-10),
5557 #endif
5558 str ? ", " : "", str ? str : "");
5559 }
5560
5561 /**
5562 * set_dma_reserve - set the specified number of pages reserved in the first zone
5563 * @new_dma_reserve: The number of pages to mark reserved
5564 *
5565 * The per-cpu batchsize and zone watermarks are determined by present_pages.
5566 * In the DMA zone, a significant percentage may be consumed by kernel image
5567 * and other unfreeable allocations which can skew the watermarks badly. This
5568 * function may optionally be used to account for unfreeable pages in the
5569 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5570 * smaller per-cpu batchsize.
5571 */
5572 void __init set_dma_reserve(unsigned long new_dma_reserve)
5573 {
5574 dma_reserve = new_dma_reserve;
5575 }
5576
5577 void __init free_area_init(unsigned long *zones_size)
5578 {
5579 free_area_init_node(0, zones_size,
5580 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5581 }
5582
5583 static int page_alloc_cpu_notify(struct notifier_block *self,
5584 unsigned long action, void *hcpu)
5585 {
5586 int cpu = (unsigned long)hcpu;
5587
5588 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5589 lru_add_drain_cpu(cpu);
5590 drain_pages(cpu);
5591
5592 /*
5593 * Spill the event counters of the dead processor
5594 * into the current processor's event counters.
5595 * This artificially elevates the count of the current
5596 * processor.
5597 */
5598 vm_events_fold_cpu(cpu);
5599
5600 /*
5601 * Zero the differential counters of the dead processor
5602 * so that the vm statistics are consistent.
5603 *
5604 * This is only okay since the processor is dead and cannot
5605 * race with what we are doing.
5606 */
5607 cpu_vm_stats_fold(cpu);
5608 }
5609 return NOTIFY_OK;
5610 }
5611
5612 void __init page_alloc_init(void)
5613 {
5614 hotcpu_notifier(page_alloc_cpu_notify, 0);
5615 }
5616
5617 /*
5618 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
5619 * or min_free_kbytes changes.
5620 */
5621 static void calculate_totalreserve_pages(void)
5622 {
5623 struct pglist_data *pgdat;
5624 unsigned long reserve_pages = 0;
5625 enum zone_type i, j;
5626
5627 for_each_online_pgdat(pgdat) {
5628 for (i = 0; i < MAX_NR_ZONES; i++) {
5629 struct zone *zone = pgdat->node_zones + i;
5630 long max = 0;
5631
5632 /* Find valid and maximum lowmem_reserve in the zone */
5633 for (j = i; j < MAX_NR_ZONES; j++) {
5634 if (zone->lowmem_reserve[j] > max)
5635 max = zone->lowmem_reserve[j];
5636 }
5637
5638 /* we treat the high watermark as reserved pages. */
5639 max += high_wmark_pages(zone);
5640
5641 if (max > zone->managed_pages)
5642 max = zone->managed_pages;
5643 reserve_pages += max;
5644 /*
5645 * Lowmem reserves are not available to
5646 * GFP_HIGHUSER page cache allocations and
5647 * kswapd tries to balance zones to their high
5648 * watermark. As a result, neither should be
5649 * regarded as dirtyable memory, to prevent a
5650 * situation where reclaim has to clean pages
5651 * in order to balance the zones.
5652 */
5653 zone->dirty_balance_reserve = max;
5654 }
5655 }
5656 dirty_balance_reserve = reserve_pages;
5657 totalreserve_pages = reserve_pages;
5658 }
5659
5660 /*
5661 * setup_per_zone_lowmem_reserve - called whenever
5662 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
5663 * has a correct pages reserved value, so an adequate number of
5664 * pages are left in the zone after a successful __alloc_pages().
5665 */
5666 static void setup_per_zone_lowmem_reserve(void)
5667 {
5668 struct pglist_data *pgdat;
5669 enum zone_type j, idx;
5670
5671 for_each_online_pgdat(pgdat) {
5672 for (j = 0; j < MAX_NR_ZONES; j++) {
5673 struct zone *zone = pgdat->node_zones + j;
5674 unsigned long managed_pages = zone->managed_pages;
5675
5676 zone->lowmem_reserve[j] = 0;
5677
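/*
 * For each lower zone, record how many of its pages must stay free
 * before an allocation targeted at zone j may fall back into it: the
 * managed pages of all zones from the lower zone's successor up to j,
 * divided by that lower zone's ratio. E.g. (illustrative) with a ratio
 * of 256, 4GiB of higher-zone pages reserves about 16MiB in the lower
 * zone.
 */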
5678 idx = j;
5679 while (idx) {
5680 struct zone *lower_zone;
5681
5682 idx--;
5683
5684 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5685 sysctl_lowmem_reserve_ratio[idx] = 1;
5686
5687 lower_zone = pgdat->node_zones + idx;
5688 lower_zone->lowmem_reserve[j] = managed_pages /
5689 sysctl_lowmem_reserve_ratio[idx];
5690 managed_pages += lower_zone->managed_pages;
5691 }
5692 }
5693 }
5694
5695 /* update totalreserve_pages */
5696 calculate_totalreserve_pages();
5697 }
5698
5699 static void __setup_per_zone_wmarks(void)
5700 {
5701 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5702 unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
5703 unsigned long lowmem_pages = 0;
5704 struct zone *zone;
5705 unsigned long flags;
5706
5707 /* Calculate total number of !ZONE_HIGHMEM pages */
5708 for_each_zone(zone) {
5709 if (!is_highmem(zone))
5710 lowmem_pages += zone->managed_pages;
5711 }
5712
5713 for_each_zone(zone) {
5714 u64 min, low;
5715
5716 spin_lock_irqsave(&zone->lock, flags);
5717 min = (u64)pages_min * zone->managed_pages;
5718 do_div(min, lowmem_pages);
5719 low = (u64)pages_low * zone->managed_pages;
5720 do_div(low, vm_total_pages);
5721
5722 if (is_highmem(zone)) {
5723 /*
5724 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5725 * need highmem pages, so cap pages_min to a small
5726 * value here.
5727 *
5728 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5729 * deltas control async page reclaim, and so should
5730 * not be capped for highmem.
5731 */
5732 unsigned long min_pages;
5733
5734 min_pages = zone->managed_pages / 1024;
5735 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5736 zone->watermark[WMARK_MIN] = min_pages;
5737 } else {
5738 /*
5739 * If it's a lowmem zone, reserve a number of pages
5740 * proportionate to the zone's size.
5741 */
5742 zone->watermark[WMARK_MIN] = min;
5743 }
5744
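/*
 * WMARK_LOW and WMARK_HIGH are derived from WMARK_MIN plus the zone's
 * proportional share of extra_free_kbytes: LOW = MIN + low + min/4,
 * HIGH = MIN + low + min/2. E.g. (illustrative) with min = 4096 pages
 * and no extra_free_kbytes, LOW is 5120 pages and HIGH is 6144 pages.
 */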
5745 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
5746 low + (min >> 2);
5747 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
5748 low + (min >> 1);
5749
5750 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
5751 high_wmark_pages(zone) - low_wmark_pages(zone) -
5752 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5753
5754 setup_zone_migrate_reserve(zone);
5755 spin_unlock_irqrestore(&zone->lock, flags);
5756 }
5757
5758 /* update totalreserve_pages */
5759 calculate_totalreserve_pages();
5760 }
5761
5762 /**
5763 * setup_per_zone_wmarks - called when min_free_kbytes changes
5764 * or when memory is hot-{added|removed}
5765 *
5766 * Ensures that the watermark[min,low,high] values for each zone are set
5767 * correctly with respect to min_free_kbytes.
5768 */
5769 void setup_per_zone_wmarks(void)
5770 {
5771 mutex_lock(&zonelists_mutex);
5772 __setup_per_zone_wmarks();
5773 mutex_unlock(&zonelists_mutex);
5774 }
5775
5776 /*
5777 * The inactive anon list should be small enough that the VM never has to
5778 * do too much work, but large enough that each inactive page has a chance
5779 * to be referenced again before it is swapped out.
5780 *
5781 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5782 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5783 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5784 * the anonymous pages are kept on the inactive list.
5785 *
5786 * total target max
5787 * memory ratio inactive anon
5788 * -------------------------------------
5789 * 10MB 1 5MB
5790 * 100MB 1 50MB
5791 * 1GB 3 250MB
5792 * 10GB 10 0.9GB
5793 * 100GB 31 3GB
5794 * 1TB 101 10GB
5795 * 10TB 320 32GB
5796 */
5797 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5798 {
5799 unsigned int gb, ratio;
5800
5801 /* Zone size in gigabytes */
5802 gb = zone->managed_pages >> (30 - PAGE_SHIFT);
5803 if (gb)
5804 ratio = int_sqrt(10 * gb);
5805 else
5806 ratio = 1;
5807
5808 zone->inactive_ratio = ratio;
5809 }
5810
5811 static void __meminit setup_per_zone_inactive_ratio(void)
5812 {
5813 struct zone *zone;
5814
5815 for_each_zone(zone)
5816 calculate_zone_inactive_ratio(zone);
5817 }
5818
5819 /*
5820 * Initialise min_free_kbytes.
5821 *
5822 * For small machines we want it small (128k min). For large machines
5823 * we want it large (64MB max). But it is not linear, because network
5824 * bandwidth does not increase linearly with machine size. We use
5825 *
5826 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5827 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
5828 *
5829 * which yields
5830 *
5831 * 16MB: 512k
5832 * 32MB: 724k
5833 * 64MB: 1024k
5834 * 128MB: 1448k
5835 * 256MB: 2048k
5836 * 512MB: 2896k
5837 * 1024MB: 4096k
5838 * 2048MB: 5792k
5839 * 4096MB: 8192k
5840 * 8192MB: 11584k
5841 * 16384MB: 16384k
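*
* Worked check (illustrative): with 1GiB of lowmem, lowmem_kbytes is
* 1048576 and sqrt(16 * 1048576) = 4096k, matching the 1024MB row above.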
5842 */
5843 int __meminit init_per_zone_wmark_min(void)
5844 {
5845 unsigned long lowmem_kbytes;
5846 int new_min_free_kbytes;
5847
5848 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5849 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5850
5851 if (new_min_free_kbytes > user_min_free_kbytes) {
5852 min_free_kbytes = new_min_free_kbytes;
5853 if (min_free_kbytes < 128)
5854 min_free_kbytes = 128;
5855 if (min_free_kbytes > 65536)
5856 min_free_kbytes = 65536;
5857 } else {
5858 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5859 new_min_free_kbytes, user_min_free_kbytes);
5860 }
5861 setup_per_zone_wmarks();
5862 refresh_zone_stat_thresholds();
5863 setup_per_zone_lowmem_reserve();
5864 setup_per_zone_inactive_ratio();
5865 return 0;
5866 }
5867 module_init(init_per_zone_wmark_min)
5868
5869 /*
5870 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5871 * that we can call two helper functions whenever min_free_kbytes
5872 * or extra_free_kbytes changes.
5873 */
5874 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5875 void __user *buffer, size_t *length, loff_t *ppos)
5876 {
5877 int rc;
5878
5879 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5880 if (rc)
5881 return rc;
5882
5883 if (write) {
5884 user_min_free_kbytes = min_free_kbytes;
5885 setup_per_zone_wmarks();
5886 }
5887 return 0;
5888 }
5889
5890 #ifdef CONFIG_NUMA
5891 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5892 void __user *buffer, size_t *length, loff_t *ppos)
5893 {
5894 struct zone *zone;
5895 int rc;
5896
5897 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5898 if (rc)
5899 return rc;
5900
5901 for_each_zone(zone)
5902 zone->min_unmapped_pages = (zone->managed_pages *
5903 sysctl_min_unmapped_ratio) / 100;
5904 return 0;
5905 }
5906
5907 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5908 void __user *buffer, size_t *length, loff_t *ppos)
5909 {
5910 struct zone *zone;
5911 int rc;
5912
5913 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5914 if (rc)
5915 return rc;
5916
5917 for_each_zone(zone)
5918 zone->min_slab_pages = (zone->managed_pages *
5919 sysctl_min_slab_ratio) / 100;
5920 return 0;
5921 }
5922 #endif
5923
5924 /*
5925 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5926 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5927 * whenever sysctl_lowmem_reserve_ratio changes.
5928 *
5929 * The reserve ratio obviously has absolutely no relation with the
5930 * minimum watermarks. The lowmem reserve ratio is only meaningful
5931 * as a function of the boot-time zone sizes.
5932 */
5933 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
5934 void __user *buffer, size_t *length, loff_t *ppos)
5935 {
5936 proc_dointvec_minmax(table, write, buffer, length, ppos);
5937 setup_per_zone_lowmem_reserve();
5938 return 0;
5939 }
5940
5941 /*
5942 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5943 * cpu. It is the fraction of total pages in each zone that a hot per cpu
5944 * pagelist can have before it gets flushed back to the buddy allocator.
5945 */
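/*
 * Illustrative example: writing 8 to this sysctl on a zone with 1,048,576
 * managed pages sets each CPU's pcp->high to about 131,072 pages (1/8 of
 * the zone); the per-cpu batch size is then derived from that high value.
 */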
5946 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
5947 void __user *buffer, size_t *length, loff_t *ppos)
5948 {
5949 struct zone *zone;
5950 int old_percpu_pagelist_fraction;
5951 int ret;
5952
5953 mutex_lock(&pcp_batch_high_lock);
5954 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
5955
5956 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5957 if (!write || ret < 0)
5958 goto out;
5959
5960 /* Sanity checking to avoid pcp imbalance */
5961 if (percpu_pagelist_fraction &&
5962 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
5963 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
5964 ret = -EINVAL;
5965 goto out;
5966 }
5967
5968 /* No change? */
5969 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
5970 goto out;
5971
5972 for_each_populated_zone(zone) {
5973 unsigned int cpu;
5974
5975 for_each_possible_cpu(cpu)
5976 pageset_set_high_and_batch(zone,
5977 per_cpu_ptr(zone->pageset, cpu));
5978 }
5979 out:
5980 mutex_unlock(&pcp_batch_high_lock);
5981 return ret;
5982 }
5983
5984 int hashdist = HASHDIST_DEFAULT;
5985
5986 #ifdef CONFIG_NUMA
5987 static int __init set_hashdist(char *str)
5988 {
5989 if (!str)
5990 return 0;
5991 hashdist = simple_strtoul(str, &str, 0);
5992 return 1;
5993 }
5994 __setup("hashdist=", set_hashdist);
5995 #endif
5996
5997 /*
5998 * allocate a large system hash table from bootmem
5999 * - it is assumed that the hash table must contain an exact power-of-2
6000 * quantity of entries
6001 * - limit is the number of hash buckets, not the total allocation size
6002 */
6003 void *__init alloc_large_system_hash(const char *tablename,
6004 unsigned long bucketsize,
6005 unsigned long numentries,
6006 int scale,
6007 int flags,
6008 unsigned int *_hash_shift,
6009 unsigned int *_hash_mask,
6010 unsigned long low_limit,
6011 unsigned long high_limit)
6012 {
6013 unsigned long long max = high_limit;
6014 unsigned long log2qty, size;
6015 void *table = NULL;
6016
6017 /* allow the kernel cmdline to have a say */
6018 if (!numentries) {
6019 /* round applicable memory size up to nearest megabyte */
6020 numentries = nr_kernel_pages;
6021
6022 /* It isn't necessary when PAGE_SIZE >= 1MB */
6023 if (PAGE_SHIFT < 20)
6024 numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6025
6026 /* limit to 1 bucket per 2^scale bytes of low memory */
6027 if (scale > PAGE_SHIFT)
6028 numentries >>= (scale - PAGE_SHIFT);
6029 else
6030 numentries <<= (PAGE_SHIFT - scale);
6031
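/*
 * Illustrative example: with 4KiB pages, 4GiB worth of kernel pages and
 * scale = 17, numentries is shifted right by (17 - 12) = 5, i.e. one
 * bucket per 128KiB of low memory, or about 32768 buckets before the
 * power-of-two rounding below.
 */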
6032 /* Make sure we've got at least a 0-order allocation.. */
6033 if (unlikely(flags & HASH_SMALL)) {
6034 /* Makes no sense without HASH_EARLY */
6035 WARN_ON(!(flags & HASH_EARLY));
6036 if (!(numentries >> *_hash_shift)) {
6037 numentries = 1UL << *_hash_shift;
6038 BUG_ON(!numentries);
6039 }
6040 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6041 numentries = PAGE_SIZE / bucketsize;
6042 }
6043 numentries = roundup_pow_of_two(numentries);
6044
6045 /* limit allocation size to 1/16 total memory by default */
6046 if (max == 0) {
6047 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6048 do_div(max, bucketsize);
6049 }
6050 max = min(max, 0x80000000ULL);
6051
6052 if (numentries < low_limit)
6053 numentries = low_limit;
6054 if (numentries > max)
6055 numentries = max;
6056
6057 log2qty = ilog2(numentries);
6058
6059 do {
6060 size = bucketsize << log2qty;
6061 if (flags & HASH_EARLY)
6062 table = memblock_virt_alloc_nopanic(size, 0);
6063 else if (hashdist)
6064 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6065 else {
6066 /*
6067 * If bucketsize is not a power-of-two, we may free
6068 * some pages at the end of the hash table, which
6069 * alloc_pages_exact() does automatically
6070 */
6071 if (get_order(size) < MAX_ORDER) {
6072 table = alloc_pages_exact(size, GFP_ATOMIC);
6073 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6074 }
6075 }
6076 } while (!table && size > PAGE_SIZE && --log2qty);
6077
6078 if (!table)
6079 panic("Failed to allocate %s hash table\n", tablename);
6080
6081 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
6082 tablename,
6083 (1UL << log2qty),
6084 ilog2(size) - PAGE_SHIFT,
6085 size);
6086
6087 if (_hash_shift)
6088 *_hash_shift = log2qty;
6089 if (_hash_mask)
6090 *_hash_mask = (1 << log2qty) - 1;
6091
6092 return table;
6093 }
6094
6095 /* Return a pointer to the bitmap storing bits affecting a block of pages */
6096 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6097 unsigned long pfn)
6098 {
6099 #ifdef CONFIG_SPARSEMEM
6100 return __pfn_to_section(pfn)->pageblock_flags;
6101 #else
6102 return zone->pageblock_flags;
6103 #endif /* CONFIG_SPARSEMEM */
6104 }
6105
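/*
 * Each pageblock is described by NR_PAGEBLOCK_BITS consecutive bits in the
 * bitmap returned above; this helper converts a pfn into the index of the
 * first bit for its pageblock, relative to the section with SPARSEMEM or
 * to the pageblock-aligned zone start otherwise.
 */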
6106 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6107 {
6108 #ifdef CONFIG_SPARSEMEM
6109 pfn &= (PAGES_PER_SECTION-1);
6110 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6111 #else
6112 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
6113 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6114 #endif /* CONFIG_SPARSEMEM */
6115 }
6116
6117 /**
6118 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6119 * @page: The page within the block of interest
6120 * @pfn: The target page frame number
6121 * @end_bitidx: The last bit of interest to retrieve
6122 * @mask: mask of bits that the caller is interested in
6123 *
6124 * Return: pageblock_bits flags
6125 */
6126 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6127 unsigned long end_bitidx,
6128 unsigned long mask)
6129 {
6130 struct zone *zone;
6131 unsigned long *bitmap;
6132 unsigned long bitidx, word_bitidx;
6133 unsigned long word;
6134
6135 zone = page_zone(page);
6136 bitmap = get_pageblock_bitmap(zone, pfn);
6137 bitidx = pfn_to_bitidx(zone, pfn);
6138 word_bitidx = bitidx / BITS_PER_LONG;
6139 bitidx &= (BITS_PER_LONG-1);
6140
6141 word = bitmap[word_bitidx];
6142 bitidx += end_bitidx;
6143 return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
6144 }
6145
6146 /**
6147 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6148 * @page: The page within the block of interest
6149 * @flags: The flags to set
6150 * @pfn: The target page frame number
6151 * @end_bitidx: The last bit of interest
6152 * @mask: mask of bits that the caller is interested in
6153 */
6154 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6155 unsigned long pfn,
6156 unsigned long end_bitidx,
6157 unsigned long mask)
6158 {
6159 struct zone *zone;
6160 unsigned long *bitmap;
6161 unsigned long bitidx, word_bitidx;
6162 unsigned long old_word, word;
6163
6164 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
6165
6166 zone = page_zone(page);
6167 bitmap = get_pageblock_bitmap(zone, pfn);
6168 bitidx = pfn_to_bitidx(zone, pfn);
6169 word_bitidx = bitidx / BITS_PER_LONG;
6170 bitidx &= (BITS_PER_LONG-1);
6171
6172 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
6173
6174 bitidx += end_bitidx;
6175 mask <<= (BITS_PER_LONG - bitidx - 1);
6176 flags <<= (BITS_PER_LONG - bitidx - 1);
6177
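/*
 * Lock-free read-modify-write: re-read the word and retry the cmpxchg
 * until no other CPU has modified it between our read and our update,
 * so concurrent updates to neighbouring pageblock bits are not lost.
 */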
6178 word = ACCESS_ONCE(bitmap[word_bitidx]);
6179 for (;;) {
6180 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6181 if (word == old_word)
6182 break;
6183 word = old_word;
6184 }
6185 }
6186
6187 /*
6188 * This function checks whether the pageblock includes unmovable pages or not.
6189 * If @count is not zero, it is okay to include up to @count unmovable pages.
6190 *
6191 * The PageLRU check without isolation or lru_lock could race, so a
6192 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
6193 * expect this function to be exact.
6194 */
6195 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6196 bool skip_hwpoisoned_pages)
6197 {
6198 unsigned long pfn, iter, found;
6199 int mt;
6200
6201 /*
6202 * To avoid noisy results, lru_add_drain_all() should be called first.
6203 * A ZONE_MOVABLE zone never contains unmovable pages.
6204 */
6205 if (zone_idx(zone) == ZONE_MOVABLE)
6206 return false;
6207 mt = get_pageblock_migratetype(page);
6208 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
6209 return false;
6210
6211 pfn = page_to_pfn(page);
6212 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6213 unsigned long check = pfn + iter;
6214
6215 if (!pfn_valid_within(check))
6216 continue;
6217
6218 page = pfn_to_page(check);
6219
6220 /*
6221 * Hugepages are not in LRU lists, but they're movable.
6222 * We need not scan over tail pages because we don't
6223 * handle each tail page individually in migration.
6224 */
6225 if (PageHuge(page)) {
6226 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6227 continue;
6228 }
6229
6230 /*
6231 * We can't use page_count without pinning the page
6232 * because another CPU can free the compound page.
6233 * This check already skips compound tails of THP
6234 * because their page->_count is zero at all times.
6235 */
6236 if (!atomic_read(&page->_count)) {
6237 if (PageBuddy(page))
6238 iter += (1 << page_order(page)) - 1;
6239 continue;
6240 }
6241
6242 /*
6243 * The HWPoisoned page may not be in the buddy system, and
6244 * page_count() is not 0.
6245 */
6246 if (skip_hwpoisoned_pages && PageHWPoison(page))
6247 continue;
6248
6249 if (!PageLRU(page))
6250 found++;
6251 /*
6252 * If there are RECLAIMABLE pages, we need to check them.
6253 * But for now, memory offline itself doesn't call shrink_slab()
6254 * and this still needs to be fixed.
6255 */
6256 /*
6257 * If the page is not RAM, page_count() should be 0.
6258 * We don't need further checks. This is a _used_, non-movable page.
6259 *
6260 * The problematic thing here is PG_reserved pages. PG_reserved
6261 * is set to both of a memory hole page and a _used_ kernel
6262 * page at boot.
6263 */
6264 if (found > count)
6265 return true;
6266 }
6267 return false;
6268 }
6269
6270 bool is_pageblock_removable_nolock(struct page *page)
6271 {
6272 struct zone *zone;
6273 unsigned long pfn;
6274
6275 /*
6276 * We have to be careful here because we are iterating over memory
6277 * sections which are not zone aware so we might end up outside of
6278 * the zone but still within the section.
6279 * We have to take care about the node as well. If the node is offline
6280 * its NODE_DATA will be NULL - see page_zone.
6281 */
6282 if (!node_online(page_to_nid(page)))
6283 return false;
6284
6285 zone = page_zone(page);
6286 pfn = page_to_pfn(page);
6287 if (!zone_spans_pfn(zone, pfn))
6288 return false;
6289
6290 return !has_unmovable_pages(zone, page, 0, true);
6291 }
6292
6293 #ifdef CONFIG_CMA
6294
6295 static unsigned long pfn_max_align_down(unsigned long pfn)
6296 {
6297 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6298 pageblock_nr_pages) - 1);
6299 }
6300
6301 static unsigned long pfn_max_align_up(unsigned long pfn)
6302 {
6303 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6304 pageblock_nr_pages));
6305 }
6306
6307 /* [start, end) must belong to a single zone. */
6308 static int __alloc_contig_migrate_range(struct compact_control *cc,
6309 unsigned long start, unsigned long end)
6310 {
6311 /* This function is based on compact_zone() from compaction.c. */
6312 unsigned long nr_reclaimed;
6313 unsigned long pfn = start;
6314 unsigned int tries = 0;
6315 int ret = 0;
6316
6317 migrate_prep();
6318
6319 while (pfn < end || !list_empty(&cc->migratepages)) {
6320 if (fatal_signal_pending(current)) {
6321 ret = -EINTR;
6322 break;
6323 }
6324
6325 if (list_empty(&cc->migratepages)) {
6326 cc->nr_migratepages = 0;
6327 pfn = isolate_migratepages_range(cc, pfn, end);
6328 if (!pfn) {
6329 ret = -EINTR;
6330 break;
6331 }
6332 tries = 0;
6333 } else if (++tries == 5) {
6334 ret = ret < 0 ? ret : -EBUSY;
6335 break;
6336 }
6337
6338 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6339 &cc->migratepages);
6340 cc->nr_migratepages -= nr_reclaimed;
6341
6342 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6343 NULL, 0, cc->mode, MR_CMA);
6344 }
6345 if (ret < 0) {
6346 putback_movable_pages(&cc->migratepages);
6347 return ret;
6348 }
6349 return 0;
6350 }
6351
6352 /**
6353 * alloc_contig_range() -- tries to allocate given range of pages
6354 * @start: start PFN to allocate
6355 * @end: one-past-the-last PFN to allocate
6356 * @migratetype: migratetype of the underlaying pageblocks (either
6357 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
6358 * in range must have the same migratetype and it must
6359 * be either of the two.
6360 *
6361 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6362 * aligned, however it's the caller's responsibility to guarantee that
6363 * we are the only thread that changes migrate type of pageblocks the
6364 * pages fall in.
6365 *
6366 * The PFN range must belong to a single zone.
6367 *
6368 * Returns zero on success or negative error code. On success all
6369 * pages which PFN is in [start, end) are allocated for the caller and
6370 * need to be freed with free_contig_range().
6371 */
6372 int alloc_contig_range(unsigned long start, unsigned long end,
6373 unsigned migratetype)
6374 {
6375 unsigned long outer_start, outer_end;
6376 unsigned int order;
6377 int ret = 0;
6378
6379 struct compact_control cc = {
6380 .nr_migratepages = 0,
6381 .order = -1,
6382 .zone = page_zone(pfn_to_page(start)),
6383 .mode = MIGRATE_SYNC,
6384 .ignore_skip_hint = true,
6385 };
6386 INIT_LIST_HEAD(&cc.migratepages);
6387
6388 /*
6389 * What we do here is we mark all pageblocks in range as
6390 * MIGRATE_ISOLATE. Because pageblock and max order pages may
6391 * have different sizes, and due to the way the page allocator
6392 * works, we align the range to the biggest of the two so
6393 * that the page allocator won't try to merge buddies from
6394 * different pageblocks and change MIGRATE_ISOLATE to some
6395 * other migration type.
6396 *
6397 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6398 * migrate the pages from an unaligned range (ie. pages that
6399 * we are interested in). This will put all the pages in
6400 * range back to page allocator as MIGRATE_ISOLATE.
6401 *
6402 * When this is done, we take the pages in range from page
6403 * allocator removing them from the buddy system. This way
6404 * page allocator will never consider using them.
6405 *
6406 * This lets us mark the pageblocks back as
6407 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6408 * aligned range but not in the unaligned, original range are
6409 * put back to page allocator so that buddy can use them.
6410 */
6411
6412 ret = start_isolate_page_range(pfn_max_align_down(start),
6413 pfn_max_align_up(end), migratetype,
6414 false);
6415 if (ret)
6416 return ret;
6417
6418 ret = __alloc_contig_migrate_range(&cc, start, end);
6419 if (ret)
6420 goto done;
6421
6422 /*
6423 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6424 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
6425 * more, all pages in [start, end) are free in page allocator.
6426 * What we are going to do is to allocate all pages from
6427 * [start, end) (that is remove them from page allocator).
6428 *
6429 * The only problem is that pages at the beginning and at the
6430 * end of the interesting range may not be aligned with pages that the
6431 * page allocator holds, i.e. they can be part of higher order
6432 * pages. Because of this, we reserve the bigger range and
6433 * once this is done free the pages we are not interested in.
6434 *
6435 * We don't have to hold zone->lock here because the pages are
6436 * isolated thus they won't get removed from buddy.
6437 */
6438
6439 lru_add_drain_all();
6440 drain_all_pages();
6441
6442 order = 0;
6443 outer_start = start;
6444 while (!PageBuddy(pfn_to_page(outer_start))) {
6445 if (++order >= MAX_ORDER) {
6446 ret = -EBUSY;
6447 goto done;
6448 }
6449 outer_start &= ~0UL << order;
6450 }
6451
6452 /* Make sure the range is really isolated. */
6453 if (test_pages_isolated(outer_start, end, false)) {
6454 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
6455 __func__, outer_start, end);
6456 ret = -EBUSY;
6457 goto done;
6458 }
6459
6460 /* Grab isolated pages from freelists. */
6461 outer_end = isolate_freepages_range(&cc, outer_start, end);
6462 if (!outer_end) {
6463 ret = -EBUSY;
6464 goto done;
6465 }
6466
6467 /* Free head and tail (if any) */
6468 if (start != outer_start)
6469 free_contig_range(outer_start, start - outer_start);
6470 if (end != outer_end)
6471 free_contig_range(end, outer_end - end);
6472
6473 done:
6474 undo_isolate_page_range(pfn_max_align_down(start),
6475 pfn_max_align_up(end), migratetype);
6476 return ret;
6477 }
6478
6479 void free_contig_range(unsigned long pfn, unsigned nr_pages)
6480 {
6481 unsigned int count = 0;
6482
6483 for (; nr_pages--; pfn++) {
6484 struct page *page = pfn_to_page(pfn);
6485
6486 count += page_count(page) != 1;
6487 __free_page(page);
6488 }
6489 WARN(count != 0, "%d pages are still in use!\n", count);
6490 }
6491 #endif
6492
6493 #ifdef CONFIG_MEMORY_HOTPLUG
6494 /*
6495 * The zone indicated has a new number of managed_pages; batch sizes and percpu
6496 * page high values need to be recalculated.
6497 */
6498 void __meminit zone_pcp_update(struct zone *zone)
6499 {
6500 unsigned cpu;
6501 mutex_lock(&pcp_batch_high_lock);
6502 for_each_possible_cpu(cpu)
6503 pageset_set_high_and_batch(zone,
6504 per_cpu_ptr(zone->pageset, cpu));
6505 mutex_unlock(&pcp_batch_high_lock);
6506 }
6507 #endif
6508
6509 void zone_pcp_reset(struct zone *zone)
6510 {
6511 unsigned long flags;
6512 int cpu;
6513 struct per_cpu_pageset *pset;
6514
6515 /* avoid races with drain_pages() */
6516 local_irq_save(flags);
6517 if (zone->pageset != &boot_pageset) {
6518 for_each_online_cpu(cpu) {
6519 pset = per_cpu_ptr(zone->pageset, cpu);
6520 drain_zonestat(zone, pset);
6521 }
6522 free_percpu(zone->pageset);
6523 zone->pageset = &boot_pageset;
6524 }
6525 local_irq_restore(flags);
6526 }
6527
6528 #ifdef CONFIG_MEMORY_HOTREMOVE
6529 /*
6530 * All pages in the range must be isolated before calling this.
6531 */
6532 void
6533 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6534 {
6535 struct page *page;
6536 struct zone *zone;
6537 unsigned int order, i;
6538 unsigned long pfn;
6539 unsigned long flags;
6540 /* find the first valid pfn */
6541 for (pfn = start_pfn; pfn < end_pfn; pfn++)
6542 if (pfn_valid(pfn))
6543 break;
6544 if (pfn == end_pfn)
6545 return;
6546 zone = page_zone(pfn_to_page(pfn));
6547 spin_lock_irqsave(&zone->lock, flags);
6548 pfn = start_pfn;
6549 while (pfn < end_pfn) {
6550 if (!pfn_valid(pfn)) {
6551 pfn++;
6552 continue;
6553 }
6554 page = pfn_to_page(pfn);
6555 /*
6556 * The HWPoisoned page may not be in the buddy system, and
6557 * page_count() is not 0.
6558 */
6559 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6560 pfn++;
6561 SetPageReserved(page);
6562 continue;
6563 }
6564
6565 BUG_ON(page_count(page));
6566 BUG_ON(!PageBuddy(page));
6567 order = page_order(page);
6568 #ifdef CONFIG_DEBUG_VM
6569 printk(KERN_INFO "remove from free list %lx %d %lx\n",
6570 pfn, 1 << order, end_pfn);
6571 #endif
6572 list_del(&page->lru);
6573 rmv_page_order(page);
6574 zone->free_area[order].nr_free--;
6575 for (i = 0; i < (1 << order); i++)
6576 SetPageReserved((page+i));
6577 pfn += (1 << order);
6578 }
6579 spin_unlock_irqrestore(&zone->lock, flags);
6580 }
6581 #endif
6582
6583 #ifdef CONFIG_MEMORY_FAILURE
6584 bool is_free_buddy_page(struct page *page)
6585 {
6586 struct zone *zone = page_zone(page);
6587 unsigned long pfn = page_to_pfn(page);
6588 unsigned long flags;
6589 unsigned int order;
6590
6591 spin_lock_irqsave(&zone->lock, flags);
6592 for (order = 0; order < MAX_ORDER; order++) {
6593 struct page *page_head = page - (pfn & ((1 << order) - 1));
6594
6595 if (PageBuddy(page_head) && page_order(page_head) >= order)
6596 break;
6597 }
6598 spin_unlock_irqrestore(&zone->lock, flags);
6599
6600 return order < MAX_ORDER;
6601 }
6602 #endif
6603