1  /*
2   *  linux/mm/memory_hotplug.c
3   *
4   *  Copyright (C)
5   */
6  
7  #include <linux/stddef.h>
8  #include <linux/mm.h>
9  #include <linux/swap.h>
10  #include <linux/interrupt.h>
11  #include <linux/pagemap.h>
12  #include <linux/compiler.h>
13  #include <linux/export.h>
14  #include <linux/pagevec.h>
15  #include <linux/writeback.h>
16  #include <linux/slab.h>
17  #include <linux/sysctl.h>
18  #include <linux/cpu.h>
19  #include <linux/memory.h>
20  #include <linux/memremap.h>
21  #include <linux/memory_hotplug.h>
22  #include <linux/highmem.h>
23  #include <linux/vmalloc.h>
24  #include <linux/ioport.h>
25  #include <linux/delay.h>
26  #include <linux/migrate.h>
27  #include <linux/page-isolation.h>
28  #include <linux/pfn.h>
29  #include <linux/suspend.h>
30  #include <linux/mm_inline.h>
31  #include <linux/firmware-map.h>
32  #include <linux/stop_machine.h>
33  #include <linux/hugetlb.h>
34  #include <linux/memblock.h>
35  #include <linux/bootmem.h>
36  #include <linux/compaction.h>
37  
38  #include <asm/tlbflush.h>
39  
40  #include "internal.h"
41  
42  /*
43   * online_page_callback contains a pointer to the current page onlining
44   * function. Initially it is generic_online_page(). If required, it can be
45   * changed by calling set_online_page_callback() to register a callback and
46   * restore_online_page_callback() to restore the generic callback.
47   */
48  
49  static void generic_online_page(struct page *page);
50  
51  static online_page_callback_t online_page_callback = generic_online_page;
52  static DEFINE_MUTEX(online_page_callback_lock);
53  
54  /* The same as the cpu_hotplug lock, but for memory hotplug. */
55  static struct {
56  	struct task_struct *active_writer;
57  	struct mutex lock; /* Synchronizes accesses to refcount, */
58  	/*
59  	 * Also blocks the new readers during
60  	 * an ongoing mem hotplug operation.
61  	 */
62  	int refcount;
63  
64  #ifdef CONFIG_DEBUG_LOCK_ALLOC
65  	struct lockdep_map dep_map;
66  #endif
67  } mem_hotplug = {
68  	.active_writer = NULL,
69  	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
70  	.refcount = 0,
71  #ifdef CONFIG_DEBUG_LOCK_ALLOC
72  	.dep_map = {.name = "mem_hotplug.lock" },
73  #endif
74  };
75  
76  /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
77  #define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
78  #define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
79  #define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)
80  
81  #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
82  bool memhp_auto_online;
83  #else
84  bool memhp_auto_online = true;
85  #endif
86  EXPORT_SYMBOL_GPL(memhp_auto_online);
87  
88  static int __init setup_memhp_default_state(char *str)
89  {
90  	if (!strcmp(str, "online"))
91  		memhp_auto_online = true;
92  	else if (!strcmp(str, "offline"))
93  		memhp_auto_online = false;
94  
95  	return 1;
96  }
97  __setup("memhp_default_state=", setup_memhp_default_state);
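/*
 * Example (illustrative): booting with "memhp_default_state=online" on the
 * kernel command line makes hot-added memory blocks come up online by
 * default, while "memhp_default_state=offline" leaves them offline until
 * userspace onlines them, e.g. via /sys/devices/system/memory/memoryN/state.
 */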
98  
99  void get_online_mems(void)
100  {
101  	might_sleep();
102  	if (mem_hotplug.active_writer == current)
103  		return;
104  	memhp_lock_acquire_read();
105  	mutex_lock(&mem_hotplug.lock);
106  	mem_hotplug.refcount++;
107  	mutex_unlock(&mem_hotplug.lock);
108  
109  }
110  
111  void put_online_mems(void)
112  {
113  	if (mem_hotplug.active_writer == current)
114  		return;
115  	mutex_lock(&mem_hotplug.lock);
116  
117  	if (WARN_ON(!mem_hotplug.refcount))
118  		mem_hotplug.refcount++; /* try to fix things up */
119  
120  	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
121  		wake_up_process(mem_hotplug.active_writer);
122  	mutex_unlock(&mem_hotplug.lock);
123  	memhp_lock_release();
124  
125  }
126  
127  void mem_hotplug_begin(void)
128  {
129  	mem_hotplug.active_writer = current;
130  
131  	memhp_lock_acquire();
132  	for (;;) {
133  		mutex_lock(&mem_hotplug.lock);
134  		if (likely(!mem_hotplug.refcount))
135  			break;
136  		__set_current_state(TASK_UNINTERRUPTIBLE);
137  		mutex_unlock(&mem_hotplug.lock);
138  		schedule();
139  	}
140  }
141  
142  void mem_hotplug_done(void)
143  {
144  	mem_hotplug.active_writer = NULL;
145  	mutex_unlock(&mem_hotplug.lock);
146  	memhp_lock_release();
147  }
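/*
 * Usage sketch (illustrative, not additional code in this file): paths that
 * must keep the memory layout stable take the reader side,
 *
 *	get_online_mems();
 *	... walk zones/sections while hotplug is excluded ...
 *	put_online_mems();
 *
 * while the hotplug operations below bracket their work with
 * mem_hotplug_begin()/mem_hotplug_done(); the writer waits until the reader
 * refcount drops to zero before proceeding.
 */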
148  
149  /* add this memory to iomem resource */
150  static struct resource *register_memory_resource(u64 start, u64 size)
151  {
152  	struct resource *res;
153  	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
154  	if (!res)
155  		return ERR_PTR(-ENOMEM);
156  
157  	res->name = "System RAM";
158  	res->start = start;
159  	res->end = start + size - 1;
160  	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
161  	if (request_resource(&iomem_resource, res) < 0) {
162  		pr_debug("System RAM resource %pR cannot be added\n", res);
163  		kfree(res);
164  		return ERR_PTR(-EEXIST);
165  	}
166  	return res;
167  }
168  
169  static void release_memory_resource(struct resource *res)
170  {
171  	if (!res)
172  		return;
173  	release_resource(res);
174  	kfree(res);
175  	return;
176  }
177  
178  #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
179  void get_page_bootmem(unsigned long info,  struct page *page,
180  		      unsigned long type)
181  {
182  	page->freelist = (void *)type;
183  	SetPagePrivate(page);
184  	set_page_private(page, info);
185  	page_ref_inc(page);
186  }
187  
188  void put_page_bootmem(struct page *page)
189  {
190  	unsigned long type;
191  
192  	type = (unsigned long) page->freelist;
193  	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
194  	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
195  
196  	if (page_ref_dec_return(page) == 1) {
197  		page->freelist = NULL;
198  		ClearPagePrivate(page);
199  		set_page_private(page, 0);
200  		INIT_LIST_HEAD(&page->lru);
201  		free_reserved_page(page);
202  	}
203  }
204  
205  #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
206  #ifndef CONFIG_SPARSEMEM_VMEMMAP
207  static void register_page_bootmem_info_section(unsigned long start_pfn)
208  {
209  	unsigned long *usemap, mapsize, section_nr, i;
210  	struct mem_section *ms;
211  	struct page *page, *memmap;
212  
213  	section_nr = pfn_to_section_nr(start_pfn);
214  	ms = __nr_to_section(section_nr);
215  
216  	/* Get section's memmap address */
217  	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
218  
219  	/*
220  	 * Get page for the memmap's phys address
221  	 * XXX: need more consideration for sparse_vmemmap...
222  	 */
223  	page = virt_to_page(memmap);
224  	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
225  	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
226  
227  	/* remember memmap's page */
228  	for (i = 0; i < mapsize; i++, page++)
229  		get_page_bootmem(section_nr, page, SECTION_INFO);
230  
231  	usemap = __nr_to_section(section_nr)->pageblock_flags;
232  	page = virt_to_page(usemap);
233  
234  	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
235  
236  	for (i = 0; i < mapsize; i++, page++)
237  		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
238  
239  }
240  #else /* CONFIG_SPARSEMEM_VMEMMAP */
241  static void register_page_bootmem_info_section(unsigned long start_pfn)
242  {
243  	unsigned long *usemap, mapsize, section_nr, i;
244  	struct mem_section *ms;
245  	struct page *page, *memmap;
246  
247  	if (!pfn_valid(start_pfn))
248  		return;
249  
250  	section_nr = pfn_to_section_nr(start_pfn);
251  	ms = __nr_to_section(section_nr);
252  
253  	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
254  
255  	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
256  
257  	usemap = __nr_to_section(section_nr)->pageblock_flags;
258  	page = virt_to_page(usemap);
259  
260  	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
261  
262  	for (i = 0; i < mapsize; i++, page++)
263  		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
264  }
265  #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
266  
267  void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
268  {
269  	unsigned long i, pfn, end_pfn, nr_pages;
270  	int node = pgdat->node_id;
271  	struct page *page;
272  
273  	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
274  	page = virt_to_page(pgdat);
275  
276  	for (i = 0; i < nr_pages; i++, page++)
277  		get_page_bootmem(node, page, NODE_INFO);
278  
279  	pfn = pgdat->node_start_pfn;
280  	end_pfn = pgdat_end_pfn(pgdat);
281  
282  	/* register section info */
283  	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
284  		/*
285  		 * Some platforms can assign the same pfn to multiple nodes - on
286  		 * node0 as well as nodeN.  To avoid registering a pfn against
287  		 * multiple nodes we check that this pfn does not already
288  		 * reside in some other nodes.
289  		 */
290  		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
291  			register_page_bootmem_info_section(pfn);
292  	}
293  }
294  #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
295  
296  static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
297  				     unsigned long end_pfn)
298  {
299  	unsigned long old_zone_end_pfn;
300  
301  	zone_span_writelock(zone);
302  
303  	old_zone_end_pfn = zone_end_pfn(zone);
304  	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
305  		zone->zone_start_pfn = start_pfn;
306  
307  	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
308  				zone->zone_start_pfn;
309  
310  	zone_span_writeunlock(zone);
311  }
312  
313  static void resize_zone(struct zone *zone, unsigned long start_pfn,
314  		unsigned long end_pfn)
315  {
316  	zone_span_writelock(zone);
317  
318  	if (end_pfn - start_pfn) {
319  		zone->zone_start_pfn = start_pfn;
320  		zone->spanned_pages = end_pfn - start_pfn;
321  	} else {
322  		/*
323  		 * keep it consistent with free_area_init_core():
324  		 * if spanned_pages == 0, then keep start_pfn = 0
325  		 */
326  		zone->zone_start_pfn = 0;
327  		zone->spanned_pages = 0;
328  	}
329  
330  	zone_span_writeunlock(zone);
331  }
332  
333  static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
334  		unsigned long end_pfn)
335  {
336  	enum zone_type zid = zone_idx(zone);
337  	int nid = zone->zone_pgdat->node_id;
338  	unsigned long pfn;
339  
340  	for (pfn = start_pfn; pfn < end_pfn; pfn++)
341  		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
342  }
343  
344  /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
345   * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
346  static int __ref ensure_zone_is_initialized(struct zone *zone,
347  			unsigned long start_pfn, unsigned long num_pages)
348  {
349  	if (!zone_is_initialized(zone))
350  		return init_currently_empty_zone(zone, start_pfn, num_pages);
351  
352  	return 0;
353  }
354  
355  static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
356  		unsigned long start_pfn, unsigned long end_pfn)
357  {
358  	int ret;
359  	unsigned long flags;
360  	unsigned long z1_start_pfn;
361  
362  	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
363  	if (ret)
364  		return ret;
365  
366  	pgdat_resize_lock(z1->zone_pgdat, &flags);
367  
368  	/* can't move pfns which are higher than @z2 */
369  	if (end_pfn > zone_end_pfn(z2))
370  		goto out_fail;
371  	/* the moved-out part must be at the leftmost of @z2 */
372  	if (start_pfn > z2->zone_start_pfn)
373  		goto out_fail;
374  	/* must include/overlap */
375  	if (end_pfn <= z2->zone_start_pfn)
376  		goto out_fail;
377  
378  	/* use start_pfn for z1's start_pfn if z1 is empty */
379  	if (!zone_is_empty(z1))
380  		z1_start_pfn = z1->zone_start_pfn;
381  	else
382  		z1_start_pfn = start_pfn;
383  
384  	resize_zone(z1, z1_start_pfn, end_pfn);
385  	resize_zone(z2, end_pfn, zone_end_pfn(z2));
386  
387  	pgdat_resize_unlock(z1->zone_pgdat, &flags);
388  
389  	fix_zone_id(z1, start_pfn, end_pfn);
390  
391  	return 0;
392  out_fail:
393  	pgdat_resize_unlock(z1->zone_pgdat, &flags);
394  	return -1;
395  }
396  
397  static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
398  		unsigned long start_pfn, unsigned long end_pfn)
399  {
400  	int ret;
401  	unsigned long flags;
402  	unsigned long z2_end_pfn;
403  
404  	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
405  	if (ret)
406  		return ret;
407  
408  	pgdat_resize_lock(z1->zone_pgdat, &flags);
409  
410  	/* can't move pfns which are lower than @z1 */
411  	if (z1->zone_start_pfn > start_pfn)
412  		goto out_fail;
413  	/* the moved-out part must be at the rightmost of @z1 */
414  	if (zone_end_pfn(z1) >  end_pfn)
415  		goto out_fail;
416  	/* must include/overlap */
417  	if (start_pfn >= zone_end_pfn(z1))
418  		goto out_fail;
419  
420  	/* use end_pfn for z2's end_pfn if z2 is empty */
421  	if (!zone_is_empty(z2))
422  		z2_end_pfn = zone_end_pfn(z2);
423  	else
424  		z2_end_pfn = end_pfn;
425  
426  	resize_zone(z1, z1->zone_start_pfn, start_pfn);
427  	resize_zone(z2, start_pfn, z2_end_pfn);
428  
429  	pgdat_resize_unlock(z1->zone_pgdat, &flags);
430  
431  	fix_zone_id(z2, start_pfn, end_pfn);
432  
433  	return 0;
434  out_fail:
435  	pgdat_resize_unlock(z1->zone_pgdat, &flags);
436  	return -1;
437  }
438  
439  static struct zone * __meminit move_pfn_range(int zone_shift,
440  		unsigned long start_pfn, unsigned long end_pfn)
441  {
442  	struct zone *zone = page_zone(pfn_to_page(start_pfn));
443  	int ret = 0;
444  
445  	if (zone_shift < 0)
446  		ret = move_pfn_range_left(zone + zone_shift, zone,
447  					  start_pfn, end_pfn);
448  	else if (zone_shift)
449  		ret = move_pfn_range_right(zone, zone + zone_shift,
450  					   start_pfn, end_pfn);
451  
452  	if (ret)
453  		return NULL;
454  
455  	return zone + zone_shift;
456  }
457  
458  static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
459  				      unsigned long end_pfn)
460  {
461  	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
462  
463  	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
464  		pgdat->node_start_pfn = start_pfn;
465  
466  	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
467  					pgdat->node_start_pfn;
468  }
469  
470  static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
471  {
472  	struct pglist_data *pgdat = zone->zone_pgdat;
473  	int nr_pages = PAGES_PER_SECTION;
474  	int nid = pgdat->node_id;
475  	int zone_type;
476  	unsigned long flags, pfn;
477  	int ret;
478  
479  	zone_type = zone - pgdat->node_zones;
480  	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
481  	if (ret)
482  		return ret;
483  
484  	pgdat_resize_lock(zone->zone_pgdat, &flags);
485  	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
486  	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
487  			phys_start_pfn + nr_pages);
488  	pgdat_resize_unlock(zone->zone_pgdat, &flags);
489  	memmap_init_zone(nr_pages, nid, zone_type,
490  			 phys_start_pfn, MEMMAP_HOTPLUG);
491  
492  	/* online_page_range is called later and expects pages reserved */
493  	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
494  		if (!pfn_valid(pfn))
495  			continue;
496  
497  		SetPageReserved(pfn_to_page(pfn));
498  	}
499  	return 0;
500  }
501  
502  static int __meminit __add_section(int nid, struct zone *zone,
503  					unsigned long phys_start_pfn)
504  {
505  	int ret;
506  
507  	if (pfn_valid(phys_start_pfn))
508  		return -EEXIST;
509  
510  	ret = sparse_add_one_section(zone, phys_start_pfn);
511  
512  	if (ret < 0)
513  		return ret;
514  
515  	ret = __add_zone(zone, phys_start_pfn);
516  
517  	if (ret < 0)
518  		return ret;
519  
520  	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
521  }
522  
523  /*
524   * Reasonably generic function for adding memory.  It is
525   * expected that archs that support memory hotplug will
526   * call this function after deciding the zone to which to
527   * add the new pages.
528   */
529  int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
530  			unsigned long nr_pages)
531  {
532  	unsigned long i;
533  	int err = 0;
534  	int start_sec, end_sec;
535  	struct vmem_altmap *altmap;
536  
537  	clear_zone_contiguous(zone);
538  
539  	/* when initializing the mem_map, align the hot-added range to a section */
540  	start_sec = pfn_to_section_nr(phys_start_pfn);
541  	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
542  
543  	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
544  	if (altmap) {
545  		/*
546  		 * Validate altmap is within bounds of the total request
547  		 */
548  		if (altmap->base_pfn != phys_start_pfn
549  				|| vmem_altmap_offset(altmap) > nr_pages) {
550  			pr_warn_once("memory add fail, invalid altmap\n");
551  			err = -EINVAL;
552  			goto out;
553  		}
554  		altmap->alloc = 0;
555  	}
556  
557  	for (i = start_sec; i <= end_sec; i++) {
558  		err = __add_section(nid, zone, section_nr_to_pfn(i));
559  
560  		/*
561  		 * EEXIST is finally dealt with by the ioresource collision
562  		 * check, see add_memory() => register_memory_resource().
563  		 * A warning will be printed if there is a collision.
564  		 */
565  		if (err && (err != -EEXIST))
566  			break;
567  		err = 0;
568  	}
569  	vmemmap_populate_print_last();
570  out:
571  	set_zone_contiguous(zone);
572  	return err;
573  }
574  EXPORT_SYMBOL_GPL(__add_pages);
575  
576  #ifdef CONFIG_MEMORY_HOTREMOVE
577  /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
578  static int find_smallest_section_pfn(int nid, struct zone *zone,
579  				     unsigned long start_pfn,
580  				     unsigned long end_pfn)
581  {
582  	struct mem_section *ms;
583  
584  	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
585  		ms = __pfn_to_section(start_pfn);
586  
587  		if (unlikely(!valid_section(ms)))
588  			continue;
589  
590  		if (unlikely(pfn_to_nid(start_pfn) != nid))
591  			continue;
592  
593  		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
594  			continue;
595  
596  		return start_pfn;
597  	}
598  
599  	return 0;
600  }
601  
602  /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
603  static int find_biggest_section_pfn(int nid, struct zone *zone,
604  				    unsigned long start_pfn,
605  				    unsigned long end_pfn)
606  {
607  	struct mem_section *ms;
608  	unsigned long pfn;
609  
610  	/* pfn is the end pfn of a memory section. */
611  	pfn = end_pfn - 1;
612  	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
613  		ms = __pfn_to_section(pfn);
614  
615  		if (unlikely(!valid_section(ms)))
616  			continue;
617  
618  		if (unlikely(pfn_to_nid(pfn) != nid))
619  			continue;
620  
621  		if (zone && zone != page_zone(pfn_to_page(pfn)))
622  			continue;
623  
624  		return pfn;
625  	}
626  
627  	return 0;
628  }
629  
630  static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
631  			     unsigned long end_pfn)
632  {
633  	unsigned long zone_start_pfn = zone->zone_start_pfn;
634  	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
635  	unsigned long zone_end_pfn = z;
636  	unsigned long pfn;
637  	struct mem_section *ms;
638  	int nid = zone_to_nid(zone);
639  
640  	zone_span_writelock(zone);
641  	if (zone_start_pfn == start_pfn) {
642  		/*
643  		 * If the section is the smallest section in the zone, we need
644  		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
645  		 * In this case, we find the second smallest valid mem_section
646  		 * for shrinking the zone.
647  		 */
648  		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
649  						zone_end_pfn);
650  		if (pfn) {
651  			zone->zone_start_pfn = pfn;
652  			zone->spanned_pages = zone_end_pfn - pfn;
653  		}
654  	} else if (zone_end_pfn == end_pfn) {
655  		/*
656  		 * If the section is the biggest section in the zone, we need
657  		 * to shrink zone->spanned_pages.
658  		 * In this case, we find the second biggest valid mem_section
659  		 * for shrinking the zone.
660  		 */
661  		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
662  					       start_pfn);
663  		if (pfn)
664  			zone->spanned_pages = pfn - zone_start_pfn + 1;
665  	}
666  
667  	/*
668  	 * If the section is neither the biggest nor the smallest mem_section
669  	 * in the zone, it only creates a hole in the zone, so we need not
670  	 * change the zone. But the zone might now contain nothing but holes,
671  	 * so check whether it still has any valid section.
672  	 */
673  	pfn = zone_start_pfn;
674  	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
675  		ms = __pfn_to_section(pfn);
676  
677  		if (unlikely(!valid_section(ms)))
678  			continue;
679  
680  		if (page_zone(pfn_to_page(pfn)) != zone)
681  			continue;
682  
683  		 /* If the section is the current section, continue the loop */
684  		if (start_pfn == pfn)
685  			continue;
686  
687  		/* If we find a valid section, we have nothing to do */
688  		zone_span_writeunlock(zone);
689  		return;
690  	}
691  
692  	/* The zone has no valid section */
693  	zone->zone_start_pfn = 0;
694  	zone->spanned_pages = 0;
695  	zone_span_writeunlock(zone);
696  }
697  
698  static void shrink_pgdat_span(struct pglist_data *pgdat,
699  			      unsigned long start_pfn, unsigned long end_pfn)
700  {
701  	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
702  	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
703  	unsigned long pgdat_end_pfn = p;
704  	unsigned long pfn;
705  	struct mem_section *ms;
706  	int nid = pgdat->node_id;
707  
708  	if (pgdat_start_pfn == start_pfn) {
709  		/*
710  		 * If the section is the smallest section in the pgdat, we need
711  		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
712  		 * In this case, we find the second smallest valid mem_section
713  		 * for shrinking the pgdat.
714  		 */
715  		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
716  						pgdat_end_pfn);
717  		if (pfn) {
718  			pgdat->node_start_pfn = pfn;
719  			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
720  		}
721  	} else if (pgdat_end_pfn == end_pfn) {
722  		/*
723  		 * If the section is the biggest section in the pgdat, we need
724  		 * to shrink pgdat->node_spanned_pages.
725  		 * In this case, we find the second biggest valid mem_section
726  		 * for shrinking the pgdat.
727  		 */
728  		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
729  					       start_pfn);
730  		if (pfn)
731  			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
732  	}
733  
734  	/*
735  	 * If the section is neither the biggest nor the smallest mem_section
736  	 * in the pgdat, it only creates a hole in the pgdat, so we need not
737  	 * change the pgdat.
738  	 * But the pgdat might now contain nothing but holes, so check whether
739  	 * it still has any valid section.
740  	 */
741  	pfn = pgdat_start_pfn;
742  	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
743  		ms = __pfn_to_section(pfn);
744  
745  		if (unlikely(!valid_section(ms)))
746  			continue;
747  
748  		if (pfn_to_nid(pfn) != nid)
749  			continue;
750  
751  		 /* If the section is the current section, continue the loop */
752  		if (start_pfn == pfn)
753  			continue;
754  
755  		/* If we find a valid section, we have nothing to do */
756  		return;
757  	}
758  
759  	/* The pgdat has no valid section */
760  	pgdat->node_start_pfn = 0;
761  	pgdat->node_spanned_pages = 0;
762  }
763  
764  static void __remove_zone(struct zone *zone, unsigned long start_pfn)
765  {
766  	struct pglist_data *pgdat = zone->zone_pgdat;
767  	int nr_pages = PAGES_PER_SECTION;
768  	int zone_type;
769  	unsigned long flags;
770  
771  	zone_type = zone - pgdat->node_zones;
772  
773  	pgdat_resize_lock(zone->zone_pgdat, &flags);
774  	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
775  	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
776  	pgdat_resize_unlock(zone->zone_pgdat, &flags);
777  }
778  
779  static int __remove_section(struct zone *zone, struct mem_section *ms,
780  		unsigned long map_offset)
781  {
782  	unsigned long start_pfn;
783  	int scn_nr;
784  	int ret = -EINVAL;
785  
786  	if (!valid_section(ms))
787  		return ret;
788  
789  	ret = unregister_memory_section(ms);
790  	if (ret)
791  		return ret;
792  
793  	scn_nr = __section_nr(ms);
794  	start_pfn = section_nr_to_pfn(scn_nr);
795  	__remove_zone(zone, start_pfn);
796  
797  	sparse_remove_one_section(zone, ms, map_offset);
798  	return 0;
799  }
800  
801  /**
802   * __remove_pages() - remove sections of pages from a zone
803   * @zone: zone from which pages need to be removed
804   * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
805   * @nr_pages: number of pages to remove (must be multiple of section size)
806   *
807   * Generic helper function to remove section mappings and sysfs entries
808   * for the section of the memory we are removing. Caller needs to make
809   * sure that pages are marked reserved and zones are adjusted properly by
810   * calling offline_pages().
811   */
812  int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
813  		 unsigned long nr_pages)
814  {
815  	unsigned long i;
816  	unsigned long map_offset = 0;
817  	int sections_to_remove, ret = 0;
818  
819  	/* In the ZONE_DEVICE case device driver owns the memory region */
820  	if (is_dev_zone(zone)) {
821  		struct page *page = pfn_to_page(phys_start_pfn);
822  		struct vmem_altmap *altmap;
823  
824  		altmap = to_vmem_altmap((unsigned long) page);
825  		if (altmap)
826  			map_offset = vmem_altmap_offset(altmap);
827  	} else {
828  		resource_size_t start, size;
829  
830  		start = phys_start_pfn << PAGE_SHIFT;
831  		size = nr_pages * PAGE_SIZE;
832  
833  		ret = release_mem_region_adjustable(&iomem_resource, start,
834  					size);
835  		if (ret) {
836  			resource_size_t endres = start + size - 1;
837  
838  			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
839  					&start, &endres, ret);
840  		}
841  	}
842  
843  	clear_zone_contiguous(zone);
844  
845  	/*
846  	 * We can only remove entire sections
847  	 */
848  	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
849  	BUG_ON(nr_pages % PAGES_PER_SECTION);
850  
851  	sections_to_remove = nr_pages / PAGES_PER_SECTION;
852  	for (i = 0; i < sections_to_remove; i++) {
853  		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
854  
855  		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset);
856  		map_offset = 0;
857  		if (ret)
858  			break;
859  	}
860  
861  	set_zone_contiguous(zone);
862  
863  	return ret;
864  }
865  EXPORT_SYMBOL_GPL(__remove_pages);
866  #endif /* CONFIG_MEMORY_HOTREMOVE */
867  
868  int set_online_page_callback(online_page_callback_t callback)
869  {
870  	int rc = -EINVAL;
871  
872  	get_online_mems();
873  	mutex_lock(&online_page_callback_lock);
874  
875  	if (online_page_callback == generic_online_page) {
876  		online_page_callback = callback;
877  		rc = 0;
878  	}
879  
880  	mutex_unlock(&online_page_callback_lock);
881  	put_online_mems();
882  
883  	return rc;
884  }
885  EXPORT_SYMBOL_GPL(set_online_page_callback);
886  
887  int restore_online_page_callback(online_page_callback_t callback)
888  {
889  	int rc = -EINVAL;
890  
891  	get_online_mems();
892  	mutex_lock(&online_page_callback_lock);
893  
894  	if (online_page_callback == callback) {
895  		online_page_callback = generic_online_page;
896  		rc = 0;
897  	}
898  
899  	mutex_unlock(&online_page_callback_lock);
900  	put_online_mems();
901  
902  	return rc;
903  }
904  EXPORT_SYMBOL_GPL(restore_online_page_callback);
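/*
 * Illustrative sketch (hypothetical callback, not part of this file): a
 * driver that wants to defer freeing of newly onlined pages can install its
 * own callback while it is active and restore the generic one when done:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		// freeing deferred; the driver calls __online_page_free() later
 *	}
 *
 *	err = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */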
905  
906  void __online_page_set_limits(struct page *page)
907  {
908  }
909  EXPORT_SYMBOL_GPL(__online_page_set_limits);
910  
911  void __online_page_increment_counters(struct page *page)
912  {
913  	adjust_managed_page_count(page, 1);
914  }
915  EXPORT_SYMBOL_GPL(__online_page_increment_counters);
916  
917  void __online_page_free(struct page *page)
918  {
919  	__free_reserved_page(page);
920  }
921  EXPORT_SYMBOL_GPL(__online_page_free);
922  
923  static void generic_online_page(struct page *page)
924  {
925  	__online_page_set_limits(page);
926  	__online_page_increment_counters(page);
927  	__online_page_free(page);
928  }
929  
930  static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
931  			void *arg)
932  {
933  	unsigned long i;
934  	unsigned long onlined_pages = *(unsigned long *)arg;
935  	struct page *page;
936  	if (PageReserved(pfn_to_page(start_pfn)))
937  		for (i = 0; i < nr_pages; i++) {
938  			page = pfn_to_page(start_pfn + i);
939  			(*online_page_callback)(page);
940  			onlined_pages++;
941  		}
942  	*(unsigned long *)arg = onlined_pages;
943  	return 0;
944  }
945  
946  #ifdef CONFIG_MOVABLE_NODE
947  /*
948   * With CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
949   * normal memory.
950   */
951  static bool can_online_high_movable(struct zone *zone)
952  {
953  	return true;
954  }
955  #else /* CONFIG_MOVABLE_NODE */
956  /* ensure every online node has NORMAL memory */
957  static bool can_online_high_movable(struct zone *zone)
958  {
959  	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
960  }
961  #endif /* CONFIG_MOVABLE_NODE */
962  
963  /* check which states of node_states will be changed when onlining memory */
964  static void node_states_check_changes_online(unsigned long nr_pages,
965  	struct zone *zone, struct memory_notify *arg)
966  {
967  	int nid = zone_to_nid(zone);
968  	enum zone_type zone_last = ZONE_NORMAL;
969  
970  	/*
971  	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
972  	 * contains nodes which have zones of 0...ZONE_NORMAL,
973  	 * set zone_last to ZONE_NORMAL.
974  	 *
975  	 * If we don't have HIGHMEM nor movable node,
976  	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
977  	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
978  	 */
979  	if (N_MEMORY == N_NORMAL_MEMORY)
980  		zone_last = ZONE_MOVABLE;
981  
982  	/*
983  	 * if the memory to be onlined is in a zone of 0...zone_last, and
984  	 * the zones of 0...zone_last don't have memory before onlining, we will
985  	 * need to set the node to node_states[N_NORMAL_MEMORY] after
986  	 * the memory is onlined.
987  	 */
988  	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
989  		arg->status_change_nid_normal = nid;
990  	else
991  		arg->status_change_nid_normal = -1;
992  
993  #ifdef CONFIG_HIGHMEM
994  	/*
995  	 * If we have movable node, node_states[N_HIGH_MEMORY]
996  	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
997  	 * set zone_last to ZONE_HIGHMEM.
998  	 *
999  	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
1000  	 * contains nodes which have zones of 0...ZONE_MOVABLE,
1001  	 * set zone_last to ZONE_MOVABLE.
1002  	 */
1003  	zone_last = ZONE_HIGHMEM;
1004  	if (N_MEMORY == N_HIGH_MEMORY)
1005  		zone_last = ZONE_MOVABLE;
1006  
1007  	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
1008  		arg->status_change_nid_high = nid;
1009  	else
1010  		arg->status_change_nid_high = -1;
1011  #else
1012  	arg->status_change_nid_high = arg->status_change_nid_normal;
1013  #endif
1014  
1015  	/*
1016  	 * if the node doesn't have memory before onlining, we will need to
1017  	 * set the node to node_states[N_MEMORY] after the memory
1018  	 * is onlined.
1019  	 */
1020  	if (!node_state(nid, N_MEMORY))
1021  		arg->status_change_nid = nid;
1022  	else
1023  		arg->status_change_nid = -1;
1024  }
1025  
1026  static void node_states_set_node(int node, struct memory_notify *arg)
1027  {
1028  	if (arg->status_change_nid_normal >= 0)
1029  		node_set_state(node, N_NORMAL_MEMORY);
1030  
1031  	if (arg->status_change_nid_high >= 0)
1032  		node_set_state(node, N_HIGH_MEMORY);
1033  
1034  	node_set_state(node, N_MEMORY);
1035  }
1036  
1037  bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1038  		   enum zone_type target, int *zone_shift)
1039  {
1040  	struct zone *zone = page_zone(pfn_to_page(pfn));
1041  	enum zone_type idx = zone_idx(zone);
1042  	int i;
1043  
1044  	*zone_shift = 0;
1045  
1046  	if (idx < target) {
1047  		/* pages must be at end of current zone */
1048  		if (pfn + nr_pages != zone_end_pfn(zone))
1049  			return false;
1050  
1051  		/* no zones in use between current zone and target */
1052  		for (i = idx + 1; i < target; i++)
1053  			if (zone_is_initialized(zone - idx + i))
1054  				return false;
1055  	}
1056  
1057  	if (target < idx) {
1058  		/* pages must be at beginning of current zone */
1059  		if (pfn != zone->zone_start_pfn)
1060  			return false;
1061  
1062  		/* no zones in use between current zone and target */
1063  		for (i = target + 1; i < idx; i++)
1064  			if (zone_is_initialized(zone - idx + i))
1065  				return false;
1066  	}
1067  
1068  	*zone_shift = target - idx;
1069  	return true;
1070  }
1071  
1072  /* Must be protected by mem_hotplug_begin() */
1073  int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
1074  {
1075  	unsigned long flags;
1076  	unsigned long onlined_pages = 0;
1077  	struct zone *zone;
1078  	int need_zonelists_rebuild = 0;
1079  	int nid;
1080  	int ret;
1081  	struct memory_notify arg;
1082  	int zone_shift = 0;
1083  
1084  	/*
1085  	 * This doesn't need a lock to do pfn_to_page().
1086  	 * The section can't be removed here because of the
1087  	 * memory_block->state_mutex.
1088  	 */
1089  	zone = page_zone(pfn_to_page(pfn));
1090  
1091  	if ((zone_idx(zone) > ZONE_NORMAL ||
1092  	    online_type == MMOP_ONLINE_MOVABLE) &&
1093  	    !can_online_high_movable(zone))
1094  		return -EINVAL;
1095  
1096  	if (online_type == MMOP_ONLINE_KERNEL) {
1097  		if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
1098  			return -EINVAL;
1099  	} else if (online_type == MMOP_ONLINE_MOVABLE) {
1100  		if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
1101  			return -EINVAL;
1102  	}
1103  
1104  	zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
1105  	if (!zone)
1106  		return -EINVAL;
1107  
1108  	arg.start_pfn = pfn;
1109  	arg.nr_pages = nr_pages;
1110  	node_states_check_changes_online(nr_pages, zone, &arg);
1111  
1112  	nid = zone_to_nid(zone);
1113  
1114  	ret = memory_notify(MEM_GOING_ONLINE, &arg);
1115  	ret = notifier_to_errno(ret);
1116  	if (ret)
1117  		goto failed_addition;
1118  
1119  	/*
1120  	 * If this zone is not populated, then it is not in zonelist.
1121  	 * This means the page allocator ignores this zone.
1122  	 * So, zonelist must be updated after online.
1123  	 */
1124  	mutex_lock(&zonelists_mutex);
1125  	if (!populated_zone(zone)) {
1126  		need_zonelists_rebuild = 1;
1127  		build_all_zonelists(NULL, zone);
1128  	}
1129  
1130  	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
1131  		online_pages_range);
1132  	if (ret) {
1133  		if (need_zonelists_rebuild)
1134  			zone_pcp_reset(zone);
1135  		mutex_unlock(&zonelists_mutex);
1136  		goto failed_addition;
1137  	}
1138  
1139  	zone->present_pages += onlined_pages;
1140  
1141  	pgdat_resize_lock(zone->zone_pgdat, &flags);
1142  	zone->zone_pgdat->node_present_pages += onlined_pages;
1143  	pgdat_resize_unlock(zone->zone_pgdat, &flags);
1144  
1145  	if (onlined_pages) {
1146  		node_states_set_node(nid, &arg);
1147  		if (need_zonelists_rebuild)
1148  			build_all_zonelists(NULL, NULL);
1149  		else
1150  			zone_pcp_update(zone);
1151  	}
1152  
1153  	mutex_unlock(&zonelists_mutex);
1154  
1155  	init_per_zone_wmark_min();
1156  
1157  	if (onlined_pages) {
1158  		kswapd_run(nid);
1159  		kcompactd_run(nid);
1160  	}
1161  
1162  	vm_total_pages = nr_free_pagecache_pages();
1163  
1164  	writeback_set_ratelimit();
1165  
1166  	if (onlined_pages)
1167  		memory_notify(MEM_ONLINE, &arg);
1168  	return 0;
1169  
1170  failed_addition:
1171  	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1172  		 (unsigned long long) pfn << PAGE_SHIFT,
1173  		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1174  	memory_notify(MEM_CANCEL_ONLINE, &arg);
1175  	return ret;
1176  }
1177  #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1178  
1179  static void reset_node_present_pages(pg_data_t *pgdat)
1180  {
1181  	struct zone *z;
1182  
1183  	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1184  		z->present_pages = 0;
1185  
1186  	pgdat->node_present_pages = 0;
1187  }
1188  
1189  /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1190  static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1191  {
1192  	struct pglist_data *pgdat;
1193  	unsigned long zones_size[MAX_NR_ZONES] = {0};
1194  	unsigned long zholes_size[MAX_NR_ZONES] = {0};
1195  	unsigned long start_pfn = PFN_DOWN(start);
1196  
1197  	pgdat = NODE_DATA(nid);
1198  	if (!pgdat) {
1199  		pgdat = arch_alloc_nodedata(nid);
1200  		if (!pgdat)
1201  			return NULL;
1202  
1203  		arch_refresh_nodedata(nid, pgdat);
1204  	} else {
1205  		/* Reset the nr_zones, order and classzone_idx before reuse */
1206  		pgdat->nr_zones = 0;
1207  		pgdat->kswapd_order = 0;
1208  		pgdat->kswapd_classzone_idx = 0;
1209  	}
1210  
1211  	/* we can use NODE_DATA(nid) from here */
1212  
1213  	/* init node's zones as empty zones, we don't have any present pages.*/
1214  	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
1215  	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1216  
1217  	/*
1218  	 * The node we allocated has no zone fallback lists. To avoid
1219  	 * accessing an uninitialized zonelist, build it here.
1220  	 */
1221  	mutex_lock(&zonelists_mutex);
1222  	build_all_zonelists(pgdat, NULL);
1223  	mutex_unlock(&zonelists_mutex);
1224  
1225  	/*
1226  	 * zone->managed_pages is set to an approximate value in
1227  	 * free_area_init_core(), which will cause
1228  	 * /sys/devices/system/node/nodeX/meminfo to report wrong data.
1229  	 * So reset it to 0 before any memory is onlined.
1230  	 */
1231  	reset_node_managed_pages(pgdat);
1232  
1233  	/*
1234  	 * When memory is hot-added, all the memory is in offline state. So
1235  	 * clear all zones' present_pages because they will be updated in
1236  	 * online_pages() and offline_pages().
1237  	 */
1238  	reset_node_present_pages(pgdat);
1239  
1240  	return pgdat;
1241  }
1242  
1243  static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
1244  {
1245  	arch_refresh_nodedata(nid, NULL);
1246  	free_percpu(pgdat->per_cpu_nodestats);
1247  	arch_free_nodedata(pgdat);
1248  	return;
1249  }
1250  
1251  
1252  /**
1253   * try_online_node - online a node if offlined
1254   *
1255   * called by cpu_up() to online a node without onlined memory.
1256   */
1257  int try_online_node(int nid)
1258  {
1259  	pg_data_t	*pgdat;
1260  	int	ret;
1261  
1262  	if (node_online(nid))
1263  		return 0;
1264  
1265  	mem_hotplug_begin();
1266  	pgdat = hotadd_new_pgdat(nid, 0);
1267  	if (!pgdat) {
1268  		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
1269  		ret = -ENOMEM;
1270  		goto out;
1271  	}
1272  	node_set_online(nid);
1273  	ret = register_one_node(nid);
1274  	BUG_ON(ret);
1275  
1276  	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
1277  		mutex_lock(&zonelists_mutex);
1278  		build_all_zonelists(NULL, NULL);
1279  		mutex_unlock(&zonelists_mutex);
1280  	}
1281  
1282  out:
1283  	mem_hotplug_done();
1284  	return ret;
1285  }
1286  
1287  static int check_hotplug_memory_range(u64 start, u64 size)
1288  {
1289  	u64 start_pfn = PFN_DOWN(start);
1290  	u64 nr_pages = size >> PAGE_SHIFT;
1291  
1292  	/* Memory range must be aligned with section */
1293  	if ((start_pfn & ~PAGE_SECTION_MASK) ||
1294  	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
1295  		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
1296  				(unsigned long long)start,
1297  				(unsigned long long)size);
1298  		return -EINVAL;
1299  	}
1300  
1301  	return 0;
1302  }
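/*
 * Worked example (sketch): with 128 MiB sections, as on x86_64, both start
 * and size must be multiples of 128 MiB. Hot-adding 0x8000000 bytes (128 MiB)
 * at 0x100000000 (4 GiB) passes, while a 64 MiB request fails with -EINVAL.
 */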
1303  
1304  /*
1305   * If the movable zone has already been set up, newly added memory should be
1306   * checked: if its address is higher than the movable zone, it should be added
1307   * as movable. Without this check, the movable zone may overlap another zone.
1308   */
1309  static int should_add_memory_movable(int nid, u64 start, u64 size)
1310  {
1311  	unsigned long start_pfn = start >> PAGE_SHIFT;
1312  	pg_data_t *pgdat = NODE_DATA(nid);
1313  	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
1314  
1315  	if (zone_is_empty(movable_zone))
1316  		return 0;
1317  
1318  	if (movable_zone->zone_start_pfn <= start_pfn)
1319  		return 1;
1320  
1321  	return 0;
1322  }
1323  
1324  int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
1325  		bool for_device)
1326  {
1327  #ifdef CONFIG_ZONE_DEVICE
1328  	if (for_device)
1329  		return ZONE_DEVICE;
1330  #endif
1331  	if (should_add_memory_movable(nid, start, size))
1332  		return ZONE_MOVABLE;
1333  
1334  	return zone_default;
1335  }
1336  
1337  static int online_memory_block(struct memory_block *mem, void *arg)
1338  {
1339  	return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
1340  }
1341  
1342  /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1343  int __ref add_memory_resource(int nid, struct resource *res, bool online)
1344  {
1345  	u64 start, size;
1346  	pg_data_t *pgdat = NULL;
1347  	bool new_pgdat;
1348  	bool new_node;
1349  	int ret;
1350  
1351  	start = res->start;
1352  	size = resource_size(res);
1353  
1354  	ret = check_hotplug_memory_range(start, size);
1355  	if (ret)
1356  		return ret;
1357  
1358  	{	/* Stupid hack to suppress address-never-null warning */
1359  		void *p = NODE_DATA(nid);
1360  		new_pgdat = !p;
1361  	}
1362  
1363  	mem_hotplug_begin();
1364  
1365  	/*
1366  	 * Add new range to memblock so that when hotadd_new_pgdat() is called
1367  	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
1368  	 * this new range and calculate total pages correctly.  The range will
1369  	 * be removed at hot-remove time.
1370  	 */
1371  	memblock_add_node(start, size, nid);
1372  
1373  	new_node = !node_online(nid);
1374  	if (new_node) {
1375  		pgdat = hotadd_new_pgdat(nid, start);
1376  		ret = -ENOMEM;
1377  		if (!pgdat)
1378  			goto error;
1379  	}
1380  
1381  	/* call arch's memory hotadd */
1382  	ret = arch_add_memory(nid, start, size, false);
1383  
1384  	if (ret < 0)
1385  		goto error;
1386  
1387  	/* we online node here. we can't roll back from here. */
1388  	node_set_online(nid);
1389  
1390  	if (new_node) {
1391  		ret = register_one_node(nid);
1392  		/*
1393  		 * If the sysfs file of the new node can't be created, a cpu
1394  		 * on the node can't be hot-added. There is no way to roll back
1395  		 * now, so check with BUG_ON() to catch it, reluctantly.
1396  		 */
1397  		BUG_ON(ret);
1398  	}
1399  
1400  	/* create new memmap entry */
1401  	firmware_map_add_hotplug(start, start + size, "System RAM");
1402  
1403  	/* online pages if requested */
1404  	if (online)
1405  		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
1406  				  NULL, online_memory_block);
1407  
1408  	goto out;
1409  
1410  error:
1411  	/* rollback pgdat allocation and others */
1412  	if (new_pgdat)
1413  		rollback_node_hotadd(nid, pgdat);
1414  	memblock_remove(start, size);
1415  
1416  out:
1417  	mem_hotplug_done();
1418  	return ret;
1419  }
1420  EXPORT_SYMBOL_GPL(add_memory_resource);
1421  
1422  int __ref add_memory(int nid, u64 start, u64 size)
1423  {
1424  	struct resource *res;
1425  	int ret;
1426  
1427  	res = register_memory_resource(start, size);
1428  	if (IS_ERR(res))
1429  		return PTR_ERR(res);
1430  
1431  	ret = add_memory_resource(nid, res, memhp_auto_online);
1432  	if (ret < 0)
1433  		release_memory_resource(res);
1434  	return ret;
1435  }
1436  EXPORT_SYMBOL_GPL(add_memory);
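/*
 * Illustrative sketch (assumed caller, not defined here): a hot-add driver
 * such as the ACPI memory handler typically calls
 *
 *	ret = add_memory(nid, start_addr, length);
 *
 * with the node id and the physical range reported by firmware; the range
 * must be section-aligned (see check_hotplug_memory_range() above).
 */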
1437  
1438  #ifdef CONFIG_MEMORY_HOTREMOVE
1439  /*
1440   * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
1441   * set and the size of the free page is given by page_order(). Using this,
1442   * the function determines if the pageblock contains only free pages.
1443   * Due to buddy constraints, a free page at least the size of a pageblock will
1444   * be located at the start of the pageblock.
1445   */
1446  static inline int pageblock_free(struct page *page)
1447  {
1448  	return PageBuddy(page) && page_order(page) >= pageblock_order;
1449  }
1450  
1451  /* Return the start of the next active pageblock after a given page */
1452  static struct page *next_active_pageblock(struct page *page)
1453  {
1454  	/* Ensure the starting page is pageblock-aligned */
1455  	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
1456  
1457  	/* If the entire pageblock is free, move to the end of free page */
1458  	if (pageblock_free(page)) {
1459  		int order;
1460  		/* be careful: we don't have locks, page_order can be changed. */
1461  		order = page_order(page);
1462  		if ((order < MAX_ORDER) && (order >= pageblock_order))
1463  			return page + (1 << order);
1464  	}
1465  
1466  	return page + pageblock_nr_pages;
1467  }
1468  
1469  /* Checks if this range of memory is likely to be hot-removable. */
1470  bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1471  {
1472  	struct page *page = pfn_to_page(start_pfn);
1473  	struct page *end_page = page + nr_pages;
1474  
1475  	/* Check the starting page of each pageblock within the range */
1476  	for (; page < end_page; page = next_active_pageblock(page)) {
1477  		if (!is_pageblock_removable_nolock(page))
1478  			return false;
1479  		cond_resched();
1480  	}
1481  
1482  	/* All pageblocks in the memory block are likely to be hot-removable */
1483  	return true;
1484  }
1485  
1486  /*
1487   * Confirm all pages in a range [start, end) belong to the same zone.
1488   * When true, return its valid [start, end).
1489   */
1490  int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1491  			 unsigned long *valid_start, unsigned long *valid_end)
1492  {
1493  	unsigned long pfn, sec_end_pfn;
1494  	unsigned long start, end;
1495  	struct zone *zone = NULL;
1496  	struct page *page;
1497  	int i;
1498  	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1499  	     pfn < end_pfn;
1500  	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1501  		/* Make sure the memory section is present first */
1502  		if (!present_section_nr(pfn_to_section_nr(pfn)))
1503  			continue;
1504  		for (; pfn < sec_end_pfn && pfn < end_pfn;
1505  		     pfn += MAX_ORDER_NR_PAGES) {
1506  			i = 0;
1507  			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
1508  			while ((i < MAX_ORDER_NR_PAGES) &&
1509  				!pfn_valid_within(pfn + i))
1510  				i++;
1511  			if (i == MAX_ORDER_NR_PAGES)
1512  				continue;
1513  			page = pfn_to_page(pfn + i);
1514  			if (zone && page_zone(page) != zone)
1515  				return 0;
1516  			if (!zone)
1517  				start = pfn + i;
1518  			zone = page_zone(page);
1519  			end = pfn + MAX_ORDER_NR_PAGES;
1520  		}
1521  	}
1522  
1523  	if (zone) {
1524  		*valid_start = start;
1525  		*valid_end = end;
1526  		return 1;
1527  	} else {
1528  		return 0;
1529  	}
1530  }
1531  
1532  /*
1533   * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
1534   * and hugepages). We scan by pfn because it's much easier than scanning over
1535   * a linked list. This function returns the pfn of the first movable page
1536   * found, or 0 if none is found.
1537   */
1538  static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1539  {
1540  	unsigned long pfn;
1541  	struct page *page;
1542  	for (pfn = start; pfn < end; pfn++) {
1543  		if (pfn_valid(pfn)) {
1544  			page = pfn_to_page(pfn);
1545  			if (PageLRU(page))
1546  				return pfn;
1547  			if (PageHuge(page)) {
1548  				if (page_huge_active(page))
1549  					return pfn;
1550  				else
1551  					pfn = round_up(pfn + 1,
1552  						1 << compound_order(page)) - 1;
1553  			}
1554  		}
1555  	}
1556  	return 0;
1557  }
1558  
1559  static struct page *new_node_page(struct page *page, unsigned long private,
1560  		int **result)
1561  {
1562  	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1563  	int nid = page_to_nid(page);
1564  	nodemask_t nmask = node_states[N_MEMORY];
1565  	struct page *new_page = NULL;
1566  
1567  	/*
1568  	 * TODO: allocate a destination hugepage from the nearest neighbor node,
1569  	 * in accordance with the memory policy of the user process if possible.
1570  	 * For now, as a simple work-around, we use the next node as destination.
1571  	 */
1572  	if (PageHuge(page))
1573  		return alloc_huge_page_node(page_hstate(compound_head(page)),
1574  					next_node_in(nid, nmask));
1575  
1576  	node_clear(nid, nmask);
1577  
1578  	if (PageHighMem(page)
1579  	    || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1580  		gfp_mask |= __GFP_HIGHMEM;
1581  
1582  	if (!nodes_empty(nmask))
1583  		new_page = __alloc_pages_nodemask(gfp_mask, 0,
1584  					node_zonelist(nid, gfp_mask), &nmask);
1585  	if (!new_page)
1586  		new_page = __alloc_pages(gfp_mask, 0,
1587  					node_zonelist(nid, gfp_mask));
1588  
1589  	return new_page;
1590  }
1591  
1592  #define NR_OFFLINE_AT_ONCE_PAGES	(256)
1593  static int
1594  do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1595  {
1596  	unsigned long pfn;
1597  	struct page *page;
1598  	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
1599  	int not_managed = 0;
1600  	int ret = 0;
1601  	LIST_HEAD(source);
1602  
1603  	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
1604  		if (!pfn_valid(pfn))
1605  			continue;
1606  		page = pfn_to_page(pfn);
1607  
1608  		if (PageHuge(page)) {
1609  			struct page *head = compound_head(page);
1610  			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
1611  			if (compound_order(head) > PFN_SECTION_SHIFT) {
1612  				ret = -EBUSY;
1613  				break;
1614  			}
1615  			if (isolate_huge_page(page, &source))
1616  				move_pages -= 1 << compound_order(head);
1617  			continue;
1618  		}
1619  
1620  		if (!get_page_unless_zero(page))
1621  			continue;
1622  		/*
1623  		 * We can skip free pages. And we can only deal with pages on
1624  		 * LRU.
1625  		 */
1626  		ret = isolate_lru_page(page);
1627  		if (!ret) { /* Success */
1628  			put_page(page);
1629  			list_add_tail(&page->lru, &source);
1630  			move_pages--;
1631  			inc_node_page_state(page, NR_ISOLATED_ANON +
1632  					    page_is_file_cache(page));
1633  
1634  		} else {
1635  #ifdef CONFIG_DEBUG_VM
1636  			pr_alert("removing pfn %lx from LRU failed\n", pfn);
1637  			dump_page(page, "failed to remove from LRU");
1638  #endif
1639  			put_page(page);
1640  			/* Because we don't hold the big zone->lock, we should
1641  			   check this again here. */
1642  			if (page_count(page)) {
1643  				not_managed++;
1644  				ret = -EBUSY;
1645  				break;
1646  			}
1647  		}
1648  	}
1649  	if (!list_empty(&source)) {
1650  		if (not_managed) {
1651  			putback_movable_pages(&source);
1652  			goto out;
1653  		}
1654  
1655  		/* Allocate a new page from the nearest neighbor node */
1656  		ret = migrate_pages(&source, new_node_page, NULL, 0,
1657  					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1658  		if (ret)
1659  			putback_movable_pages(&source);
1660  	}
1661  out:
1662  	return ret;
1663  }
1664  
1665  /*
1666   * remove from free_area[] and mark all as Reserved.
1667   */
1668  static int
1669  offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
1670  			void *data)
1671  {
1672  	__offline_isolated_pages(start, start + nr_pages);
1673  	return 0;
1674  }
1675  
1676  static void
1677  offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
1678  {
1679  	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
1680  				offline_isolated_pages_cb);
1681  }
1682  
1683  /*
1684   * Check that all pages in the range, recorded as a memory resource, are isolated.
1685   */
1686  static int
1687  check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
1688  			void *data)
1689  {
1690  	int ret;
1691  	long offlined = *(long *)data;
1692  	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
1693  	offlined = nr_pages;
1694  	if (!ret)
1695  		*(long *)data += offlined;
1696  	return ret;
1697  }
1698  
1699  static long
1700  check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
1701  {
1702  	long offlined = 0;
1703  	int ret;
1704  
1705  	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
1706  			check_pages_isolated_cb);
1707  	if (ret < 0)
1708  		offlined = (long)ret;
1709  	return offlined;
1710  }
1711  
1712  #ifdef CONFIG_MOVABLE_NODE
1713  /*
1714   * When CONFIG_MOVABLE_NODE is set, we permit offlining of a node which
1715   * doesn't have normal memory.
1716   */
1717  static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1718  {
1719  	return true;
1720  }
1721  #else /* CONFIG_MOVABLE_NODE */
1722  /* ensure the node has NORMAL memory if it is still online */
1723  static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
1724  {
1725  	struct pglist_data *pgdat = zone->zone_pgdat;
1726  	unsigned long present_pages = 0;
1727  	enum zone_type zt;
1728  
1729  	for (zt = 0; zt <= ZONE_NORMAL; zt++)
1730  		present_pages += pgdat->node_zones[zt].present_pages;
1731  
1732  	if (present_pages > nr_pages)
1733  		return true;
1734  
1735  	present_pages = 0;
1736  	for (; zt <= ZONE_MOVABLE; zt++)
1737  		present_pages += pgdat->node_zones[zt].present_pages;
1738  
1739  	/*
1740  	 * we can't offline the last normal memory until all
1741  	 * higher memory is offlined.
1742  	 */
1743  	return present_pages == 0;
1744  }
1745  #endif /* CONFIG_MOVABLE_NODE */
1746  
1747  static int __init cmdline_parse_movable_node(char *p)
1748  {
1749  #ifdef CONFIG_MOVABLE_NODE
1750  	/*
1751  	 * Memory used by the kernel cannot be hot-removed because Linux
1752  	 * cannot migrate the kernel pages. When memory hotplug is
1753  	 * enabled, we should prevent memblock from allocating memory
1754  	 * for the kernel.
1755  	 *
1756  	 * The ACPI SRAT records all hotpluggable memory ranges, but before
1757  	 * SRAT is parsed we don't know about them.
1758  	 *
1759  	 * The kernel image is loaded into memory at a very early time. We
1760  	 * cannot prevent this anyway. So on a NUMA system, we mark any
1761  	 * node the kernel resides in as un-hotpluggable.
1762  	 *
1763  	 * Since one node on a modern server can hold double-digit
1764  	 * gigabytes of memory, we can assume the memory around the kernel
1765  	 * image is also un-hotpluggable. So before SRAT is parsed, just
1766  	 * allocate memory near the kernel image to do our best to keep
1767  	 * the kernel away from hotpluggable memory.
1768  	 */
1769  	memblock_set_bottom_up(true);
1770  	movable_node_enabled = true;
1771  #else
1772  	pr_warn("movable_node option not supported\n");
1773  #endif
1774  	return 0;
1775  }
1776  early_param("movable_node", cmdline_parse_movable_node);
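
/*
 * Illustrative note (not part of the original file): with this option an
 * administrator would typically boot with "movable_node" on the kernel
 * command line, e.g.
 *
 *	linux ... movable_node
 *
 * so that, once the ACPI SRAT is parsed, hotpluggable ranges stay free of
 * kernel allocations and can later be onlined as movable, removable memory.
 */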
1777  
1778  /* check which node_states will be changed when memory is offlined */
1779  static void node_states_check_changes_offline(unsigned long nr_pages,
1780  		struct zone *zone, struct memory_notify *arg)
1781  {
1782  	struct pglist_data *pgdat = zone->zone_pgdat;
1783  	unsigned long present_pages = 0;
1784  	enum zone_type zt, zone_last = ZONE_NORMAL;
1785  
1786  	/*
1787  	 * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
1788  	 * contains nodes which have zones of 0...ZONE_NORMAL,
1789  	 * set zone_last to ZONE_NORMAL.
1790  	 *
1791  	 * If we have neither HIGHMEM nor a movable node,
1792  	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
1793  	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
1794  	 */
1795  	if (N_MEMORY == N_NORMAL_MEMORY)
1796  		zone_last = ZONE_MOVABLE;
1797  
1798  	/*
1799  	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
1800  	 * If the memory to be offlined is in a zone of 0...zone_last,
1801  	 * and it is the last present memory, 0...zone_last will
1802  	 * become empty after offlining, thus we can determine that we will
1803  	 * need to clear the node from node_states[N_NORMAL_MEMORY].
1804  	 */
1805  	for (zt = 0; zt <= zone_last; zt++)
1806  		present_pages += pgdat->node_zones[zt].present_pages;
1807  	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1808  		arg->status_change_nid_normal = zone_to_nid(zone);
1809  	else
1810  		arg->status_change_nid_normal = -1;
1811  
1812  #ifdef CONFIG_HIGHMEM
1813  	/*
1814  	 * If we have a movable node, node_states[N_HIGH_MEMORY]
1815  	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
1816  	 * set zone_last to ZONE_HIGHMEM.
1817  	 *
1818  	 * If we don't have a movable node, node_states[N_HIGH_MEMORY]
1819  	 * contains nodes which have zones of 0...ZONE_MOVABLE,
1820  	 * set zone_last to ZONE_MOVABLE.
1821  	 */
1822  	zone_last = ZONE_HIGHMEM;
1823  	if (N_MEMORY == N_HIGH_MEMORY)
1824  		zone_last = ZONE_MOVABLE;
1825  
1826  	for (; zt <= zone_last; zt++)
1827  		present_pages += pgdat->node_zones[zt].present_pages;
1828  	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1829  		arg->status_change_nid_high = zone_to_nid(zone);
1830  	else
1831  		arg->status_change_nid_high = -1;
1832  #else
1833  	arg->status_change_nid_high = arg->status_change_nid_normal;
1834  #endif
1835  
1836  	/*
1837  	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1838  	 */
1839  	zone_last = ZONE_MOVABLE;
1840  
1841  	/*
1842  	 * check whether node_states[N_HIGH_MEMORY] will be changed.
1843  	 * If we try to offline the last present @nr_pages from the node,
1844  	 * we can determine that we will need to clear the node from
1845  	 * node_states[N_HIGH_MEMORY].
1846  	 */
1847  	for (; zt <= zone_last; zt++)
1848  		present_pages += pgdat->node_zones[zt].present_pages;
1849  	if (nr_pages >= present_pages)
1850  		arg->status_change_nid = zone_to_nid(zone);
1851  	else
1852  		arg->status_change_nid = -1;
1853  }
1854  
1855  static void node_states_clear_node(int node, struct memory_notify *arg)
1856  {
1857  	if (arg->status_change_nid_normal >= 0)
1858  		node_clear_state(node, N_NORMAL_MEMORY);
1859  
1860  	if ((N_MEMORY != N_NORMAL_MEMORY) &&
1861  	    (arg->status_change_nid_high >= 0))
1862  		node_clear_state(node, N_HIGH_MEMORY);
1863  
1864  	if ((N_MEMORY != N_HIGH_MEMORY) &&
1865  	    (arg->status_change_nid >= 0))
1866  		node_clear_state(node, N_MEMORY);
1867  }
1868  
1869  static int __ref __offline_pages(unsigned long start_pfn,
1870  		  unsigned long end_pfn, unsigned long timeout)
1871  {
1872  	unsigned long pfn, nr_pages, expire;
1873  	long offlined_pages;
1874  	int ret, drain, retry_max, node;
1875  	unsigned long flags;
1876  	unsigned long valid_start, valid_end;
1877  	struct zone *zone;
1878  	struct memory_notify arg;
1879  
1880  	/* at least, alignment against pageblock is necessary */
1881  	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1882  		return -EINVAL;
1883  	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1884  		return -EINVAL;
1885  	/* This makes hotplug much easier... and more readable.
1886  	   We assume this for now. */
1887  	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1888  		return -EINVAL;
1889  
1890  	zone = page_zone(pfn_to_page(valid_start));
1891  	node = zone_to_nid(zone);
1892  	nr_pages = end_pfn - start_pfn;
1893  
1894  	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
1895  		return -EINVAL;
1896  
1897  	/* set above range as isolated */
1898  	ret = start_isolate_page_range(start_pfn, end_pfn,
1899  				       MIGRATE_MOVABLE, true);
1900  	if (ret)
1901  		return ret;
1902  
1903  	arg.start_pfn = start_pfn;
1904  	arg.nr_pages = nr_pages;
1905  	node_states_check_changes_offline(nr_pages, zone, &arg);
1906  
1907  	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1908  	ret = notifier_to_errno(ret);
1909  	if (ret)
1910  		goto failed_removal;
1911  
1912  	pfn = start_pfn;
1913  	expire = jiffies + timeout;
1914  	drain = 0;
1915  	retry_max = 5;
1916  repeat:
1917  	/* start memory hot removal */
1918  	ret = -EAGAIN;
1919  	if (time_after(jiffies, expire))
1920  		goto failed_removal;
1921  	ret = -EINTR;
1922  	if (signal_pending(current))
1923  		goto failed_removal;
1924  	ret = 0;
1925  	if (drain) {
1926  		lru_add_drain_all();
1927  		cond_resched();
1928  		drain_all_pages(zone);
1929  	}
1930  
1931  	pfn = scan_movable_pages(start_pfn, end_pfn);
1932  	if (pfn) { /* We have movable pages */
1933  		ret = do_migrate_range(pfn, end_pfn);
1934  		if (!ret) {
1935  			drain = 1;
1936  			goto repeat;
1937  		} else {
1938  			if (ret < 0)
1939  				if (--retry_max == 0)
1940  					goto failed_removal;
1941  			yield();
1942  			drain = 1;
1943  			goto repeat;
1944  		}
1945  	}
1946  	/* drain all zones' lru pagevecs, this is asynchronous... */
1947  	lru_add_drain_all();
1948  	yield();
1949  	/* drain pcp pages, this is synchronous. */
1950  	drain_all_pages(zone);
1951  	/*
1952  	 * dissolve free hugepages in the memory block before actually
1953  	 * offlining it, in order to keep hugetlbfs's object counting consistent.
1954  	 */
1955  	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1956  	if (ret)
1957  		goto failed_removal;
1958  	/* check again */
1959  	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1960  	if (offlined_pages < 0) {
1961  		ret = -EBUSY;
1962  		goto failed_removal;
1963  	}
1964  	pr_info("Offlined Pages %ld\n", offlined_pages);
1965  	/* Ok, the whole target range is isolated.
1966  	   We cannot roll back at this point. */
1967  	offline_isolated_pages(start_pfn, end_pfn);
1968  	/* reset pagetype flags and make the migratetype MOVABLE */
1969  	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1970  	/* removal success */
1971  	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1972  	zone->present_pages -= offlined_pages;
1973  
1974  	pgdat_resize_lock(zone->zone_pgdat, &flags);
1975  	zone->zone_pgdat->node_present_pages -= offlined_pages;
1976  	pgdat_resize_unlock(zone->zone_pgdat, &flags);
1977  
1978  	init_per_zone_wmark_min();
1979  
1980  	if (!populated_zone(zone)) {
1981  		zone_pcp_reset(zone);
1982  		mutex_lock(&zonelists_mutex);
1983  		build_all_zonelists(NULL, NULL);
1984  		mutex_unlock(&zonelists_mutex);
1985  	} else
1986  		zone_pcp_update(zone);
1987  
1988  	node_states_clear_node(node, &arg);
1989  	if (arg.status_change_nid >= 0) {
1990  		kswapd_stop(node);
1991  		kcompactd_stop(node);
1992  	}
1993  
1994  	vm_total_pages = nr_free_pagecache_pages();
1995  	writeback_set_ratelimit();
1996  
1997  	memory_notify(MEM_OFFLINE, &arg);
1998  	return 0;
1999  
2000  failed_removal:
2001  	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
2002  		 (unsigned long long) start_pfn << PAGE_SHIFT,
2003  		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
2004  	memory_notify(MEM_CANCEL_OFFLINE, &arg);
2005  	/* pushback to free area */
2006  	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
2007  	return ret;
2008  }
2009  
2010  /* Must be protected by mem_hotplug_begin() */
2011  int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
2012  {
2013  	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
2014  }
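
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * helper (example_offline_range is an invented name) showing the locking
 * rule stated above -- the caller of offline_pages() must hold the memory
 * hotplug lock via mem_hotplug_begin()/mem_hotplug_done().  start_pfn and
 * nr_pages are assumed to be pageblock aligned, as __offline_pages() checks.
 */
static int __maybe_unused example_offline_range(unsigned long start_pfn,
						unsigned long nr_pages)
{
	int ret;

	mem_hotplug_begin();		/* required by offline_pages() */
	ret = offline_pages(start_pfn, nr_pages);
	mem_hotplug_done();

	return ret;
}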
2015  #endif /* CONFIG_MEMORY_HOTREMOVE */
2016  
2017  /**
2018   * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
2019   * @start_pfn: start pfn of the memory range
2020   * @end_pfn: end pfn of the memory range
2021   * @arg: argument passed to func
2022   * @func: callback for each memory section walked
2023   *
2024   * This function walks through all present mem sections in range
2025   * [start_pfn, end_pfn) and calls func on each mem section.
2026   *
2027   * Returns the return value of func.
2028   */
2029  int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
2030  		void *arg, int (*func)(struct memory_block *, void *))
2031  {
2032  	struct memory_block *mem = NULL;
2033  	struct mem_section *section;
2034  	unsigned long pfn, section_nr;
2035  	int ret;
2036  
2037  	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2038  		section_nr = pfn_to_section_nr(pfn);
2039  		if (!present_section_nr(section_nr))
2040  			continue;
2041  
2042  		section = __nr_to_section(section_nr);
2043  		/* same memblock? */
2044  		if (mem)
2045  			if ((section_nr >= mem->start_section_nr) &&
2046  			    (section_nr <= mem->end_section_nr))
2047  				continue;
2048  
2049  		mem = find_memory_block_hinted(section, mem);
2050  		if (!mem)
2051  			continue;
2052  
2053  		ret = func(mem, arg);
2054  		if (ret) {
2055  			kobject_put(&mem->dev.kobj);
2056  			return ret;
2057  		}
2058  	}
2059  
2060  	if (mem)
2061  		kobject_put(&mem->dev.kobj);
2062  
2063  	return 0;
2064  }
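
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * callback (example_count_blocks_cb is an invented name) showing the
 * (struct memory_block *, void *) signature that walk_memory_range() expects.
 * A caller would do, for instance:
 *
 *	unsigned long nr_blocks = 0;
 *
 *	walk_memory_range(start_pfn, end_pfn, &nr_blocks,
 *			  example_count_blocks_cb);
 */
static int __maybe_unused example_count_blocks_cb(struct memory_block *mem,
						  void *arg)
{
	(*(unsigned long *)arg)++;	/* count this memory block */
	return 0;			/* non-zero would abort the walk */
}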
2065  
2066  #ifdef CONFIG_MEMORY_HOTREMOVE
2067  static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
2068  {
2069  	int ret = !is_memblock_offlined(mem);
2070  
2071  	if (unlikely(ret)) {
2072  		phys_addr_t beginpa, endpa;
2073  
2074  		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
2075  		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
2076  		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
2077  			&beginpa, &endpa);
2078  	}
2079  
2080  	return ret;
2081  }
2082  
2083  static int check_cpu_on_node(pg_data_t *pgdat)
2084  {
2085  	int cpu;
2086  
2087  	for_each_present_cpu(cpu) {
2088  		if (cpu_to_node(cpu) == pgdat->node_id)
2089  			/*
2090  			 * a cpu on this node hasn't been removed, so we
2091  			 * can't offline this node.
2092  			 */
2093  			return -EBUSY;
2094  	}
2095  
2096  	return 0;
2097  }
2098  
2099  static void unmap_cpu_on_node(pg_data_t *pgdat)
2100  {
2101  #ifdef CONFIG_ACPI_NUMA
2102  	int cpu;
2103  
2104  	for_each_possible_cpu(cpu)
2105  		if (cpu_to_node(cpu) == pgdat->node_id)
2106  			numa_clear_node(cpu);
2107  #endif
2108  }
2109  
2110  static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
2111  {
2112  	int ret;
2113  
2114  	ret = check_cpu_on_node(pgdat);
2115  	if (ret)
2116  		return ret;
2117  
2118  	/*
2119  	 * the node will be offlined when we get here, so we can clear
2120  	 * the cpu_to_node() mapping now.
2121  	 */
2122  
2123  	unmap_cpu_on_node(pgdat);
2124  	return 0;
2125  }
2126  
2127  /**
2128   * try_offline_node
2129   *
2130   * Offline a node if all memory sections and cpus of the node are removed.
2131   *
2132   * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2133   * and online/offline operations before this call.
2134   */
2135  void try_offline_node(int nid)
2136  {
2137  	pg_data_t *pgdat = NODE_DATA(nid);
2138  	unsigned long start_pfn = pgdat->node_start_pfn;
2139  	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
2140  	unsigned long pfn;
2141  
2142  	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2143  		unsigned long section_nr = pfn_to_section_nr(pfn);
2144  
2145  		if (!present_section_nr(section_nr))
2146  			continue;
2147  
2148  		if (pfn_to_nid(pfn) != nid)
2149  			continue;
2150  
2151  		/*
2152  		 * some memory sections of this node have not been removed,
2153  		 * so we can't offline the node now.
2154  		 */
2155  		return;
2156  	}
2157  
2158  	if (check_and_unmap_cpu_on_node(pgdat))
2159  		return;
2160  
2161  	/*
2162  	 * all memory/cpus of this node have been removed, so we can offline
2163  	 * this node now.
2164  	 */
2165  	node_set_offline(nid);
2166  	unregister_one_node(nid);
2167  }
2168  EXPORT_SYMBOL(try_offline_node);
2169  
2170  /**
2171   * remove_memory
2172   *
2173   * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
2174   * and online/offline operations before this call, as required by
2175   * try_offline_node().
2176   */
2177  void __ref remove_memory(int nid, u64 start, u64 size)
2178  {
2179  	int ret;
2180  
2181  	BUG_ON(check_hotplug_memory_range(start, size));
2182  
2183  	mem_hotplug_begin();
2184  
2185  	/*
2186  	 * All memory blocks must be offlined before removing memory.  Check
2187  	 * whether all memory blocks in question are offline and trigger a BUG()
2188  	 * if this is not the case.
2189  	 */
2190  	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
2191  				check_memblock_offlined_cb);
2192  	if (ret)
2193  		BUG();
2194  
2195  	/* remove memmap entry */
2196  	firmware_map_remove(start, start + size, "System RAM");
2197  	memblock_free(start, size);
2198  	memblock_remove(start, size);
2199  
2200  	arch_remove_memory(start, size);
2201  
2202  	try_offline_node(nid);
2203  
2204  	mem_hotplug_done();
2205  }
2206  EXPORT_SYMBOL_GPL(remove_memory);
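
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller (example_remove_memory is an invented name) honouring the locking
 * rule from the kernel-doc above.  Every memory block in the range is
 * assumed to have been offlined already, otherwise remove_memory() BUGs.
 */
static void __maybe_unused example_remove_memory(int nid, u64 start, u64 size)
{
	lock_device_hotplug();		/* serialize against online/offline */
	remove_memory(nid, start, size);
	unlock_device_hotplug();
}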
2207  #endif /* CONFIG_MEMORY_HOTREMOVE */
2208