// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp. June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS		128
#define INIT_PHYSMEM_REGIONS		4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS	INIT_MEMBLOCK_REGIONS
#endif
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node().
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
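/*
 * A minimal usage sketch of the flow described above (illustrative only;
 * the addresses, sizes and the initrd_* variables are made-up placeholders):
 *
 *	memblock_add(0x80000000, SZ_512M);		// register usable RAM
 *	memblock_reserve(initrd_start, initrd_size);	// keep initrd intact
 *	memblock_allow_resize();	// safe once reservations are known
 *
 *	// physical address of a 64K block, e.g. for an early table
 *	phys_addr_t pa = memblock_phys_alloc(SZ_64K, SZ_64K);
 *	// zeroed block mapped at a virtual address
 *	void *table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 */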

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL in memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that it can be freed completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
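/*
 * Worked example for the two-pass scheme above (illustrative only): with a
 * single existing region [0x1000-0x3000), adding [0x2000-0x5000) overlaps
 * its tail. The counting pass finds that one new region is needed for the
 * uncovered part [0x3000-0x5000); the insertion pass adds it, and
 * memblock_merge_regions() then coalesces the two into [0x1000-0x5000).
 */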

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
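/*
 * Worked example (illustrative only): with a single region [0x0-0x4000),
 * isolating [0x1000-0x3000) splits it into [0x0-0x1000), [0x1000-0x3000)
 * and [0x3000-0x4000); *@start_rgn = 1 and *@end_rgn = 2 then denote
 * exactly the middle region, which callers can remove or re-flag without
 * touching its neighbours.
 */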

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API. The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
EXPORT_SYMBOL_GPL(memblock_free);
#endif

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears @flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
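/*
 * Usage sketch (illustrative only): callers normally consume this through
 * the for_each_free_mem_range() iterator, which hides the packed *@idx:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */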

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
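/*
 * Usage sketch (illustrative only): walking every memory range of node 0
 * in PFN units via for_each_mem_pfn_range(), which wraps this function:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		pr_info("pfns: [%lx-%lx)\n", start_pfn, end_pfn);
 */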

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, this function sets the min_count to 0 using
 * kmemleak_alloc_phys() for the allocated boot memory block, so that it is
 * never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}
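/*
 * Usage sketch (illustrative only): ask for a 1M block below 4G, preferring
 * node 1 but allowing the documented fallbacks to any node and, on mirrored
 * systems, to non-mirrored memory:
 *
 *	phys_addr_t pa = memblock_alloc_range_nid(SZ_1M, SZ_1M, 0, SZ_4G,
 *						  1, false);
 *	if (!pa)
 *		panic("%s: failed to allocate early memory\n", __func__);
 */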

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	      is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	      is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	      is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory size @limit into the maximum address that
	 * falls within one of the memory memblock regions. If @limit
	 * exceeds the total size of those regions, max_addr keeps its
	 * original value of PHYS_ADDR_MAX.
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}
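
/*
 * Worked example for __find_max_addr(), assuming a hypothetical layout with
 * regions [0x0, 0x40000000) and [0x80000000, 0xc0000000): a @limit of
 * 0x60000000 consumes the 0x40000000 bytes of the first region, leaving
 * 0x20000000, so max_addr = 0x80000000 + 0x20000000 = 0xa0000000. Any
 * @limit above the 0x80000000 bytes of total memory returns PHYS_ADDR_MAX.
 */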

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
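
/*
 * Illustrative sketch: this is the helper that typically backs the "mem="
 * command line option; an arch's parser (hypothetical here) might simply do:
 *
 *	memblock_enforce_memory_limit(memparse(p, &p));
 */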

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions outside the isolated range */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}
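
/*
 * Illustrative sketch (hypothetical range): a crash-dump kernel that must
 * only touch its usable-memory window can drop everything else while the
 * NOMAP regions are kept intact:
 *
 *	memblock_cap_memory_range(crash_base, crash_size);
 */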

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
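
/*
 * Worked example for memblock_search(), assuming a hypothetical type with
 * two regions base/size = {0x1000/0x1000, 0x4000/0x2000}: looking up 0x4800
 * probes mid = 1 (left = 0, right = 2), sees 0x4000 <= 0x4800 < 0x6000 and
 * returns 1; looking up 0x3000 narrows right to 1, then left to 1, and
 * returns -1 since the address falls in the hole between the regions.
 */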

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
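
/*
 * Illustrative sketch contrasting the two predicates above on a hypothetical
 * range: a candidate range is usable for a new reservation only when it is
 * backed by RAM and nothing has claimed it yet.
 *
 *	if (memblock_is_region_memory(base, size) &&
 *	    !memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */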

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
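
/*
 * Worked example for memblock_trim_memory() with a hypothetical align of
 * SZ_2M: a region [0x1ff000, 0x600000) becomes [0x200000, 0x600000); a
 * region [0x1ff000, 0x201000) is removed outright, because round_up() and
 * round_down() both yield 0x200000 and nothing aligned remains inside it.
 */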

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
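
/*
 * Illustrative usage: booting with "memblock=debug" on the kernel command
 * line sets memblock_debug, which turns on the memblock_dbg() traces used
 * throughout this file and lets memblock_dump_all() print the full tables.
 */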

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}
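
/*
 * Worked example for __free_pages_memory(), assuming end is far away: for
 * start = 0x305, __ffs() gives order 0, so one page is freed and start
 * becomes 0x306; __ffs(0x306) = 1 frees an order-1 block up to 0x308;
 * __ffs(0x308) = 3 then frees an order-3 block, and so on, with the inner
 * loop shrinking the order whenever the block would run past end.
 */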

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_range(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as when node 0 has no RAM installed,
	 * low memory may reside on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}
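
/*
 * Illustrative sketch (hypothetical arch code): memblock_free_all() is
 * normally called once from the arch's mem_init(), after which the buddy
 * allocator owns all free memory:
 *
 *	void __init mem_init(void)
 *	{
 *		memblock_free_all();
 *	}
 */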

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);
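
/*
 * Illustrative usage (ranges hypothetical, format as produced by
 * memblock_debug_show() above): with CONFIG_DEBUG_FS and
 * CONFIG_ARCH_KEEP_MEMBLOCK enabled, the region tables can be read at
 * runtime:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000bfffffff
 */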

#endif /* CONFIG_DEBUG_FS */