/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
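
/*
 * Worked example (illustrative, not from the original source): with a
 * 64-bit phys_addr_t, memblock_cap_size(ULLONG_MAX - SZ_4K, &size) with
 * size == SZ_16K clamps size down to SZ_4K, so @base + *@size can never
 * wrap past the top of the physical address space.
 */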

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}
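
/*
 * Usage sketch (illustrative, not from the original source): callers
 * normally reach this through memblock_find_in_range() or the
 * allocators below.  A hypothetical early-boot caller (SZ_* constants
 * come from <linux/sizes.h>):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, SZ_4G, SZ_2M, SZ_2M);
 *	if (addr)
 *		memblock_reserve(addr, SZ_2M);	// claim it before use
 *
 * memblock_set_bottom_up(true), declared in <linux/memblock.h>, flips
 * the search direction used here.
 */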

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock.  Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
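
/*
 * Worked example (illustrative, not from the original source): two
 * entries [0x1000000, 0x2000000) and [0x2000000, 0x3000000) with equal
 * nid and flags collapse into a single [0x1000000, 0x3000000) entry;
 * a hole between them, or differing flags, keeps them separate.
 */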

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
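
/*
 * Usage sketch (illustrative, not from the original source): overlapping
 * additions are legal and end up merged, e.g.
 *
 *	memblock_add(SZ_16M, SZ_16M);		// covers [16M, 32M)
 *	memblock_add(SZ_16M + SZ_8M, SZ_16M);	// covers [24M, 40M)
 *
 * leaves a single memory region covering [16M, 40M).
 */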

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *type = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
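
/*
 * Worked example (illustrative, not from the original source): isolating
 * [8M, 24M) from a single region [0, 32M) splits it into [0, 8M),
 * [8M, 24M) and [24M, 32M); *start_rgn and *end_rgn then bracket the
 * middle entry, i.e. *start_rgn == 1 and *end_rgn == 2.
 */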

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}
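
/*
 * Usage sketch (illustrative, not from the original source): the typical
 * early-boot order in arch setup code is to register RAM first, then
 * carve out ranges that must never be handed to the allocator.  The
 * symbols below stand in for whatever the architecture actually uses:
 *
 *	memblock_add(ram_base, ram_size);
 *	memblock_reserve(__pa(_stext), _end - _stext);	// kernel image
 *	memblock_reserve(initrd_start, initrd_size);	// initramfs
 */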

/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to set or clear
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx >= 0 && *idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
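
/*
 * Usage sketch (illustrative, not from the original source): this is the
 * backing iterator for the for_each_reserved_mem_region() helper in
 * <linux/memblock.h>:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */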

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
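
/*
 * Usage sketch (illustrative, not from the original source): the common
 * consumer is the for_each_free_mem_range() family, e.g. summing the
 * free (memory-minus-reserved) ranges:
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */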

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
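
/*
 * Usage sketch (illustrative, not from the original source): the
 * physical allocators above either return 0 or panic on failure,
 * depending on the variant:
 *
 *	phys_addr_t pgtable;
 *
 *	pgtable = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	// memblock_alloc() panics on failure, so 'pgtable' is valid here
 */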

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public wrapper around memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking wrapper around memblock_virt_alloc_internal() which
 * provides debug information (including caller info), if enabled, and
 * panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success; panics (and
 * thus does not return) on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
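
/*
 * Usage sketch (illustrative, not from the original source): the _virt_
 * variants return a zeroed virtual address and are the usual way to
 * allocate early data structures ('foo' is a made-up type here):
 *
 *	struct foo *tbl;
 *
 *	tbl = memblock_virt_alloc(sizeof(*tbl), 0);
 *
 * memblock_virt_alloc() is a convenience wrapper in <linux/memblock.h>
 * that ends up in memblock_virt_alloc_try_nid() above.
 */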

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

unsigned long __init_memblock
memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
{
	struct memblock_type *type = &memblock.reserved;
	unsigned long size = 0;
	int idx;

	for (idx = 0; idx < type->cnt; idx++) {
		struct memblock_region *rgn = &type->regions[idx];
		phys_addr_t start, end;

		if (rgn->base + rgn->size < start_addr)
			continue;
		if (rgn->base > end_addr)
			continue;

		start = rgn->base;
		end = start + rgn->size;
		size += end - start;
	}

	return size;
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
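
/*
 * Usage note (illustrative, not from the original source): booting with
 * "memblock=debug" on the kernel command line sets memblock_debug above
 * and turns every memblock_dbg() call in this file into a printed trace
 * of add/reserve/free operations.
 */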

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */