/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/bootmem.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		pr_info("bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}
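
/*
 * Example: a 128 MB node with 4 KB pages spans 32768 pfns, so its
 * bitmap needs DIV_ROUND_UP(32768, 8) = 4096 bytes (already a
 * multiple of sizeof(long)), which bootmem_bootmap_pages() below
 * rounds up to a single page.
 */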

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}

	list_add_tail(&bdata->list, &bdata_list);
}
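
/*
 * Keeping bdata_list sorted by node_min_pfn matters: mark_bootmem()
 * and alloc_bootmem_core() below walk the list in ascending address
 * order and rely on that ordering when spanning or skipping nodes.
 */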

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

/*
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part_phys(physaddr, size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
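
/*
 * Note the inward rounding above: PFN_UP()/PFN_DOWN() ensure that only
 * whole pages inside [physaddr, physaddr + size) are freed. Partial
 * pages at either end are left alone, matching the "partial pages are
 * considered reserved" convention of free_bootmem() below.
 */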

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, cur, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
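		/*
		 * Example (BITS_PER_LONG == 64, node_min_pfn == 0,
		 * start == 70): idx = 70 and shift = 6, so vec takes
		 * bits 6..63 of map word 1 (pfns 70..127) in its low
		 * bits and, if at least 64 pfns remain, bits 0..5 of
		 * map word 2 (pfns 128..133) in its high bits.
		 */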
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), start, order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, cur, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

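	/* Finally, release the pages that back the bitmap itself. */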
	cur = bdata->node_min_pfn;
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, cur++, 0);
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	reset_all_zones_managed_pages();

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	totalram_pages += total_pages;

	return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return;

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return 0;

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}
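
/*
 * mark_bootmem() above handles ranges that span several nodes by
 * walking the sorted bdata_list and marking each intersecting node
 * piecewise. If an exclusive reservation fails part-way through, the
 * already-reserved prefix [start, pos) is freed again before the
 * error is returned.
 */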

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}

static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}
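
/*
 * Example: with node_min_pfn == 3, align_idx(bdata, 6, 4) returns
 * ALIGN(3 + 6, 4) - 3 = 9, so the absolute pfn 3 + 9 = 12 is
 * 4-aligned even though the relative index 9 is not.
 */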

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}
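	/*
	 * fallback is sidx + 1 rather than sidx so that it is non-zero
	 * even when the search starts at index zero. If the scan from
	 * hint_idx finds nothing, the code after the loop below retries
	 * once from align_idx(bdata, fallback - 1, step).
	 */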

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);
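		/*
		 * merge is set when start_off was pulled back into the
		 * partial page left by the previous allocation. That
		 * page is already reserved, so the reservation below
		 * starts at PFN_DOWN(start_off) + merge to skip it.
		 */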

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}

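/*
 * Try each node in turn. bdata_list is sorted by address, so nodes
 * lying entirely below the goal are skipped and the walk stops at the
 * first node that begins at or above the limit.
 */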
static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					      unsigned long align,
					      unsigned long goal,
					      unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
	if (ptr)
		return ptr;

	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update the goal according to MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

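	/*
	 * Note: with 4 KB pages, 128 >> (20 - PAGE_SHIFT) evaluates to
	 * zero, so the headroom test below reduces to end_pfn >
	 * MAX_DMA32_PFN. The expression was presumably meant to be
	 * 128 << (20 - PAGE_SHIFT), i.e. 128 MB worth of pages above
	 * the DMA32 boundary.
	 */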
	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}