1 /*
2  * Handle the memory map.
3  * The functions here do the job until bootmem takes over.
4  *
5  *  Getting sanitize_e820_map() in sync with i386 version by applying change:
6  *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
7  *     Alex Achenbach <xela@slit.de>, December 2002.
8  *  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
9  *
10  */
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/bootmem.h>
15 #include <linux/ioport.h>
16 #include <linux/string.h>
17 #include <linux/kexec.h>
18 #include <linux/module.h>
19 #include <linux/mm.h>
20 #include <linux/pfn.h>
21 #include <linux/suspend.h>
22 #include <linux/firmware-map.h>
23 
24 #include <asm/pgtable.h>
25 #include <asm/page.h>
26 #include <asm/e820.h>
27 #include <asm/proto.h>
28 #include <asm/setup.h>
29 #include <asm/trampoline.h>
30 
31 /*
32  * The e820 map is the map that gets modified e.g. with command line parameters
33  * and that is also registered with modifications in the kernel resource tree
34  * with the iomem_resource as parent.
35  *
36  * The e820_saved is directly saved after the BIOS-provided memory map is
37  * copied. It doesn't get modified afterwards. It's registered for the
38  * /sys/firmware/memmap interface.
39  *
40  * That memory map is not modified and is used as base for kexec. The kexec'd
41  * kernel should get the same memory map as the firmware provides. Then the
42  * user can e.g. boot the original kernel with mem=1G while still booting the
43  * next kernel with full memory.
44  */
45 struct e820map e820;
46 struct e820map e820_saved;
47 
48 /* For PCI or other memory-mapped resources */
49 unsigned long pci_mem_start = 0xaeedbabe;
50 #ifdef CONFIG_PCI
51 EXPORT_SYMBOL(pci_mem_start);
52 #endif
53 
54 /*
55  * This function checks if any part of the range <start,end> is mapped
56  * with type.
57  */
58 int
59 e820_any_mapped(u64 start, u64 end, unsigned type)
60 {
61 	int i;
62 
63 	for (i = 0; i < e820.nr_map; i++) {
64 		struct e820entry *ei = &e820.map[i];
65 
66 		if (type && ei->type != type)
67 			continue;
68 		if (ei->addr >= end || ei->addr + ei->size <= start)
69 			continue;
70 		return 1;
71 	}
72 	return 0;
73 }
74 EXPORT_SYMBOL_GPL(e820_any_mapped);
75 
76 /*
77  * This function checks if the entire range <start,end> is mapped with type.
78  *
79  * Note: this function only works correctly if the e820 table is sorted and
80  * non-overlapping, which is the case
81  */
82 int __init e820_all_mapped(u64 start, u64 end, unsigned type)
83 {
84 	int i;
85 
86 	for (i = 0; i < e820.nr_map; i++) {
87 		struct e820entry *ei = &e820.map[i];
88 
89 		if (type && ei->type != type)
90 			continue;
91 		/* does this entry overlap (at least partly) with <start,end>? */
92 		if (ei->addr >= end || ei->addr + ei->size <= start)
93 			continue;
94 
95 		/* if the region covers the beginning of <start,end>, move
96 		 * start to the end of the region, since coverage is ok up to there
97 		 */
98 		if (ei->addr <= start)
99 			start = ei->addr + ei->size;
100 		/*
101 		 * if start is now at or beyond end, we're done, full
102 		 * coverage
103 		 */
104 		if (start >= end)
105 			return 1;
106 	}
107 	return 0;
108 }
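
/*
 * Illustrative sketch (not part of this file): a caller that needs to know
 * whether a physical range is usable RAM could use the two helpers above
 * roughly like this (the 1MB-2MB range is a made-up example):
 *
 *	if (e820_all_mapped(0x100000, 0x200000, E820_RAM))
 *		use_as_ram();		(the entire range is RAM)
 *	else if (e820_any_mapped(0x100000, 0x200000, E820_RAM))
 *		handle_partial();	(only part of the range is RAM)
 *
 * use_as_ram()/handle_partial() are placeholders. Passing type == 0 makes
 * either helper match entries of any type.
 */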
109 
110 /*
111  * Add a memory region to the kernel e820 map.
112  */
113 void __init e820_add_region(u64 start, u64 size, int type)
114 {
115 	int x = e820.nr_map;
116 
117 	if (x == ARRAY_SIZE(e820.map)) {
118 		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
119 		return;
120 	}
121 
122 	e820.map[x].addr = start;
123 	e820.map[x].size = size;
124 	e820.map[x].type = type;
125 	e820.nr_map++;
126 }
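
/*
 * Minimal usage sketch (the address and size are hypothetical): platform code
 * that knows about an extra reserved region could append it before the map is
 * sanitized, e.g.
 *
 *	e820_add_region(0xfed00000, 0x1000, E820_RESERVED);
 *
 * Note that e820_add_region() does no merging or overlap checking; callers
 * rely on a later sanitize_e820_map() pass for that.
 */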
127 
128 void __init e820_print_map(char *who)
129 {
130 	int i;
131 
132 	for (i = 0; i < e820.nr_map; i++) {
133 		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
134 		       (unsigned long long) e820.map[i].addr,
135 		       (unsigned long long)
136 		       (e820.map[i].addr + e820.map[i].size));
137 		switch (e820.map[i].type) {
138 		case E820_RAM:
139 		case E820_RESERVED_KERN:
140 			printk(KERN_CONT "(usable)\n");
141 			break;
142 		case E820_RESERVED:
143 			printk(KERN_CONT "(reserved)\n");
144 			break;
145 		case E820_ACPI:
146 			printk(KERN_CONT "(ACPI data)\n");
147 			break;
148 		case E820_NVS:
149 			printk(KERN_CONT "(ACPI NVS)\n");
150 			break;
151 		case E820_UNUSABLE:
152 			printk(KERN_CONT "(unusable)\n");
153 			break;
154 		default:
155 			printk(KERN_CONT "type %u\n", e820.map[i].type);
156 			break;
157 		}
158 	}
159 }
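
/*
 * With the printk format above, the output resembles (values are only an
 * example):
 *
 *	 BIOS-e820: 0000000000000000 - 000000000009f000 (usable)
 *	 BIOS-e820: 000000000009f000 - 00000000000a0000 (reserved)
 *	 BIOS-e820: 0000000000100000 - 000000003fff0000 (usable)
 *
 * where "BIOS-e820" is the 'who' string passed in by the caller.
 */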
160 
161 /*
162  * Sanitize the BIOS e820 map.
163  *
164  * Some e820 responses include overlapping entries. The following
165  * replaces the original e820 map with a new one, removing overlaps,
166  * and resolving conflicting memory types in favor of highest
167  * numbered type.
168  *
169  * The input parameter biosmap points to an array of 'struct
170  * e820entry' which on entry has elements in the range [0, *pnr_map)
171  * valid, and which has space for up to max_nr_map entries.
172  * On return, the resulting sanitized e820 map entries will be
173  * overwritten in the same location, starting at biosmap.
174  *
175  * The integer pointed to by pnr_map must be valid on entry (the
176  * current number of valid entries located at biosmap) and will
177  * be updated on return, with the new number of valid entries
178  * (something no more than max_nr_map.)
179  *
180  * The return value from sanitize_e820_map() is zero if it
181  * successfully 'sanitized' the map entries passed in, and is -1
182  * if it did nothing, which can happen if either of (1) it was
183  * only passed one map entry, or (2) any of the input map entries
184  * were invalid (start + size < start, meaning that the size was
185  * so big the described memory range wrapped around through zero.)
186  *
187  *	Visually we're performing the following
188  *	(1,2,3,4 = memory types)...
189  *
190  *	Sample memory map (w/overlaps):
191  *	   ____22__________________
192  *	   ______________________4_
193  *	   ____1111________________
194  *	   _44_____________________
195  *	   11111111________________
196  *	   ____________________33__
197  *	   ___________44___________
198  *	   __________33333_________
199  *	   ______________22________
200  *	   ___________________2222_
201  *	   _________111111111______
202  *	   _____________________11_
203  *	   _________________4______
204  *
205  *	Sanitized equivalent (no overlap):
206  *	   1_______________________
207  *	   _44_____________________
208  *	   ___1____________________
209  *	   ____22__________________
210  *	   ______11________________
211  *	   _________1______________
212  *	   __________3_____________
213  *	   ___________44___________
214  *	   _____________33_________
215  *	   _______________2________
216  *	   ________________1_______
217  *	   _________________4______
218  *	   ___________________2____
219  *	   ____________________33__
220  *	   ______________________4_
221  */
222 
223 int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
224 				int *pnr_map)
225 {
226 	struct change_member {
227 		struct e820entry *pbios; /* pointer to original bios entry */
228 		unsigned long long addr; /* address for this change point */
229 	};
230 	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
231 	static struct change_member *change_point[2*E820_X_MAX] __initdata;
232 	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
233 	static struct e820entry new_bios[E820_X_MAX] __initdata;
234 	struct change_member *change_tmp;
235 	unsigned long current_type, last_type;
236 	unsigned long long last_addr;
237 	int chgidx, still_changing;
238 	int overlap_entries;
239 	int new_bios_entry;
240 	int old_nr, new_nr, chg_nr;
241 	int i;
242 
243 	/* if there's only one memory region, don't bother */
244 	if (*pnr_map < 2)
245 		return -1;
246 
247 	old_nr = *pnr_map;
248 	BUG_ON(old_nr > max_nr_map);
249 
250 	/* bail out if we find any unreasonable addresses in bios map */
251 	for (i = 0; i < old_nr; i++)
252 		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
253 			return -1;
254 
255 	/* create pointers for initial change-point information (for sorting) */
256 	for (i = 0; i < 2 * old_nr; i++)
257 		change_point[i] = &change_point_list[i];
258 
259 	/* record all known change-points (starting and ending addresses),
260 	   omitting those that are for empty memory regions */
261 	chgidx = 0;
262 	for (i = 0; i < old_nr; i++)	{
263 		if (biosmap[i].size != 0) {
264 			change_point[chgidx]->addr = biosmap[i].addr;
265 			change_point[chgidx++]->pbios = &biosmap[i];
266 			change_point[chgidx]->addr = biosmap[i].addr +
267 				biosmap[i].size;
268 			change_point[chgidx++]->pbios = &biosmap[i];
269 		}
270 	}
271 	chg_nr = chgidx;
272 
273 	/* sort change-point list by memory addresses (low -> high) */
274 	still_changing = 1;
275 	while (still_changing)	{
276 		still_changing = 0;
277 		for (i = 1; i < chg_nr; i++)  {
278 			unsigned long long curaddr, lastaddr;
279 			unsigned long long curpbaddr, lastpbaddr;
280 
281 			curaddr = change_point[i]->addr;
282 			lastaddr = change_point[i - 1]->addr;
283 			curpbaddr = change_point[i]->pbios->addr;
284 			lastpbaddr = change_point[i - 1]->pbios->addr;
285 
286 			/*
287 			 * swap entries, when:
288 			 *
289 			 * curaddr > lastaddr or
290 			 * curaddr == lastaddr and curaddr == curpbaddr and
291 			 * lastaddr != lastpbaddr
292 			 */
293 			if (curaddr < lastaddr ||
294 			    (curaddr == lastaddr && curaddr == curpbaddr &&
295 			     lastaddr != lastpbaddr)) {
296 				change_tmp = change_point[i];
297 				change_point[i] = change_point[i-1];
298 				change_point[i-1] = change_tmp;
299 				still_changing = 1;
300 			}
301 		}
302 	}
303 
304 	/* create a new bios memory map, removing overlaps */
305 	overlap_entries = 0;	 /* number of entries in the overlap table */
306 	new_bios_entry = 0;	 /* index for creating new bios map entries */
307 	last_type = 0;		 /* start with undefined memory type */
308 	last_addr = 0;		 /* start with 0 as last starting address */
309 
310 	/* loop through change-points, determining effect on the new bios map */
311 	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
312 		/* keep track of all overlapping bios entries */
313 		if (change_point[chgidx]->addr ==
314 		    change_point[chgidx]->pbios->addr) {
315 			/*
316 			 * add map entry to overlap list (> 1 entry
317 			 * implies an overlap)
318 			 */
319 			overlap_list[overlap_entries++] =
320 				change_point[chgidx]->pbios;
321 		} else {
322 			/*
323 			 * remove entry from list (order independent,
324 			 * so swap with last)
325 			 */
326 			for (i = 0; i < overlap_entries; i++) {
327 				if (overlap_list[i] ==
328 				    change_point[chgidx]->pbios)
329 					overlap_list[i] =
330 						overlap_list[overlap_entries-1];
331 			}
332 			overlap_entries--;
333 		}
334 		/*
335 		 * if there are overlapping entries, decide which
336 		 * "type" to use (larger value takes precedence --
337 		 * 1=usable, 2,3,4,4+=unusable)
338 		 */
339 		current_type = 0;
340 		for (i = 0; i < overlap_entries; i++)
341 			if (overlap_list[i]->type > current_type)
342 				current_type = overlap_list[i]->type;
343 		/*
344 		 * continue building up new bios map based on this
345 		 * information
346 		 */
347 		if (current_type != last_type)	{
348 			if (last_type != 0)	 {
349 				new_bios[new_bios_entry].size =
350 					change_point[chgidx]->addr - last_addr;
351 				/*
352 				 * move forward only if the new size
353 				 * was non-zero
354 				 */
355 				if (new_bios[new_bios_entry].size != 0)
356 					/*
357 					 * no more space left for new
358 					 * bios entries ?
359 					 */
360 					if (++new_bios_entry >= max_nr_map)
361 						break;
362 			}
363 			if (current_type != 0)	{
364 				new_bios[new_bios_entry].addr =
365 					change_point[chgidx]->addr;
366 				new_bios[new_bios_entry].type = current_type;
367 				last_addr = change_point[chgidx]->addr;
368 			}
369 			last_type = current_type;
370 		}
371 	}
372 	/* retain count for new bios entries */
373 	new_nr = new_bios_entry;
374 
375 	/* copy new bios mapping into original location */
376 	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
377 	*pnr_map = new_nr;
378 
379 	return 0;
380 }
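
/*
 * Worked example of the sanitizing pass above (addresses are invented):
 * given two overlapping input entries
 *
 *	0x000000-0x800000  type 1 (RAM)
 *	0x400000-0x600000  type 2 (reserved)
 *
 * the higher-numbered type wins inside the overlap, so the output map is
 *
 *	0x000000-0x400000  type 1
 *	0x400000-0x600000  type 2
 *	0x600000-0x800000  type 1
 *
 * and *pnr_map is updated from 2 to 3.
 */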
381 
382 static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
383 {
384 	while (nr_map) {
385 		u64 start = biosmap->addr;
386 		u64 size = biosmap->size;
387 		u64 end = start + size;
388 		u32 type = biosmap->type;
389 
390 		/* Overflow in 64 bits? Ignore the memory map. */
391 		if (start > end)
392 			return -1;
393 
394 		e820_add_region(start, size, type);
395 
396 		biosmap++;
397 		nr_map--;
398 	}
399 	return 0;
400 }
401 
402 /*
403  * Copy the BIOS e820 map into a safe place.
404  *
405  * Sanity-check it while we're at it..
406  *
407  * If we're lucky and live on a modern system, the setup code
408  * will have given us a memory map that we can use to properly
409  * set up memory.  If we aren't, we'll fake a memory map.
410  */
411 static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
412 {
413 	/* Only one memory region (or negative)? Ignore it */
414 	if (nr_map < 2)
415 		return -1;
416 
417 	return __append_e820_map(biosmap, nr_map);
418 }
419 
420 static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
421 					u64 size, unsigned old_type,
422 					unsigned new_type)
423 {
424 	int i;
425 	u64 real_updated_size = 0;
426 
427 	BUG_ON(old_type == new_type);
428 
429 	if (size > (ULLONG_MAX - start))
430 		size = ULLONG_MAX - start;
431 
432 	for (i = 0; i < e820x->nr_map; i++) {
433 		struct e820entry *ei = &e820x->map[i];
434 		u64 final_start, final_end;
435 		if (ei->type != old_type)
436 			continue;
437 		/* totally covered? */
438 		if (ei->addr >= start &&
439 		    (ei->addr + ei->size) <= (start + size)) {
440 			ei->type = new_type;
441 			real_updated_size += ei->size;
442 			continue;
443 		}
444 		/* partially covered */
445 		final_start = max(start, ei->addr);
446 		final_end = min(start + size, ei->addr + ei->size);
447 		if (final_start >= final_end)
448 			continue;
449 		e820_add_region(final_start, final_end - final_start,
450 					 new_type);
451 		real_updated_size += final_end - final_start;
452 
453 		ei->size -= final_end - final_start;
454 		if (ei->addr < final_start)
455 			continue;
456 		ei->addr = final_end;
457 	}
458 	return real_updated_size;
459 }
460 
461 u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
462 			     unsigned new_type)
463 {
464 	return e820_update_range_map(&e820, start, size, old_type, new_type);
465 }
466 
467 static u64 __init e820_update_range_saved(u64 start, u64 size,
468 					  unsigned old_type, unsigned new_type)
469 {
470 	return e820_update_range_map(&e820_saved, start, size, old_type,
471 				     new_type);
472 }
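
/*
 * Usage sketch: early_reserve_e820() below flips a freshly allocated block
 * from RAM to reserved in both maps with
 *
 *	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
 *	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
 *
 * The return value is the number of bytes whose type actually changed.
 */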
473 
474 /* make e820 not cover the range */
475 u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
476 			     int checktype)
477 {
478 	int i;
479 	u64 real_removed_size = 0;
480 
481 	if (size > (ULLONG_MAX - start))
482 		size = ULLONG_MAX - start;
483 
484 	for (i = 0; i < e820.nr_map; i++) {
485 		struct e820entry *ei = &e820.map[i];
486 		u64 final_start, final_end;
487 
488 		if (checktype && ei->type != old_type)
489 			continue;
490 		/* totally covered? */
491 		if (ei->addr >= start &&
492 		    (ei->addr + ei->size) <= (start + size)) {
493 			real_removed_size += ei->size;
494 			memset(ei, 0, sizeof(struct e820entry));
495 			continue;
496 		}
497 		/* partially covered */
498 		final_start = max(start, ei->addr);
499 		final_end = min(start + size, ei->addr + ei->size);
500 		if (final_start >= final_end)
501 			continue;
502 		real_removed_size += final_end - final_start;
503 
504 		ei->size -= final_end - final_start;
505 		if (ei->addr < final_start)
506 			continue;
507 		ei->addr = final_end;
508 	}
509 	return real_removed_size;
510 }
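
/*
 * Usage sketch: the "mem=" handling below trims all RAM above the requested
 * limit with
 *
 *	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
 *
 * With checktype == 0 the old_type argument is ignored and entries of any
 * type are removed from the range.
 */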
511 
512 void __init update_e820(void)
513 {
514 	int nr_map;
515 
516 	nr_map = e820.nr_map;
517 	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
518 		return;
519 	e820.nr_map = nr_map;
520 	printk(KERN_INFO "modified physical RAM map:\n");
521 	e820_print_map("modified");
522 }
523 static void __init update_e820_saved(void)
524 {
525 	int nr_map;
526 
527 	nr_map = e820_saved.nr_map;
528 	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
529 		return;
530 	e820_saved.nr_map = nr_map;
531 }
532 #define MAX_GAP_END 0x100000000ull
533 /*
534  * Search for a gap in the e820 memory space from start_addr to end_addr.
535  */
536 __init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
537 		unsigned long start_addr, unsigned long long end_addr)
538 {
539 	unsigned long long last;
540 	int i = e820.nr_map;
541 	int found = 0;
542 
543 	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;
544 
545 	while (--i >= 0) {
546 		unsigned long long start = e820.map[i].addr;
547 		unsigned long long end = start + e820.map[i].size;
548 
549 		if (end < start_addr)
550 			continue;
551 
552 		/*
553 		 * Since "last" is at most 4GB, we know we'll
554 		 * fit in 32 bits if this condition is true
555 		 */
556 		if (last > end) {
557 			unsigned long gap = last - end;
558 
559 			if (gap >= *gapsize) {
560 				*gapsize = gap;
561 				*gapstart = end;
562 				found = 1;
563 			}
564 		}
565 		if (start < last)
566 			last = start;
567 	}
568 	return found;
569 }
570 
571 /*
572  * Search for the biggest gap in the low 32 bits of the e820
573  * memory space.  We pass this space to PCI to assign MMIO resources
574  * for hotplug or unconfigured devices.
575  * Hopefully the BIOS left enough space for them.
576  */
577 __init void e820_setup_gap(void)
578 {
579 	unsigned long gapstart, gapsize, round;
580 	int found;
581 
582 	gapstart = 0x10000000;
583 	gapsize = 0x400000;
584 	found  = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);
585 
586 #ifdef CONFIG_X86_64
587 	if (!found) {
588 		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
589 		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
590 		       "address range\n"
591 		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
592 		       "registers may break!\n");
593 	}
594 #endif
595 
596 	/*
597 	 * See how much we want to round up: start off with
598 	 * rounding to the next 1MB area.
599 	 */
600 	round = 0x100000;
601 	while ((gapsize >> 4) > round)
602 		round += round;
603 	/* Fun with two's complement */
604 	pci_mem_start = (gapstart + round) & -round;
605 
606 	printk(KERN_INFO
607 	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
608 	       pci_mem_start, gapstart, gapsize);
609 }
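
/*
 * Worked example of the rounding above (numbers are hypothetical): with a
 * gap found at gapstart = 0xc0000000 of gapsize = 0x40000000, gapsize >> 4
 * is 0x4000000, so 'round' doubles from 1MB up to 0x4000000, and
 *
 *	pci_mem_start = (0xc0000000 + 0x4000000) & -0x4000000 = 0xc4000000
 *
 * i.e. PCI allocations start 64MB into the gap, aligned to 64MB.
 */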
610 
611 /**
612  * Because of the size limitation of struct boot_params, only the first
613  * 128 E820 memory entries are passed to the kernel via
614  * boot_params.e820_map; the rest are passed via the SETUP_E820_EXT node
615  * of the linked list of struct setup_data, which is parsed here.
616  */
617 void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
618 {
619 	u32 map_len;
620 	int entries;
621 	struct e820entry *extmap;
622 
623 	entries = sdata->len / sizeof(struct e820entry);
624 	map_len = sdata->len + sizeof(struct setup_data);
625 	if (map_len > PAGE_SIZE)
626 		sdata = early_ioremap(pa_data, map_len);
627 	extmap = (struct e820entry *)(sdata->data);
628 	__append_e820_map(extmap, entries);
629 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
630 	if (map_len > PAGE_SIZE)
631 		early_iounmap(sdata, map_len);
632 	printk(KERN_INFO "extended physical RAM map:\n");
633 	e820_print_map("extended");
634 }
635 
636 #if defined(CONFIG_X86_64) || \
637 	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
638 /**
639  * Find the ranges of physical addresses that do not correspond to
640  * e820 RAM areas and mark the corresponding pages as nosave for
641  * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
642  *
643  * This function requires the e820 map to be sorted and without any
644  * overlapping entries and assumes the first e820 area to be RAM.
645  */
646 void __init e820_mark_nosave_regions(unsigned long limit_pfn)
647 {
648 	int i;
649 	unsigned long pfn;
650 
651 	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
652 	for (i = 1; i < e820.nr_map; i++) {
653 		struct e820entry *ei = &e820.map[i];
654 
655 		if (pfn < PFN_UP(ei->addr))
656 			register_nosave_region(pfn, PFN_UP(ei->addr));
657 
658 		pfn = PFN_DOWN(ei->addr + ei->size);
659 		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
660 			register_nosave_region(PFN_UP(ei->addr), pfn);
661 
662 		if (pfn >= limit_pfn)
663 			break;
664 	}
665 }
666 #endif
667 
668 #ifdef CONFIG_HIBERNATION
669 /**
670  * Mark ACPI NVS memory region, so that we can save/restore it during
671  * hibernation and the subsequent resume.
672  */
673 static int __init e820_mark_nvs_memory(void)
674 {
675 	int i;
676 
677 	for (i = 0; i < e820.nr_map; i++) {
678 		struct e820entry *ei = &e820.map[i];
679 
680 		if (ei->type == E820_NVS)
681 			hibernate_nvs_register(ei->addr, ei->size);
682 	}
683 
684 	return 0;
685 }
686 core_initcall(e820_mark_nvs_memory);
687 #endif
688 
689 /*
690  * Early reserved memory areas.
691  */
692 #define MAX_EARLY_RES 20
693 
694 struct early_res {
695 	u64 start, end;
696 	char name[16];
697 	char overlap_ok;
698 };
699 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
700 	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
701 	{}
702 };
703 
704 static int __init find_overlapped_early(u64 start, u64 end)
705 {
706 	int i;
707 	struct early_res *r;
708 
709 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
710 		r = &early_res[i];
711 		if (end > r->start && start < r->end)
712 			break;
713 	}
714 
715 	return i;
716 }
717 
718 /*
719  * Drop the i-th range from the early reservation map,
720  * by copying any higher ranges down one over it, and
721  * clearing what had been the last slot.
722  */
723 static void __init drop_range(int i)
724 {
725 	int j;
726 
727 	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
728 		;
729 
730 	memmove(&early_res[i], &early_res[i + 1],
731 	       (j - 1 - i) * sizeof(struct early_res));
732 
733 	early_res[j - 1].end = 0;
734 }
735 
736 /*
737  * Split any existing ranges that:
738  *  1) are marked 'overlap_ok', and
739  *  2) overlap with the stated range [start, end)
740  * into whatever portion (if any) of the existing range is entirely
741  * below or entirely above the stated range.  Drop the portion
742  * of the existing range that overlaps with the stated range,
743  * which will allow the caller of this routine to then add that
744  * stated range without conflicting with any existing range.
745  */
746 static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
747 {
748 	int i;
749 	struct early_res *r;
750 	u64 lower_start, lower_end;
751 	u64 upper_start, upper_end;
752 	char name[16];
753 
754 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
755 		r = &early_res[i];
756 
757 		/* Continue past non-overlapping ranges */
758 		if (end <= r->start || start >= r->end)
759 			continue;
760 
761 		/*
762 		 * Leave non-ok overlaps as is; let caller
763 		 * panic "Overlapping early reservations"
764 		 * when it hits this overlap.
765 		 */
766 		if (!r->overlap_ok)
767 			return;
768 
769 		/*
770 		 * We have an ok overlap.  We will drop it from the early
771 		 * reservation map, and add back in any non-overlapping
772 		 * portions (lower or upper) as separate, overlap_ok,
773 		 * non-overlapping ranges.
774 		 */
775 
776 		/* 1. Note any non-overlapping (lower or upper) ranges. */
777 		strncpy(name, r->name, sizeof(name) - 1);
778 
779 		lower_start = lower_end = 0;
780 		upper_start = upper_end = 0;
781 		if (r->start < start) {
782 			lower_start = r->start;
783 			lower_end = start;
784 		}
785 		if (r->end > end) {
786 			upper_start = end;
787 			upper_end = r->end;
788 		}
789 
790 		/* 2. Drop the original ok overlapping range */
791 		drop_range(i);
792 
793 		i--;		/* resume for-loop on copied down entry */
794 
795 		/* 3. Add back in any non-overlapping ranges. */
796 		if (lower_end)
797 			reserve_early_overlap_ok(lower_start, lower_end, name);
798 		if (upper_end)
799 			reserve_early_overlap_ok(upper_start, upper_end, name);
800 	}
801 }
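
/*
 * Worked example (hypothetical ranges): if an overlap_ok reservation "X"
 * covers [0x1000, 0x9000) and a new reservation wants [0x3000, 0x5000),
 * the code above drops "X" and re-adds its non-overlapping pieces
 * [0x1000, 0x3000) and [0x5000, 0x9000), both still named "X" and still
 * overlap_ok, so the new [0x3000, 0x5000) range can then be reserved
 * without triggering the overlap panic in __reserve_early().
 */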
802 
803 static void __init __reserve_early(u64 start, u64 end, char *name,
804 						int overlap_ok)
805 {
806 	int i;
807 	struct early_res *r;
808 
809 	i = find_overlapped_early(start, end);
810 	if (i >= MAX_EARLY_RES)
811 		panic("Too many early reservations");
812 	r = &early_res[i];
813 	if (r->end)
814 		panic("Overlapping early reservations "
815 		      "%llx-%llx %s to %llx-%llx %s\n",
816 		      start, end - 1, name?name:"", r->start,
817 		      r->end - 1, r->name);
818 	r->start = start;
819 	r->end = end;
820 	r->overlap_ok = overlap_ok;
821 	if (name)
822 		strncpy(r->name, name, sizeof(r->name) - 1);
823 }
824 
825 /*
826  * A few early reservations come here.
827  *
828  * The 'overlap_ok' in the name of this routine does -not- mean it
829  * is ok for these reservations to overlap an earlier reservation.
830  * Rather it means that it is ok for subsequent reservations to
831  * overlap this one.
832  *
833  * Use this entry point to reserve early ranges when you are doing
834  * so out of "Paranoia", reserving perhaps more memory than you need,
835  * just in case, and don't mind a subsequent overlapping reservation
836  * that is known to be needed.
837  *
838  * The drop_overlaps_that_are_ok() call here isn't really needed.
839  * It would be needed if we had two colliding 'overlap_ok'
840  * reservations, so that the second such would not panic on the
841  * overlap with the first.  We don't have any such as of this
842  * writing, but might as well tolerate such if it happens in
843  * the future.
844  */
845 void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
846 {
847 	drop_overlaps_that_are_ok(start, end);
848 	__reserve_early(start, end, name, 1);
849 }
850 
851 /*
852  * Most early reservations come here.
853  *
854  * We first have drop_overlaps_that_are_ok() drop any pre-existing
855  * 'overlap_ok' ranges, so that we can then reserve this memory
856  * range without risk of panic'ing on an overlapping overlap_ok
857  * early reservation.
858  */
859 void __init reserve_early(u64 start, u64 end, char *name)
860 {
861 	drop_overlaps_that_are_ok(start, end);
862 	__reserve_early(start, end, name, 0);
863 }
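
/*
 * Usage sketch (the names and range are illustrative, not from this file):
 * early boot code that has just placed a firmware table at a physical range
 * protects it with
 *
 *	reserve_early(table_phys, table_phys + table_size, "MY TABLE");
 *
 * and releases it again, once copied elsewhere, with
 *
 *	free_early(table_phys, table_phys + table_size);
 */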
864 
865 void __init free_early(u64 start, u64 end)
866 {
867 	struct early_res *r;
868 	int i;
869 
870 	i = find_overlapped_early(start, end);
871 	r = &early_res[i];
872 	if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
873 		panic("free_early on not reserved area: %llx-%llx!",
874 			 start, end - 1);
875 
876 	drop_range(i);
877 }
878 
879 void __init early_res_to_bootmem(u64 start, u64 end)
880 {
881 	int i, count;
882 	u64 final_start, final_end;
883 
884 	count  = 0;
885 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
886 		count++;
887 
888 	printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
889 			 count, start, end);
890 	for (i = 0; i < count; i++) {
891 		struct early_res *r = &early_res[i];
892 		printk(KERN_INFO "  #%d [%010llx - %010llx] %16s", i,
893 			r->start, r->end, r->name);
894 		final_start = max(start, r->start);
895 		final_end = min(end, r->end);
896 		if (final_start >= final_end) {
897 			printk(KERN_CONT "\n");
898 			continue;
899 		}
900 		printk(KERN_CONT " ==> [%010llx - %010llx]\n",
901 			final_start, final_end);
902 		reserve_bootmem_generic(final_start, final_end - final_start,
903 				BOOTMEM_DEFAULT);
904 	}
905 }
906 
907 /* Check for already reserved areas */
908 static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
909 {
910 	int i;
911 	u64 addr = *addrp;
912 	int changed = 0;
913 	struct early_res *r;
914 again:
915 	i = find_overlapped_early(addr, addr + size);
916 	r = &early_res[i];
917 	if (i < MAX_EARLY_RES && r->end) {
918 		*addrp = addr = round_up(r->end, align);
919 		changed = 1;
920 		goto again;
921 	}
922 	return changed;
923 }
924 
925 /* Check for already reserved areas */
926 static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
927 {
928 	int i;
929 	u64 addr = *addrp, last;
930 	u64 size = *sizep;
931 	int changed = 0;
932 again:
933 	last = addr + size;
934 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
935 		struct early_res *r = &early_res[i];
936 		if (last > r->start && addr < r->start) {
937 			size = r->start - addr;
938 			changed = 1;
939 			goto again;
940 		}
941 		if (last > r->end && addr < r->end) {
942 			addr = round_up(r->end, align);
943 			size = last - addr;
944 			changed = 1;
945 			goto again;
946 		}
947 		if (last <= r->end && addr >= r->start) {
948 			(*sizep)++;
949 			return 0;
950 		}
951 	}
952 	if (changed) {
953 		*addrp = addr;
954 		*sizep = size;
955 	}
956 	return changed;
957 }
958 
959 /*
960  * Find a free area with specified alignment in a specific range.
961  */
962 u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
963 {
964 	int i;
965 
966 	for (i = 0; i < e820.nr_map; i++) {
967 		struct e820entry *ei = &e820.map[i];
968 		u64 addr, last;
969 		u64 ei_last;
970 
971 		if (ei->type != E820_RAM)
972 			continue;
973 		addr = round_up(ei->addr, align);
974 		ei_last = ei->addr + ei->size;
975 		if (addr < start)
976 			addr = round_up(start, align);
977 		if (addr >= ei_last)
978 			continue;
979 		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
980 			;
981 		last = addr + size;
982 		if (last > ei_last)
983 			continue;
984 		if (last > end)
985 			continue;
986 		return addr;
987 	}
988 	return -1ULL;
989 }
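
/*
 * Usage sketch (sizes are hypothetical): to grab 64KB of page-aligned RAM
 * somewhere below 16MB, while steering clear of everything in early_res[]:
 *
 *	u64 addr = find_e820_area(0, 16*1024*1024, 0x10000, PAGE_SIZE);
 *	if (addr == -1ULL)
 *		panic("no room");
 *	reserve_early(addr, addr + 0x10000, "MY BUFFER");
 *
 * The explicit reserve_early() is needed because find_e820_area() only
 * searches; it does not reserve anything by itself.
 */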
990 
991 /*
992  * Find next free range after *start
993  */
994 u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
995 {
996 	int i;
997 
998 	for (i = 0; i < e820.nr_map; i++) {
999 		struct e820entry *ei = &e820.map[i];
1000 		u64 addr, last;
1001 		u64 ei_last;
1002 
1003 		if (ei->type != E820_RAM)
1004 			continue;
1005 		addr = round_up(ei->addr, align);
1006 		ei_last = ei->addr + ei->size;
1007 		if (addr < start)
1008 			addr = round_up(start, align);
1009 		if (addr >= ei_last)
1010 			continue;
1011 		*sizep = ei_last - addr;
1012 		while (bad_addr_size(&addr, sizep, align) &&
1013 			addr + *sizep <= ei_last)
1014 			;
1015 		last = addr + *sizep;
1016 		if (last > ei_last)
1017 			continue;
1018 		return addr;
1019 	}
1020 	return -1UL;
1021 
1022 }
1023 
1024 /*
1025  * Pre-allocate a small block (typically 4k) and mark it reserved in the e820 map.
1026  */
1027 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
1028 {
1029 	u64 size = 0;
1030 	u64 addr;
1031 	u64 start;
1032 
1033 	start = startt;
1034 	while (size < sizet)
1035 		start = find_e820_area_size(start, &size, align);
1036 
1037 	if (size < sizet)
1038 		return 0;
1039 
1040 	addr = round_down(start + size - sizet, align);
1041 	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
1042 	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
1043 	printk(KERN_INFO "update e820 for early_reserve_e820\n");
1044 	update_e820();
1045 	update_e820_saved();
1046 
1047 	return addr;
1048 }
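
/*
 * Usage sketch (start/size/align are only an example):
 *
 *	u64 mem = early_reserve_e820(0x100000, PAGE_SIZE, PAGE_SIZE);
 *
 * On success the returned address is already marked E820_RESERVED in both
 * e820 and e820_saved; a return value of 0 means no suitable RAM was found.
 */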
1049 
1050 #ifdef CONFIG_X86_32
1051 # ifdef CONFIG_X86_PAE
1052 #  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
1053 # else
1054 #  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
1055 # endif
1056 #else /* CONFIG_X86_32 */
1057 # define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
1058 #endif
1059 
1060 /*
1061  * Find the highest page frame number we have available
1062  */
1063 static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
1064 {
1065 	int i;
1066 	unsigned long last_pfn = 0;
1067 	unsigned long max_arch_pfn = MAX_ARCH_PFN;
1068 
1069 	for (i = 0; i < e820.nr_map; i++) {
1070 		struct e820entry *ei = &e820.map[i];
1071 		unsigned long start_pfn;
1072 		unsigned long end_pfn;
1073 
1074 		if (ei->type != type)
1075 			continue;
1076 
1077 		start_pfn = ei->addr >> PAGE_SHIFT;
1078 		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
1079 
1080 		if (start_pfn >= limit_pfn)
1081 			continue;
1082 		if (end_pfn > limit_pfn) {
1083 			last_pfn = limit_pfn;
1084 			break;
1085 		}
1086 		if (end_pfn > last_pfn)
1087 			last_pfn = end_pfn;
1088 	}
1089 
1090 	if (last_pfn > max_arch_pfn)
1091 		last_pfn = max_arch_pfn;
1092 
1093 	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
1094 			 last_pfn, max_arch_pfn);
1095 	return last_pfn;
1096 }
1097 unsigned long __init e820_end_of_ram_pfn(void)
1098 {
1099 	return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
1100 }
1101 
1102 unsigned long __init e820_end_of_low_ram_pfn(void)
1103 {
1104 	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
1105 }
1106 /*
1107  * Finds an active region in the address range from start_pfn to last_pfn and
1108  * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
1109  */
1110 int __init e820_find_active_region(const struct e820entry *ei,
1111 				  unsigned long start_pfn,
1112 				  unsigned long last_pfn,
1113 				  unsigned long *ei_startpfn,
1114 				  unsigned long *ei_endpfn)
1115 {
1116 	u64 align = PAGE_SIZE;
1117 
1118 	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
1119 	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
1120 
1121 	/* Skip map entries smaller than a page */
1122 	if (*ei_startpfn >= *ei_endpfn)
1123 		return 0;
1124 
1125 	/* Skip if map is outside the node */
1126 	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
1127 				    *ei_startpfn >= last_pfn)
1128 		return 0;
1129 
1130 	/* Check for overlaps */
1131 	if (*ei_startpfn < start_pfn)
1132 		*ei_startpfn = start_pfn;
1133 	if (*ei_endpfn > last_pfn)
1134 		*ei_endpfn = last_pfn;
1135 
1136 	return 1;
1137 }
1138 
1139 /* Walk the e820 map and register active regions within a node */
1140 void __init e820_register_active_regions(int nid, unsigned long start_pfn,
1141 					 unsigned long last_pfn)
1142 {
1143 	unsigned long ei_startpfn;
1144 	unsigned long ei_endpfn;
1145 	int i;
1146 
1147 	for (i = 0; i < e820.nr_map; i++)
1148 		if (e820_find_active_region(&e820.map[i],
1149 					    start_pfn, last_pfn,
1150 					    &ei_startpfn, &ei_endpfn))
1151 			add_active_range(nid, ei_startpfn, ei_endpfn);
1152 }
1153 
1154 /*
1155  * Find the hole size (in bytes) in the memory range.
1156  * @start: starting address of the memory range to scan
1157  * @end: ending address of the memory range to scan
1158  */
1159 u64 __init e820_hole_size(u64 start, u64 end)
1160 {
1161 	unsigned long start_pfn = start >> PAGE_SHIFT;
1162 	unsigned long last_pfn = end >> PAGE_SHIFT;
1163 	unsigned long ei_startpfn, ei_endpfn, ram = 0;
1164 	int i;
1165 
1166 	for (i = 0; i < e820.nr_map; i++) {
1167 		if (e820_find_active_region(&e820.map[i],
1168 					    start_pfn, last_pfn,
1169 					    &ei_startpfn, &ei_endpfn))
1170 			ram += ei_endpfn - ei_startpfn;
1171 	}
1172 	return end - start - ((u64)ram << PAGE_SHIFT);
1173 }
1174 
1175 static void early_panic(char *msg)
1176 {
1177 	early_printk(msg);
1178 	panic(msg);
1179 }
1180 
1181 static int userdef __initdata;
1182 
1183 /* "mem=nopentium" disables the 4MB page tables; "mem=<size>" caps usable RAM at <size>. */
1184 static int __init parse_memopt(char *p)
1185 {
1186 	u64 mem_size;
1187 
1188 	if (!p)
1189 		return -EINVAL;
1190 
1191 #ifdef CONFIG_X86_32
1192 	if (!strcmp(p, "nopentium")) {
1193 		setup_clear_cpu_cap(X86_FEATURE_PSE);
1194 		return 0;
1195 	}
1196 #endif
1197 
1198 	userdef = 1;
1199 	mem_size = memparse(p, &p);
1200 	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
1201 
1202 	return 0;
1203 }
1204 early_param("mem", parse_memopt);
1205 
1206 static int __init parse_memmap_opt(char *p)
1207 {
1208 	char *oldp;
1209 	u64 start_at, mem_size;
1210 
1211 	if (!p)
1212 		return -EINVAL;
1213 
1214 	if (!strncmp(p, "exactmap", 8)) {
1215 #ifdef CONFIG_CRASH_DUMP
1216 		/*
1217 		 * If we are doing a crash dump, we still need to know
1218 		 * the real mem size before original memory map is
1219 		 * reset.
1220 		 */
1221 		saved_max_pfn = e820_end_of_ram_pfn();
1222 #endif
1223 		e820.nr_map = 0;
1224 		userdef = 1;
1225 		return 0;
1226 	}
1227 
1228 	oldp = p;
1229 	mem_size = memparse(p, &p);
1230 	if (p == oldp)
1231 		return -EINVAL;
1232 
1233 	userdef = 1;
1234 	if (*p == '@') {
1235 		start_at = memparse(p+1, &p);
1236 		e820_add_region(start_at, mem_size, E820_RAM);
1237 	} else if (*p == '#') {
1238 		start_at = memparse(p+1, &p);
1239 		e820_add_region(start_at, mem_size, E820_ACPI);
1240 	} else if (*p == '$') {
1241 		start_at = memparse(p+1, &p);
1242 		e820_add_region(start_at, mem_size, E820_RESERVED);
1243 	} else
1244 		e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
1245 
1246 	return *p == '\0' ? 0 : -EINVAL;
1247 }
1248 early_param("memmap", parse_memmap_opt);
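
/*
 * Examples of the boot options parsed above (all values hypothetical):
 *
 *	mem=512M		trim all E820_RAM above 512MB
 *	memmap=64M@16M		add 64MB of E820_RAM starting at 16MB
 *	memmap=64M#16M		add 64MB of E820_ACPI starting at 16MB
 *	memmap=64M$16M		add 64MB of E820_RESERVED starting at 16MB
 *	memmap=exactmap		clear the map and rebuild it from later
 *				memmap= options only
 *	memmap=512M		like mem=512M, trim RAM above 512MB
 *
 * finish_e820_parsing() below re-sanitizes the map once all options are in.
 */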
1249 
1250 void __init finish_e820_parsing(void)
1251 {
1252 	if (userdef) {
1253 		int nr = e820.nr_map;
1254 
1255 		if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
1256 			early_panic("Invalid user supplied memory map");
1257 		e820.nr_map = nr;
1258 
1259 		printk(KERN_INFO "user-defined physical RAM map:\n");
1260 		e820_print_map("user");
1261 	}
1262 }
1263 
1264 static inline const char *e820_type_to_string(int e820_type)
1265 {
1266 	switch (e820_type) {
1267 	case E820_RESERVED_KERN:
1268 	case E820_RAM:	return "System RAM";
1269 	case E820_ACPI:	return "ACPI Tables";
1270 	case E820_NVS:	return "ACPI Non-volatile Storage";
1271 	case E820_UNUSABLE:	return "Unusable memory";
1272 	default:	return "reserved";
1273 	}
1274 }
1275 
1276 /*
1277  * Mark e820 reserved areas as busy for the resource manager.
1278  */
1279 static struct resource __initdata *e820_res;
1280 void __init e820_reserve_resources(void)
1281 {
1282 	int i;
1283 	struct resource *res;
1284 	u64 end;
1285 
1286 	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
1287 	e820_res = res;
1288 	for (i = 0; i < e820.nr_map; i++) {
1289 		end = e820.map[i].addr + e820.map[i].size - 1;
1290 		if (end != (resource_size_t)end) {
1291 			res++;
1292 			continue;
1293 		}
1294 		res->name = e820_type_to_string(e820.map[i].type);
1295 		res->start = e820.map[i].addr;
1296 		res->end = end;
1297 
1298 		res->flags = IORESOURCE_MEM;
1299 
1300 		/*
1301 		 * don't register regions that could conflict with a PCI
1302 		 * device's BAR resource; those are inserted later in
1303 		 * pcibios_resource_survey()
1304 		 */
1305 		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
1306 			res->flags |= IORESOURCE_BUSY;
1307 			insert_resource(&iomem_resource, res);
1308 		}
1309 		res++;
1310 	}
1311 
1312 	for (i = 0; i < e820_saved.nr_map; i++) {
1313 		struct e820entry *entry = &e820_saved.map[i];
1314 		firmware_map_add_early(entry->addr,
1315 			entry->addr + entry->size - 1,
1316 			e820_type_to_string(entry->type));
1317 	}
1318 }
1319 
1320 void __init e820_reserve_resources_late(void)
1321 {
1322 	int i;
1323 	struct resource *res;
1324 
1325 	res = e820_res;
1326 	for (i = 0; i < e820.nr_map; i++) {
1327 		if (!res->parent && res->end)
1328 			insert_resource_expand_to_fit(&iomem_resource, res);
1329 		res++;
1330 	}
1331 }
1332 
1333 char *__init default_machine_specific_memory_setup(void)
1334 {
1335 	char *who = "BIOS-e820";
1336 	int new_nr;
1337 	/*
1338 	 * Try to copy the BIOS-supplied E820-map.
1339 	 *
1340 	 * Otherwise fake a memory map; one section from 0k->640k,
1341 	 * the next section from 1mb->appropriate_mem_k
1342 	 */
1343 	new_nr = boot_params.e820_entries;
1344 	sanitize_e820_map(boot_params.e820_map,
1345 			ARRAY_SIZE(boot_params.e820_map),
1346 			&new_nr);
1347 	boot_params.e820_entries = new_nr;
1348 	if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
1349 	  < 0) {
1350 		u64 mem_size;
1351 
1352 		/* compare results from other methods and take the greater */
1353 		if (boot_params.alt_mem_k
1354 		    < boot_params.screen_info.ext_mem_k) {
1355 			mem_size = boot_params.screen_info.ext_mem_k;
1356 			who = "BIOS-88";
1357 		} else {
1358 			mem_size = boot_params.alt_mem_k;
1359 			who = "BIOS-e801";
1360 		}
1361 
1362 		e820.nr_map = 0;
1363 		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
1364 		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
1365 	}
1366 
1367 	/* In case someone cares... */
1368 	return who;
1369 }
1370 
1371 char *__init __attribute__((weak)) machine_specific_memory_setup(void)
1372 {
1373 	if (x86_quirks->arch_memory_setup) {
1374 		char *who = x86_quirks->arch_memory_setup();
1375 
1376 		if (who)
1377 			return who;
1378 	}
1379 	return default_machine_specific_memory_setup();
1380 }
1381 
1382 /* Overridden in paravirt.c if CONFIG_PARAVIRT */
1383 char * __init __attribute__((weak)) memory_setup(void)
1384 {
1385 	return machine_specific_memory_setup();
1386 }
1387 
1388 void __init setup_memory_map(void)
1389 {
1390 	char *who;
1391 
1392 	who = memory_setup();
1393 	memcpy(&e820_saved, &e820, sizeof(struct e820map));
1394 	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
1395 	e820_print_map(who);
1396 }
1397