// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

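/*
 * Parse the "xen_512gb_limit" option from the Xen-supplied command line.
 * The bare option enables the limit; "xen_512gb_limit=<bool>" sets it
 * explicitly. A malformed value leaves the compile-time default untouched.
 */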
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

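/*
 * Record a pfn range as extra memory: either append it to an adjacent
 * existing region or start a new one. The range is also reserved in
 * memblock so the early allocator keeps its hands off it.
 */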
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for zero size: that should happen only rarely
	 * and would merely write a new entry which is regarded as unused
	 * due to its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

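/*
 * Remove a pfn range from the extra memory table. The range may lie at the
 * start, at the end, or in the middle of a region; in the last case the
 * region is split in two. The range is then handed back to memblock.
 */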
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

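/*
 * Hand a single mfn back to the hypervisor. Returns the number of extents
 * freed, i.e. 1 on success.
 */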
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else {
			break;
		}
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

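/*
 * Callback for xen_foreach_remap_area(): count the pages of a non-RAM
 * region which lie below the nr_pages limit and will thus need remapping.
 */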
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

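/*
 * Walk the E820 map and call func() for each combined non-RAM range,
 * threading the previous return value through as last_val. Returns the
 * final return value of func().
 */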
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the to be remapped memory itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order; the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

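/*
 * Return the upper bound on usable pages: MAXMEM, further capped at 512GB
 * for guest domains when xen_512gb_limit is set.
 */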
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

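/*
 * Add a region to the kernel's E820 map. RAM regions are shrunk to whole
 * pages first, so no partially usable page is reported as RAM.
 */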
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

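/*
 * Convert UNUSABLE entries of the Xen-supplied map to RAM; see the comment
 * in xen_memory_setup() for why this is done for the initial domain.
 */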
static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

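/*
 * Check a physical address range against the Xen-supplied E820 map:
 * return true (reserved) unless the whole range lies within a single RAM
 * entry.
 */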
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory which is not yet reserved and which
 * complies with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map that is to be used.
 * If no area is found, return 0; otherwise return the physical address of
 * the area, which is already reserved for the caller's convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned int mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_free(start, size);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 */
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else {
				discard = true;
			}
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

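/*
 * Register a kernel entry point for the given callback type with the
 * hypervisor, with events masked on entry.
 */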
static int register_callback(unsigned int type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

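/*
 * Register the SYSENTER entry point used by 32-bit compat userspace. If
 * the hypervisor refuses the callback, clear the feature bit so userspace
 * won't try to use SYSENTER.
 */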
void xen_enable_sysenter(void)
{
	int ret;
	unsigned int sysenter_feature;

	sysenter_feature = X86_FEATURE_SYSENTER32;

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

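/*
 * Register the SYSCALL entry points. The 64-bit callback is essential for
 * 64-bit userspace; the 32-bit compat one is optional and its feature bit
 * is cleared if registration fails.
 */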
void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/*
		 * Pretty fatal; 64-bit userspace has no other
		 * mechanism for syscalls.
		 */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_entry_SYSCALL_compat);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
}

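/*
 * PV MMU specific setup: enable the VM assists the kernel relies on and
 * register the event and failsafe callbacks. Failure here is fatal.
 */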
static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}
991