1 /*P:700
2  * The pagetable code, on the other hand, still shows the scars of
3  * previous encounters.  It's functional, and as neat as it can be in the
4  * circumstances, but be wary, for these things are subtle and break easily.
5  * The Guest provides a virtual to physical mapping, but we can neither trust
6  * it nor use it: we verify and convert it here then point the CPU to the
7  * converted Guest pages when running the Guest.
8 :*/
9 
10 /* Copyright (C) Rusty Russell IBM Corporation 2013.
11  * GPL v2 and any later version */
12 #include <linux/mm.h>
13 #include <linux/gfp.h>
14 #include <linux/types.h>
15 #include <linux/spinlock.h>
16 #include <linux/random.h>
17 #include <linux/percpu.h>
18 #include <asm/tlbflush.h>
19 #include <asm/uaccess.h>
20 #include "lg.h"
21 
22 /*M:008
23  * We hold a reference to pages, which prevents them from being swapped.
24  * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
25  * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
26  * could probably consider launching Guests as non-root.
27 :*/
28 
29 /*H:300
30  * The Page Table Code
31  *
32  * We use two-level page tables for the Guest, or three-level with PAE.  If
33  * you're not entirely comfortable with virtual addresses, physical addresses
34  * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
35  * Table Handling" (with diagrams!).
36  *
37  * The Guest keeps page tables, but we maintain the actual ones here: these are
38  * called "shadow" page tables.  Which is a very Guest-centric name: these are
39  * the real page tables the CPU uses, although we keep them up to date to
40  * reflect the Guest's.  (See what I mean about weird naming?  Since when do
41  * shadows reflect anything?)
42  *
43  * Anyway, this is the most complicated part of the Host code.  There are seven
44  * parts to this:
45  *  (i) Looking up a page table entry when the Guest faults,
46  *  (ii) Making sure the Guest stack is mapped,
47  *  (iii) Setting up a page table entry when the Guest tells us one has changed,
48  *  (iv) Switching page tables,
49  *  (v) Flushing (throwing away) page tables,
50  *  (vi) Mapping the Switcher when the Guest is about to run,
51  *  (vii) Setting up the page tables initially.
52 :*/
53 
54 /*
55  * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
56  * or 512 PTE entries with PAE (2MB).
57  */
58 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
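Those 4MB and 2MB figures are just the entry count multiplied by the 4KB page size; here is a quick stand-alone check in ordinary user-space C (not part of lguest, the constants are simply the standard x86 values):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;

	/* One PTE page maps PTRS_PER_PTE pages of memory. */
	printf("non-PAE: 1024 entries -> %lu MB\n", 1024 * page_size >> 20);
	printf("PAE:      512 entries -> %lu MB\n",  512 * page_size >> 20);
	return 0;
}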
59 
60 /*
61  * The flags the Guest may set in a top-level (PGD) entry differ with PAE:
62  * only _PAGE_PRESENT is allowed there, rather than the full _PAGE_TABLE set.
63  */
64 #ifdef CONFIG_X86_PAE
65 #define CHECK_GPGD_MASK		_PAGE_PRESENT
66 #else
67 #define CHECK_GPGD_MASK		_PAGE_TABLE
68 #endif
69 
70 /*H:320
71  * The page table code is curly enough to need helper functions to keep it
72  * clear and clean.  The kernel itself provides many of them; that's one advantage
73  * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
74  *
75  * There are two functions which return pointers to the shadow (aka "real")
76  * page tables.
77  *
78  * spgd_addr() takes the virtual address and returns a pointer to the top-level
79  * page directory entry (PGD) for that address.  Since we keep track of several
80  * page tables, the "i" argument tells us which one we're interested in (it's
81  * usually the current one).
82  */
83 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
84 {
85 	unsigned int index = pgd_index(vaddr);
86 
87 	/* Return a pointer to the index'th pgd entry for the i'th page table. */
88 	return &cpu->lg->pgdirs[i].pgdir[index];
89 }
90 
91 #ifdef CONFIG_X86_PAE
92 /*
93  * This routine then takes the PGD entry given above, which contains the
94  * address of the PMD page.  It then returns a pointer to the PMD entry for the
95  * given address.
96  */
97 static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
98 {
99 	unsigned int index = pmd_index(vaddr);
100 	pmd_t *page;
101 
102 	/* You should never call this if the PGD entry wasn't valid */
103 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
104 	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
105 
106 	return &page[index];
107 }
108 #endif
109 
110 /*
111  * This routine then takes the page directory entry returned above, which
112  * contains the address of the page table entry (PTE) page.  It then returns a
113  * pointer to the PTE entry for the given address.
114  */
115 static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
116 {
117 #ifdef CONFIG_X86_PAE
118 	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
119 	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
120 
121 	/* You should never call this if the PMD entry wasn't valid */
122 	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
123 #else
124 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
125 	/* You should never call this if the PGD entry wasn't valid */
126 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
127 #endif
128 
129 	return &page[pte_index(vaddr)];
130 }
131 
132 /*
133  * These functions are just like the above, except they access the Guest
134  * page tables.  Hence they return a Guest address.
135  */
136 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
137 {
138 	unsigned int index = vaddr >> (PGDIR_SHIFT);
139 	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
140 }
141 
142 #ifdef CONFIG_X86_PAE
143 /* Follow the PGD to the PMD. */
144 static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
145 {
146 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
147 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
148 	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
149 }
150 
151 /* Follow the PMD to the PTE. */
152 static unsigned long gpte_addr(struct lg_cpu *cpu,
153 			       pmd_t gpmd, unsigned long vaddr)
154 {
155 	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
156 
157 	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
158 	return gpage + pte_index(vaddr) * sizeof(pte_t);
159 }
160 #else
161 /* Follow the PGD to the PTE (no mid-level for !PAE). */
162 static unsigned long gpte_addr(struct lg_cpu *cpu,
163 				pgd_t gpgd, unsigned long vaddr)
164 {
165 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
166 
167 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
168 	return gpage + pte_index(vaddr) * sizeof(pte_t);
169 }
170 #endif
171 /*:*/
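To see what these helpers are actually indexing with, here is how a 32-bit virtual address splits up in the non-PAE case.  This is a user-space sketch, not lguest code; the SKETCH_* constants just mirror the standard x86 values behind pgd_index() and pte_index():

#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12	/* 4KB pages */
#define SKETCH_PGDIR_SHIFT	22	/* each PGD entry covers 4MB */
#define SKETCH_PTRS_PER_PTE	1024

int main(void)
{
	unsigned long vaddr = 0xc0123456;	/* a typical kernel address */
	unsigned long pgd = vaddr >> SKETCH_PGDIR_SHIFT;
	unsigned long pte = (vaddr >> SKETCH_PAGE_SHIFT) & (SKETCH_PTRS_PER_PTE - 1);
	unsigned long off = vaddr & ((1UL << SKETCH_PAGE_SHIFT) - 1);

	printf("%#lx -> pgd index %lu, pte index %lu, page offset %#lx\n",
	       vaddr, pgd, pte, off);
	return 0;
}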
172 
173 /*M:007
174  * get_pfn is slow: we could probably try to grab batches of pages here as
175  * an optimization (ie. pre-faulting).
176 :*/
177 
178 /*H:350
179  * This routine takes a page number given by the Guest and converts it to
180  * an actual, physical page number.  It can fail for several reasons: the
181  * virtual address might not be mapped by the Launcher, the write flag is set
182  * and the page is read-only, or the write flag was set and the page was
183  * shared so had to be copied, but we ran out of memory.
184  *
185  * This holds a reference to the page, so release_pte() is careful to put that
186  * back.
187  */
188 static unsigned long get_pfn(unsigned long virtpfn, int write)
189 {
190 	struct page *page;
191 
192 	/* gup me one page at this address please! */
193 	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
194 		return page_to_pfn(page);
195 
196 	/* This value indicates failure. */
197 	return -1UL;
198 }
199 
200 /*H:340
201  * Converting a Guest page table entry to a shadow (ie. real) page table
202  * entry can be a little tricky.  The flags are (almost) the same, but the
203  * Guest PTE contains a virtual page number: the CPU needs the real page
204  * number.
205  */
206 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
207 {
208 	unsigned long pfn, base, flags;
209 
210 	/*
211 	 * The Guest sets the global flag, because it thinks that it is using
212 	 * PGE.  We only told it to use PGE so it would tell us whether it was
213 	 * flushing a kernel mapping or a userspace mapping.  We don't actually
214 	 * use the global bit, so throw it away.
215 	 */
216 	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
217 
218 	/* The Guest's pages are offset inside the Launcher. */
219 	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
220 
221 	/*
222 	 * We need a temporary "unsigned long" variable to hold the answer from
223 	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
224 	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
225 	 * page, given the virtual number.
226 	 */
227 	pfn = get_pfn(base + pte_pfn(gpte), write);
228 	if (pfn == -1UL) {
229 		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
230 		/*
231 		 * When we destroy the Guest, we'll go through the shadow page
232 		 * tables and release_pte() them.  Make sure we don't think
233 		 * this one is valid!
234 		 */
235 		flags = 0;
236 	}
237 	/* Now we assemble our shadow PTE from the page number and flags. */
238 	return pfn_pte(pfn, __pgprot(flags));
239 }
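The page-number translation above boils down to a single addition before get_pfn() pins the page.  A rough user-space sketch, with made-up numbers rather than a real Launcher layout:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long mem_base = 0x40000000;	/* hypothetical Launcher-virtual base of Guest RAM */
	unsigned long gpfn = 0x100;		/* page number taken from the Guest's PTE */

	/* The Guest's "physical" page really lives here inside the Launcher: */
	unsigned long launcher_pfn = (mem_base >> PAGE_SHIFT) + gpfn;

	printf("Guest pfn %#lx -> Launcher-virtual pfn %#lx (address %#lx)\n",
	       gpfn, launcher_pfn, launcher_pfn << PAGE_SHIFT);
	/* get_pfn() then pins that page and hands back the real physical pfn. */
	return 0;
}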
240 
241 /*H:460 And to complete the chain, release_pte() looks like this: */
242 static void release_pte(pte_t pte)
243 {
244 	/*
245 	 * Remember that get_user_pages_fast() took a reference to the page, in
246 	 * get_pfn()?  We have to put it back now.
247 	 */
248 	if (pte_flags(pte) & _PAGE_PRESENT)
249 		put_page(pte_page(pte));
250 }
251 /*:*/
252 
253 static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
254 {
255 	/* We don't handle large pages. */
256 	if (pte_flags(gpte) & _PAGE_PSE)
257 		return false;
258 
259 	return (pte_pfn(gpte) >= cpu->lg->pfn_limit
260 		&& pte_pfn(gpte) < cpu->lg->device_limit);
261 }
262 
263 static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
264 {
265 	if ((pte_flags(gpte) & _PAGE_PSE) ||
266 	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
267 		kill_guest(cpu, "bad page table entry");
268 		return false;
269 	}
270 	return true;
271 }
272 
273 static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
274 {
275 	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
276 	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
277 		kill_guest(cpu, "bad page directory entry");
278 		return false;
279 	}
280 	return true;
281 }
282 
283 #ifdef CONFIG_X86_PAE
284 static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
285 {
286 	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
287 	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
288 		kill_guest(cpu, "bad page middle directory entry");
289 		return false;
290 	}
291 	return true;
292 }
293 #endif
294 
295 /*H:331
296  * This is the core routine to walk the shadow page tables and find the page
297  * table entry for a specific address.
298  *
299  * If allocate is set, then we allocate any missing levels, setting the flags
300  * on the new page directory and mid-level directories using the arguments
301  * (which are copied from the Guest's page table entries).
302  */
303 static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
304 			int pgd_flags, int pmd_flags)
305 {
306 	pgd_t *spgd;
307 	/* Mid level for PAE. */
308 #ifdef CONFIG_X86_PAE
309 	pmd_t *spmd;
310 #endif
311 
312 	/* Get top level entry. */
313 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
314 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
315 		/* No shadow entry: allocate a new shadow PTE page. */
316 		unsigned long ptepage;
317 
318 		/* If they didn't want us to allocate anything, stop. */
319 		if (!allocate)
320 			return NULL;
321 
322 		ptepage = get_zeroed_page(GFP_KERNEL);
323 		/*
324 		 * This is not really the Guest's fault, but killing it is
325 		 * simple for this corner case.
326 		 */
327 		if (!ptepage) {
328 			kill_guest(cpu, "out of memory allocating pte page");
329 			return NULL;
330 		}
331 		/*
332 		 * And we copy the flags to the shadow PGD entry.  The page
333 		 * number in the shadow PGD is the page we just allocated.
334 		 */
335 		set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
336 	}
337 
338 	/*
339 	 * Intel's Physical Address Extension actually uses three levels of
340 	 * page tables, so we need to look in the mid-level.
341 	 */
342 #ifdef CONFIG_X86_PAE
343 	/* Now look at the mid-level shadow entry. */
344 	spmd = spmd_addr(cpu, *spgd, vaddr);
345 
346 	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
347 		/* No shadow entry: allocate a new shadow PTE page. */
348 		unsigned long ptepage;
349 
350 		/* If they didn't want us to allocate anything, stop. */
351 		if (!allocate)
352 			return NULL;
353 
354 		ptepage = get_zeroed_page(GFP_KERNEL);
355 
356 		/*
357 		 * This is not really the Guest's fault, but killing it is
358 		 * simple for this corner case.
359 		 */
360 		if (!ptepage) {
361 			kill_guest(cpu, "out of memory allocating pmd page");
362 			return NULL;
363 		}
364 
365 		/*
366 		 * And we copy the flags to the shadow PMD entry.  The page
367 		 * number in the shadow PMD is the page we just allocated.
368 		 */
369 		set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
370 	}
371 #endif
372 
373 	/* Get the pointer to the shadow PTE entry we're going to set. */
374 	return spte_addr(cpu, *spgd, vaddr);
375 }
376 
377 /*H:330
378  * (i) Looking up a page table entry when the Guest faults.
379  *
380  * We saw this call in run_guest(): when we see a page fault in the Guest, we
381  * come here.  That's because we only set up the shadow page tables lazily as
382  * they're needed, so we get page faults all the time and quietly fix them up
383  * and return to the Guest without it knowing.
384  *
385  * If we fixed up the fault (ie. we mapped the address), this routine returns
386  * true.  Otherwise, it was a real fault and we need to tell the Guest.
387  *
388  * There's a corner case: they're trying to access memory between
389  * pfn_limit and device_limit, which is I/O memory.  In this case, we
390  * return false and set @iomem to the physical address, so that the
391  * Launcher can handle the instruction manually.
392  */
393 bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
394 		 unsigned long *iomem)
395 {
396 	unsigned long gpte_ptr;
397 	pte_t gpte;
398 	pte_t *spte;
399 	pmd_t gpmd;
400 	pgd_t gpgd;
401 
402 	*iomem = 0;
403 
404 	/* We never demand page the Switcher, so trying is a mistake. */
405 	if (vaddr >= switcher_addr)
406 		return false;
407 
408 	/* First step: get the top-level Guest page table entry. */
409 	if (unlikely(cpu->linear_pages)) {
410 		/* Faking up a linear mapping. */
411 		gpgd = __pgd(CHECK_GPGD_MASK);
412 	} else {
413 		gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
414 		/* Toplevel not present?  We can't map it in. */
415 		if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
416 			return false;
417 
418 		/*
419 		 * This kills the Guest if it has weird flags or tries to
420 		 * refer to a "physical" address outside the bounds.
421 		 */
422 		if (!check_gpgd(cpu, gpgd))
423 			return false;
424 	}
425 
426 	/* This "mid-level" entry is only used for non-linear, PAE mode. */
427 	gpmd = __pmd(_PAGE_TABLE);
428 
429 #ifdef CONFIG_X86_PAE
430 	if (likely(!cpu->linear_pages)) {
431 		gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
432 		/* Middle level not present?  We can't map it in. */
433 		if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
434 			return false;
435 
436 		/*
437 		 * This kills the Guest if it has weird flags or tries to
438 		 * refer to a "physical" address outside the bounds.
439 		 */
440 		if (!check_gpmd(cpu, gpmd))
441 			return false;
442 	}
443 
444 	/*
445 	 * OK, now we look at the lower level in the Guest page table: keep its
446 	 * address, because we might update it later.
447 	 */
448 	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
449 #else
450 	/*
451 	 * OK, now we look at the lower level in the Guest page table: keep its
452 	 * address, because we might update it later.
453 	 */
454 	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
455 #endif
456 
457 	if (unlikely(cpu->linear_pages)) {
458 		/* Linear?  Make up a PTE which points to same page. */
459 		gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
460 	} else {
461 		/* Read the actual PTE value. */
462 		gpte = lgread(cpu, gpte_ptr, pte_t);
463 	}
464 
465 	/* If this page isn't in the Guest page tables, we can't page it in. */
466 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
467 		return false;
468 
469 	/*
470 	 * Check they're not trying to write to a page the Guest wants
471 	 * read-only (bit 2 of errcode == write).
472 	 */
473 	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
474 		return false;
475 
476 	/* User access to a kernel-only page? (bit 3 == user access) */
477 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
478 		return false;
479 
480 	/* If they're accessing io memory, we expect a fault. */
481 	if (gpte_in_iomem(cpu, gpte)) {
482 		*iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
483 		return false;
484 	}
485 
486 	/*
487 	 * Check that the Guest PTE flags are OK, and the page number is below
488 	 * the pfn_limit (ie. not mapping the Launcher binary).
489 	 */
490 	if (!check_gpte(cpu, gpte))
491 		return false;
492 
493 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
494 	gpte = pte_mkyoung(gpte);
495 	if (errcode & 2)
496 		gpte = pte_mkdirty(gpte);
497 
498 	/* Get the pointer to the shadow PTE entry we're going to set. */
499 	spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
500 	if (!spte)
501 		return false;
502 
503 	/*
504 	 * If there was a valid shadow PTE entry here before, we release it.
505 	 * This can happen with a write to a previously read-only entry.
506 	 */
507 	release_pte(*spte);
508 
509 	/*
510 	 * If this is a write, we insist that the Guest page is writable (the
511 	 * final arg to gpte_to_spte()).
512 	 */
513 	if (pte_dirty(gpte))
514 		*spte = gpte_to_spte(cpu, gpte, 1);
515 	else
516 		/*
517 		 * If this is a read, don't set the "writable" bit in the page
518 		 * table entry, even if the Guest says it's writable.  That way
519 		 * we will come back here when a write does actually occur, so
520 		 * we can update the Guest's _PAGE_DIRTY flag.
521 		 */
522 		set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
523 
524 	/*
525 	 * Finally, we write the Guest PTE entry back: we've set the
526 	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
527 	 */
528 	if (likely(!cpu->linear_pages))
529 		lgwrite(cpu, gpte_ptr, pte_t, gpte);
530 
531 	/*
532 	 * The fault is fixed, the page table is populated, the mapping
533 	 * manipulated, the result returned and the code complete.  A small
534 	 * delay and a trace of alliteration are the only indications the Guest
535 	 * has that a page fault occurred at all.
536 	 */
537 	return true;
538 }
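The errcode tested above is the standard x86 page-fault error code.  As a reminder of which bits demand_page() cares about, here is a small user-space decoder (illustrative only):

#include <stdio.h>

static void decode_errcode(int errcode)
{
	printf("errcode %#x: %s, %s access, from %s mode\n", errcode,
	       (errcode & 1) ? "protection violation" : "page not present",
	       (errcode & 2) ? "write" : "read",
	       (errcode & 4) ? "user" : "kernel");
}

int main(void)
{
	decode_errcode(2);	/* kernel write to a not-present page */
	decode_errcode(7);	/* userspace write hitting a protection fault */
	return 0;
}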
539 
540 /*H:360
541  * (ii) Making sure the Guest stack is mapped.
542  *
543  * Remember that direct traps into the Guest need a mapped Guest kernel stack.
544  * pin_stack_pages() calls us here: we could simply call demand_page(), but as
545  * we've seen that logic is quite long, and usually the stack pages are already
546  * mapped, so it's overkill.
547  *
548  * This is a quick version which answers the question: is this virtual address
549  * mapped by the shadow page tables, and is it writable?
550  */
551 static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
552 {
553 	pte_t *spte;
554 	unsigned long flags;
555 
556 	/* You can't put your stack in the Switcher! */
557 	if (vaddr >= switcher_addr)
558 		return false;
559 
560 	/* If there's no shadow PTE, it's not writable. */
561 	spte = find_spte(cpu, vaddr, false, 0, 0);
562 	if (!spte)
563 		return false;
564 
565 	/*
566 	 * Check the flags on the pte entry itself: it must be present and
567 	 * writable.
568 	 */
569 	flags = pte_flags(*spte);
570 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
571 }
572 
573 /*
574  * So, when pin_stack_pages() asks us to pin a page, we check if it's already
575  * in the page tables, and if not, we call demand_page() with error code 2
576  * (meaning "write").
577  */
578 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
579 {
580 	unsigned long iomem;
581 
582 	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
583 		kill_guest(cpu, "bad stack page %#lx", vaddr);
584 }
585 /*:*/
586 
587 #ifdef CONFIG_X86_PAE
588 static void release_pmd(pmd_t *spmd)
589 {
590 	/* If the entry's not present, there's nothing to release. */
591 	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
592 		unsigned int i;
593 		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
594 		/* For each entry in the page, we might need to release it. */
595 		for (i = 0; i < PTRS_PER_PTE; i++)
596 			release_pte(ptepage[i]);
597 		/* Now we can free the page of PTEs */
598 		free_page((long)ptepage);
599 		/* And zero out the PMD entry so we never release it twice. */
600 		set_pmd(spmd, __pmd(0));
601 	}
602 }
603 
604 static void release_pgd(pgd_t *spgd)
605 {
606 	/* If the entry's not present, there's nothing to release. */
607 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
608 		unsigned int i;
609 		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
610 
611 		for (i = 0; i < PTRS_PER_PMD; i++)
612 			release_pmd(&pmdpage[i]);
613 
614 		/* Now we can free the page of PMDs */
615 		free_page((long)pmdpage);
616 		/* And zero out the PGD entry so we never release it twice. */
617 		set_pgd(spgd, __pgd(0));
618 	}
619 }
620 
621 #else /* !CONFIG_X86_PAE */
622 /*H:450
623  * If we chase down the release_pgd() code, the non-PAE version looks like
624  * this.  The PAE version is almost identical, but instead of calling
625  * release_pte it calls release_pmd(), which looks much like this.
626  */
627 static void release_pgd(pgd_t *spgd)
628 {
629 	/* If the entry's not present, there's nothing to release. */
630 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
631 		unsigned int i;
632 		/*
633 		 * Converting the pfn to find the actual PTE page is easy: turn
634 		 * the page number into a physical address, then convert to a
635 		 * virtual address (easy for kernel pages like this one).
636 		 */
637 		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
638 		/* For each entry in the page, we might need to release it. */
639 		for (i = 0; i < PTRS_PER_PTE; i++)
640 			release_pte(ptepage[i]);
641 		/* Now we can free the page of PTEs */
642 		free_page((long)ptepage);
643 		/* And zero out the PGD entry so we never release it twice. */
644 		*spgd = __pgd(0);
645 	}
646 }
647 #endif
648 
649 /*H:445
650  * We saw flush_user_mappings() twice: once from the flush_user_mappings()
651  * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
652  * It simply releases every PTE page from 0 up to the Guest's kernel address.
653  */
654 static void flush_user_mappings(struct lguest *lg, int idx)
655 {
656 	unsigned int i;
657 	/* Release every pgd entry up to the kernel's address. */
658 	for (i = 0; i < pgd_index(lg->kernel_address); i++)
659 		release_pgd(lg->pgdirs[idx].pgdir + i);
660 }
661 
662 /*H:440
663  * (v) Flushing (throwing away) page tables,
664  *
665  * The Guest has a hypercall to throw away the page tables: it's used when a
666  * large number of mappings have been changed.
667  */
668 void guest_pagetable_flush_user(struct lg_cpu *cpu)
669 {
670 	/* Drop the userspace part of the current page table. */
671 	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
672 }
673 /*:*/
674 
675 /* We walk down the guest page tables to get a guest-physical address */
676 bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
677 {
678 	pgd_t gpgd;
679 	pte_t gpte;
680 #ifdef CONFIG_X86_PAE
681 	pmd_t gpmd;
682 #endif
683 
684 	/* Still not set up?  Just map 1:1. */
685 	if (unlikely(cpu->linear_pages)) {
686 		*paddr = vaddr;
687 		return true;
688 	}
689 
690 	/* First step: get the top-level Guest page table entry. */
691 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
692 	/* Toplevel not present?  We can't map it in. */
693 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
694 		goto fail;
695 
696 #ifdef CONFIG_X86_PAE
697 	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
698 	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
699 		goto fail;
700 	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
701 #else
702 	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
703 #endif
704 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
705 		goto fail;
706 
707 	*paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
708 	return true;
709 
710 fail:
711 	*paddr = -1UL;
712 	return false;
713 }
714 
715 /*
716  * This is the version we normally use: kills the Guest if it uses a
717  * bad address
718  */
719 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
720 {
721 	unsigned long paddr;
722 
723 	if (!__guest_pa(cpu, vaddr, &paddr))
724 		kill_guest(cpu, "Bad address %#lx", vaddr);
725 	return paddr;
726 }
727 
728 /*
729  * We keep several page tables.  This is a simple routine to find the page
730  * table (if any) corresponding to this top-level address the Guest has given
731  * us.
732  */
733 static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
734 {
735 	unsigned int i;
736 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
737 		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
738 			break;
739 	return i;
740 }
741 
742 /*H:435
743  * And this is us, creating the new page directory.  If we really do
744  * allocate a new one (and so the kernel parts are not there), we set
745  * blank_pgdir.
746  */
747 static unsigned int new_pgdir(struct lg_cpu *cpu,
748 			      unsigned long gpgdir,
749 			      int *blank_pgdir)
750 {
751 	unsigned int next;
752 
753 	/*
754 	 * We pick one entry at random to throw out.  Choosing the Least
755 	 * Recently Used might be better, but this is easy.
756 	 */
757 	next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
758 	/* If it's never been allocated at all before, try now. */
759 	if (!cpu->lg->pgdirs[next].pgdir) {
760 		cpu->lg->pgdirs[next].pgdir =
761 					(pgd_t *)get_zeroed_page(GFP_KERNEL);
762 		/* If the allocation fails, just keep using the one we have */
763 		if (!cpu->lg->pgdirs[next].pgdir)
764 			next = cpu->cpu_pgd;
765 		else {
766 			/*
767 			 * This is a blank page, so there are no kernel
768 			 * mappings: caller must map the stack!
769 			 */
770 			*blank_pgdir = 1;
771 		}
772 	}
773 	/* Record which Guest toplevel this shadows. */
774 	cpu->lg->pgdirs[next].gpgdir = gpgdir;
775 	/* Release all the non-kernel mappings. */
776 	flush_user_mappings(cpu->lg, next);
777 
778 	/* This hasn't run on any CPU at all. */
779 	cpu->lg->pgdirs[next].last_host_cpu = -1;
780 
781 	return next;
782 }
783 
784 /*H:501
785  * We do need the Switcher code mapped at all times, so we allocate that
786  * part of the Guest page table here.  We map the Switcher code immediately,
787  * but defer mapping of the guest register page and IDT/LDT etc page until
788  * just before we run the guest in map_switcher_in_guest().
789  *
790  * We *could* do this setup in map_switcher_in_guest(), but at that point
791  * we have interrupts disabled, and allocating pages like that is fraught: we
792  * can't sleep if we need to free up some memory.
793  */
794 static bool allocate_switcher_mapping(struct lg_cpu *cpu)
795 {
796 	int i;
797 
798 	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
799 		pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
800 				       CHECK_GPGD_MASK, _PAGE_TABLE);
801 		if (!pte)
802 			return false;
803 
804 		/*
805 		 * Map the switcher page if not already there.  It might
806 		 * already be there because we call allocate_switcher_mapping()
807 		 * in guest_set_pgd() just in case it did discard our Switcher
808 		 * mapping, but it probably didn't.
809 		 */
810 		if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
811 			/* Get a reference to the Switcher page. */
812 			get_page(lg_switcher_pages[0]);
813 			/* Create a read-only, executable, kernel-style PTE */
814 			set_pte(pte,
815 				mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
816 		}
817 	}
818 	cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
819 	return true;
820 }
821 
822 /*H:470
823  * Finally, a routine which throws away everything: all PGD entries in all
824  * the shadow page tables, including the Guest's kernel mappings.  This is used
825  * when we destroy the Guest.
826  */
827 static void release_all_pagetables(struct lguest *lg)
828 {
829 	unsigned int i, j;
830 
831 	/* Every shadow pagetable this Guest has */
832 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
833 		if (!lg->pgdirs[i].pgdir)
834 			continue;
835 
836 		/* Every PGD entry. */
837 		for (j = 0; j < PTRS_PER_PGD; j++)
838 			release_pgd(lg->pgdirs[i].pgdir + j);
839 		lg->pgdirs[i].switcher_mapped = false;
840 		lg->pgdirs[i].last_host_cpu = -1;
841 	}
842 }
843 
844 /*
845  * We also throw away everything when a Guest tells us it's changed a kernel
846  * mapping.  Since kernel mappings are in every page table, it's easiest to
847  * throw them all away.  This traps the Guest in amber for a while as
848  * everything faults back in, but it's rare.
849  */
850 void guest_pagetable_clear_all(struct lg_cpu *cpu)
851 {
852 	release_all_pagetables(cpu->lg);
853 	/* We need the Guest kernel stack mapped again. */
854 	pin_stack_pages(cpu);
855 	/* And we need Switcher allocated. */
856 	if (!allocate_switcher_mapping(cpu))
857 		kill_guest(cpu, "Cannot populate switcher mapping");
858 }
859 
860 /*H:430
861  * (iv) Switching page tables
862  *
863  * Now we've seen all the page table setting and manipulation, let's see
864  * what happens when the Guest changes page tables (ie. changes the top-level
865  * pgdir).  This occurs on almost every context switch.
866  */
867 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
868 {
869 	int newpgdir, repin = 0;
870 
871 	/*
872 	 * The very first time they call this, we're actually running without
873 	 * any page tables; we've been making it up.  Throw them away now.
874 	 */
875 	if (unlikely(cpu->linear_pages)) {
876 		release_all_pagetables(cpu->lg);
877 		cpu->linear_pages = false;
878 		/* Force allocation of a new pgdir. */
879 		newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
880 	} else {
881 		/* Look to see if we have this one already. */
882 		newpgdir = find_pgdir(cpu->lg, pgtable);
883 	}
884 
885 	/*
886 	 * If not, we allocate or mug an existing one: if it's a fresh one,
887 	 * repin gets set to 1.
888 	 */
889 	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
890 		newpgdir = new_pgdir(cpu, pgtable, &repin);
891 	/* Change the current pgd index to the new one. */
892 	cpu->cpu_pgd = newpgdir;
893 	/*
894 	 * If it was completely blank, we map in the Guest kernel stack and
895 	 * the Switcher.
896 	 */
897 	if (repin)
898 		pin_stack_pages(cpu);
899 
900 	if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
901 		if (!allocate_switcher_mapping(cpu))
902 			kill_guest(cpu, "Cannot populate switcher mapping");
903 	}
904 }
905 /*:*/
906 
907 /*M:009
908  * Since we throw away all mappings when a kernel mapping changes, our
909  * performance sucks for guests using highmem.  In fact, a guest with
910  * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
911  * usually slower than a Guest with less memory.
912  *
913  * This, of course, cannot be fixed.  It would take some kind of... well, I
914  * don't know, but the term "puissant code-fu" comes to mind.
915 :*/
916 
917 /*H:420
918  * This is the routine which actually sets the page table entry for the
919  * "idx"'th shadow page table.
920  *
921  * Normally, we can just throw out the old entry and replace it with 0: if they
922  * use it demand_page() will put the new entry in.  We need to do this anyway:
923  * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
924  * is read from, and _PAGE_DIRTY when it's written to.
925  *
926  * But Avi Kivity pointed out that most Operating Systems (Linux included) set
927  * these bits on PTEs immediately anyway.  This is done to save the CPU from
928  * having to update them, but it helps us the same way: if they set
929  * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
930  * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
931  */
932 static void __guest_set_pte(struct lg_cpu *cpu, int idx,
933 		       unsigned long vaddr, pte_t gpte)
934 {
935 	/* Look up the matching shadow page directory entry. */
936 	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
937 #ifdef CONFIG_X86_PAE
938 	pmd_t *spmd;
939 #endif
940 
941 	/* If the top level isn't present, there's no entry to update. */
942 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
943 #ifdef CONFIG_X86_PAE
944 		spmd = spmd_addr(cpu, *spgd, vaddr);
945 		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
946 #endif
947 			/* Otherwise, start by releasing the existing entry. */
948 			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
949 			release_pte(*spte);
950 
951 			/*
952 			 * If they're setting this entry as dirty or accessed,
953 			 * we might as well put that entry they've given us in
954 			 * now.  This shaves 10% off a copy-on-write
955 			 * micro-benchmark.
956 			 */
957 			if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
958 			    && !gpte_in_iomem(cpu, gpte)) {
959 				if (!check_gpte(cpu, gpte))
960 					return;
961 				set_pte(spte,
962 					gpte_to_spte(cpu, gpte,
963 						pte_flags(gpte) & _PAGE_DIRTY));
964 			} else {
965 				/*
966 				 * Otherwise kill it and we can demand_page()
967 				 * it in later.
968 				 */
969 				set_pte(spte, __pte(0));
970 			}
971 #ifdef CONFIG_X86_PAE
972 		}
973 #endif
974 	}
975 }
976 
977 /*H:410
978  * Updating a PTE entry is a little trickier.
979  *
980  * We keep track of several different page tables (the Guest uses one for each
981  * process, so it makes sense to cache at least a few).  Each of these have
982  * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
983  * all processes.  So when the page table above that address changes, we update
984  * all the page tables, not just the current one.  This is rare.
985  *
986  * The benefit is that when we have to track a new page table, we can keep all
987  * the kernel mappings.  This speeds up context switch immensely.
988  */
989 void guest_set_pte(struct lg_cpu *cpu,
990 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
991 {
992 	/* We don't let you remap the Switcher; we need it to get back! */
993 	if (vaddr >= switcher_addr) {
994 		kill_guest(cpu, "attempt to set pte into Switcher pages");
995 		return;
996 	}
997 
998 	/*
999 	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
1000 	 * happen often.
1001 	 */
1002 	if (vaddr >= cpu->lg->kernel_address) {
1003 		unsigned int i;
1004 		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
1005 			if (cpu->lg->pgdirs[i].pgdir)
1006 				__guest_set_pte(cpu, i, vaddr, gpte);
1007 	} else {
1008 		/* Is this page table one we have a shadow for? */
1009 		int pgdir = find_pgdir(cpu->lg, gpgdir);
1010 		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
1011 			/* If so, do the update. */
1012 			__guest_set_pte(cpu, pgdir, vaddr, gpte);
1013 	}
1014 }
1015 
1016 /*H:400
1017  * (iii) Setting up a page table entry when the Guest tells us one has changed.
1018  *
1019  * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
1020  * with the other side of page tables while we're here: what happens when the
1021  * Guest asks for a page table to be updated?
1022  *
1023  * We already saw that demand_page() will fill in the shadow page tables when
1024  * needed, so we can simply remove shadow page table entries whenever the Guest
1025  * tells us they've changed.  When the Guest tries to use the new entry it will
1026  * fault and demand_page() will fix it up.
1027  *
1028  * So with that in mind here's our code to update a (top-level) PGD entry:
1029  */
1030 void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
1031 {
1032 	int pgdir;
1033 
1034 	if (idx >= PTRS_PER_PGD) {
1035 		kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
1036 			   idx, PTRS_PER_PGD);
1037 		return;
1038 	}
1039 
1040 	/* If they're talking about a page table we have a shadow for... */
1041 	pgdir = find_pgdir(lg, gpgdir);
1042 	if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
1043 		/* ... throw it away. */
1044 		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
1045 		/* That might have been the Switcher mapping, remap it. */
1046 		if (!allocate_switcher_mapping(&lg->cpus[0])) {
1047 			kill_guest(&lg->cpus[0],
1048 				   "Cannot populate switcher mapping");
1049 		}
1050 		lg->pgdirs[pgdir].last_host_cpu = -1;
1051 	}
1052 }
1053 
1054 #ifdef CONFIG_X86_PAE
1055 /* For setting a mid-level, we just throw everything away.  It's easy. */
1056 void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
1057 {
1058 	guest_pagetable_clear_all(&lg->cpus[0]);
1059 }
1060 #endif
1061 
1062 /*H:500
1063  * (vii) Setting up the page tables initially.
1064  *
1065  * When a Guest is first created, we initialize a shadow page table which
1066  * we will populate on future faults.  The Guest doesn't have any actual
1067  * pagetables yet, so we set linear_pages to tell demand_page() to fake it
1068  * for the moment.
1069  *
1070  * We do need the Switcher to be mapped at all times, so we allocate that
1071  * part of the Guest page table here.
1072  */
1073 int init_guest_pagetable(struct lguest *lg)
1074 {
1075 	struct lg_cpu *cpu = &lg->cpus[0];
1076 	int allocated = 0;
1077 
1078 	/* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
1079 	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
1080 	if (!allocated)
1081 		return -ENOMEM;
1082 
1083 	/* We start with a linear mapping until the Guest sets up its own page tables. */
1084 	cpu->linear_pages = true;
1085 
1086 	/* Allocate the page tables for the Switcher. */
1087 	if (!allocate_switcher_mapping(cpu)) {
1088 		release_all_pagetables(lg);
1089 		return -ENOMEM;
1090 	}
1091 
1092 	return 0;
1093 }
1094 
1095 /*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
1096 void page_table_guest_data_init(struct lg_cpu *cpu)
1097 {
1098 	/*
1099 	 * We tell the Guest that it can't use the virtual addresses
1100 	 * used by the Switcher.  This trick is equivalent to 4GB -
1101 	 * switcher_addr.
1102 	 */
1103 	u32 top = ~switcher_addr + 1;
1104 
1105 	/* We get the kernel address: above this is all kernel memory. */
1106 	if (get_user(cpu->lg->kernel_address,
1107 		     &cpu->lg->lguest_data->kernel_address)
1108 		/*
1109 		 * We tell the Guest that it can't use the top virtual
1110 		 * addresses (used by the Switcher).
1111 		 */
1112 	    || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
1113 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
1114 		return;
1115 	}
1116 
1117 	/*
1118 	 * In flush_user_mappings() we loop from 0 to
1119 	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
1120 	 * Switcher mappings, so check that now.
1121 	 */
1122 	if (cpu->lg->kernel_address >= switcher_addr)
1123 		kill_guest(cpu, "bad kernel address %#lx",
1124 				 cpu->lg->kernel_address);
1125 }
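The "trick" mentioned above is plain two's-complement negation: in 32-bit arithmetic ~x + 1 equals 2^32 - x.  A quick check with a made-up switcher_addr (the real value is chosen elsewhere in the Host):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t switcher_addr = 0xffc00000;	/* hypothetical */
	uint32_t top = ~switcher_addr + 1;

	printf("top = %#x, 4GB - switcher_addr = %#llx\n",
	       top, 0x100000000ULL - (unsigned long long)switcher_addr);
	return 0;
}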
1126 
1127 /* When a Guest dies, our cleanup is fairly simple. */
1128 void free_guest_pagetable(struct lguest *lg)
1129 {
1130 	unsigned int i;
1131 
1132 	/* Throw away all page table pages. */
1133 	release_all_pagetables(lg);
1134 	/* Now free the top levels: free_page() can handle 0 just fine. */
1135 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
1136 		free_page((long)lg->pgdirs[i].pgdir);
1137 }
1138 
1139 /*H:481
1140  * This clears the Switcher mappings for cpu #i.
1141  */
1142 static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
1143 {
1144 	unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
1145 	pte_t *pte;
1146 
1147 	/* Clear the mappings for both pages. */
1148 	pte = find_spte(cpu, base, false, 0, 0);
1149 	release_pte(*pte);
1150 	set_pte(pte, __pte(0));
1151 
1152 	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1153 	release_pte(*pte);
1154 	set_pte(pte, __pte(0));
1155 }
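The base address here assumes the layout described below and in allocate_switcher_mapping(): the shared Switcher text first, then two pages for each Host CPU.  A rough picture, again with a made-up switcher_addr:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long switcher_addr = 0xffc00000;	/* hypothetical */
	unsigned int cpu;

	for (cpu = 0; cpu < 3; cpu++) {
		unsigned long base = switcher_addr + PAGE_SIZE
				     + cpu * PAGE_SIZE * 2;
		printf("cpu %u: regs page %#lx, read-only page %#lx\n",
		       cpu, base, base + PAGE_SIZE);
	}
	return 0;
}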
1156 
1157 /*H:480
1158  * (vi) Mapping the Switcher when the Guest is about to run.
1159  *
1160  * The Switcher and the two pages for this CPU need to be visible in the Guest
1161  * (and not the pages for other CPUs).
1162  *
1163  * The pages for the pagetables have all been allocated before: we just need
1164  * to make sure the actual PTEs are up-to-date for the CPU we're about to run
1165  * on.
1166  */
1167 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
1168 {
1169 	unsigned long base;
1170 	struct page *percpu_switcher_page, *regs_page;
1171 	pte_t *pte;
1172 	struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
1173 
1174 	/* Switcher page should always be mapped by now! */
1175 	BUG_ON(!pgdir->switcher_mapped);
1176 
1177 	/*
1178 	 * Remember that we have two pages for each Host CPU, so we can run a
1179 	 * Guest on each CPU without them interfering.  We need to make sure
1180 	 * those pages are mapped correctly in the Guest, but since we usually
1181 	 * run on the same CPU, we cache that, and only update the mappings
1182 	 * when we move.
1183 	 */
1184 	if (pgdir->last_host_cpu == raw_smp_processor_id())
1185 		return;
1186 
1187 	/* -1 means unknown so we remove everything. */
1188 	if (pgdir->last_host_cpu == -1) {
1189 		unsigned int i;
1190 		for_each_possible_cpu(i)
1191 			remove_switcher_percpu_map(cpu, i);
1192 	} else {
1193 		/* We know exactly what CPU mapping to remove. */
1194 		remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
1195 	}
1196 
1197 	/*
1198 	 * When we're running the Guest, we want the Guest's "regs" page to
1199 	 * appear where the first Switcher page for this CPU is.  This is an
1200 	 * optimization: when the Switcher saves the Guest registers, it saves
1201 	 * them into the first page of this CPU's "struct lguest_pages": if we
1202 	 * make sure the Guest's register page is already mapped there, we
1203 	 * don't have to copy them out again.
1204 	 */
1205 	/* Find the shadow PTE for this regs page. */
1206 	base = switcher_addr + PAGE_SIZE
1207 		+ raw_smp_processor_id() * sizeof(struct lguest_pages);
1208 	pte = find_spte(cpu, base, false, 0, 0);
1209 	regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
1210 	get_page(regs_page);
1211 	set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
1212 
1213 	/*
1214 	 * We map the second page of the struct lguest_pages read-only in
1215 	 * the Guest: the IDT, GDT and other things it's not supposed to
1216 	 * change.
1217 	 */
1218 	pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
1219 	percpu_switcher_page
1220 		= lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
1221 	get_page(percpu_switcher_page);
1222 	set_pte(pte, mk_pte(percpu_switcher_page,
1223 			    __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
1224 
1225 	pgdir->last_host_cpu = raw_smp_processor_id();
1226 }
1227 
1228 /*H:490
1229  * We've made it through the page table code.  Perhaps our tired brains are
1230  * still processing the details, or perhaps we're simply glad it's over.
1231  *
1232  * If nothing else, note that all this complexity in keeping the shadow page
1233  * tables in sync with the Guest's page tables exists for one reason: for most
1234  * page table dance determines how bad performance will be.  This is why Xen
1235  * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
1236  * have implemented shadow page table support directly into hardware.
1237  *
1238  * There is just one file remaining in the Host.
1239  */
1240