1 /*P:700 The pagetable code, on the other hand, still shows the scars of
2  * previous encounters.  It's functional, and as neat as it can be in the
3  * circumstances, but be wary, for these things are subtle and break easily.
4  * The Guest provides a virtual to physical mapping, but we can neither trust
5  * it nor use it: we verify and convert it here then point the CPU to the
6  * converted Guest pages when running the Guest. :*/
7 
8 /* Copyright (C) Rusty Russell IBM Corporation 2006.
9  * GPL v2 and any later version */
10 #include <linux/mm.h>
11 #include <linux/types.h>
12 #include <linux/spinlock.h>
13 #include <linux/random.h>
14 #include <linux/percpu.h>
15 #include <asm/tlbflush.h>
16 #include <asm/uaccess.h>
17 #include <asm/bootparam.h>
18 #include "lg.h"
19 
20 /*M:008 We hold references to pages, which prevents them from being swapped.
21  * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
22  * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
23  * could probably consider launching Guests as non-root. :*/
24 
25 /*H:300
26  * The Page Table Code
27  *
28  * We use two-level page tables for the Guest.  If you're not entirely
29  * comfortable with virtual addresses, physical addresses and page tables then
30  * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
31  * diagrams!).
32  *
33  * The Guest keeps page tables, but we maintain the actual ones here: these are
34  * called "shadow" page tables.  Which is a very Guest-centric name: these are
35  * the real page tables the CPU uses, although we keep them up to date to
36  * reflect the Guest's.  (See what I mean about weird naming?  Since when do
37  * shadows reflect anything?)
38  *
39  * Anyway, this is the most complicated part of the Host code.  There are seven
40  * parts to this:
41  *  (i) Looking up a page table entry when the Guest faults,
42  *  (ii) Making sure the Guest stack is mapped,
43  *  (iii) Setting up a page table entry when the Guest tells us one has changed,
44  *  (iv) Switching page tables,
45  *  (v) Flushing (throwing away) page tables,
46  *  (vi) Mapping the Switcher when the Guest is about to run,
47  *  (vii) Setting up the page tables initially.
48  :*/
49 
50 
51 /* 1024 entries in a page table page map 1024 pages: 4MB.  The Switcher is
52  * conveniently placed at the top 4MB, so it uses a separate, complete PTE
53  * page.  */
54 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
55 
56 /* We actually need a separate PTE page for each CPU.  Remember that after the
57  * Switcher code itself come two pages for each CPU, and we don't want this
58  * CPU's guest to see the pages of any other CPU. */
59 static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
60 #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
61 
62 /*H:320 The page table code is curly enough to need helper functions to keep it
63  * clear and clean.
64  *
65  * There are two functions which return pointers to the shadow (aka "real")
66  * page tables.
67  *
68  * spgd_addr() takes the virtual address and returns a pointer to the top-level
69  * page directory entry (PGD) for that address.  Since we keep track of several
70  * page tables, the "i" argument tells us which one we're interested in (it's
71  * usually the current one). */
72 static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
73 {
74 	unsigned int index = pgd_index(vaddr);
75 
76 	/* We kill any Guest trying to touch the Switcher addresses. */
77 	if (index >= SWITCHER_PGD_INDEX) {
78 		kill_guest(cpu, "attempt to access switcher pages");
79 		index = 0;
80 	}
81 	/* Return a pointer to the index'th pgd entry for the i'th page table. */
82 	return &cpu->lg->pgdirs[i].pgdir[index];
83 }
84 
85 /* This routine then takes the page directory entry returned above, which
86  * contains the address of the page table entry (PTE) page.  It then returns a
87  * pointer to the PTE entry for the given address. */
88 static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
89 {
90 	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
91 	/* You should never call this if the PGD entry wasn't valid */
92 	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
93 	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
94 }
95 
96 /* These two functions are just like the above two, except they access the Guest
97  * page tables.  Hence they return a Guest address. */
98 static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
99 {
100 	unsigned int index = vaddr >> (PGDIR_SHIFT);
101 	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
102 }
103 
104 static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
105 {
106 	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
107 	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
108 	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
109 }
110 /*:*/
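
/* For concreteness, here is how the arithmetic above carves up one sample
 * address (illustrative numbers only, assuming the usual 4K pages and
 * 1024-entry tables of 32-bit non-PAE x86):
 *
 *	vaddr = 0xC0101234
 *	pgd_index(vaddr) = vaddr >> 22               = 768
 *	(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE         = 257
 *	vaddr & ~PAGE_MASK                           = 0x234
 *
 * So PGD slot 768 points at a PTE page, entry 257 of that page maps the
 * frame, and 0x234 is the offset within it.  spgd_addr()/spte_addr() do
 * this walk on the shadow tables; gpgd_addr()/gpte_addr() do the same walk
 * on the Guest's own tables. */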
111 
112 /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
113  * an optimization (ie. pre-faulting). :*/
114 
115 /*H:350 This routine takes a page number given by the Guest and converts it to
116  * an actual, physical page number.  It can fail for several reasons: the
117  * virtual address might not be mapped by the Launcher, the write flag is set
118  * and the page is read-only, or the write flag was set and the page was
119  * shared so had to be copied, but we ran out of memory.
120  *
121  * This holds a reference to the page, so release_pte() is careful to put that
122  * back. */
123 static unsigned long get_pfn(unsigned long virtpfn, int write)
124 {
125 	struct page *page;
126 
127 	/* gup me one page at this address please! */
128 	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
129 		return page_to_pfn(page);
130 
131 	/* This value indicates failure. */
132 	return -1UL;
133 }
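
/* The M:014 note above muses about batching.  Here's a rough sketch of what
 * that could look like (a hypothetical helper, not used anywhere in this
 * file): grab several consecutive pages with one get_user_pages_fast() call
 * and hand the pfns back.  It returns how many pages it got; the caller
 * must put_page() any page it ends up not keeping. */
static int get_pfn_batch(unsigned long virtpfn, int write,
			 unsigned long pfns[], int npages)
{
	struct page *pages[16];
	int i, got;

	if (npages > 16)
		npages = 16;
	got = get_user_pages_fast(virtpfn << PAGE_SHIFT, npages, write, pages);
	for (i = 0; i < got; i++)
		pfns[i] = page_to_pfn(pages[i]);
	return got;
}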
134 
135 /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
136  * entry can be a little tricky.  The flags are (almost) the same, but the
137  * Guest PTE contains a virtual page number: the CPU needs the real page
138  * number. */
139 static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
140 {
141 	unsigned long pfn, base, flags;
142 
143 	/* The Guest sets the global flag, because it thinks that it is using
144 	 * PGE.  We only told it to use PGE so it would tell us whether it was
145 	 * flushing a kernel mapping or a userspace mapping.  We don't actually
146 	 * use the global bit, so throw it away. */
147 	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
148 
149 	/* The Guest's pages are offset inside the Launcher. */
150 	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
151 
152 	/* We need a temporary "unsigned long" variable to hold the answer from
153 	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
154 	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
155 	 * page, given the virtual number. */
156 	pfn = get_pfn(base + pte_pfn(gpte), write);
157 	if (pfn == -1UL) {
158 		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
159 		/* When we destroy the Guest, we'll go through the shadow page
160 		 * tables and release_pte() them.  Make sure we don't think
161 		 * this one is valid! */
162 		flags = 0;
163 	}
164 	/* Now we assemble our shadow PTE from the page number and flags. */
165 	return pfn_pte(pfn, __pgprot(flags));
166 }
167 
168 /*H:460 And to complete the chain, release_pte() looks like this: */
169 static void release_pte(pte_t pte)
170 {
171 	/* Remember that get_user_pages_fast() took a reference to the page, in
172 	 * get_pfn()?  We have to put it back now. */
173 	if (pte_flags(pte) & _PAGE_PRESENT)
174 		put_page(pfn_to_page(pte_pfn(pte)));
175 }
176 /*:*/
177 
178 static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
179 {
180 	if ((pte_flags(gpte) & _PAGE_PSE) ||
181 	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
182 		kill_guest(cpu, "bad page table entry");
183 }
184 
185 static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
186 {
187 	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
188 	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
189 		kill_guest(cpu, "bad page directory entry");
190 }
191 
192 /*H:330
193  * (i) Looking up a page table entry when the Guest faults.
194  *
195  * We saw this call in run_guest(): when we see a page fault in the Guest, we
196  * come here.  That's because we only set up the shadow page tables lazily as
197  * they're needed, so we get page faults all the time and quietly fix them up
198  * and return to the Guest without it knowing.
199  *
200  * If we fixed up the fault (ie. we mapped the address), this routine returns
201  * true.  Otherwise, it was a real fault and we need to tell the Guest. */
202 int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
203 {
204 	pgd_t gpgd;
205 	pgd_t *spgd;
206 	unsigned long gpte_ptr;
207 	pte_t gpte;
208 	pte_t *spte;
209 
210 	/* First step: get the top-level Guest page table entry. */
211 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
212 	/* Toplevel not present?  We can't map it in. */
213 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
214 		return 0;
215 
216 	/* Now look at the matching shadow entry. */
217 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
218 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
219 		/* No shadow entry: allocate a new shadow PTE page. */
220 		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
221 		/* This is not really the Guest's fault, but killing it is
222 		 * simple for this corner case. */
223 		if (!ptepage) {
224 			kill_guest(cpu, "out of memory allocating pte page");
225 			return 0;
226 		}
227 		/* We check that the Guest pgd is OK. */
228 		check_gpgd(cpu, gpgd);
229 		/* And we copy the flags to the shadow PGD entry.  The page
230 		 * number in the shadow PGD is the page we just allocated. */
231 		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
232 	}
233 
234 	/* OK, now we look at the lower level in the Guest page table: keep its
235 	 * address, because we might update it later. */
236 	gpte_ptr = gpte_addr(gpgd, vaddr);
237 	gpte = lgread(cpu, gpte_ptr, pte_t);
238 
239 	/* If this page isn't in the Guest page tables, we can't page it in. */
240 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
241 		return 0;
242 
243 	/* Check they're not trying to write to a page the Guest wants
244 	 * read-only (bit 2 of errcode == write). */
245 	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
246 		return 0;
247 
248 	/* User access to a kernel-only page? (bit 3 == user access) */
249 	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
250 		return 0;
251 
252 	/* Check that the Guest PTE flags are OK, and the page number is below
253 	 * the pfn_limit (ie. not mapping the Launcher binary). */
254 	check_gpte(cpu, gpte);
255 
256 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
257 	gpte = pte_mkyoung(gpte);
258 	if (errcode & 2)
259 		gpte = pte_mkdirty(gpte);
260 
261 	/* Get the pointer to the shadow PTE entry we're going to set. */
262 	spte = spte_addr(*spgd, vaddr);
263 	/* If there was a valid shadow PTE entry here before, we release it.
264 	 * This can happen with a write to a previously read-only entry. */
265 	release_pte(*spte);
266 
267 	/* If this is a write, we insist that the Guest page is writable (the
268 	 * final arg to gpte_to_spte()). */
269 	if (pte_dirty(gpte))
270 		*spte = gpte_to_spte(cpu, gpte, 1);
271 	else
272 		/* If this is a read, don't set the "writable" bit in the page
273 		 * table entry, even if the Guest says it's writable.  That way
274 		 * we will come back here when a write does actually occur, so
275 		 * we can update the Guest's _PAGE_DIRTY flag. */
276 		*spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
277 
278 	/* Finally, we write the Guest PTE entry back: we've set the
279 	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
280 	lgwrite(cpu, gpte_ptr, pte_t, gpte);
281 
282 	/* The fault is fixed, the page table is populated, the mapping
283 	 * manipulated, the result returned and the code complete.  A small
284 	 * delay and a trace of alliteration are the only indications the Guest
285 	 * has that a page fault occurred at all. */
286 	return 1;
287 }
288 
289 /*H:360
290  * (ii) Making sure the Guest stack is mapped.
291  *
292  * Remember that direct traps into the Guest need a mapped Guest kernel stack.
293  * pin_stack_pages() calls us here: we could simply call demand_page(), but as
294  * we've seen that logic is quite long, and usually the stack pages are already
295  * mapped, so it's overkill.
296  *
297  * This is a quick version which answers the question: is this virtual address
298  * mapped by the shadow page tables, and is it writable? */
299 static int page_writable(struct lg_cpu *cpu, unsigned long vaddr)
300 {
301 	pgd_t *spgd;
302 	unsigned long flags;
303 
304 	/* Look at the current top level entry: is it present? */
305 	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
306 	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
307 		return 0;
308 
309 	/* Check the flags on the pte entry itself: it must be present and
310 	 * writable. */
311 	flags = pte_flags(*(spte_addr(*spgd, vaddr)));
312 
313 	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
314 }
315 
316 /* So, when pin_stack_pages() asks us to pin a page, we check if it's already
317  * in the page tables, and if not, we call demand_page() with error code 2
318  * (meaning "write"). */
319 void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
320 {
321 	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
322 		kill_guest(cpu, "bad stack page %#lx", vaddr);
323 }
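
/* For context, the caller lives in interrupts_and_traps.c.  Roughly (a
 * paraphrase, not the authoritative copy), it walks downwards from the top
 * of the Guest's kernel stack, pinning each stack page in turn:
 *
 *	for (i = 0; i < cpu->lg->stack_pages; i++)
 *		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
 */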
324 
325 /*H:450 If we chase down the release_pgd() code, it looks like this: */
326 static void release_pgd(struct lguest *lg, pgd_t *spgd)
327 {
328 	/* If the entry's not present, there's nothing to release. */
329 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
330 		unsigned int i;
331 		/* Converting the pfn to find the actual PTE page is easy: turn
332 		 * the page number into a physical address, then convert to a
333 		 * virtual address (easy for kernel pages like this one). */
334 		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
335 		/* For each entry in the page, we might need to release it. */
336 		for (i = 0; i < PTRS_PER_PTE; i++)
337 			release_pte(ptepage[i]);
338 		/* Now we can free the page of PTEs */
339 		free_page((long)ptepage);
340 		/* And zero out the PGD entry so we never release it twice. */
341 		*spgd = __pgd(0);
342 	}
343 }
344 
345 /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
346  * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
347  * It simply releases every PTE page from 0 up to the Guest's kernel address. */
348 static void flush_user_mappings(struct lguest *lg, int idx)
349 {
350 	unsigned int i;
351 	/* Release every pgd entry up to the kernel's address. */
352 	for (i = 0; i < pgd_index(lg->kernel_address); i++)
353 		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
354 }
355 
356 /*H:440 (v) Flushing (throwing away) page tables,
357  *
358  * The Guest has a hypercall to throw away the page tables: it's used when a
359  * large number of mappings have been changed. */
360 void guest_pagetable_flush_user(struct lg_cpu *cpu)
361 {
362 	/* Drop the userspace part of the current page table. */
363 	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
364 }
365 /*:*/
366 
367 /* We walk down the guest page tables to get a guest-physical address */
368 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
369 {
370 	pgd_t gpgd;
371 	pte_t gpte;
372 
373 	/* First step: get the top-level Guest page table entry. */
374 	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
375 	/* Toplevel not present?  We can't map it in. */
376 	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
377 		kill_guest(cpu, "Bad address %#lx", vaddr);
378 
379 	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
380 	if (!(pte_flags(gpte) & _PAGE_PRESENT))
381 		kill_guest(cpu, "Bad address %#lx", vaddr);
382 
383 	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
384 }
385 
386 /* We keep several page tables.  This is a simple routine to find the page
387  * table (if any) corresponding to this top-level address the Guest has given
388  * us. */
389 static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
390 {
391 	unsigned int i;
392 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
393 		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
394 			break;
395 	return i;
396 }
397 
398 /*H:435 And this is us, creating the new page directory.  If we really do
399  * allocate a new one (and so the kernel parts are not there), we set
400  * blank_pgdir. */
401 static unsigned int new_pgdir(struct lg_cpu *cpu,
402 			      unsigned long gpgdir,
403 			      int *blank_pgdir)
404 {
405 	unsigned int next;
406 
407 	/* We pick one entry at random to throw out.  Choosing the Least
408 	 * Recently Used might be better, but this is easy. */
409 	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
410 	/* If it's never been allocated at all before, try now. */
411 	if (!cpu->lg->pgdirs[next].pgdir) {
412 		cpu->lg->pgdirs[next].pgdir =
413 					(pgd_t *)get_zeroed_page(GFP_KERNEL);
414 		/* If the allocation fails, just keep using the one we have */
415 		if (!cpu->lg->pgdirs[next].pgdir)
416 			next = cpu->cpu_pgd;
417 		else
418 			/* This is a blank page, so there are no kernel
419 			 * mappings: caller must map the stack! */
420 			*blank_pgdir = 1;
421 	}
422 	/* Record which Guest toplevel this shadows. */
423 	cpu->lg->pgdirs[next].gpgdir = gpgdir;
424 	/* Release all the non-kernel mappings. */
425 	flush_user_mappings(cpu->lg, next);
426 
427 	return next;
428 }
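
/* The comment above mentions Least Recently Used as the smarter policy.
 * If struct pgdir grew a (hypothetical) last_used counter, bumped in
 * guest_new_pagetable(), victim selection could look roughly like:
 *
 *	victim = 0;
 *	for (i = 1; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
 *		if (cpu->lg->pgdirs[i].last_used
 *		    < cpu->lg->pgdirs[victim].last_used)
 *			victim = i;
 *
 * Random eviction avoids carrying that extra state around, which is
 * presumably why it won here. */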
429 
430 /*H:430 (iv) Switching page tables
431  *
432  * Now we've seen all the page table setting and manipulation, let's see
433  * what happens when the Guest changes page tables (ie. changes the top-level
434  * pgdir).  This occurs on almost every context switch. */
435 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
436 {
437 	int newpgdir, repin = 0;
438 
439 	/* Look to see if we have this one already. */
440 	newpgdir = find_pgdir(cpu->lg, pgtable);
441 	/* If not, we allocate or mug an existing one: if it's a fresh one,
442 	 * repin gets set to 1. */
443 	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
444 		newpgdir = new_pgdir(cpu, pgtable, &repin);
445 	/* Change the current pgd index to the new one. */
446 	cpu->cpu_pgd = newpgdir;
447 	/* If it was completely blank, we map in the Guest kernel stack */
448 	if (repin)
449 		pin_stack_pages(cpu);
450 }
451 
452 /*H:470 Finally, a routine which throws away everything: all PGD entries in all
453  * the shadow page tables, including the Guest's kernel mappings.  This is used
454  * when we destroy the Guest. */
455 static void release_all_pagetables(struct lguest *lg)
456 {
457 	unsigned int i, j;
458 
459 	/* Every shadow pagetable this Guest has */
460 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
461 		if (lg->pgdirs[i].pgdir)
462 			/* Every PGD entry except the Switcher at the top */
463 			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
464 				release_pgd(lg, lg->pgdirs[i].pgdir + j);
465 }
466 
467 /* We also throw away everything when a Guest tells us it's changed a kernel
468  * mapping.  Since kernel mappings are in every page table, it's easiest to
469  * throw them all away.  This traps the Guest in amber for a while as
470  * everything faults back in, but it's rare. */
471 void guest_pagetable_clear_all(struct lg_cpu *cpu)
472 {
473 	release_all_pagetables(cpu->lg);
474 	/* We need the Guest kernel stack mapped again. */
475 	pin_stack_pages(cpu);
476 }
477 /*:*/
478 /*M:009 Since we throw away all mappings when a kernel mapping changes, our
479  * performance sucks for guests using highmem.  In fact, a guest with
480  * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
481  * usually slower than a Guest with less memory.
482  *
483  * This, of course, cannot be fixed.  It would take some kind of... well, I
484  * don't know, but the term "puissant code-fu" comes to mind. :*/
485 
486 /*H:420 This is the routine which actually sets the page table entry for the
487  * "idx"'th shadow page table.
488  *
489  * Normally, we can just throw out the old entry and replace it with 0: if they
490  * use it demand_page() will put the new entry in.  We need to do this anyway:
491  * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
492  * is read from, and _PAGE_DIRTY when it's written to.
493  *
494  * But Avi Kivity pointed out that most Operating Systems (Linux included) set
495  * these bits on PTEs immediately anyway.  This is done to save the CPU from
496  * having to update them, but it helps us the same way: if they set
497  * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
498  * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
499  */
500 static void do_set_pte(struct lg_cpu *cpu, int idx,
501 		       unsigned long vaddr, pte_t gpte)
502 {
503 	/* Look up the matching shadow page directory entry. */
504 	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
505 
506 	/* If the top level isn't present, there's no entry to update. */
507 	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
508 		/* Otherwise, we start by releasing the existing entry. */
509 		pte_t *spte = spte_addr(*spgd, vaddr);
510 		release_pte(*spte);
511 
512 		/* If they're setting this entry as dirty or accessed, we might
513 		 * as well put that entry they've given us in now.  This shaves
514 		 * 10% off a copy-on-write micro-benchmark. */
515 		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
516 			check_gpte(cpu, gpte);
517 			*spte = gpte_to_spte(cpu, gpte,
518 					     pte_flags(gpte) & _PAGE_DIRTY);
519 		} else
520 			/* Otherwise kill it and we can demand_page() it in
521 			 * later. */
522 			*spte = __pte(0);
523 	}
524 }
525 
526 /*H:410 Updating a PTE entry is a little trickier.
527  *
528  * We keep track of several different page tables (the Guest uses one for each
529  * process, so it makes sense to cache at least a few).  Each of these has
530  * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
531  * all processes.  So when the page table above that address changes, we update
532  * all the page tables, not just the current one.  This is rare.
533  *
534  * The benefit is that when we have to track a new page table, we can keep all
535  * the kernel mappings.  This speeds up context switch immensely. */
536 void guest_set_pte(struct lg_cpu *cpu,
537 		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
538 {
539 	/* Kernel mappings must be changed on all top levels.  Slow, but doesn't
540 	 * happen often. */
541 	if (vaddr >= cpu->lg->kernel_address) {
542 		unsigned int i;
543 		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
544 			if (cpu->lg->pgdirs[i].pgdir)
545 				do_set_pte(cpu, i, vaddr, gpte);
546 	} else {
547 		/* Is this page table one we have a shadow for? */
548 		int pgdir = find_pgdir(cpu->lg, gpgdir);
549 		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
550 			/* If so, do the update. */
551 			do_set_pte(cpu, pgdir, vaddr, gpte);
552 	}
553 }
554 
555 /*H:400
556  * (iii) Setting up a page table entry when the Guest tells us one has changed.
557  *
558  * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
559  * with the other side of page tables while we're here: what happens when the
560  * Guest asks for a page table to be updated?
561  *
562  * We already saw that demand_page() will fill in the shadow page tables when
563  * needed, so we can simply remove shadow page table entries whenever the Guest
564  * tells us they've changed.  When the Guest tries to use the new entry it will
565  * fault and demand_page() will fix it up.
566  *
567  * So with that in mind, here's our code to update a (top-level) PGD entry:
568  */
569 void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
570 {
571 	int pgdir;
572 
573 	/* The kernel seems to try to initialize this early on: we ignore its
574 	 * attempts to map over the Switcher. */
575 	if (idx >= SWITCHER_PGD_INDEX)
576 		return;
577 
578 	/* If they're talking about a page table we have a shadow for... */
579 	pgdir = find_pgdir(lg, gpgdir);
580 	if (pgdir < ARRAY_SIZE(lg->pgdirs))
581 		/* ... throw it away. */
582 		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
583 }
584 
585 /* Once we know how much memory we have, we can construct simple identity
586  * (virtual == physical) and linear mappings
587  * which will get the Guest far enough into the boot to create its own.
588  *
589  * We lay them out of the way, just below the initrd (which is why we need to
590  * know its size here). */
591 static unsigned long setup_pagetables(struct lguest *lg,
592 				      unsigned long mem,
593 				      unsigned long initrd_size)
594 {
595 	pgd_t __user *pgdir;
596 	pte_t __user *linear;
597 	unsigned int mapped_pages, i, linear_pages, phys_linear;
598 	unsigned long mem_base = (unsigned long)lg->mem_base;
599 
600 	/* We have mapped_pages frames to map, so we need
601 	 * linear_pages page tables to map them. */
602 	mapped_pages = mem / PAGE_SIZE;
603 	linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
604 
605 	/* We put the toplevel page directory page at the top of memory. */
606 	pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);
607 
608 	/* Now we use the next linear_pages pages as pte pages */
609 	linear = (void *)pgdir - linear_pages * PAGE_SIZE;
610 
611 	/* Linear mapping is easy: put every page's address into the
612 	 * mapping in order. */
613 	for (i = 0; i < mapped_pages; i++) {
614 		pte_t pte;
615 		pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
616 		if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
617 			return -EFAULT;
618 	}
619 
620 	/* The top level points to the linear page table pages above.
621 	 * We set up the identity and linear mappings here. */
622 	phys_linear = (unsigned long)linear - mem_base;
623 	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
624 		pgd_t pgd;
625 		pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
626 			    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
627 
628 		if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
629 		    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
630 					   + i / PTRS_PER_PTE],
631 				    &pgd, sizeof(pgd)))
632 			return -EFAULT;
633 	}
634 
635 	/* We return the top level (guest-physical) address: remember where
636 	 * this is. */
637 	return (unsigned long)pgdir - mem_base;
638 }
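
/* A worked example of the layout above (illustrative numbers only): for a
 * 64MB Guest, mapped_pages = 16384 and linear_pages = 16.  The toplevel
 * pgdir occupies the page immediately below the initrd, the 16 PTE pages
 * sit directly below that, PGD slots 0-15 give the identity mapping and
 * slots 768-783 (starting at pgd_index(PAGE_OFFSET), with the default
 * PAGE_OFFSET of 0xC0000000) give the linear one. */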
639 
640 /*H:500 (vii) Setting up the page tables initially.
641  *
642  * When a Guest is first created, the Launcher tells us where the toplevel of
643  * its first page table is.  We set some things up here: */
644 int init_guest_pagetable(struct lguest *lg)
645 {
646 	u64 mem;
647 	u32 initrd_size;
648 	struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
649 
650 	/* Get the Guest memory size and the ramdisk size from the boot header
651 	 * located at lg->mem_base (Guest address 0). */
652 	if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
653 	    || get_user(initrd_size, &boot->hdr.ramdisk_size))
654 		return -EFAULT;
655 
656 	/* We start on the first shadow page table, and give it a blank PGD
657 	 * page. */
658 	lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
659 	if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
660 		return lg->pgdirs[0].gpgdir;
661 	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
662 	if (!lg->pgdirs[0].pgdir)
663 		return -ENOMEM;
664 	lg->cpus[0].cpu_pgd = 0;
665 	return 0;
666 }
667 
668 /* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
669 void page_table_guest_data_init(struct lg_cpu *cpu)
670 {
671 	/* We get the kernel address: above this is all kernel memory. */
672 	if (get_user(cpu->lg->kernel_address,
673 		     &cpu->lg->lguest_data->kernel_address)
674 	    /* We tell the Guest that it can't use the top 4MB of virtual
675 	     * addresses used by the Switcher. */
676 	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
677 	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
678 		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
679 
680 	/* In flush_user_mappings() we loop from 0 to
681 	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
682 	 * Switcher mappings, so check that now. */
683 	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
684 		kill_guest(cpu, "bad kernel address %#lx",
685 				 cpu->lg->kernel_address);
686 }
687 
688 /* When a Guest dies, our cleanup is fairly simple. */
689 void free_guest_pagetable(struct lguest *lg)
690 {
691 	unsigned int i;
692 
693 	/* Throw away all page table pages. */
694 	release_all_pagetables(lg);
695 	/* Now free the top levels: free_page() can handle 0 just fine. */
696 	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
697 		free_page((long)lg->pgdirs[i].pgdir);
698 }
699 
700 /*H:480 (vi) Mapping the Switcher when the Guest is about to run.
701  *
702  * The Switcher and the two pages for this CPU need to be visible in the
703  * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
704  * for each CPU already set up; we just need to hook them in now that we know
705  * which Guest is about to run on this CPU. */
706 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
707 {
708 	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
709 	pgd_t switcher_pgd;
710 	pte_t regs_pte;
711 	unsigned long pfn;
712 
713 	/* Make the last PGD entry for this Guest point to the Switcher's PTE
714 	 * page for this CPU (with appropriate flags). */
715 	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);
716 
717 	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
718 
719 	/* We also change the Switcher PTE page.  When we're running the Guest,
720 	 * we want the Guest's "regs" page to appear where the first Switcher
721 	 * page for this CPU is.  This is an optimization: when the Switcher
722 	 * saves the Guest registers, it saves them into the first page of this
723 	 * CPU's "struct lguest_pages": if we make sure the Guest's register
724 	 * page is already mapped there, we don't have to copy them out
725 	 * again. */
726 	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
727 	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
728 	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
729 }
730 /*:*/
731 
732 static void free_switcher_pte_pages(void)
733 {
734 	unsigned int i;
735 
736 	for_each_possible_cpu(i)
737 		free_page((long)switcher_pte_page(i));
738 }
739 
740 /*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
741  * the CPU number and the "struct page"s for the Switcher code itself.
742  *
743  * Currently the Switcher is less than a page long, so "pages" is always 1. */
744 static __init void populate_switcher_pte_page(unsigned int cpu,
745 					      struct page *switcher_page[],
746 					      unsigned int pages)
747 {
748 	unsigned int i;
749 	pte_t *pte = switcher_pte_page(cpu);
750 
751 	/* The first entries are easy: they map the Switcher code. */
752 	for (i = 0; i < pages; i++) {
753 		pte[i] = mk_pte(switcher_page[i],
754 				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
755 	}
756 
757 	/* The only other thing we map is this CPU's pair of pages. */
758 	i = pages + cpu*2;
759 
760 	/* First page (Guest registers) is writable from the Guest */
761 	pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
762 			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
763 
764 	/* The second page contains the "struct lguest_ro_state", and is
765 	 * read-only. */
766 	pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
767 			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
768 }
769 
770 /* We've made it through the page table code.  Perhaps our tired brains are
771  * still processing the details, or perhaps we're simply glad it's over.
772  *
773  * If nothing else, note that all this complexity in keeping shadow page tables
774  * in sync with the Guest's page tables is for one reason: for most Guests this
775  * page table dance determines how bad performance will be.  This is why Xen
776  * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
777  * have implemented shadow page table support directly into hardware.
778  *
779  * There is just one file remaining in the Host. */
780 
781 /*H:510 At boot or module load time, init_pagetables() allocates and populates
782  * the Switcher PTE page for each CPU. */
783 __init int init_pagetables(struct page **switcher_page, unsigned int pages)
784 {
785 	unsigned int i;
786 
787 	for_each_possible_cpu(i) {
788 		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
789 		if (!switcher_pte_page(i)) {
790 			free_switcher_pte_pages();
791 			return -ENOMEM;
792 		}
793 		populate_switcher_pte_page(i, switcher_page, pages);
794 	}
795 	return 0;
796 }
797 /*:*/
798 
799 /* Cleaning up simply involves freeing the PTE page for each CPU. */
800 void free_pagetables(void)
801 {
802 	free_switcher_pte_pages();
803 }
804