/*P:800
 * Interrupts (traps) are complicated enough to earn their own file.
 * There are three classes of interrupts:
 *
 * 1) Real hardware interrupts which occur while we're running the Guest,
 * 2) Interrupts for virtual devices attached to the Guest, and
 * 3) Traps and faults from the Guest.
 *
 * Real hardware interrupts must be delivered to the Host, not the Guest.
 * Virtual interrupts must be delivered to the Guest, but we make them look
 * just like real hardware would deliver them.  Traps from the Guest can be set
 * up to go directly back into the Guest, but sometimes the Host wants to see
 * them first, so we also have a way of "reflecting" them into the Guest as if
 * they had been delivered to it directly.
:*/
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include "lg.h"

/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
static unsigned int syscall_vector = IA32_SYSCALL_VECTOR;
module_param(syscall_vector, uint, 0444);

/* The address of the interrupt handler is split into two parts: */
static unsigned long idt_address(u32 lo, u32 hi)
{
	return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

/*
 * The "type" of the interrupt handler is a 4 bit field: we only support a
 * couple of types.
 */
static int idt_type(u32 lo, u32 hi)
{
	return (hi >> 8) & 0xF;
}

/* An IDT entry can't be used unless the "present" bit is set. */
static bool idt_present(u32 lo, u32 hi)
{
	return (hi & 0x8000);
}
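
/*
 * Illustrative sketch only (not part of the driver; the helper name is
 * invented): the inverse of the three decoders above, packing an address,
 * segment selector and gate type back into the two-word i386 IDT entry
 * layout, with the present bit set.
 */
static inline void example_pack_idt_entry(u32 *lo, u32 *hi,
					  unsigned long addr, u16 sel, u8 type)
{
	/* Low word: selector in the top half, low 16 bits of the address. */
	*lo = ((u32)sel << 16) | (addr & 0x0000FFFF);
	/* High word: high 16 address bits, present bit (0x8000) and type. */
	*hi = (addr & 0xFFFF0000) | 0x8000 | ((u32)type << 8);
}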

/*
 * We need a helper to "push" a value onto the Guest's stack, since that's a
 * big part of what delivering an interrupt does.
 */
static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
{
	/* The stack grows downwards: move the stack pointer, then write the value. */
	*gstack -= 4;
	lgwrite(cpu, *gstack, u32, val);
}

/*H:210
 * The push_guest_interrupt_stack() routine saves Guest state on the stack for
 * an interrupt or trap.  The mechanics of delivering traps and interrupts to
 * the Guest are the same, except some traps have an "error code" which gets
 * pushed onto the stack as well: the caller tells us if this is one.
 *
 * We set up the stack just like the CPU does for a real interrupt, so it's
 * identical for the Guest (and the standard "iret" instruction will undo
 * it).
 */
static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
{
	unsigned long gstack, origstack;
	u32 eflags, ss, irq_enable;
	unsigned long virtstack;

	/*
	 * There are two cases for interrupts: one where the Guest is already
	 * in the kernel, and a more complex one where the Guest is in
	 * userspace.  We check the privilege level to find out.
	 */
	if ((cpu->regs->ss&0x3) != GUEST_PL) {
		/*
		 * The Guest told us their kernel stack with the SET_STACK
		 * hypercall: both the virtual address and the segment.
		 */
		virtstack = cpu->esp1;
		ss = cpu->ss1;

		origstack = gstack = guest_pa(cpu, virtstack);
		/*
		 * We push the old stack segment and pointer onto the new
		 * stack: when the Guest does an "iret" back from the interrupt
		 * handler the CPU will notice they're dropping privilege
		 * levels and expect these here.
		 */
		push_guest_stack(cpu, &gstack, cpu->regs->ss);
		push_guest_stack(cpu, &gstack, cpu->regs->esp);
	} else {
		/* We're staying on the same Guest (kernel) stack. */
		virtstack = cpu->regs->esp;
		ss = cpu->regs->ss;

		origstack = gstack = guest_pa(cpu, virtstack);
	}

	/*
	 * Remember that we never let the Guest actually disable interrupts, so
	 * the "Interrupt Flag" bit is always set.  We copy that bit from the
	 * Guest's "irq_enabled" field into the eflags word: we saw the Guest
	 * copy it back in "lguest_iret".
	 */
	eflags = cpu->regs->eflags;
	if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
	    && !(irq_enable & X86_EFLAGS_IF))
		eflags &= ~X86_EFLAGS_IF;

	/*
	 * An interrupt is expected to push three things on the stack: the old
	 * "eflags" word, the old code segment, and the old instruction
	 * pointer.
	 */
	push_guest_stack(cpu, &gstack, eflags);
	push_guest_stack(cpu, &gstack, cpu->regs->cs);
	push_guest_stack(cpu, &gstack, cpu->regs->eip);

	/* For the seven traps which supply an error code, we push that, too. */
	if (has_err)
		push_guest_stack(cpu, &gstack, cpu->regs->errcode);

	/* Adjust the stack pointer and stack segment. */
	cpu->regs->ss = ss;
	cpu->regs->esp = virtstack + (gstack - origstack);
}
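
/*
 * For reference only (this struct is illustrative and used nowhere): the
 * frame push_guest_interrupt_stack() leaves on the Guest's stack, lowest
 * address first, exactly as the Guest's "iret" expects to consume it.
 */
struct example_guest_irq_frame {
	u32 errcode;	/* only present when has_err was true */
	u32 eip;
	u32 cs;
	u32 eflags;
	u32 esp;	/* only present if we switched from userspace... */
	u32 ss;		/* ...along with the old stack segment */
};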

/*
 * This actually makes the Guest start executing the given interrupt/trap
 * handler.
 *
 * "lo" and "hi" are the two parts of the Interrupt Descriptor Table entry for
 * this interrupt or trap.  It's split into two parts for traditional reasons:
 * gcc on i386 used to be frightened by 64 bit numbers.
 */
static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
{
	/* If we're already in the kernel, we don't change stacks. */
	if ((cpu->regs->ss&0x3) != GUEST_PL)
		cpu->regs->ss = cpu->esp1;

	/*
	 * Set the code segment and the address to execute.
	 */
	cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
	cpu->regs->eip = idt_address(lo, hi);

	/*
	 * Trapping always clears these flags:
	 * TF: Trap flag
	 * VM: Virtual 8086 mode
	 * RF: Resume
	 * NT: Nested task.
	 */
	cpu->regs->eflags &=
		~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);

	/*
	 * There are two kinds of interrupt handlers: 0xE is an "interrupt
	 * gate" which expects interrupts to be disabled on entry.
	 */
	if (idt_type(lo, hi) == 0xE)
		if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Disabling interrupts");
}

/* This restores the eflags word which was pushed on the stack by a trap */
static void restore_eflags(struct lg_cpu *cpu)
{
	/* This is the physical address of the stack. */
	unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);

	/*
	 * Stack looks like this:
	 * Address	Contents
	 * esp		EIP
	 * esp + 4	CS
	 * esp + 8	EFLAGS
	 */
	cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
	cpu->regs->eflags &=
		~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
}

/*H:205
 * Virtual Interrupts.
 *
 * interrupt_pending() returns the first pending interrupt which isn't blocked
 * by the Guest.  It is called before every entry to the Guest, and just before
 * we go to sleep when the Guest has halted itself.
 */
unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!cpu->lg->lguest_data)
		return LGUEST_IRQS;

	/*
	 * Take our "irqs_pending" array and remove any interrupts the Guest
	 * wants blocked: the result ends up in "blk".
	 */
	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return LGUEST_IRQS;
	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

	/* Find the first interrupt. */
	irq = find_first_bit(blk, LGUEST_IRQS);
	*more = find_next_bit(blk, LGUEST_IRQS, irq+1);

	return irq;
}

/*
 * This actually diverts the Guest to running an interrupt handler, once an
 * interrupt has been identified by interrupt_pending().
 */
void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
{
	struct desc_struct *idt;

	BUG_ON(irq >= LGUEST_IRQS);

	/* If they're halted, interrupts restart them. */
	if (cpu->halted) {
		/* Re-enable interrupts. */
		if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Re-enabling interrupts");
		cpu->halted = 0;
	} else {
		/* Otherwise we check if they have interrupts disabled. */
		u32 irq_enabled;
		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
			irq_enabled = 0;
		if (!irq_enabled) {
			/* Make sure they know an IRQ is pending. */
			put_user(X86_EFLAGS_IF,
				 &cpu->lg->lguest_data->irq_pending);
			return;
		}
	}

	/*
	 * Look at the IDT entry the Guest gave us for this interrupt.  The
	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
	 * over them.
	 */
	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
	/* If they don't have a handler (yet?), we just ignore it */
	if (idt_present(idt->a, idt->b)) {
		/* OK, mark it no longer pending and deliver it. */
		clear_bit(irq, cpu->irqs_pending);

		/*
		 * They may be about to iret, where they asked us never to
		 * deliver interrupts.  In this case, we can emulate that iret
		 * then immediately deliver the interrupt.  This is basically
		 * a noop: the iret would pop the interrupt frame and restore
		 * eflags, and then we'd set it up again.  So just restore the
		 * eflags word and jump straight to the handler in this case.
		 *
		 * Denys Vlasenko points out that this isn't quite right: if
		 * the iret was returning to userspace, then that interrupt
		 * would reset the stack pointer (which the Guest told us
		 * about via LHCALL_SET_STACK).  But unless the Guest is being
		 * *really* weird, that will be the same as the current stack
		 * anyway.
		 */
		if (cpu->regs->eip == cpu->lg->noirq_iret) {
			restore_eflags(cpu);
		} else {
			/*
			 * push_guest_interrupt_stack() takes a flag to say
			 * whether this interrupt pushes an error code onto
			 * the stack as well: virtual interrupts never do.
			 */
			push_guest_interrupt_stack(cpu, false);
		}
		/* Actually make Guest cpu jump to handler. */
		guest_run_interrupt(cpu, idt->a, idt->b);
	}

	/*
	 * Every time we deliver an interrupt, we update the timestamp in the
	 * Guest's lguest_data struct.  It would be better for the Guest if we
	 * did this more often, but it can actually be quite slow: doing it
	 * here is a compromise which means at least it gets updated every
	 * timer interrupt.
	 */
	write_timestamp(cpu);

	/*
	 * If there are no other interrupts we want to deliver, clear
	 * the pending flag.
	 */
	if (!more)
		put_user(0, &cpu->lg->lguest_data->irq_pending);
}
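
/*
 * Illustrative sketch only: roughly how the run_guest() loop in core.c is
 * expected to pair interrupt_pending() with try_deliver_interrupt().  The
 * wrapper name is invented for the example.
 */
static void __maybe_unused example_deliver_pending(struct lg_cpu *cpu)
{
	bool more;
	unsigned int irq = interrupt_pending(cpu, &more);

	/* interrupt_pending() returns LGUEST_IRQS when there's nothing to do. */
	if (irq < LGUEST_IRQS)
		try_deliver_interrupt(cpu, irq, more);
}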

/* And this is the routine when we want to set an interrupt for the Guest. */
void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
{
	/*
	 * Next time the Guest runs, the core code will see if it can deliver
	 * this interrupt.
	 */
	set_bit(irq, cpu->irqs_pending);

	/*
	 * Make sure it sees it; it might be asleep (eg. halted), or running
	 * the Guest right now, in which case kick_process() will knock it out.
	 */
	if (!wake_up_process(cpu->tsk))
		kick_process(cpu->tsk);
}
/*:*/

/*
 * Linux uses trap 128 for system calls.  Plan9 uses 64, and Ron Minnich sent
 * me a patch, so we support that too.  It'd be a big step for lguest if half
 * the Plan 9 user base were to start using it.
 *
 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
 * userbase.  Oh well.
 */
static bool could_be_syscall(unsigned int num)
{
	/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
	return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
}

/* The syscall vector it wants must be unused by Host. */
bool check_syscall_vector(struct lguest *lg)
{
	u32 vector;

	if (get_user(vector, &lg->lguest_data->syscall_vec))
		return false;

	return could_be_syscall(vector);
}

int init_interrupts(void)
{
	/* If they want some strange system call vector, reserve it now */
	if (syscall_vector != IA32_SYSCALL_VECTOR) {
		if (test_bit(syscall_vector, used_vectors) ||
		    vector_used_by_percpu_irq(syscall_vector)) {
			printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
				 syscall_vector);
			return -EBUSY;
		}
		set_bit(syscall_vector, used_vectors);
	}

	return 0;
}

void free_interrupts(void)
{
	if (syscall_vector != IA32_SYSCALL_VECTOR)
		clear_bit(syscall_vector, used_vectors);
}

/*H:220
 * Now we've got the routines to deliver interrupts, delivering traps like
 * page fault is easy.  The only trick is that Intel decided that some traps
 * should have error codes:
 */
static bool has_err(unsigned int trap)
{
	return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}

/* deliver_trap() returns true if it could deliver the trap. */
bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
{
	/*
	 * Trap numbers are always 8 bit, but we set an impossible trap number
	 * for traps inside the Switcher, so check that here.
	 */
	if (num >= ARRAY_SIZE(cpu->arch.idt))
		return false;

	/*
	 * Early on the Guest hasn't set the IDT entries (or maybe it put a
	 * bogus one in): if we fail here, the Guest will be killed.
	 */
	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
		return false;
	push_guest_interrupt_stack(cpu, has_err(num));
	guest_run_interrupt(cpu, cpu->arch.idt[num].a,
			    cpu->arch.idt[num].b);
	return true;
}

/*H:250
 * Here's the hard part: returning to the Host every time a trap happens
 * and then calling deliver_trap() and re-entering the Guest is slow.
 * Particularly because Guest userspace system calls are traps (usually trap
 * 128).
 *
 * So we'd like to set up the IDT to tell the CPU to deliver traps directly
 * into the Guest.  This is possible, but the complexities cause the size of
 * this file to double!  However, 150 lines of code is worth writing for taking
 * system calls down from 1750ns to 270ns.  Plus, if lguest didn't do it, all
 * the other hypervisors would beat it up at lunchtime.
 *
 * This routine indicates if a particular trap number could be delivered
 * directly.
 */
static bool direct_trap(unsigned int num)
{
	/*
	 * Hardware interrupts don't go to the Guest at all (except system
	 * call).
	 */
	if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
		return false;

	/*
	 * The Host needs to see page faults (for shadow paging and to save the
	 * fault address), general protection faults (in/out emulation) and
	 * device not available (TS handling) and of course, the hypercall trap.
	 */
	return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
}
/*:*/

/*M:005
 * The Guest has the ability to turn its interrupt gates into trap gates,
 * if it is careful.  The Host will let trap gates go directly to the
 * Guest, but the Guest needs the interrupts atomically disabled for an
 * interrupt gate.  The Host could provide a mechanism to register more
 * "no-interrupt" regions, and the Guest could point the trap gate at
 * instructions within that region, where it can safely disable interrupts.
 */

/*M:006
 * The Guests do not use the sysenter (fast system call) instruction,
 * because it's hardcoded to enter privilege level 0 and so can't go direct.
 * It's about twice as fast as the older "int 0x80" system call, so it might
 * still be worthwhile to handle it in the Switcher and lcall down to the
 * Guest.  The sysenter semantics are hairy tho: search for that keyword in
 * entry.S
:*/

/*H:260
 * When we make traps go directly into the Guest, we need to make sure
 * the kernel stack is valid (ie. mapped in the page tables).  Otherwise, the
 * CPU trying to deliver the trap will fault while trying to push the interrupt
 * words on the stack: this is called a double fault, and it forces us to kill
 * the Guest.
 *
 * Which is deeply unfair, because (literally!) it wasn't the Guests' fault.
 */
void pin_stack_pages(struct lg_cpu *cpu)
{
	unsigned int i;

	/*
	 * Depending on the CONFIG_4KSTACKS option, the Guest can have one or
	 * two pages of stack space.
	 */
	for (i = 0; i < cpu->lg->stack_pages; i++)
		/*
		 * The stack grows *downwards*, so the address we're given is
		 * the start of the page above the kernel stack.  Subtract one
		 * to get back onto the first stack page, and keep subtracting
		 * to get to the rest of the stack pages.
		 */
		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
}
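
/*
 * Worked example (the address is hypothetical, 4k pages assumed): if the
 * Guest set its kernel stack with esp1 == 0xc0402000 and stack_pages == 2,
 * the loop above pins the pages containing 0xc0401fff and 0xc0400fff, i.e.
 * the two pages immediately below the address the Guest gave us.
 */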

/*
 * Direct traps also mean that we need to know whenever the Guest wants to use
 * a different kernel stack, so we can change the guest TSS to use that
 * stack.  The TSS entries expect a virtual address, so unlike most addresses
 * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
 * physical.
 *
 * In Linux each process has its own kernel stack, so this happens a lot: we
 * change stacks on each context switch.
 */
void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
{
	/*
	 * You're not allowed a stack segment with privilege level 0: bad Guest!
	 */
	if ((seg & 0x3) != GUEST_PL)
		kill_guest(cpu, "bad stack segment %i", seg);
	/* We only expect one or two stack pages. */
	if (pages > 2)
		kill_guest(cpu, "bad stack pages %u", pages);
	/* Save where the stack is, and how many pages */
	cpu->ss1 = seg;
	cpu->esp1 = esp;
	cpu->lg->stack_pages = pages;
	/* Make sure the new stack pages are mapped */
	pin_stack_pages(cpu);
}

/*
 * All this reference to mapping stacks leads us neatly into the other complex
 * part of the Host: page table handling.
 */

/*H:235
 * This is the routine which actually checks the Guest's IDT entry and
 * transfers it into the entry in "struct lguest":
 */
static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
		     unsigned int num, u32 lo, u32 hi)
{
	u8 type = idt_type(lo, hi);

	/* We zero-out a not-present entry */
	if (!idt_present(lo, hi)) {
		trap->a = trap->b = 0;
		return;
	}

	/* We only support interrupt and trap gates. */
	if (type != 0xE && type != 0xF)
		kill_guest(cpu, "bad IDT type %i", type);

	/*
	 * We only copy the handler address, present bit, privilege level and
	 * type.  The privilege level controls where the trap can be triggered
	 * manually with an "int" instruction.  This is usually GUEST_PL,
	 * except for system calls which userspace can use.
	 */
	trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
	trap->b = (hi&0xFFFFEF00);
}
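
/*
 * Worked example (the handler address is hypothetical): say the Guest loads
 * a present, DPL-3 trap gate (type 0xF) for its system call vector pointing
 * at 0xc01053a0, so hi == 0xc010ef00 (address high half, present, DPL 3,
 * type 0xF).  After set_trap():
 *
 *	trap->a == ((__KERNEL_CS|GUEST_PL) << 16) | 0x53a0
 *	trap->b == (0xc010ef00 & 0xFFFFEF00) == 0xc010ef00
 *
 * i.e. the handler address, type, privilege level and present bit survive,
 * but the code segment becomes the one the Guest kernel actually runs with
 * under lguest.
 */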

/*H:230
 * While we're here, dealing with delivering traps and interrupts to the
 * Guest, we might as well complete the picture: how the Guest tells us where
 * it wants them to go.  This would be simple, except making traps fast
 * requires some tricks.
 *
 * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
 * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here.
 */
void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
{
	/*
	 * The Guest never handles: NMI, doublefault, spurious interrupt or
	 * hypercall.  We ignore it when the Guest tries to set them.
	 */
	if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
		return;

	/*
	 * Mark the IDT as changed: next time the Guest runs we'll know we have
	 * to copy this again.
	 */
	cpu->changed |= CHANGED_IDT;

	/* Check that the Guest doesn't try to step outside the bounds. */
	if (num >= ARRAY_SIZE(cpu->arch.idt))
		kill_guest(cpu, "Setting idt entry %u", num);
	else
		set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
}

/*
 * The default entry for each interrupt points into the Switcher routines which
 * simply return to the Host.  The run_guest() loop will then call
 * deliver_trap() to bounce it back into the Guest.
 */
static void default_idt_entry(struct desc_struct *idt,
			      int trap,
			      const unsigned long handler,
			      const struct desc_struct *base)
{
	/* A present interrupt gate. */
	u32 flags = 0x8e00;

	/*
	 * Set the privilege level on the entry for the hypercall: this allows
	 * the Guest to use the "int" instruction to trigger it.
	 */
	if (trap == LGUEST_TRAP_ENTRY)
		flags |= (GUEST_PL << 13);
	else if (base)
		/*
		 * Copy privilege level from what Guest asked for.  This allows
		 * debug (int 3) traps from Guest userspace, for example.
		 */
		flags |= (base->b & 0x6000);

	/* Now pack it into the IDT entry in its weird format. */
	idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
	idt->b = (handler&0xFFFF0000) | flags;
}
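
/*
 * Worked example: for the hypercall trap the flags above become
 * 0x8e00 | (GUEST_PL << 13).  With GUEST_PL == 1 (the Guest kernel's
 * privilege level under lguest) that is 0xae00: a present, DPL-1, 32-bit
 * interrupt gate, so the Guest kernel can reach it with "int" but Guest
 * userspace cannot.
 */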

/* When the Guest first starts, we put default entries into the IDT. */
void setup_default_idt_entries(struct lguest_ro_state *state,
			       const unsigned long *def)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
		default_idt_entry(&state->guest_idt[i], i, def[i], NULL);
}

/*H:240
 * We don't use the IDT entries in the "struct lguest" directly, instead
 * we copy them into the IDT which we've set up for Guests on this CPU, just
 * before we run the Guest.  This routine does that copy.
 */
void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
		const unsigned long *def)
{
	unsigned int i;

	/*
	 * We can simply copy the direct traps, otherwise we use the default
	 * ones in the Switcher: they will return to the Host.
	 */
	for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
		const struct desc_struct *gidt = &cpu->arch.idt[i];

		/* If no Guest can ever override this trap, leave it alone. */
		if (!direct_trap(i))
			continue;

		/*
		 * Only trap gates (type 15) can go direct to the Guest.
		 * Interrupt gates (type 14) disable interrupts as they are
		 * entered, which we never let the Guest do.  Not present
		 * entries (type 0x0) also can't go direct, of course.
		 *
		 * If it can't go direct, we still need to copy the priv. level:
		 * they might want to give userspace access to a software
		 * interrupt.
		 */
		if (idt_type(gidt->a, gidt->b) == 0xF)
			idt[i] = *gidt;
		else
			default_idt_entry(&idt[i], i, def[i], gidt);
	}
}

/*H:200
 * The Guest Clock.
 *
 * There are two sources of virtual interrupts.  We saw one in lguest_user.c:
 * the Launcher sending interrupts for virtual devices.  The other is the Guest
 * timer interrupt.
 *
 * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long until
 * the next timer interrupt (in nanoseconds).  We use the high-resolution timer
 * infrastructure to set a callback at that time.
 *
 * 0 means "turn off the clock".
 */
void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
{
	ktime_t expires;

	if (unlikely(delta == 0)) {
		/* Clock event device is shutting down. */
		hrtimer_cancel(&cpu->hrt);
		return;
	}

	/*
	 * We use wallclock time here, so the Guest might not be running for
	 * all the time between now and the timer interrupt it asked for.  This
	 * is almost always the right thing to do.
	 */
	expires = ktime_add_ns(ktime_get_real(), delta);
	hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
}
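
/*
 * Worked example: delta == 1000000 asks for the next timer interrupt one
 * millisecond of wall-clock time from now; delta == 0 (handled above)
 * simply cancels any pending timer.
 */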

/* This is the function called when the Guest's timer expires. */
static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
{
	struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);

	/* Remember the first interrupt is the timer interrupt. */
	set_interrupt(cpu, 0);
	return HRTIMER_NORESTART;
}

/* This sets up the timer for this Guest. */
void init_clockdev(struct lg_cpu *cpu)
{
	hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	cpu->hrt.function = clockdev_fn;
}