/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
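/*
 * Note: xen_force_evtchn_callback is an ordinary C call, so only the
 * registers a C function is allowed to clobber (%eax, %ecx and %edx
 * in the 32-bit ABI) need to be preserved around it; the rest are
 * callee-saved.
 */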
check_events:
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
	ret

/*
 * We can't use sysexit directly, because we're not running in ring0.
 * But we can easily fake it up using iret.  Assuming xen_sysexit is
 * jumped to with a standard stack frame, we can just strip it back to
 * a standard iret frame and use iret.
 */
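/*
 * Roughly, the transformation is (higher addresses on the left;
 * ss/esp are present because we're returning to usermode):
 *
 *	ss esp eflags cs eip ...rest of pt_regs...	<- %esp on entry
 *	ss esp eflags(IF set) cs eip			<- %esp after the lea
 *
 * i.e. everything below PT_EIP is discarded, leaving a plain iret
 * frame with IF forced on, which is then handed to xen_iret.
 */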
ENTRY(xen_sysexit)
	movl PT_EAX(%esp), %eax			/* Shouldn't be necessary? */
	orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
	lea PT_EIP(%esp), %esp

	jmp xen_iret
ENDPROC(xen_sysexit)

/*
 * This is run where a normal iret would be run, with the same stack setup:
 *	8: eflags
 *	4: cs
 *	esp-> 0: eip
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, they will not be tested again before returning to
 * usermode. This means that a process can end up with pending work,
 * which will be unprocessed until the process enters and leaves the
 * kernel again, which could be an unbounded amount of time.  This
 * means that a pending signal or reschedule event could be
 * indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */
ENTRY(xen_iret)
	/* test eflags for special cases */
	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
	jnz hyper_iret

	push %eax
	ESP_OFFSET=4	# bytes pushed onto stack

	/*
	 * Store vcpu_info pointer for easy access.  Do it this way to
	 * avoid having to reload %fs
	 */
#ifdef CONFIG_SMP
	GET_THREAD_INFO(%eax)
	movl %ss:TI_cpu(%eax), %eax
	movl %ss:__per_cpu_offset(,%eax,4), %eax
	mov %ss:xen_vcpu(%eax), %eax
#else
	movl %ss:xen_vcpu, %eax
#endif
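
	/*
	 * The walk above is thread_info -> cpu number -> per-cpu
	 * offset -> this CPU's xen_vcpu pointer.  The %ss: overrides
	 * are presumably needed because the user's data segment
	 * registers have already been restored on this exit path;
	 * %ss still holds the flat kernel segment until the final
	 * iret, so it is the one segment left that can reach kernel
	 * data without a reload.
	 */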

	/* check IF state we're restoring */
	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
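	/*
	 * eflags sits at 8(%esp) in the original iret frame (see the
	 * layout above), so ESP_OFFSET higher once %eax is pushed.  IF
	 * is bit 9, so testing the second byte of eflags against
	 * X86_EFLAGS_IF>>8 leaves ZF set exactly when interrupts are
	 * to stay disabled, which the setz below turns into a 1 in the
	 * event mask.
	 */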

	/*
	 * Maybe enable events.  Once this happens we could get a
	 * recursive event, so the critical region starts immediately
	 * afterwards.  However, if that happens we don't end up
	 * resuming the code, so we don't have to be worried about
	 * being preempted to another CPU.
	 */
	setz %ss:XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

	/* check for unmasked and pending */
	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)

	/*
	 * If there's something pending, mask events again so we can
	 * jump back into xen_hypervisor_callback. Otherwise do not
	 * touch XEN_vcpu_info_mask.
	 */
	jne 1f
	movb $1, %ss:XEN_vcpu_info_mask(%eax)

1:	popl %eax

	/*
	 * From this point on the registers are restored and the stack
	 * updated, so we don't need to worry about it if we're
	 * preempted
	 */
iret_restore_end:

	/*
	 * Jump to hypervisor_callback after fixing up the stack.
	 * Events are masked, so jumping out of the critical region is
	 * OK.
	 */
	je xen_hypervisor_callback

1:	iret
xen_iret_end_crit:
.section __ex_table, "a"
	.align 4
	.long 1b, iret_exc
.previous

hyper_iret:
	/* put this out of line since it's very rarely used */
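	/*
	 * Each hypercall has a 32-byte slot in hypercall_page, so the
	 * iret hypercall is entered at hypercall_page +
	 * __HYPERVISOR_iret * 32.  It is a jmp rather than a call
	 * because the iret hypercall does not return: Xen consumes the
	 * iret frame still sitting on the stack.
	 */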
	jmp hypercall_page + __HYPERVISOR_iret * 32

	.globl xen_iret_start_crit, xen_iret_end_crit

/*
 * This is called by xen_hypervisor_callback in entry.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
 * %eax so we can do a more refined determination of what to do.
 *
 * The stack format at this point is:
 *	----------------
 *	 ss		: (ss/esp may be present if we came from usermode)
 *	 esp		:
 *	 eflags		}  outer exception info
 *	 cs		}
 *	 eip		}
 *	---------------- <- edi (copy dest)
 *	 eax		:  outer eax if it hasn't been restored
 *	----------------
 *	 eflags		}  nested exception info
 *	 cs		}   (no ss/esp because we're nested
 *	 eip		}    from the same ring)
 *	 orig_eax	}<- esi (copy src)
 *	 - - - - - - - -
 *	 fs		}
 *	 es		}
 *	 ds		}  SAVE_ALL state
 *	 eax		}
 *	  :		:
 *	 ebx		}<- esp
 *	----------------
 *
 * In order to deliver the nested exception properly, we need to shift
 * everything from the return addr up to the error code so it sits
 * just under the outer exception info.  This means that when we
 * handle the exception, we do it in the context of the outer
 * exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet
 * (ie, it's still on stack), we need to insert its value into the
 * SAVE_ALL state before going on, since it's usermode state which we
 * eventually need to restore.
 */
ENTRY(xen_iret_crit_fixup)
	/*
	 * Paranoia: Make sure we're really coming from kernel space.
	 * One could imagine a case where userspace jumps into the
	 * critical range address, but just before the CPU delivers a
	 * GP, it decides to deliver an interrupt instead.  Unlikely?
	 * Definitely.  Easy to avoid?  Yes.  The Intel documents
	 * explicitly say that the reported EIP for a bad jump is the
	 * jump instruction itself, not the destination, but some
	 * virtual environments get this wrong.
	 */
	movl PT_CS(%esp), %ecx
	andl $SEGMENT_RPL_MASK, %ecx
	cmpl $USER_RPL, %ecx
	je 2f

	lea PT_ORIG_EAX(%esp), %esi
	lea PT_EFLAGS(%esp), %edi

	/*
	 * If eip is before iret_restore_end then the stack
	 * hasn't been restored yet.
	 */
	cmp $iret_restore_end, %eax
	jae 1f

	movl 0+4(%edi), %eax		/* copy EAX (just above top of frame) */
	movl %eax, PT_EAX(%esp)

	lea ESP_OFFSET(%edi), %edi	/* move dest up over saved regs */

	/* set up the copy */
1:	std
	mov $PT_EIP / 4, %ecx		/* saved regs up to orig_eax */
	rep movsl
	cld

	lea 4(%edi), %esp		/* point esp to new frame */
2:	jmp xen_do_upcall
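
	/*
	 * Note on the copy above: with the direction flag set, rep
	 * movsl slides the PT_EIP/4 words from orig_eax down through
	 * ebx a few slots up the stack, overwriting the nested
	 * eip/cs/eflags so that the SAVE_ALL state ends up sitting
	 * directly under the outer exception info.  The loop leaves
	 * %edi one word below the last (lowest) word written, hence
	 * the lea 4(%edi), %esp to point %esp at the relocated pt_regs
	 * before jumping to xen_do_upcall.
	 */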