/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

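/*
 * Note: the XEN_vcpu_info_* symbols used below are asm-offsets into
 * struct vcpu_info (evtchn_upcall_mask, evtchn_upcall_pending and
 * arch.cr2); the "direct" variants assume that structure lives in the
 * percpu xen_vcpu_info.
 */
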
/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status.  If there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in the X86_EFLAGS_IF bit and will
 * tolerate other bits being set in the return value.  We take advantage
 * of this by making sure that X86_EFLAGS_IF has the right value (and
 * other bits in that byte are 0), but other bits in the return value
 * are undefined.  We need to toggle the state of the bit, because Xen
 * and x86 use opposite senses (mask vs enable).
 */
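/*
 * The instruction sequence below exploits %ah being bits 8..15 of %eax:
 * testb sets ZF when the mask byte is zero (events enabled), setz turns
 * that into 0 or 1 in %ah, and doubling %ah leaves 0x200 (X86_EFLAGS_IF,
 * bit 9) set in %eax exactly when events are enabled.
 */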
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and enter
 * the hypervisor to get them delivered if there are any.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
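	/*
	 * The 16-bit compare relies on evtchn_upcall_mask immediately
	 * following evtchn_upcall_pending in struct vcpu_info: the word
	 * read covers the pending byte (low) and the mask byte (high),
	 * and equals 0x0001 only when an event is pending and unmasked.
	 */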
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_restore_fl_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
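/*
 * Assuming the direct stubs above are wired up as callee-save pvops (as
 * elsewhere in the Xen pv code), their callers expect every register to
 * survive, so all registers the C ABI lets xen_force_evtchn_callback
 * clobber (rax, rcx, rdx, rsi, rdi, r8-r11) are saved and restored here.
 */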
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

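/*
 * Page faults under Xen pv deliver the faulting address in
 * vcpu_info.arch.cr2 rather than in %cr2.  xen_read_cr2 loads it via the
 * per-cpu xen_vcpu pointer; xen_read_cr2_direct reads the per-cpu
 * xen_vcpu_info copy directly.
 */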
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct)

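/*
 * Trap stubs: when Xen bounces an exception into the guest it pushes
 * %rcx and %r11 on top of the usual iret-style frame (as it does for the
 * syscall callbacks further down), so each stub discards those two words
 * before jumping to the regular handler entry point.
 */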
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	pop %rcx
	pop %r11
	jmp  \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

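/*
 * Early IDT stubs: one entry per exception vector, each stripping the
 * %rcx/%r11 words pushed by Xen before jumping to the corresponding
 * early_idt_handler_array entry.  Each stub is padded out to
 * XEN_EARLY_IDT_HANDLER_SIZE bytes with 0xcc (int3) so the array can be
 * indexed by vector number.
 */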
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
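/*
 * The extra "flags" word above the hardware frame is consumed by the
 * iret hypercall: xen_iret pushes 0 (no VGCF_* flags), while xen_sysret64
 * below pushes VGCF_in_syscall.
 */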
SYM_CODE_START(xen_iret)
	UNWIND_HINT_EMPTY
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

SYM_CODE_START(xen_sysret64)
	UNWIND_HINT_EMPTY
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

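	/*
	 * Rebuild the frame the iret hypercall expects: per SYSCALL
	 * semantics, %rcx holds the user return %rip and %r11 the user
	 * %rflags.
	 */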
	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen pv doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * under Xen pv would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where it could be corrupted if an #NMI
 * interrupts.  Having swapgs_restore_regs_and_return_to_usermode() push the
 * IRET frame again at the same address would also be pointless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq	$8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
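	/*
	 * With %rcx/%r11 popped, the frame layout is rip, cs, rflags,
	 * rsp, ss, so 1*8(%rsp) is the CS slot and 4*8(%rsp) the SS slot.
	 * The same offsets are used by the compat entries below.
	 */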
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

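/*
 * Without IA32 emulation there is no compat syscall support: drop the
 * %rcx/%r11 words pushed by Xen, fail with -ENOSYS and return straight
 * to the guest via the iret hypercall (the pushq $0 supplies the unused
 * flags word).
 */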
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif	/* CONFIG_IA32_EMULATION */