/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

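/*
 * Byte offsets of each GPR within the register array passed to
 * __vmx_vcpu_run() via @regs, e.g. VCPU_RCX(%_ASM_AX) addresses the saved
 * guest RCX when %_ASM_AX holds the @regs pointer.
 */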
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
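/*
 * Expected C-side declaration (a sketch; the exact types live in vmx.c):
 *	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *			    unsigned int flags);
 */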
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
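	/*
	 * Save RBX; it's callee-saved and is used below to hold @flags (in BL)
	 * and then the return value.
	 */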
	push %_ASM_BX

	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @flags to BL; _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

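	/*
	 * Pass the current RSP, which points at the saved @regs, as the second
	 * argument so vmx_update_host_rsp() can record it as the VMCS host
	 * RSP.  @vmx is still in _ASM_ARG1.
	 */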
	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	testb $VMX_RUN_VMRESUME, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Check EFLAGS.ZF from 'testb' above */
	jz .Lvmlaunch

	/*
	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
	 * So this isn't a typical function and objtool needs to be told to
	 * save the unwind state here and restore it below.
	 */
	UNWIND_HINT_SAVE

/*
 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
 * the 'vmx_vmexit' label below.
 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail

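	/*
	 * If VMRESUME/VMLAUNCH itself faults, e.g. because VMX has been
	 * disabled for an emergency reboot, the exception table entries below
	 * redirect execution to the .Lfixup path.
	 */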
	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)

SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
	UNWIND_HINT_RESTORE

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop           VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP

	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled, and a
	 * single call to retire, before the first unbalanced RET.
	 */

	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
			   X86_FEATURE_RSB_VMEXIT_LITE
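	/*
	 * The stuffing sequence above is patched in via alternatives only when
	 * X86_FEATURE_RSB_VMEXIT or X86_FEATURE_RSB_VMEXIT_LITE is enabled; it
	 * uses %_ASM_CX as its scratch register, which was zeroed above.
	 */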


	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */

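	/*
	 * vmx_spec_ctrl_restore_host() takes (@vmx, @flags); per the @flags
	 * documentation above, it saves the guest's SPEC_CTRL into
	 * vmx->spec_ctrl when VMX_RUN_SAVE_SPEC_CTRL is set and restores the
	 * host's value.
	 */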
	call vmx_spec_ctrl_restore_host

	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

.Lfixup:
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

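	/*
	 * With the frame set up, the stack-passed parameters sit above the
	 * saved RBP and the return address: @field is at 2*WORD_SIZE(%_ASM_BP)
	 * and @fault at 3*WORD_SIZE(%_ASM_BP).
	 */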
	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif
#ifdef CONFIG_X86_64
	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
	push 3*WORD_SIZE(%ebp)
	push 2*WORD_SIZE(%ebp)
#endif

	call vmread_error

#ifndef CONFIG_X86_64
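	/* Undo the two 4-byte parameter pushes from the 32-bit call above. */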
	add $8, %esp
#endif

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	RET
SYM_FUNC_END(vmread_error_trampoline)

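/*
 * Invoked from C with a single argument: the address of the kernel's IRQ or
 * NMI handler.  Build a synthetic interrupt stack frame and invoke the
 * handler via CALL_NOSPEC so that it returns with IRET.
 */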
SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
	/*
	 * Unconditionally create a stack frame; getting the correct RSP on
	 * the stack (for x86-64) would take two instructions anyway, and RBP
	 * can be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS
	push %rbp
#endif
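	/*
	 * EFLAGS and CS, plus the return RIP pushed by CALL_NOSPEC, complete
	 * the frame the handler's IRET expects.
	 */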
	pushf
	push $__KERNEL_CS
	CALL_NOSPEC _ASM_ARG1

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
