/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
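/*
 * Note: the RESTORE_*_SPEC_CTRL macros above only emit the alternatives-patched
 * "jmp 800f"/"jmp 900f" plus the 801:/901: continuation labels.  The actual
 * compare-and-WRMSR sequences are emitted once per function, after the RET,
 * via RESTORE_*_SPEC_CTRL_BODY, and jump back with "jmp 801b"/"jmp 901b",
 * keeping the MSR write off the fast path.
 */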
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1
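	/*
	 * Stack layout at this point, top of stack first: @svm, the physical
	 * address of this CPU's host save area, @spec_ctrl_intercepted, and
	 * then the callee-saved registers pushed on entry.  @svm is popped
	 * immediately after VMRUN, the save area address is popped for the
	 * host VMLOAD, and @spec_ctrl_intercepted is read in place off the
	 * stack by RESTORE_HOST_SPEC_CTRL_BODY.
	 */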
.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Clobbers EFLAGS.ZF */
	VM_CLEAR_CPU_BUFFERS

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8, VCPU_R8 (%_ASM_AX)
	mov %r9, VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d, %r8d
	xor %r9d, %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY
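	/*
	 * The VMLOAD/VMRUN/VMSAVE instructions above can fault, e.g. if SVM
	 * has already been disabled for an emergency reboot.  The exception
	 * table entries below route such faults to the handlers here: if
	 * kvm_rebooting is set, the fault is ignored and execution resumes
	 * after the faulting instruction, otherwise hit ud2 to BUG.
	 */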
10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Clobbers EFLAGS.ZF */
	VM_CLEAR_CPU_BUFFERS

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/* "Pop" @spec_ctrl_intercepted. */
	pop %_ASM_BX

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)