/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

	.arch_extension	virt

	.text
	.pushsection	.hyp.text, "ax"

.macro load_vcpu	reg
	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
.endm

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued, since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel), and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
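/*
 * For illustration only (an editor's sketch, not part of the original
 * ABI description; 'fn', 'a', 'b' and 'c' are placeholder names): a
 * host-side call such as kvm_call_hyp(fn, a, b, c) boils down to the
 * following SVC-mode sequence:
 *
 *	ldr	r0, =fn		@ pointer to the HYP function to run
 *	mov	r1, a		@ first argument, seen as r0 by fn
 *	mov	r2, b		@ second argument, seen as r1
 *	mov	r3, c		@ third argument, seen as r2
 *	hvc	#0		@ traps to hyp_hvc below
 *
 * Execution resumes in SVC mode right after the HVC, with fn's return
 * value in r0.
 */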
	.align 5
__kvm_hyp_vector:
	.global __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	.align 5
__kvm_hyp_vector_ic_inv:
	.global __kvm_hyp_vector_ic_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, which we must guarantee to be 8-byte aligned.
	 */
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
	isb

	b	decode_vectors

	.align 5
__kvm_hyp_vector_bp_inv:
	.global __kvm_hyp_vector_bp_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, which we must guarantee to be 8-byte aligned.
	 */
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
	isb

decode_vectors:

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * Yet another silly hack: Use VPIDR as a temp register.
	 * Thumb2 is really a pain, as SP cannot be used with most
	 * of the bitwise instructions. The vect_br macro ensures
	 * things get cleaned up.
	 */
	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
	mov	r0, sp
	and	r0, r0, #7
	sub	sp, sp, r0
	push	{r1, r2}
	mov	r1, r0
	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
#endif

.macro vect_br val, targ
ARM(	eor	sp, sp, #\val	)
ARM(	tst	sp, #7		)
ARM(	eorne	sp, sp, #\val	)

THUMB(	cmp	r1, #\val	)
THUMB(	popeq	{r1, r2}	)

	beq	\targ
.endm

	vect_br	0, hyp_fiq
	vect_br	1, hyp_irq
	vect_br	2, hyp_hvc
	vect_br	3, hyp_dabt
	vect_br	4, hyp_pabt
	vect_br	5, hyp_svc
	vect_br	6, hyp_undef
	vect_br	7, hyp_reset
#endif
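/*
 * Worked example of the decode above (an editor's sketch, not part of
 * the original file), following the ARM path: an IRQ enters the
 * hardened vector at offset 0x18 and falls through the last two slots
 * (the IRQ 'add' and the FIQ 'nop'), so SP becomes original SP + 1.
 * After the invalidation, 'vect_br 1, hyp_irq' XORs SP with 1: the
 * bottom three bits become zero, 'tst sp, #7' sets the Z flag, and we
 * branch to hyp_irq with SP restored to its original 8-byte-aligned
 * value. For a non-matching \val, the conditional second EOR undoes
 * the first, preserving the encoding for the next vect_br.
 */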
.macro invalid_vector label, cause
	.align
\label:	mov	r0, #\cause
	b	__hyp_panic
.endm

	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ

/* Fake an exception return into panic() in SVC mode. */
ENTRY(__hyp_do_panic)
	mrs	lr, cpsr
	bic	lr, lr, #MODE_MASK
	orr	lr, lr, #SVC_MODE
THUMB(	orr	lr, lr, #PSR_T_BIT	)
	msr	spsr_cxsf, lr
	ldr	lr, =panic
	msr	ELR_hyp, lr
	ldr	lr, =kvm_call_hyp
	clrex
	eret
ENDPROC(__hyp_do_panic)

hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest,
	 * or from executing HVC from the host kernel, which means
	 * "do something in Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow a simple
	 * switch to Hyp mode.
	 */
	mrrc	p15, 6, r0, r2, c2	@ Read VTTBR (r2 = high word)
	lsr	r2, r2, #16
	and	r2, r2, #0xff		@ Extract VMID (VTTBR[55:48])
	cmp	r2, #0
	bne	guest_hvc_trap		@ Guest called HVC

	/*
	 * Getting here means the host called HVC: we shift the parameters
	 * and branch to the Hyp function (see the worked example at the
	 * end of this file).
	 */
	pop	{r0, r1, r2}

	/*
	 * Check if we have a kernel function pointer, which is guaranteed
	 * to be a bigger value than the highest hyp stub hypercall number.
	 */
	cmp	r0, #HVC_STUB_HCALL_NR
	bhs	1f

	/*
	 * Not a kernel function, treat it as a stub hypercall.
	 * Compute the physical address for __kvm_handle_stub_hvc
	 * (as the code lives in the idmapped page) and branch there.
	 * We hijack ip (r12) as a tmp register.
	 */
	push	{r1}
	ldr	r1, =kimage_voffset
	ldr	r1, [r1]
	ldr	ip, =__kvm_handle_stub_hvc
	sub	ip, ip, r1
	pop	{r1}

	bx	ip

1:
	/*
	 * Pushing r2 here is just a way of keeping the stack aligned to
	 * 8 bytes on any path that can trigger a HYP exception. Here,
	 * we may well be about to jump into the guest, and the guest
	 * exit would otherwise be badly decoded by our fancy
	 * "decode-exception-without-a-branch" code...
	 */
	push	{r2, lr}

	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{r2, lr}
	eret

guest_hvc_trap:
	movw	r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
	movt	r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
	ldr	r0, [sp]		@ Guest's r0
	teq	r0, r2
	bne	guest_trap
	add	sp, sp, #12		@ Discard the r0-r2 saved on entry
	@ Returns:
	@ r0 = 0
	@ r1 = HSR value (perfectly predictable)
	@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
	mov	r0, #0
	eret

guest_trap:
	load_vcpu r0			@ Load VCPU pointer to r0

#ifdef CONFIG_VFPv3
	@ Check for a VFP access
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_CP_0_13
	beq	__vfp_guest_restore
#endif

	mov	r1, #ARM_EXCEPTION_HVC
	b	__guest_exit

hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu r0			@ Load VCPU pointer to r0
	b	__guest_exit

hyp_dabt:
	/*
	 * A data abort taken in Hyp mode is only survivable if ELR_hyp
	 * points at one of the two instructions flagged by
	 * abort_guest_exit_start/abort_guest_exit_end; anything else
	 * sends us to __hyp_panic.
	 */
	push	{r0, r1}
	mrs	r0, ELR_hyp
	ldr	r1, =abort_guest_exit_start
THUMB(	add	r1, r1, #1)		@ Label addresses have bit 0 set in Thumb
	cmp	r0, r1
	ldrne	r1, =abort_guest_exit_end
THUMB(	addne	r1, r1, #1)
	cmpne	r0, r1
	pop	{r0, r1}
	bne	__hyp_panic

	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
	eret

	.ltorg

	.popsection
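/*
 * Worked example of the hyp_hvc host-call path above (an editor's
 * sketch, not part of the original file): a host-side
 * kvm_call_hyp(__kvm_vcpu_run, vcpu) issues HVC #0 with
 * r0 = &__kvm_vcpu_run and r1 = vcpu. hyp_hvc confirms EC == HSR_EC_HVC
 * and VMID == 0, moves the function pointer from r0 into lr, shifts
 * r1-r3 down into r0-r2, and calls via 'blx lr', so __kvm_vcpu_run sees
 * vcpu in r0. Its return value is left in r0, and the final eret drops
 * back to SVC mode with r0 intact for the caller.
 */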