/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ		0
#define ARM_EXCEPTION_EL1_SERROR	1
#define ARM_EXCEPTION_TRAP		2
#define ARM_EXCEPTION_IL		3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name)	KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init	0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_reclaim_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_shadow_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_shadow,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_driver_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_register,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_pm_notify,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_finalize,
};
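/*
 * Illustrative sketch only, not part of the header proper: the host
 * typically issues these hypercalls through the kvm_call_hyp_nvhe()
 * wrapper from <asm/kvm_host.h>, which turns an enum value above into
 * its SMCCC function ID via KVM_HOST_SMCCC_FUNC(). For example,
 * assuming a page frame number `pfn` being shared with the hypervisor:
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn);
 */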

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
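/*
 * Illustrative sketch only, not part of the header proper: host-side
 * callers should handle the NULL returned before the hyp percpu pages
 * are allocated. Assuming a variable declared with
 * DECLARE_KVM_NVHE_PER_CPU(struct kvm_host_data, kvm_host_data):
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *	if (!hd)
 *		return -ENOENT;
 */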
#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)				\
	({						\
		void *val = (ptr);			\
		if (!is_kernel_in_hyp_mode())		\
			val = lm_alias((ptr));		\
		val;					\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
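/*
 * Illustrative sketch, not part of the header proper: hyp code uses
 * __kvm_at() for address translations that may legitimately fault,
 * e.g. walking a guest's stage-1 tables on an abort (`far` below is a
 * hypothetical local holding FAR_EL2). __kvm_at() returns 0 on success
 * and -EFAULT if the AT instruction itself took an exception, in which
 * case the extable fixup above has already restored SPSR_EL2/ELR_EL2:
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;
 *	par = read_sysreg_par();
 */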

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure that x18 has the hypervisor value (to allow any
 * Shadow-Call-Stack instrumented code to write to it), and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*(x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */