/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)	(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
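
/*
 * Illustration (comment only, not used by the code below): an exit code,
 * e.g. as returned by __kvm_vcpu_run(), packs a pending-SError flag into
 * bit 31 on top of the exception class, so a trap exit with a pending
 * SError decodes as:
 *
 *	u32 exit = ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT);
 *
 *	ARM_SERROR_PENDING(exit);	// 1: an SError is pending
 *	ARM_EXCEPTION_CODE(exit);	// ARM_EXCEPTION_TRAP
 *	ARM_EXCEPTION_IS_TRAP(exit);	// true
 */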

#define ARM_EXCEPTION_IRQ		0
#define ARM_EXCEPTION_EL1_SERROR	1
#define ARM_EXCEPTION_TRAP		2
#define ARM_EXCEPTION_IL		3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE		HVC_STUB_ERR
#define ARM_EXCEPTION_HYP_REQ		5

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	},	\
	{ARM_EXCEPTION_HYP_REQ,		"HYP_REQ"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name)	KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init	0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
	/* Hypercalls available only prior to pKVM finalisation */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
	__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
	__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
	__KVM_HOST_SMCCC_FUNC___pkvm_iommu_init,
	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls available after pKVM finalisation */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_map_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unmap_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_relax_perms,
	__KVM_HOST_SMCCC_FUNC___pkvm_wrprotect,
	__KVM_HOST_SMCCC_FUNC___pkvm_dirty_log,
	__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_start_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_finalize_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_sync_state,
	__KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
	__KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_refill,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaimable,
	__KVM_HOST_SMCCC_FUNC___pkvm_hyp_alloc_mgt_reclaim,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_alloc_domain,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_free_domain,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_attach_dev,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_detach_dev,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_map_pages,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_unmap_pages,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_hvc_pd,
	__KVM_HOST_SMCCC_FUNC___pkvm_stage2_snapshot,

	/*
	 * Start of the dynamically registered hypercalls. Start a bit
	 * further up to leave headroom for new static hypercalls, so
	 * that IDs in the dynamic range stay stable for modules.
	 */
	__KVM_HOST_SMCCC_FUNC___dynamic_hcalls = 128,
};

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
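
/*
 * Sketch of intended use from host (EL1) code. kvm_host_data is an
 * existing hyp per-CPU symbol, but the snippet itself is illustrative:
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *	if (!hd)
 *		return -EINVAL;	// hyp per-CPU pages not allocated yet
 */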

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

/*
 * pKVM uses the module_ops struct to expose services to modules but
 * doesn't allow fine-grained definition of the license for each export,
 * and doesn't have a way to check the license of the loaded module.
 * Given that said module may be proprietary, let's seek GPL compliance
 * by preventing the accidental export of GPL symbols to hyp modules via
 * pKVM's module_ops struct.
 */
#ifdef EXPORT_SYMBOL_GPL
#undef EXPORT_SYMBOL_GPL
#endif
#define EXPORT_SYMBOL_GPL(sym) BUILD_BUG()

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

/* Per-CPU parameters the host hands to __kvm_hyp_init when setting up EL2 */
struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long hfgwtr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:			hyp VA of the hyp_stack base.
 * @overflow_stack_base:	hyp VA of the hyp_overflow_stack base.
 * @fp:				hyp FP where the backtrace begins.
 * @pc:				hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
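
/*
 * Sketch of intended use (illustrative; mirrors how host code resolves
 * hyp symbols): pick up a hyp symbol in a form the hypervisor can map,
 * e.g. for the hyp vector table declared below:
 *
 *	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * On VHE this is the kernel address itself; on nVHE it is the linear
 * map alias, which is what gets mapped at EL2.
 */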

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[];

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					  phys_addr_t ipa,
					  int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
					phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
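
/*
 * Sketch of intended use at EL2, assuming the classic stage-1 walk
 * pattern (the surrounding code is illustrative, not a new API):
 *
 *	if (__kvm_at("s1e1r", far))
 *		return false;		// the AT walk itself faulted
 *	par = read_sysreg_par();	// PAR_EL1 holds the result
 *
 * If the AT instruction takes an exception, the extable fixup restores
 * SPSR_EL2/ELR_EL2 and the macro evaluates to -EFAULT.
 */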

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
					      u64 elr_phys, u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

/* Get a pointer to this CPU's host context (kvm_host_data.host_ctxt) */
.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

/* Load the running vCPU pointer out of the host context */
.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/* Read/write the vCPU pointer cached in this CPU's hyp context */
.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Create a struct kvm_exception_table_entry output to a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
 * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */