// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/switch.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

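/*
 * Per-CPU parameters for initialising EL2, filled in by the host before it
 * issues the CPU init hypercall (handled in the hyp assembly entry code).
 */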
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

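/*
 * Host HVC handlers. Each handler unmarshals its arguments from the host
 * context's general-purpose registers (DECLARE_REG is assumed to expand to a
 * cpu_reg() read of the given register, per the SMCCC calling convention) and
 * writes any return value back into x1; x0 is reserved for the SMCCC status
 * code set by the dispatcher below.
 */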
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

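/*
 * Set SCTLR_EL2.DSSBS so that PSTATE.SSBS defaults to 1 on exceptions to EL2,
 * i.e. speculative store bypass stays permitted, mirroring the host's choice
 * when the SSB mitigation is not in use.
 */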
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

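/*
 * vGICv3 hypercalls: the ICH_* GIC system registers are only accessible at
 * EL2, so in nVHE mode the host proxies its VMCR and APR accesses, and LR
 * initialisation, through the hypervisor.
 */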
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

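/*
 * Protected KVM (pKVM) hypercalls, only used by the host while it installs
 * the hypervisor at boot: setting up hyp's own stage-1 mappings, selecting
 * vectors, and handing over ownership of hyp memory.
 */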
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * will tail-call into __pkvm_init_finalise(), which will have to deal
	 * with the host context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, start, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, phys, host_ctxt, 3);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);

	cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
}

static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, end, host_ctxt, 2);

	cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
}

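/*
 * Table of host hypercall handlers, indexed by SMCCC function ID relative to
 * KVM_HOST_SMCCC_ID(0). HANDLE_FUNC places each handler at the index of its
 * __KVM_HOST_SMCCC_FUNC_* enumerator, so the order of the entries below is
 * not significant.
 */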
typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__pkvm_create_mappings),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_prot_finalize),
	HANDLE_FUNC(__pkvm_mark_hyp),
};

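/*
 * Dispatch a host HVC: the SMCCC function ID arrives in x0 and is converted
 * to an index into host_hcall[]; out-of-range or unimplemented IDs get
 * SMCCC_RET_NOT_SUPPORTED. x0 is preset to SMCCC_RET_SUCCESS before the
 * handler runs, and any result the handler produces goes in x1. For
 * reference, the host side issues these calls through kvm_call_hyp_nvhe(),
 * e.g.:
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
 *
 * which marshals the function ID into x0 and the arguments into x1 onwards.
 */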
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	hcall_t hfn;

	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

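/*
 * Host SMCs are first offered to the hypervisor's PSCI relay; anything it
 * does not handle is forwarded on to EL3 unmodified.
 */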
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	bool handled;

	handled = kvm_host_psci_handler(host_ctxt);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

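/*
 * C entry point for all host exceptions taken to EL2, reached from the
 * hypervisor's exception vectors with the host context already saved.
 */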
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
	case ESR_ELx_EC_SVE:
		/* Re-enable SVE for the host and reset ZCR_EL2 to the max VL. */
		sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
		isb();
		sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		hyp_panic();
	}
}