1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020 - Google Inc
4 * Author: Andrew Scull <ascull@google.com>
5 */
6
7 #include <kvm/arm_hypercalls.h>
8
9 #include <hyp/adjust_pc.h>
10 #include <hyp/switch.h>
11
12 #include <asm/pgtable-types.h>
13 #include <asm/kvm_asm.h>
14 #include <asm/kvm_emulate.h>
15 #include <asm/kvm_host.h>
16 #include <asm/kvm_hyp.h>
17 #include <asm/kvm_hypevents.h>
18 #include <asm/kvm_mmu.h>
19
20 #include <nvhe/alloc.h>
21 #include <nvhe/alloc_mgt.h>
22 #include <nvhe/ffa.h>
23 #include <nvhe/iommu.h>
24 #include <nvhe/mem_protect.h>
25 #include <nvhe/modules.h>
26 #include <nvhe/mm.h>
27 #include <nvhe/pkvm.h>
28 #include <nvhe/pviommu-host.h>
29 #include <nvhe/trace.h>
30 #include <nvhe/trap_handler.h>
31
32 #include <linux/irqchip/arm-gic-v3.h>
33 #include <uapi/linux/psci.h>
34
35 #include "../../sys_regs.h"
36
37 DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
38
/*
 * Holds a single request only. In theory we could pack more, but the HVC
 * typically returns on the first failure.
 */
43 DEFINE_PER_CPU(struct kvm_hyp_req, host_hyp_reqs);
44
/* Serialize a pending hyp request into the SMCCC return registers. */
static inline void hyp_reqs_smccc_encode(unsigned long ret, struct kvm_cpu_context *host_ctxt,
					 struct kvm_hyp_req *req)
48 {
49 cpu_reg(host_ctxt, 1) = ret;
50 cpu_reg(host_ctxt, 2) = 0;
51 cpu_reg(host_ctxt, 3) = 0;
52
53 if (req->type == KVM_HYP_REQ_TYPE_MEM) {
54 cpu_reg(host_ctxt, 2) = FIELD_PREP(SMCCC_REQ_TYPE_MASK, req->type) |
55 FIELD_PREP(SMCCC_REQ_DEST_MASK, req->mem.dest);
56
57 cpu_reg(host_ctxt, 3) = FIELD_PREP(SMCCC_REQ_NR_PAGES_MASK, req->mem.nr_pages) |
58 FIELD_PREP(SMCCC_REQ_SZ_ALLOC_MASK, req->mem.sz_alloc);
59 }
60
61 /* We can't encode others */
62 WARN_ON((req->type != KVM_HYP_REQ_TYPE_MEM) && ((req->type != KVM_HYP_LAST_REQ)));
63 req->type = KVM_HYP_LAST_REQ;
64 }
65
66 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
67
68 static bool (*default_trap_handler)(struct user_pt_regs *regs);
69 static bool (*unmask_serror)(void);
70 static void (*mask_serror)(void);
71
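/*
 * Register a fallback handler for host traps that the hypervisor does not
 * handle itself (typically provided by a pKVM module). Only one handler can
 * ever be installed: the first registration wins, later ones get -EBUSY.
 */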
int __pkvm_register_default_trap_handler(bool (*cb)(struct user_pt_regs *))
73 {
74 return cmpxchg(&default_trap_handler, NULL, cb) ? -EBUSY : 0;
75 }
76
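/*
 * Route SErrors to EL2 (HCR_EL2.AMO) and unmask them, but only once a module
 * has registered a mask/unmask pair and its unmask callback agrees; otherwise
 * leave the PSTATE.A mask untouched.
 */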
void __pkvm_unmask_serror(void)
78 {
79 u64 hcr = read_sysreg(HCR_EL2);
80
81 if (!unmask_serror || !unmask_serror())
82 return;
83
84 write_sysreg(hcr | HCR_AMO, HCR_EL2);
85 asm volatile("msr daifclr, #4");
86 isb();
87 }
88
static void __pkvm_mask_serror(void)
90 {
91 u64 hcr = read_sysreg(HCR_EL2);
92
93 if (!mask_serror)
94 return;
95
96 mask_serror();
97
98 write_sysreg(hcr & ~HCR_AMO, HCR_EL2);
99 asm volatile("msr daifset, #4");
100 isb();
101 }
102
int __pkvm_register_unmask_serror(bool (*unmask)(void),
				  void (*mask)(void))
105 {
106 static bool registered;
107
108 if (!unmask || !mask)
109 return -EINVAL;
110
if (cmpxchg(&registered, false, true))
112 return -EBUSY;
113
114 mask_serror = mask;
115 /*
116 * Paired with the CB + isb() in __pkvm_unmask_serror(). Makes sure a
117 * reader can't unmask serrors before being able to mask them.
118 */
119 smp_wmb();
120 unmask_serror = unmask;
121
122 return 0;
123 }
124
void __hyp_enter(void)
126 {
127 trace_hyp_enter();
128 __pkvm_unmask_serror();
129 }
130
void __hyp_exit(void)
132 {
133 __pkvm_mask_serror();
134 trace_hyp_exit();
135 }
136
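/*
 * Top up the hyp vcpu's stage-2 memcache with the pages the host has made
 * available through its own vcpu memcache.
 */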
static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
138 {
139 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
140
141 return refill_memcache(&hyp_vcpu->vcpu.arch.stage2_mc,
142 host_vcpu->arch.stage2_mc.nr_pages,
143 &host_vcpu->arch.stage2_mc);
144 }
145
146 typedef void (*hyp_entry_exit_handler_fn)(struct pkvm_hyp_vcpu *);
147
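/*
 * Re-entry after a WFI/WFE exit: the only host-controlled effect we accept
 * is the PC increment it requested.
 */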
static void handle_pvm_entry_wfx(struct pkvm_hyp_vcpu *hyp_vcpu)
149 {
150 if (vcpu_get_flag(hyp_vcpu->host_vcpu, INCREMENT_PC)) {
151 vcpu_clear_flag(&hyp_vcpu->vcpu, PC_UPDATE_REQ);
152 kvm_incr_pc(&hyp_vcpu->vcpu);
153 }
154 }
155
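/*
 * Propagate the host's PSCI return value back into the vcpu. A failed CPU_ON
 * additionally rolls the target vcpu back from ON_PENDING to OFF and is
 * reported as INTERNAL_FAILURE.
 */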
static void handle_pvm_entry_psci(struct pkvm_hyp_vcpu *hyp_vcpu)
157 {
158 u32 psci_fn = smccc_get_function(&hyp_vcpu->vcpu);
159 u64 ret = READ_ONCE(hyp_vcpu->host_vcpu->arch.ctxt.regs.regs[0]);
160
161 switch (psci_fn) {
162 case PSCI_0_2_FN_CPU_ON:
163 case PSCI_0_2_FN64_CPU_ON:
164 /*
165 * Check whether the cpu_on request to the host was successful.
166 * If not, reset the vcpu state from ON_PENDING to OFF.
167 * This could happen if this vcpu attempted to turn on the other
168 * vcpu while the other one is in the process of turning itself
169 * off.
170 */
171 if (ret != PSCI_RET_SUCCESS) {
172 unsigned long cpu_id = smccc_get_arg1(&hyp_vcpu->vcpu);
173 struct pkvm_hyp_vcpu *target_vcpu;
174 struct pkvm_hyp_vm *hyp_vm;
175
176 hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
177 target_vcpu = pkvm_mpidr_to_hyp_vcpu(hyp_vm, cpu_id);
178
179 if (target_vcpu && READ_ONCE(target_vcpu->power_state) == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING)
180 WRITE_ONCE(target_vcpu->power_state, PSCI_0_2_AFFINITY_LEVEL_OFF);
181
182 ret = PSCI_RET_INTERNAL_FAILURE;
183 }
184
185 break;
186 default:
187 break;
188 }
189
190 vcpu_set_reg(&hyp_vcpu->vcpu, 0, ret);
191 }
192
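/*
 * Re-entry after an HVC exit: memory sharing hypercalls unconditionally
 * return success to the guest (the host has no say in the matter),
 * everything else is treated as PSCI.
 */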
static void handle_pvm_entry_hvc64(struct pkvm_hyp_vcpu *hyp_vcpu)
194 {
195 u32 fn = smccc_get_function(&hyp_vcpu->vcpu);
196
197 switch (fn) {
198 case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
199 fallthrough;
200 case ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID:
201 fallthrough;
202 case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
203 vcpu_set_reg(&hyp_vcpu->vcpu, 0, SMCCC_RET_SUCCESS);
204 break;
205 default:
206 handle_pvm_entry_psci(hyp_vcpu);
207 break;
208 }
209 }
210
static void handle_pvm_entry_sys64(struct pkvm_hyp_vcpu *hyp_vcpu)
212 {
213 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
214
/* Exceptions have priority over anything else */
216 if (vcpu_get_flag(host_vcpu, PENDING_EXCEPTION)) {
217 /* Exceptions caused by this should be undef exceptions. */
218 u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
219
220 __vcpu_sys_reg(&hyp_vcpu->vcpu, ESR_EL1) = esr;
221 kvm_pend_exception(&hyp_vcpu->vcpu, EXCEPT_AA64_EL1_SYNC);
222 return;
223 }
224
225 if (vcpu_get_flag(host_vcpu, INCREMENT_PC)) {
226 vcpu_clear_flag(&hyp_vcpu->vcpu, PC_UPDATE_REQ);
227 kvm_incr_pc(&hyp_vcpu->vcpu);
228 }
229
230 if (!esr_sys64_to_params(hyp_vcpu->vcpu.arch.fault.esr_el2).is_write) {
231 /* r0 as transfer register between the guest and the host. */
232 u64 rt_val = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
233 int rt = kvm_vcpu_sys_get_rt(&hyp_vcpu->vcpu);
234
235 vcpu_set_reg(&hyp_vcpu->vcpu, rt, rt_val);
236 }
237 }
238
static void handle_pvm_entry_iabt(struct pkvm_hyp_vcpu *hyp_vcpu)
240 {
241 unsigned long cpsr = *vcpu_cpsr(&hyp_vcpu->vcpu);
242 u32 esr = ESR_ELx_IL;
243
244 if (!vcpu_get_flag(hyp_vcpu->host_vcpu, PENDING_EXCEPTION))
245 return;
246
247 /*
* If the host wants to inject an exception, get the syndrome and
249 * fault address.
250 */
251 if ((cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
252 esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
253 else
254 esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
255
256 esr |= ESR_ELx_FSC_EXTABT;
257
258 __vcpu_sys_reg(&hyp_vcpu->vcpu, ESR_EL1) = esr;
259 __vcpu_sys_reg(&hyp_vcpu->vcpu, FAR_EL1) =
260 kvm_vcpu_get_hfar(&hyp_vcpu->vcpu);
261
262 /* Tell the run loop that we want to inject something */
263 kvm_pend_exception(&hyp_vcpu->vcpu, EXCEPT_AA64_EL1_SYNC);
264 }
265
static void handle_pvm_entry_dabt(struct pkvm_hyp_vcpu *hyp_vcpu)
267 {
268 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
269 bool pc_update;
270
271 /* Exceptions have priority over anything else */
272 if (vcpu_get_flag(host_vcpu, PENDING_EXCEPTION)) {
273 unsigned long cpsr = *vcpu_cpsr(&hyp_vcpu->vcpu);
274 u32 esr = ESR_ELx_IL;
275
276 if ((cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
277 esr |= (ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT);
278 else
279 esr |= (ESR_ELx_EC_DABT_CUR << ESR_ELx_EC_SHIFT);
280
281 esr |= ESR_ELx_FSC_EXTABT;
282
283 __vcpu_sys_reg(&hyp_vcpu->vcpu, ESR_EL1) = esr;
284 __vcpu_sys_reg(&hyp_vcpu->vcpu, FAR_EL1) =
285 kvm_vcpu_get_hfar(&hyp_vcpu->vcpu);
286
287 /* Tell the run loop that we want to inject something */
288 kvm_pend_exception(&hyp_vcpu->vcpu, EXCEPT_AA64_EL1_SYNC);
289
290 /* Cancel potential in-flight MMIO */
291 hyp_vcpu->vcpu.mmio_needed = false;
292 return;
293 }
294
295 /* Handle PC increment on MMIO */
296 pc_update = (hyp_vcpu->vcpu.mmio_needed &&
297 vcpu_get_flag(host_vcpu, INCREMENT_PC));
298 if (pc_update) {
299 vcpu_clear_flag(&hyp_vcpu->vcpu, PC_UPDATE_REQ);
300 kvm_incr_pc(&hyp_vcpu->vcpu);
301 }
302
/* If we were doing an MMIO read access, update the register */
304 if (pc_update && !kvm_vcpu_dabt_iswrite(&hyp_vcpu->vcpu)) {
305 /* r0 as transfer register between the guest and the host. */
306 u64 rd_val = READ_ONCE(host_vcpu->arch.ctxt.regs.regs[0]);
307 int rd = kvm_vcpu_dabt_get_rd(&hyp_vcpu->vcpu);
308
309 vcpu_set_reg(&hyp_vcpu->vcpu, rd, rd_val);
310 }
311
312 hyp_vcpu->vcpu.mmio_needed = false;
313 }
314
static void handle_pvm_exit_wfx(struct pkvm_hyp_vcpu *hyp_vcpu)
316 {
317 WRITE_ONCE(hyp_vcpu->host_vcpu->arch.ctxt.regs.pstate,
318 hyp_vcpu->vcpu.arch.ctxt.regs.pstate & PSR_MODE_MASK);
319 WRITE_ONCE(hyp_vcpu->host_vcpu->arch.fault.esr_el2,
320 hyp_vcpu->vcpu.arch.fault.esr_el2);
321 }
322
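/*
 * Expose a sanitised sysreg trap to the host: the syndrome with the Rt field
 * cleared, only the mode bits of PSTATE, and, for writes, the value to
 * transfer in r0.
 */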
static void handle_pvm_exit_sys64(struct pkvm_hyp_vcpu *hyp_vcpu)
324 {
325 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
326 u32 esr_el2 = hyp_vcpu->vcpu.arch.fault.esr_el2;
327
328 /* r0 as transfer register between the guest and the host. */
329 WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
330 esr_el2 & ~ESR_ELx_SYS64_ISS_RT_MASK);
331
332 /* The mode is required for the host to emulate some sysregs */
333 WRITE_ONCE(host_vcpu->arch.ctxt.regs.pstate,
334 hyp_vcpu->vcpu.arch.ctxt.regs.pstate & PSR_MODE_MASK);
335
336 if (esr_sys64_to_params(esr_el2).is_write) {
337 int rt = kvm_vcpu_sys_get_rt(&hyp_vcpu->vcpu);
338 u64 rt_val = vcpu_get_reg(&hyp_vcpu->vcpu, rt);
339
340 WRITE_ONCE(host_vcpu->arch.ctxt.regs.regs[0], rt_val);
341 }
342 }
343
static void handle_pvm_exit_hvc64(struct pkvm_hyp_vcpu *hyp_vcpu)
345 {
346 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
347 int n, i;
348
349 switch (smccc_get_function(&hyp_vcpu->vcpu)) {
350 /*
351 * CPU_ON takes 3 arguments, however, to wake up the target vcpu the
352 * host only needs to know the target's cpu_id, which is passed as the
353 * first argument. The processing of the reset state is done at hyp.
354 */
355 case PSCI_0_2_FN_CPU_ON:
356 case PSCI_0_2_FN64_CPU_ON:
357 n = 2;
358 break;
359
360 case PSCI_0_2_FN_CPU_OFF:
361 case PSCI_0_2_FN_SYSTEM_OFF:
362 case PSCI_0_2_FN_SYSTEM_RESET:
363 case PSCI_0_2_FN_CPU_SUSPEND:
364 case PSCI_0_2_FN64_CPU_SUSPEND:
365 n = 1;
366 break;
367
368 case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
369 n = 4;
370 break;
371
372 case PSCI_1_1_FN_SYSTEM_RESET2:
373 case PSCI_1_1_FN64_SYSTEM_RESET2:
374 n = 3;
375 break;
376
377 /*
378 * The rest are either blocked or handled by HYP, so we should
379 * really never be here.
380 */
381 default:
382 BUG();
383 }
384
385 WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
386 hyp_vcpu->vcpu.arch.fault.esr_el2);
387
388 /* Pass the hvc function id (r0) as well as any potential arguments. */
389 for (i = 0; i < n; i++) {
390 WRITE_ONCE(host_vcpu->arch.ctxt.regs.regs[i],
391 vcpu_get_reg(&hyp_vcpu->vcpu, i));
392 }
393 }
394
static void handle_pvm_exit_iabt(struct pkvm_hyp_vcpu *hyp_vcpu)
396 {
397 WRITE_ONCE(hyp_vcpu->host_vcpu->arch.fault.esr_el2,
398 hyp_vcpu->vcpu.arch.fault.esr_el2);
399 WRITE_ONCE(hyp_vcpu->host_vcpu->arch.fault.hpfar_el2,
400 hyp_vcpu->vcpu.arch.fault.hpfar_el2);
401 }
402
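/*
 * Data aborts are only forwarded to the host as MMIO if they hit an
 * ioguard-verified page; otherwise ISV is cleared so the host cannot emulate
 * the access. Only the page offset of FAR_EL2 is exposed.
 */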
static void handle_pvm_exit_dabt(struct pkvm_hyp_vcpu *hyp_vcpu)
404 {
405 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
406
407 hyp_vcpu->vcpu.mmio_needed = __pkvm_check_ioguard_page(hyp_vcpu);
408
409 if (hyp_vcpu->vcpu.mmio_needed) {
410 /* r0 as transfer register between the guest and the host. */
411 WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
412 hyp_vcpu->vcpu.arch.fault.esr_el2 & ~ESR_ELx_SRT_MASK);
413
414 if (kvm_vcpu_dabt_iswrite(&hyp_vcpu->vcpu)) {
415 int rt = kvm_vcpu_dabt_get_rd(&hyp_vcpu->vcpu);
416 u64 rt_val = vcpu_get_reg(&hyp_vcpu->vcpu, rt);
417
418 WRITE_ONCE(host_vcpu->arch.ctxt.regs.regs[0], rt_val);
419 }
420 } else {
421 WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
422 hyp_vcpu->vcpu.arch.fault.esr_el2 & ~ESR_ELx_ISV);
423 }
424
425 WRITE_ONCE(host_vcpu->arch.ctxt.regs.pstate,
426 hyp_vcpu->vcpu.arch.ctxt.regs.pstate & PSR_MODE_MASK);
427 WRITE_ONCE(host_vcpu->arch.fault.far_el2,
428 hyp_vcpu->vcpu.arch.fault.far_el2 & GENMASK(11, 0));
429 WRITE_ONCE(host_vcpu->arch.fault.hpfar_el2,
430 hyp_vcpu->vcpu.arch.fault.hpfar_el2);
431 WRITE_ONCE(__vcpu_sys_reg(host_vcpu, SCTLR_EL1),
432 __vcpu_sys_reg(&hyp_vcpu->vcpu, SCTLR_EL1) &
433 (SCTLR_ELx_EE | SCTLR_EL1_E0E));
434 }
435
static void handle_vm_entry_generic(struct pkvm_hyp_vcpu *hyp_vcpu)
437 {
438 vcpu_copy_flag(&hyp_vcpu->vcpu, hyp_vcpu->host_vcpu, PC_UPDATE_REQ);
439 }
440
static void handle_vm_exit_generic(struct pkvm_hyp_vcpu *hyp_vcpu)
442 {
443 WRITE_ONCE(hyp_vcpu->host_vcpu->arch.fault.esr_el2,
444 hyp_vcpu->vcpu.arch.fault.esr_el2);
445 }
446
static void handle_vm_exit_abt(struct pkvm_hyp_vcpu *hyp_vcpu)
448 {
449 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
450
451 WRITE_ONCE(host_vcpu->arch.fault.esr_el2,
452 hyp_vcpu->vcpu.arch.fault.esr_el2);
453 WRITE_ONCE(host_vcpu->arch.fault.far_el2,
454 hyp_vcpu->vcpu.arch.fault.far_el2);
455 WRITE_ONCE(host_vcpu->arch.fault.hpfar_el2,
456 hyp_vcpu->vcpu.arch.fault.hpfar_el2);
457 WRITE_ONCE(host_vcpu->arch.fault.disr_el1,
458 hyp_vcpu->vcpu.arch.fault.disr_el1);
459 }
460
461 static const hyp_entry_exit_handler_fn entry_hyp_pvm_handlers[] = {
462 [0 ... ESR_ELx_EC_MAX] = NULL,
463 [ESR_ELx_EC_WFx] = handle_pvm_entry_wfx,
464 [ESR_ELx_EC_HVC64] = handle_pvm_entry_hvc64,
465 [ESR_ELx_EC_SYS64] = handle_pvm_entry_sys64,
466 [ESR_ELx_EC_IABT_LOW] = handle_pvm_entry_iabt,
467 [ESR_ELx_EC_DABT_LOW] = handle_pvm_entry_dabt,
468 };
469
470 static const hyp_entry_exit_handler_fn exit_hyp_pvm_handlers[] = {
471 [0 ... ESR_ELx_EC_MAX] = NULL,
472 [ESR_ELx_EC_WFx] = handle_pvm_exit_wfx,
473 [ESR_ELx_EC_HVC64] = handle_pvm_exit_hvc64,
474 [ESR_ELx_EC_SYS64] = handle_pvm_exit_sys64,
475 [ESR_ELx_EC_IABT_LOW] = handle_pvm_exit_iabt,
476 [ESR_ELx_EC_DABT_LOW] = handle_pvm_exit_dabt,
477 };
478
479 static const hyp_entry_exit_handler_fn entry_hyp_vm_handlers[] = {
480 [0 ... ESR_ELx_EC_MAX] = handle_vm_entry_generic,
481 };
482
483 static const hyp_entry_exit_handler_fn exit_hyp_vm_handlers[] = {
484 [0 ... ESR_ELx_EC_MAX] = handle_vm_exit_generic,
485 [ESR_ELx_EC_IABT_LOW] = handle_vm_exit_abt,
486 [ESR_ELx_EC_DABT_LOW] = handle_vm_exit_abt,
487 };
488
static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu)
490 {
491 __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
492 /*
493 * On saving/restoring guest sve state, always use the maximum VL for
494 * the guest. The layout of the data when saving the sve state depends
495 * on the VL, so use a consistent (i.e., the maximum) guest VL.
496 */
497 sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
498 __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true);
499 write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
500 }
501
static void __hyp_sve_restore_host(void)
503 {
504 struct cpu_sve_state *sve_state = *host_data_ptr(sve_state);
505
506 /*
507 * On saving/restoring host sve state, always use the maximum VL for
508 * the host. The layout of the data when saving the sve state depends
509 * on the VL, so use a consistent (i.e., the maximum) host VL.
510 *
511 * Note that this constrains the PE to the maximum shared VL
512 * that was discovered, if we wish to use larger VLs this will
513 * need to be revisited.
514 */
515 write_sysreg_s(sve_vq_from_vl(kvm_host_sve_max_vl) - 1, SYS_ZCR_EL2);
516 __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl),
517 &sve_state->fpsr,
518 true);
519 write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
520 }
521
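/*
 * Mark the FP/SIMD/SVE registers as host-owned before running the vcpu;
 * fpsimd_sve_sync() below hands them back to the host if the guest took
 * ownership while it ran.
 */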
static void fpsimd_sve_flush(void)
523 {
524 *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
525 }
526
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
528 {
529 bool has_fpmr;
530
531 if (!guest_owns_fp_regs())
532 return;
533
534 cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);
535 isb();
536
537 if (vcpu_has_sve(vcpu))
538 __hyp_sve_save_guest(vcpu);
539 else
540 __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
541
542 has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
543 if (has_fpmr)
544 __vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
545
546 if (system_supports_sve())
547 __hyp_sve_restore_host();
548 else
549 __fpsimd_restore_state(host_data_ptr(host_ctxt.fp_regs));
550
551 if (has_fpmr)
552 write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
553
554 *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
555 }
556
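/*
 * Copy the host-maintained GICv3 state into the hyp view, clamping used_lrs
 * to the number of list registers the hardware implements so a bogus host
 * value cannot overflow the shadow state.
 */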
static void flush_hyp_vgic_state(struct pkvm_hyp_vcpu *hyp_vcpu)
558 {
559 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
560 struct vgic_v3_cpu_if *host_cpu_if, *hyp_cpu_if;
561 unsigned int used_lrs, max_lrs, i;
562
563 host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
564 hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
565
566 max_lrs = (read_gicreg(ICH_VTR_EL2) & 0xf) + 1;
567 used_lrs = READ_ONCE(host_cpu_if->used_lrs);
568 used_lrs = min(used_lrs, max_lrs);
569
570 hyp_cpu_if->vgic_hcr = READ_ONCE(host_cpu_if->vgic_hcr);
571 /* Should be a one-off */
572 hyp_cpu_if->vgic_sre = (ICC_SRE_EL1_DIB |
573 ICC_SRE_EL1_DFB |
574 ICC_SRE_EL1_SRE);
575 hyp_cpu_if->used_lrs = used_lrs;
576
577 for (i = 0; i < used_lrs; i++)
578 hyp_cpu_if->vgic_lr[i] = READ_ONCE(host_cpu_if->vgic_lr[i]);
579 }
580
static void sync_hyp_vgic_state(struct pkvm_hyp_vcpu *hyp_vcpu)
582 {
583 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
584 struct vgic_v3_cpu_if *host_cpu_if, *hyp_cpu_if;
585 unsigned int i;
586
587 host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
588 hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
589
590 WRITE_ONCE(host_cpu_if->vgic_hcr, hyp_cpu_if->vgic_hcr);
591
592 for (i = 0; i < hyp_cpu_if->used_lrs; i++)
593 WRITE_ONCE(host_cpu_if->vgic_lr[i], hyp_cpu_if->vgic_lr[i]);
594 }
595
static void flush_hyp_timer_state(struct pkvm_hyp_vcpu *hyp_vcpu)
597 {
598 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
599 return;
600
601 /*
602 * A hyp vcpu has no offset, and sees vtime == ptime. The
603 * ptimer is fully emulated by EL1 and cannot be trusted.
604 */
605 write_sysreg(0, cntvoff_el2);
606 isb();
607 write_sysreg_el0(__vcpu_sys_reg(&hyp_vcpu->vcpu, CNTV_CVAL_EL0),
608 SYS_CNTV_CVAL);
609 write_sysreg_el0(__vcpu_sys_reg(&hyp_vcpu->vcpu, CNTV_CTL_EL0),
610 SYS_CNTV_CTL);
611 }
612
static void sync_hyp_timer_state(struct pkvm_hyp_vcpu *hyp_vcpu)
614 {
615 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
616 return;
617
618 /*
619 * Preserve the vtimer state so that it is always correct,
620 * even if the host tries to make a mess.
621 */
622 __vcpu_sys_reg(&hyp_vcpu->vcpu, CNTV_CVAL_EL0) =
623 read_sysreg_el0(SYS_CNTV_CVAL);
624 __vcpu_sys_reg(&hyp_vcpu->vcpu, CNTV_CTL_EL0) =
625 read_sysreg_el0(SYS_CNTV_CTL);
626 }
627
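/*
 * copy_sysreg() is bounds-checked at build time: the sysreg enum is sparse
 * and unsorted, so only the explicitly listed registers are copied below.
 */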
628 #define copy_sysreg(REG) do { \
629 BUILD_BUG_ON(REG <= __INVALID_SYSREG__); \
630 BUILD_BUG_ON(REG >= NR_SYS_REGS); \
631 to_vcpu->arch.ctxt.sys_regs[REG] = from_vcpu->arch.ctxt.sys_regs[REG]; \
632 } while (0)
633
static void __copy_vcpu_state(const struct kvm_vcpu *from_vcpu,
			      struct kvm_vcpu *to_vcpu)
636 {
637 int reg;
638
639 to_vcpu->arch.ctxt.regs = from_vcpu->arch.ctxt.regs;
640 to_vcpu->arch.ctxt.spsr_abt = from_vcpu->arch.ctxt.spsr_abt;
641 to_vcpu->arch.ctxt.spsr_und = from_vcpu->arch.ctxt.spsr_und;
642 to_vcpu->arch.ctxt.spsr_irq = from_vcpu->arch.ctxt.spsr_irq;
643 to_vcpu->arch.ctxt.spsr_fiq = from_vcpu->arch.ctxt.spsr_fiq;
644
645 /*
646 * Copy the sysregs, but don't mess with the timer state which
647 * is directly handled by EL1 and is expected to be preserved.
648 * Note that the sysreg enum is sparse and not sorted, therefore,
649 * explicitly specify the registers to copy.
650 */
651 copy_sysreg(MPIDR_EL1);
652 copy_sysreg(CLIDR_EL1);
653 copy_sysreg(CSSELR_EL1);
654 copy_sysreg(TPIDR_EL0);
655 copy_sysreg(TPIDRRO_EL0);
656 copy_sysreg(TPIDR_EL1);
657 copy_sysreg(CNTKCTL_EL1);
658 copy_sysreg(PAR_EL1);
659 copy_sysreg(MDCCINT_EL1);
660 copy_sysreg(OSLSR_EL1);
661 copy_sysreg(DISR_EL1);
662
663 copy_sysreg(PMCR_EL0);
664 copy_sysreg(PMSELR_EL0);
665 for (reg = PMEVCNTR0_EL0; reg <= PMEVCNTR30_EL0; reg++)
666 copy_sysreg(reg);
667 copy_sysreg(PMCCNTR_EL0);
668 for (reg = PMEVTYPER0_EL0; reg <= PMEVTYPER30_EL0; reg++)
669 copy_sysreg(reg);
670 copy_sysreg(PMCCFILTR_EL0);
671 copy_sysreg(PMCNTENSET_EL0);
672 copy_sysreg(PMINTENSET_EL1);
673 copy_sysreg(PMOVSSET_EL0);
674 copy_sysreg(PMUSERENR_EL0);
675
676 copy_sysreg(APIAKEYLO_EL1);
677 copy_sysreg(APIAKEYHI_EL1);
678 copy_sysreg(APIBKEYLO_EL1);
679 copy_sysreg(APIBKEYHI_EL1);
680 copy_sysreg(APDAKEYLO_EL1);
681 copy_sysreg(APDAKEYHI_EL1);
682 copy_sysreg(APDBKEYLO_EL1);
683 copy_sysreg(APDBKEYHI_EL1);
684 copy_sysreg(APGAKEYLO_EL1);
685 copy_sysreg(APGAKEYHI_EL1);
686
687 copy_sysreg(RGSR_EL1);
688 copy_sysreg(GCR_EL1);
689 copy_sysreg(TFSRE0_EL1);
690
691 copy_sysreg(SCTLR_EL1);
692 copy_sysreg(ACTLR_EL1);
693 copy_sysreg(CPACR_EL1);
694 copy_sysreg(ZCR_EL1);
695 copy_sysreg(TTBR0_EL1);
696 copy_sysreg(TTBR1_EL1);
697 copy_sysreg(TCR_EL1);
698 copy_sysreg(TCR2_EL1);
699 copy_sysreg(ESR_EL1);
700 copy_sysreg(AFSR0_EL1);
701 copy_sysreg(AFSR1_EL1);
702 copy_sysreg(FAR_EL1);
703 copy_sysreg(MAIR_EL1);
704 copy_sysreg(VBAR_EL1);
705 copy_sysreg(CONTEXTIDR_EL1);
706 copy_sysreg(AMAIR_EL1);
707 copy_sysreg(MDSCR_EL1);
708 copy_sysreg(ELR_EL1);
709 copy_sysreg(SP_EL1);
710 copy_sysreg(SPSR_EL1);
711 copy_sysreg(TFSR_EL1);
712
713 copy_sysreg(PIR_EL1);
714 copy_sysreg(PIRE0_EL1);
715 }
716
static void __sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
718 {
719 __copy_vcpu_state(&hyp_vcpu->vcpu, hyp_vcpu->host_vcpu);
720 }
721
static void __flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
723 {
724 __copy_vcpu_state(hyp_vcpu->host_vcpu, &hyp_vcpu->vcpu);
725 }
726
static void flush_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
728 {
729 struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
730 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
731 u64 mdcr_el2 = READ_ONCE(host_vcpu->arch.mdcr_el2);
732
733 /*
734 * Propagate the monitor debug configuration of the vcpu from host.
735 * Preserve HPMN, which is set-up by some knowledgeable bootcode.
736 * Ensure that MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK are clear,
737 * as guests should not be able to access profiling and trace buffers.
738 * Ensure that RES0 bits are clear.
739 */
740 mdcr_el2 &= ~(MDCR_EL2_RES0 |
741 MDCR_EL2_HPMN_MASK |
742 (MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT) |
743 (MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT));
744 vcpu->arch.mdcr_el2 = read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK;
745 vcpu->arch.mdcr_el2 |= mdcr_el2;
746
747 vcpu->arch.pmu = host_vcpu->arch.pmu;
748 vcpu->guest_debug = READ_ONCE(host_vcpu->guest_debug);
749
750 if (!kvm_vcpu_needs_debug_regs(vcpu))
751 return;
752
753 __vcpu_save_guest_debug_regs(vcpu);
754
755 /* Switch debug_ptr to the external_debug_state if done by the host. */
756 if (kern_hyp_va(READ_ONCE(host_vcpu->arch.debug_ptr)) ==
757 &host_vcpu->arch.external_debug_state)
758 vcpu->arch.debug_ptr = &host_vcpu->arch.external_debug_state;
759
760 /* Propagate any special handling for single step from host. */
761 vcpu_write_sys_reg(vcpu, vcpu_read_sys_reg(host_vcpu, MDSCR_EL1),
762 MDSCR_EL1);
763 *vcpu_cpsr(vcpu) = *vcpu_cpsr(host_vcpu);
764 }
765
static void sync_debug_state(struct pkvm_hyp_vcpu *hyp_vcpu)
767 {
768 struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
769 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
770
771 if (!kvm_vcpu_needs_debug_regs(vcpu))
772 return;
773
774 __vcpu_restore_guest_debug_regs(vcpu);
775 vcpu_write_sys_reg(host_vcpu, vcpu_read_sys_reg(vcpu, MDSCR_EL1),
776 MDSCR_EL1);
777 *vcpu_cpsr(host_vcpu) = *vcpu_cpsr(vcpu);
778
779 vcpu->arch.debug_ptr = &host_vcpu->arch.vcpu_debug_state;
780 }
781
static void __flush_hyp_reqs(struct pkvm_hyp_vcpu *hyp_vcpu)
783 {
784 struct kvm_hyp_req *hyp_req = hyp_vcpu->vcpu.arch.hyp_reqs;
785
786 hyp_req->type = KVM_HYP_LAST_REQ;
787
/* One of the requests might have been TYPE_MEM/DEST_VCPU_MEMCACHE */
789 pkvm_refill_memcache(hyp_vcpu);
790 }
791
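/*
 * Load host-provided state into the hyp vcpu before running it: FP
 * ownership, the (potentially dirty) GPR/sysreg state for non-protected
 * guests, vgic and timer state, and finally the per-EC entry fixups
 * matching the previous exit.
 */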
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
793 {
794 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
795 hyp_entry_exit_handler_fn ec_handler;
796 u8 esr_ec;
797
798 fpsimd_sve_flush();
799
800 /*
801 * If we deal with a non-protected guest and the state is potentially
802 * dirty (from a host perspective), copy the state back into the hyp
803 * vcpu.
804 */
805 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
806 if (vcpu_get_flag(host_vcpu, PKVM_HOST_STATE_DIRTY))
807 __flush_hyp_vcpu(hyp_vcpu);
808
809 hyp_vcpu->vcpu.arch.iflags = READ_ONCE(host_vcpu->arch.iflags);
810 flush_debug_state(hyp_vcpu);
811
812 hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWI | HCR_TWE);
813 hyp_vcpu->vcpu.arch.hcr_el2 |= READ_ONCE(host_vcpu->arch.hcr_el2) &
814 (HCR_TWI | HCR_TWE);
815 }
816
817 hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
818
819 flush_hyp_vgic_state(hyp_vcpu);
820 flush_hyp_timer_state(hyp_vcpu);
821
822 switch (ARM_EXCEPTION_CODE(hyp_vcpu->exit_code)) {
823 case ARM_EXCEPTION_IRQ:
824 case ARM_EXCEPTION_EL1_SERROR:
825 case ARM_EXCEPTION_IL:
826 break;
827 case ARM_EXCEPTION_TRAP:
828 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(&hyp_vcpu->vcpu));
829
830 if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
831 ec_handler = entry_hyp_pvm_handlers[esr_ec];
832 else
833 ec_handler = entry_hyp_vm_handlers[esr_ec];
834
835 if (ec_handler)
836 ec_handler(hyp_vcpu);
837 break;
838 case ARM_EXCEPTION_HYP_REQ:
839 __flush_hyp_reqs(hyp_vcpu);
840 break;
841 default:
842 BUG();
843 }
844
845 hyp_vcpu->exit_code = 0;
846 }
847
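/*
 * Mirror of flush_hyp_vcpu(): after a run, push back to the host only the
 * state it is allowed to see, going through the per-EC exit handlers.
 */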
static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code)
849 {
850 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
851 hyp_entry_exit_handler_fn ec_handler;
852 u8 esr_ec;
853
854 fpsimd_sve_sync(&hyp_vcpu->vcpu);
855
856 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
857 sync_debug_state(hyp_vcpu);
858
859 /*
860 * Don't sync the vcpu GPR/sysreg state after a run. Instead,
861 * leave it in the hyp vCPU until someone actually requires it.
862 */
863 sync_hyp_vgic_state(hyp_vcpu);
864 sync_hyp_timer_state(hyp_vcpu);
865
866 switch (ARM_EXCEPTION_CODE(*exit_code)) {
867 case ARM_EXCEPTION_IRQ:
868 case ARM_EXCEPTION_HYP_REQ:
869 break;
870 case ARM_EXCEPTION_TRAP:
871 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(&hyp_vcpu->vcpu));
872
873 if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
874 ec_handler = exit_hyp_pvm_handlers[esr_ec];
875 else
876 ec_handler = exit_hyp_vm_handlers[esr_ec];
877
878 if (ec_handler) {
879 ec_handler(hyp_vcpu);
880 } else {
881 /*
* If we have no handler, we should not punt this
* trap to the host, as it will have no synced context
* (for example ESR_EL2) to handle it with.
885 */
886 vcpu_illegal_trap(&hyp_vcpu->vcpu, exit_code);
887 }
888 break;
889 case ARM_EXCEPTION_EL1_SERROR:
890 case ARM_EXCEPTION_IL:
891 break;
892 default:
893 BUG();
894 }
895
896 if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
897 vcpu_clear_flag(host_vcpu, PC_UPDATE_REQ);
898 else
899 host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
900
901 hyp_vcpu->exit_code = *exit_code;
902 }
903
static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
905 {
906 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
907 DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
908 DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
909 struct pkvm_hyp_vcpu *hyp_vcpu;
910 int __percpu *last_vcpu_ran;
911 int *last_ran;
912
913 if (!is_protected_kvm_enabled())
914 return;
915
916 hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
917 if (!hyp_vcpu)
918 return;
919
920 /*
921 * Guarantee that both TLBs and I-cache are private to each vcpu. If a
922 * vcpu from the same VM has previously run on the same physical CPU,
923 * nuke the relevant contexts.
924 */
925 last_vcpu_ran = hyp_vcpu->vcpu.arch.hw_mmu->last_vcpu_ran;
926 last_ran = (__force int *) &last_vcpu_ran[hyp_smp_processor_id()];
927 if (*last_ran != hyp_vcpu->vcpu.vcpu_id) {
928 __kvm_flush_cpu_context(hyp_vcpu->vcpu.arch.hw_mmu);
929 *last_ran = hyp_vcpu->vcpu.vcpu_id;
930 }
931
932 if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
933 /* Propagate WFx trapping flags */
934 hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
935 hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
936 }
937 }
938
static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
940 {
941 struct pkvm_hyp_vcpu *hyp_vcpu;
942
943 if (!is_protected_kvm_enabled())
944 return;
945
946 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
947 if (hyp_vcpu) {
948 struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
949
950 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu) &&
951 !vcpu_get_flag(host_vcpu, PKVM_HOST_STATE_DIRTY)) {
952 __sync_hyp_vcpu(hyp_vcpu);
953 }
954
955 pkvm_put_hyp_vcpu(hyp_vcpu);
956 }
957 }
958
static void handle___pkvm_vcpu_sync_state(struct kvm_cpu_context *host_ctxt)
960 {
961 struct pkvm_hyp_vcpu *hyp_vcpu;
962
963 if (!is_protected_kvm_enabled())
964 return;
965
966 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
967 if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
968 return;
969
970 __sync_hyp_vcpu(hyp_vcpu);
971 }
972
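/*
 * Resolve the host-provided vcpu pointer and, with pKVM enabled, pair it
 * with the currently loaded hyp vcpu. Both pointers are NULLed if they
 * don't match, so callers cannot operate on a vcpu that isn't loaded.
 */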
static struct kvm_vcpu *__get_host_hyp_vcpus(struct kvm_vcpu *arg,
					     struct pkvm_hyp_vcpu **hyp_vcpup)
975 {
976 struct kvm_vcpu *host_vcpu = kern_hyp_va(arg);
977 struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
978
979 if (unlikely(is_protected_kvm_enabled())) {
980 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
981
982 if (!hyp_vcpu || hyp_vcpu->host_vcpu != host_vcpu) {
983 hyp_vcpu = NULL;
984 host_vcpu = NULL;
985 }
986 }
987
988 *hyp_vcpup = hyp_vcpu;
989 return host_vcpu;
990 }
991
992 #define get_host_hyp_vcpus(ctxt, regnr, hyp_vcpup) \
993 ({ \
994 DECLARE_REG(struct kvm_vcpu *, __vcpu, ctxt, regnr); \
995 __get_host_hyp_vcpus(__vcpu, hyp_vcpup); \
996 })
997
998 #define get_host_hyp_vcpus_from_vgic_v3_cpu_if(ctxt, regnr, hyp_vcpup) \
999 ({ \
1000 DECLARE_REG(struct vgic_v3_cpu_if *, cif, ctxt, regnr); \
1001 struct kvm_vcpu *__vcpu = container_of(cif, \
1002 struct kvm_vcpu, \
1003 arch.vgic_cpu.vgic_v3); \
1004 \
1005 __get_host_hyp_vcpus(__vcpu, hyp_vcpup); \
1006 })
1007
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
1009 {
1010 struct pkvm_hyp_vcpu *hyp_vcpu;
1011 struct kvm_vcpu *host_vcpu;
1012 u64 ret = ARM_EXCEPTION_IL;
1013
1014 host_vcpu = get_host_hyp_vcpus(host_ctxt, 1, &hyp_vcpu);
1015
1016 if (!host_vcpu)
1017 goto out;
1018
1019 if (unlikely(hyp_vcpu)) {
1020 /*
1021 * KVM (and pKVM) doesn't support SME guests for now, and
1022 * ensures that SME features aren't enabled in pstate when
* loading a vcpu. Therefore, if SME features are enabled, the
* host is misbehaving.
1025 */
1026 if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR)))
1027 goto out;
1028
1029 if (hyp_vcpu->power_state == PSCI_0_2_AFFINITY_LEVEL_ON_PENDING)
1030 pkvm_reset_vcpu(hyp_vcpu);
1031
1032 if (unlikely(hyp_vcpu->power_state != PSCI_0_2_AFFINITY_LEVEL_ON))
1033 goto out;
1034
1035 flush_hyp_vcpu(hyp_vcpu);
1036 ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
1037 sync_hyp_vcpu(hyp_vcpu, &ret);
1038 } else {
1039 /* The host is fully trusted, run its vCPU directly. */
1040 fpsimd_lazy_switch_to_guest(host_vcpu);
1041 ret = __kvm_vcpu_run(host_vcpu);
1042 fpsimd_lazy_switch_to_host(host_vcpu);
1043 }
1044 out:
1045 cpu_reg(host_ctxt, 1) = ret;
1046 }
1047
static void handle___pkvm_host_donate_guest(struct kvm_cpu_context *host_ctxt)
1049 {
1050 DECLARE_REG(u64, pfn, host_ctxt, 1);
1051 DECLARE_REG(u64, gfn, host_ctxt, 2);
1052 DECLARE_REG(u64, nr_pages, host_ctxt, 3);
1053 struct pkvm_hyp_vcpu *hyp_vcpu;
1054 int ret = -EINVAL;
1055
1056 if (!is_protected_kvm_enabled())
1057 goto out;
1058
1059 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1060 if (!hyp_vcpu || !pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1061 goto out;
1062
1063 ret = pkvm_refill_memcache(hyp_vcpu);
1064 if (ret)
1065 goto out;
1066
1067 ret = __pkvm_host_donate_guest(pfn, gfn, hyp_vcpu, nr_pages);
1068 out:
1069 cpu_reg(host_ctxt, 1) = ret;
1070 }
1071
static void handle___pkvm_host_donate_guest_sglist(struct kvm_cpu_context *host_ctxt)
1073 {
1074 struct pkvm_hyp_vcpu *hyp_vcpu;
1075 int ret = -EINVAL;
1076
1077 if (!is_protected_kvm_enabled())
1078 goto out;
1079
1080 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1081 if (!hyp_vcpu || !pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1082 goto out;
1083
1084 ret = pkvm_refill_memcache(hyp_vcpu);
1085 if (ret)
1086 goto out;
1087
1088 ret = __pkvm_host_donate_sglist_guest(hyp_vcpu);
1089
1090 out:
1091 cpu_reg(host_ctxt, 1) = ret;
1092 }
1093
static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
1095 {
1096 DECLARE_REG(u64, pfn, host_ctxt, 1);
1097 DECLARE_REG(u64, gfn, host_ctxt, 2);
1098 DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
1099 DECLARE_REG(u64, nr_pages, host_ctxt, 4);
1100 struct pkvm_hyp_vcpu *hyp_vcpu;
1101 int ret = -EINVAL;
1102
1103 if (!is_protected_kvm_enabled())
1104 goto out;
1105
1106 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1107 if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1108 goto out;
1109
1110 ret = pkvm_refill_memcache(hyp_vcpu);
1111 if (ret)
1112 goto out;
1113
1114 ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot, nr_pages);
1115 out:
1116 cpu_reg(host_ctxt, 1) = ret;
1117 }
1118
static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)
1120 {
1121 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1122 DECLARE_REG(u64, gfn, host_ctxt, 2);
1123 DECLARE_REG(u64, nr_pages, host_ctxt, 3);
1124 struct pkvm_hyp_vm *hyp_vm;
1125 int ret = -EINVAL;
1126
1127 if (!is_protected_kvm_enabled())
1128 goto out;
1129
1130 hyp_vm = get_np_pkvm_hyp_vm(handle);
1131 if (!hyp_vm)
1132 goto out;
1133
1134 ret = __pkvm_host_unshare_guest(gfn, hyp_vm, nr_pages);
1135 put_pkvm_hyp_vm(hyp_vm);
1136 out:
1137 cpu_reg(host_ctxt, 1) = ret;
1138 }
1139
static void handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt)
1141 {
1142 DECLARE_REG(u64, gfn, host_ctxt, 1);
1143 DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 2);
1144 struct pkvm_hyp_vcpu *hyp_vcpu;
1145 int ret = -EINVAL;
1146
1147 if (!is_protected_kvm_enabled())
1148 goto out;
1149
1150 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1151 if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1152 goto out;
1153
1154 ret = __pkvm_host_relax_perms_guest(gfn, hyp_vcpu, prot);
1155 out:
1156 cpu_reg(host_ctxt, 1) = ret;
1157 }
1158
static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt)
1160 {
1161 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1162 DECLARE_REG(u64, gfn, host_ctxt, 2);
1163 DECLARE_REG(u64, size, host_ctxt, 3);
1164 struct pkvm_hyp_vm *hyp_vm;
1165 int ret = -EINVAL;
1166
1167 if (!is_protected_kvm_enabled())
1168 goto out;
1169
1170 hyp_vm = get_pkvm_hyp_vm(handle);
1171 if (!hyp_vm)
1172 goto out;
1173
1174 ret = __pkvm_host_wrprotect_guest(gfn, hyp_vm, size);
1175 put_pkvm_hyp_vm(hyp_vm);
1176 out:
1177 cpu_reg(host_ctxt, 1) = ret;
1178 }
1179
static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt)
1181 {
1182 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1183 DECLARE_REG(u64, gfn, host_ctxt, 2);
1184 DECLARE_REG(u64, size, host_ctxt, 3);
1185 DECLARE_REG(bool, mkold, host_ctxt, 4);
1186 struct pkvm_hyp_vm *hyp_vm;
1187 int ret = -EINVAL;
1188
1189 if (!is_protected_kvm_enabled())
1190 goto out;
1191
1192 hyp_vm = get_np_pkvm_hyp_vm(handle);
1193 if (!hyp_vm)
1194 goto out;
1195
1196 ret = __pkvm_host_test_clear_young_guest(gfn, size, mkold, hyp_vm);
1197 put_pkvm_hyp_vm(hyp_vm);
1198 out:
1199 cpu_reg(host_ctxt, 1) = ret;
1200 }
1201
static void handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt)
1203 {
1204 DECLARE_REG(u64, gfn, host_ctxt, 1);
1205 struct pkvm_hyp_vcpu *hyp_vcpu;
1206 kvm_pte_t pte = 0;
1207
1208 if (!is_protected_kvm_enabled())
1209 goto out;
1210
1211 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1212 if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1213 goto out;
1214
1215 pte = __pkvm_host_mkyoung_guest(gfn, hyp_vcpu);
1216 out:
1217 cpu_reg(host_ctxt, 1) = pte;
1218 }
1219
static void handle___pkvm_host_split_guest(struct kvm_cpu_context *host_ctxt)
1221 {
1222 DECLARE_REG(u64, gfn, host_ctxt, 1);
1223 DECLARE_REG(u64, size, host_ctxt, 2);
1224 struct pkvm_hyp_vcpu *hyp_vcpu;
1225 int ret = -EINVAL;
1226
1227 if (!is_protected_kvm_enabled())
1228 goto out;
1229
1230 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1231 if (!hyp_vcpu)
1232 goto out;
1233
1234 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1235 goto out;
1236
1237 ret = __pkvm_host_split_guest(gfn, size, hyp_vcpu);
1238
1239 out:
1240 cpu_reg(host_ctxt, 1) = ret;
1241 }
1242
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
1244 {
1245 struct pkvm_hyp_vcpu *hyp_vcpu;
1246 struct kvm_vcpu *host_vcpu;
1247
1248 host_vcpu = get_host_hyp_vcpus(host_ctxt, 1, &hyp_vcpu);
1249 if (!host_vcpu)
1250 return;
1251
1252 if (hyp_vcpu) {
1253 /* This only applies to non-protected VMs */
1254 if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1255 return;
1256
1257 __kvm_adjust_pc(&hyp_vcpu->vcpu);
1258 } else {
1259 __kvm_adjust_pc(host_vcpu);
1260 }
1261 }
1262
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
1264 {
1265 __kvm_flush_vm_context();
1266 }
1267
static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
1269 {
1270 DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
1271 DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
1272 DECLARE_REG(int, level, host_ctxt, 3);
1273
1274 __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
1275 }
1276
static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
1278 {
1279 DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
1280 DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
1281 DECLARE_REG(int, level, host_ctxt, 3);
1282
1283 __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
1284 }
1285
1286 static void
handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
1288 {
1289 DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
1290 DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
1291 DECLARE_REG(unsigned long, pages, host_ctxt, 3);
1292
1293 __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
1294 }
1295
static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
1297 {
1298 DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
1299
1300 __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
1301 }
1302
static void handle___pkvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
1304 {
1305 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1306 struct pkvm_hyp_vm *hyp_vm;
1307
1308 if (!is_protected_kvm_enabled())
1309 return;
1310
1311 hyp_vm = get_pkvm_hyp_vm(handle);
1312 if (!hyp_vm)
1313 return;
1314
1315 __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
1316 put_pkvm_hyp_vm(hyp_vm);
1317 }
1318
static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
1320 {
1321 DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
1322
1323 __kvm_flush_cpu_context(kern_hyp_va(mmu));
1324 }
1325
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
1327 {
1328 __kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
1329 }
1330
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
1332 {
1333 u64 tmp;
1334
1335 tmp = read_sysreg_el2(SYS_SCTLR);
1336 tmp |= SCTLR_ELx_DSSBS;
1337 write_sysreg_el2(tmp, SYS_SCTLR);
1338 }
1339
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
1341 {
1342 cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
1343 }
1344
static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
1346 {
1347 __vgic_v3_init_lrs();
1348 }
1349
static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
1351 {
1352 cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
1353 }
1354
static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
1356 {
1357 struct pkvm_hyp_vcpu *hyp_vcpu;
1358 struct kvm_vcpu *host_vcpu;
1359
1360 host_vcpu = get_host_hyp_vcpus_from_vgic_v3_cpu_if(host_ctxt, 1,
1361 &hyp_vcpu);
1362 if (!host_vcpu)
1363 return;
1364
1365 if (unlikely(hyp_vcpu)) {
1366 struct vgic_v3_cpu_if *hyp_cpu_if, *host_cpu_if;
1367 int i;
1368
1369 hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
1370 __vgic_v3_save_vmcr_aprs(hyp_cpu_if);
1371
1372 host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
1373 host_cpu_if->vgic_vmcr = hyp_cpu_if->vgic_vmcr;
1374 for (i = 0; i < ARRAY_SIZE(host_cpu_if->vgic_ap0r); i++) {
1375 host_cpu_if->vgic_ap0r[i] = hyp_cpu_if->vgic_ap0r[i];
1376 host_cpu_if->vgic_ap1r[i] = hyp_cpu_if->vgic_ap1r[i];
1377 }
1378 } else {
1379 __vgic_v3_save_vmcr_aprs(&host_vcpu->arch.vgic_cpu.vgic_v3);
1380 }
1381 }
1382
static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
1384 {
1385 struct pkvm_hyp_vcpu *hyp_vcpu;
1386 struct kvm_vcpu *host_vcpu;
1387
1388 host_vcpu = get_host_hyp_vcpus_from_vgic_v3_cpu_if(host_ctxt, 1,
1389 &hyp_vcpu);
1390 if (!host_vcpu)
1391 return;
1392
1393 if (unlikely(hyp_vcpu)) {
1394 struct vgic_v3_cpu_if *hyp_cpu_if, *host_cpu_if;
1395 int i;
1396
1397 hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
1398 host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
1399
1400 hyp_cpu_if->vgic_vmcr = host_cpu_if->vgic_vmcr;
1401 /* Should be a one-off */
1402 hyp_cpu_if->vgic_sre = (ICC_SRE_EL1_DIB |
1403 ICC_SRE_EL1_DFB |
1404 ICC_SRE_EL1_SRE);
1405 for (i = 0; i < ARRAY_SIZE(host_cpu_if->vgic_ap0r); i++) {
1406 hyp_cpu_if->vgic_ap0r[i] = host_cpu_if->vgic_ap0r[i];
1407 hyp_cpu_if->vgic_ap1r[i] = host_cpu_if->vgic_ap1r[i];
1408 }
1409
1410 __vgic_v3_restore_vmcr_aprs(hyp_cpu_if);
1411 } else {
1412 __vgic_v3_restore_vmcr_aprs(&host_vcpu->arch.vgic_cpu.vgic_v3);
1413 }
1414 }
1415
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
1417 {
1418 DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
1419 DECLARE_REG(unsigned long, size, host_ctxt, 2);
1420 DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
1421 DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
1422 DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);
1423
1424 /*
1425 * __pkvm_init() will return only if an error occurred, otherwise it
* will tail-call into __pkvm_init_finalise(), which will have to deal
1427 * with the host context directly.
1428 */
1429 cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
1430 hyp_va_bits);
1431 }
1432
static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
1434 {
1435 DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);
1436
1437 cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
1438 }
1439
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
1441 {
1442 DECLARE_REG(u64, pfn, host_ctxt, 1);
1443
1444 cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
1445 }
1446
static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
1448 {
1449 DECLARE_REG(u64, pfn, host_ctxt, 1);
1450
1451 cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
1452 }
1453
static void handle___pkvm_reclaim_dying_guest_page(struct kvm_cpu_context *host_ctxt)
1455 {
1456 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1457 DECLARE_REG(u64, pfn, host_ctxt, 2);
1458 DECLARE_REG(u64, gfn, host_ctxt, 3);
1459 DECLARE_REG(u64, order, host_ctxt, 4);
1460
1461 cpu_reg(host_ctxt, 1) =
1462 __pkvm_reclaim_dying_guest_page(handle, pfn, gfn, order);
1463 }
1464
static void handle___pkvm_reclaim_dying_guest_ffa_resources(struct kvm_cpu_context *host_ctxt)
1466 {
1467 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1468
1469 cpu_reg(host_ctxt, 1) = __pkvm_reclaim_dying_guest_ffa_resources(handle);
1470 }
1471
static void handle___pkvm_notify_guest_vm_avail(struct kvm_cpu_context *host_ctxt)
1473 {
1474 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1475
1476 cpu_reg(host_ctxt, 1) = __pkvm_notify_guest_vm_avail(handle);
1477 }
1478
static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
1480 {
1481 DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
1482 DECLARE_REG(size_t, size, host_ctxt, 2);
1483 DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
1484
1485 /*
1486 * __pkvm_create_private_mapping() populates a pointer with the
1487 * hypervisor start address of the allocation.
1488 *
* However, the handle___pkvm_create_private_mapping() hypercall crosses the
1490 * EL1/EL2 boundary so the pointer would not be valid in this context.
1491 *
1492 * Instead pass the allocation address as the return value (or return
1493 * ERR_PTR() on failure).
1494 */
1495 unsigned long haddr;
1496 int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);
1497
1498 if (err)
1499 haddr = (unsigned long)ERR_PTR(err);
1500
1501 cpu_reg(host_ctxt, 1) = haddr;
1502 }
1503
static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
1505 {
1506 cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
1507 }
1508
static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
1510 {
1511 DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
1512 DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 2);
1513
1514 host_kvm = kern_hyp_va(host_kvm);
1515 cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, pgd_hva);
1516 cpu_reg(host_ctxt, 3) = hyp_alloc_missing_donations();
1517 }
1518
static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
1520 {
1521 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1522 DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
1523
1524 host_vcpu = kern_hyp_va(host_vcpu);
1525 cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu);
1526 cpu_reg(host_ctxt, 3) = hyp_alloc_missing_donations();
1527 }
1528
static void handle___pkvm_start_teardown_vm(struct kvm_cpu_context *host_ctxt)
1530 {
1531 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1532
1533 cpu_reg(host_ctxt, 1) = __pkvm_start_teardown_vm(handle);
1534 }
1535
static void handle___pkvm_finalize_teardown_vm(struct kvm_cpu_context *host_ctxt)
1537 {
1538 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1539
1540 cpu_reg(host_ctxt, 1) = __pkvm_finalize_teardown_vm(handle);
1541 }
1542
static void handle___pkvm_update_clock_tracing(struct kvm_cpu_context *host_ctxt)
1544 {
1545 DECLARE_REG(u32, mult, host_ctxt, 1);
1546 DECLARE_REG(u32, shift, host_ctxt, 2);
1547 DECLARE_REG(u64, epoch_ns, host_ctxt, 3);
1548 DECLARE_REG(u64, epoch_cyc, host_ctxt, 4);
1549
1550 __pkvm_update_clock_tracing(mult, shift, epoch_ns, epoch_cyc);
1551
1552 cpu_reg(host_ctxt, 1) = 0;
1553 }
1554
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
1556 {
1557 DECLARE_REG(unsigned long, desc_hva, host_ctxt, 1);
1558 DECLARE_REG(size_t, desc_size, host_ctxt, 2);
1559
1560 cpu_reg(host_ctxt, 1) = __pkvm_load_tracing(desc_hva, desc_size);
1561 cpu_reg(host_ctxt, 3) = hyp_alloc_missing_donations();
1562 }
1563
static void handle___pkvm_teardown_tracing(struct kvm_cpu_context *host_ctxt)
1565 {
1566 __pkvm_teardown_tracing();
1567
1568 cpu_reg(host_ctxt, 1) = 0;
1569 }
1570
static void handle___pkvm_enable_tracing(struct kvm_cpu_context *host_ctxt)
1572 {
1573 DECLARE_REG(bool, enable, host_ctxt, 1);
1574
1575 cpu_reg(host_ctxt, 1) = __pkvm_enable_tracing(enable);
1576 }
1577
static void handle___pkvm_reset_tracing(struct kvm_cpu_context *host_ctxt)
1579 {
1580 DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
1581
1582 cpu_reg(host_ctxt, 1) = __pkvm_reset_tracing(cpu);
1583 }
1584
static void handle___pkvm_swap_reader_tracing(struct kvm_cpu_context *host_ctxt)
1586 {
1587 DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
1588
1589 cpu_reg(host_ctxt, 1) = __pkvm_swap_reader_tracing(cpu);
1590 }
1591
static void handle___pkvm_enable_event(struct kvm_cpu_context *host_ctxt)
1593 {
1594 DECLARE_REG(unsigned short, id, host_ctxt, 1);
1595 DECLARE_REG(bool, enable, host_ctxt, 2);
1596
1597 cpu_reg(host_ctxt, 1) = __pkvm_enable_event(id, enable);
1598 }
1599
static void handle___pkvm_selftest_event(struct kvm_cpu_context *host_ctxt)
1601 {
1602 int smc_ret = SMCCC_RET_NOT_SUPPORTED, ret = -EOPNOTSUPP;
1603
1604 #ifdef CONFIG_PKVM_SELFTESTS
1605 trace_selftest();
1606 smc_ret = SMCCC_RET_SUCCESS;
1607 ret = 0;
1608 #endif
1609 cpu_reg(host_ctxt, 0) = smc_ret;
1610 cpu_reg(host_ctxt, 1) = ret;
1611 }
1612
static void handle___pkvm_sync_ftrace(struct kvm_cpu_context *host_ctxt)
1614 {
1615 DECLARE_REG(unsigned long, host_func_pg, host_ctxt, 1);
1616
1617 cpu_reg(host_ctxt, 1) = __pkvm_sync_ftrace(host_func_pg);
1618 }
1619
static void handle___pkvm_disable_ftrace(struct kvm_cpu_context *host_ctxt)
1621 {
1622 cpu_reg(host_ctxt, 1) = __pkvm_disable_ftrace();
1623 }
1624
static void handle___pkvm_alloc_module_va(struct kvm_cpu_context *host_ctxt)
1626 {
1627 DECLARE_REG(u64, nr_pages, host_ctxt, 1);
1628
1629 cpu_reg(host_ctxt, 1) = (u64)__pkvm_alloc_module_va(nr_pages);
1630 }
1631
static void handle___pkvm_map_module_page(struct kvm_cpu_context *host_ctxt)
1633 {
1634 DECLARE_REG(u64, pfn, host_ctxt, 1);
1635 DECLARE_REG(void *, va, host_ctxt, 2);
1636 DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
1637
1638 cpu_reg(host_ctxt, 1) = (u64)__pkvm_map_module_page(pfn, va, prot, false);
1639 }
1640
static void handle___pkvm_unmap_module_page(struct kvm_cpu_context *host_ctxt)
1642 {
1643 DECLARE_REG(u64, pfn, host_ctxt, 1);
1644 DECLARE_REG(void *, va, host_ctxt, 2);
1645
1646 __pkvm_unmap_module_page(pfn, va);
1647 }
1648
static void handle___pkvm_init_module(struct kvm_cpu_context *host_ctxt)
1650 {
1651 DECLARE_REG(void *, host_mod, host_ctxt, 1);
1652
1653 cpu_reg(host_ctxt, 1) = __pkvm_init_module(host_mod);
1654 }
1655
static void handle___pkvm_register_hcall(struct kvm_cpu_context *host_ctxt)
1657 {
1658 DECLARE_REG(unsigned long, hfn_hyp_va, host_ctxt, 1);
1659
1660 cpu_reg(host_ctxt, 1) = __pkvm_register_hcall(hfn_hyp_va);
1661 }
1662
static void handle___pkvm_hyp_alloc_mgt_refill(struct kvm_cpu_context *host_ctxt)
1664 {
1665 DECLARE_REG(unsigned long, id, host_ctxt, 1);
1666 DECLARE_REG(phys_addr_t, phys, host_ctxt, 2);
1667 DECLARE_REG(unsigned long, nr_pages, host_ctxt, 3);
1668 struct kvm_hyp_memcache mc = {
1669 .head = phys,
1670 .nr_pages = nr_pages,
1671 };
1672
1673 cpu_reg(host_ctxt, 1) = hyp_alloc_mgt_refill(id, &mc);
1674 cpu_reg(host_ctxt, 2) = mc.head;
1675 cpu_reg(host_ctxt, 3) = mc.nr_pages;
1676 }
1677
static void handle___pkvm_hyp_alloc_mgt_reclaimable(struct kvm_cpu_context *host_ctxt)
1679 {
1680 cpu_reg(host_ctxt, 1) = hyp_alloc_mgt_reclaimable();
1681 }
1682
1683 static void handle___pkvm_hyp_alloc_mgt_reclaim(struct kvm_cpu_context *host_ctxt)
1684 {
1685 DECLARE_REG(int, target, host_ctxt, 1);
1686 struct kvm_hyp_memcache mc = {
1687 .head = 0,
1688 .nr_pages = 0,
1689 };
1690
1691 hyp_alloc_mgt_reclaim(&mc, target);
1692
1693 cpu_reg(host_ctxt, 1) = mc.head;
1694 cpu_reg(host_ctxt, 2) = mc.nr_pages;
1695 }
1696
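/*
 * Host IOMMU hypercalls: each handler forwards its arguments to the
 * corresponding kvm_iommu_*() call and encodes the result, along with any
 * pending per-CPU hypervisor request, into the SMCCC return registers.
 */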
1697 static void handle___pkvm_host_iommu_alloc_domain(struct kvm_cpu_context *host_ctxt)
1698 {
1699 int ret;
1700 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1701 DECLARE_REG(int, type, host_ctxt, 2);
1702
1703 ret = kvm_iommu_alloc_domain(domain, type);
1704 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1705 }
1706
1707 static void handle___pkvm_host_iommu_free_domain(struct kvm_cpu_context *host_ctxt)
1708 {
1709 int ret;
1710 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1711
1712 ret = kvm_iommu_free_domain(domain);
1713 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1714 }
1715
1716 static void handle___pkvm_host_iommu_attach_dev(struct kvm_cpu_context *host_ctxt)
1717 {
1718 int ret;
1719 DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
1720 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
1721 DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
1722 DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
1723 DECLARE_REG(unsigned int, pasid_bits, host_ctxt, 5);
1724 DECLARE_REG(unsigned long, flags, host_ctxt, 6);
1725
1726 ret = kvm_iommu_attach_dev(iommu, domain, endpoint,
1727 pasid, pasid_bits, flags);
1728 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1729 }
1730
1731 static void handle___pkvm_host_iommu_detach_dev(struct kvm_cpu_context *host_ctxt)
1732 {
1733 int ret;
1734 DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 1);
1735 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 2);
1736 DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
1737 DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
1738
1739 ret = kvm_iommu_detach_dev(iommu, domain, endpoint, pasid);
1740 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1741 }
1742
1743 static void handle___pkvm_host_iommu_map_pages(struct kvm_cpu_context *host_ctxt)
1744 {
1745 int ret;
1746 unsigned long mapped = 0;
1747 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1748 DECLARE_REG(unsigned long, iova, host_ctxt, 2);
1749 DECLARE_REG(phys_addr_t, paddr, host_ctxt, 3);
1750 DECLARE_REG(size_t, pgsize, host_ctxt, 4);
1751 DECLARE_REG(size_t, pgcount, host_ctxt, 5);
1752 DECLARE_REG(unsigned int, prot, host_ctxt, 6);
1753
1754 ret = kvm_iommu_map_pages(domain, iova, paddr,
1755 pgsize, pgcount, prot, &mapped);
1756 cpu_reg(host_ctxt, 0) = ret;
1757 hyp_reqs_smccc_encode(mapped, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1758 }
1759
1760 static void handle___pkvm_host_iommu_unmap_pages(struct kvm_cpu_context *host_ctxt)
1761 {
1762 unsigned long ret;
1763 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1764 DECLARE_REG(unsigned long, iova, host_ctxt, 2);
1765 DECLARE_REG(size_t, pgsize, host_ctxt, 3);
1766 DECLARE_REG(size_t, pgcount, host_ctxt, 4);
1767
1768 ret = kvm_iommu_unmap_pages(domain, iova,
1769 pgsize, pgcount);
1770 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1771 }
1772
1773 static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_ctxt)
1774 {
1775 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1776 DECLARE_REG(unsigned long, iova, host_ctxt, 2);
1777
1778 cpu_reg(host_ctxt, 1) = kvm_iommu_iova_to_phys(domain, iova);
1779 }
1780
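/* Forward a host power-domain on/off request for @device_id to pkvm_host_hvc_pd(). */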
1781 static void handle___pkvm_host_hvc_pd(struct kvm_cpu_context *host_ctxt)
1782 {
1783 DECLARE_REG(u64, device_id, host_ctxt, 1);
1784 DECLARE_REG(u64, on, host_ctxt, 2);
1785
1786 cpu_reg(host_ctxt, 1) = pkvm_host_hvc_pd(device_id, on);
1787 }
1788
1789 static void handle___pkvm_iommu_init(struct kvm_cpu_context *host_ctxt)
1790 {
1791 DECLARE_REG(struct kvm_iommu_ops *, ops, host_ctxt, 1);
1792 DECLARE_REG(unsigned long, mc_head, host_ctxt, 2);
1793 DECLARE_REG(unsigned long, nr_pages, host_ctxt, 3);
1794 struct kvm_hyp_memcache mc = {.head = mc_head, .nr_pages = nr_pages};
1795
1796 cpu_reg(host_ctxt, 1) = kvm_iommu_init(ops, &mc);
1797 }
1798
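/*
 * Page-table dump interface, multiplexed on @op: GET_LEVEL and GET_RANGE
 * query the dump configuration, WALK_RANGE fills the provided log buffer,
 * and any other op is rejected as unsupported.
 */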
1799 static void handle___pkvm_ptdump(struct kvm_cpu_context *host_ctxt)
1800 {
1801 DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
1802 DECLARE_REG(enum pkvm_ptdump_ops, op, host_ctxt, 2);
1803 DECLARE_REG(struct pkvm_ptdump_log_hdr *, log, host_ctxt, 3);
1804
1805 if (op == PKVM_PTDUMP_GET_LEVEL || op == PKVM_PTDUMP_GET_RANGE)
1806 cpu_reg(host_ctxt, 1) = __pkvm_ptdump_get_config(handle, op);
1807 else if (op == PKVM_PTDUMP_WALK_RANGE)
1808 cpu_reg(host_ctxt, 1) = __pkvm_ptdump_walk_range(handle, log);
1809 else
1810 cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
1811 }
1812
1813 static void handle___pkvm_devices_init(struct kvm_cpu_context *host_ctxt)
1814 {
1815 /*
1816 * Devices must be initialised after the IOMMU driver is initialised.
1817 * We do this in a separate HVC to avoid complexity.
1818 */
1819 cpu_reg(host_ctxt, 1) = pkvm_init_devices();
1820 }
1821
1822 static void handle___pkvm_host_iommu_map_sg(struct kvm_cpu_context *host_ctxt)
1823 {
1824 unsigned long ret;
1825 DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
1826 DECLARE_REG(unsigned long, iova, host_ctxt, 2);
1827 DECLARE_REG(struct kvm_iommu_sg *, sg, host_ctxt, 3);
1828 DECLARE_REG(unsigned int, nent, host_ctxt, 4);
1829 DECLARE_REG(unsigned int, prot, host_ctxt, 5);
1830
1831 ret = kvm_iommu_map_sg(domain, iova, kern_hyp_va(sg), nent, prot);
1832 hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
1833 }
1834
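/*
 * Donation of MMIO pages from the host to the hypervisor, with the matching
 * reclaim path below. Both silently return when protected mode is disabled.
 */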
1835 static void handle___pkvm_host_donate_hyp_mmio(struct kvm_cpu_context *host_ctxt)
1836 {
1837 DECLARE_REG(u64, pfn, host_ctxt, 1);
1838 DECLARE_REG(u64, nr_pages, host_ctxt, 2);
1839
1840 if (!is_protected_kvm_enabled())
1841 return;
1842
1843 cpu_reg(host_ctxt, 1) = pkvm_device_hyp_assign_mmio(pfn, nr_pages);
1844 }
1845
1846 static void handle___pkvm_host_reclaim_hyp_mmio(struct kvm_cpu_context *host_ctxt)
1847 {
1848 DECLARE_REG(u64, pfn, host_ctxt, 1);
1849 DECLARE_REG(u64, nr_pages, host_ctxt, 2);
1850
1851 if (!is_protected_kvm_enabled())
1852 return;
1853
1854 cpu_reg(host_ctxt, 1) = pkvm_device_reclaim_mmio(pfn, nr_pages);
1855 }
1856
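/*
 * Map a host-provided MMIO page into the currently loaded guest. This only
 * proceeds when protected mode is enabled, a vCPU is loaded and the VM is
 * protected; the per-vCPU memcache is topped up before the map is attempted.
 */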
1857 static void handle___pkvm_host_map_guest_mmio(struct kvm_cpu_context *host_ctxt)
1858 {
1859 DECLARE_REG(u64, pfn, host_ctxt, 1);
1860 DECLARE_REG(u64, gfn, host_ctxt, 2);
1861 struct pkvm_hyp_vcpu *hyp_vcpu;
1862 int ret = -EINVAL;
1863
1864 if (!is_protected_kvm_enabled())
1865 goto out;
1866
1867 hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
1868 if (!hyp_vcpu)
1869 goto out;
1870
1871 if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
1872 goto out;
1873
1874 /* Top-up our per-vcpu memcache from the host's */
1875 ret = pkvm_refill_memcache(hyp_vcpu);
1876 if (ret)
1877 goto out;
1878
1879 ret = pkvm_host_map_guest_mmio(hyp_vcpu, pfn, gfn);
1880
1881 out:
1882 cpu_reg(host_ctxt, 1) = ret;
1883 }
1884
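/*
 * Para-virtual IOMMU plumbing: attach a pvIOMMU instance to a VM and
 * populate its stream-ID table, presumably mapping the guest-visible vSID
 * onto a physical (iommu, sid) pair.
 */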
1885 static void handle___pkvm_pviommu_attach(struct kvm_cpu_context *host_ctxt)
1886 {
1887 DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
1888 DECLARE_REG(int, pviommu, host_ctxt, 2);
1889
1890 cpu_reg(host_ctxt, 1) = pkvm_pviommu_attach(host_kvm, pviommu);
1891 }
1892
1893 static void handle___pkvm_pviommu_add_vsid(struct kvm_cpu_context *host_ctxt)
1894 {
1895 DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
1896 DECLARE_REG(pkvm_handle_t, pviommu, host_ctxt, 2);
1897 DECLARE_REG(pkvm_handle_t, iommu, host_ctxt, 3);
1898 DECLARE_REG(pkvm_handle_t, sid, host_ctxt, 4);
1899 DECLARE_REG(pkvm_handle_t, vsid, host_ctxt, 5);
1900
1901 cpu_reg(host_ctxt, 1) = pkvm_pviommu_add_vsid(host_kvm, pviommu, iommu, sid, vsid);
1902 }
1903
1904 static void handle___pkvm_host_get_ffa_version(struct kvm_cpu_context *host_ctxt)
1905 {
1906 cpu_reg(host_ctxt, 1) = ffa_get_hypervisor_version();
1907 }
1908
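/*
 * Table of host hypercall handlers, indexed by the call's
 * __KVM_HOST_SMCCC_FUNC_* identifier.
 */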
1909 typedef void (*hcall_t)(struct kvm_cpu_context *);
1910
1911 #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
1912
1913 static const hcall_t host_hcall[] = {
1914 /* ___kvm_hyp_init */
1915 HANDLE_FUNC(__kvm_get_mdcr_el2),
1916 HANDLE_FUNC(__pkvm_init),
1917 HANDLE_FUNC(__pkvm_create_private_mapping),
1918 HANDLE_FUNC(__pkvm_cpu_set_vector),
1919 HANDLE_FUNC(__kvm_enable_ssbs),
1920 HANDLE_FUNC(__vgic_v3_init_lrs),
1921 HANDLE_FUNC(__vgic_v3_get_gic_config),
1922 HANDLE_FUNC(__kvm_flush_vm_context),
1923 HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
1924 HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
1925 HANDLE_FUNC(__kvm_tlb_flush_vmid),
1926 HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
1927 HANDLE_FUNC(__kvm_flush_cpu_context),
1928 HANDLE_FUNC(__pkvm_alloc_module_va),
1929 HANDLE_FUNC(__pkvm_map_module_page),
1930 HANDLE_FUNC(__pkvm_unmap_module_page),
1931 HANDLE_FUNC(__pkvm_init_module),
1932 HANDLE_FUNC(__pkvm_register_hcall),
1933 HANDLE_FUNC(__pkvm_iommu_init),
1934 HANDLE_FUNC(__pkvm_devices_init),
1935 HANDLE_FUNC(__pkvm_prot_finalize),
1936
1937 HANDLE_FUNC(__pkvm_host_share_hyp),
1938 HANDLE_FUNC(__pkvm_host_unshare_hyp),
1939 HANDLE_FUNC(__pkvm_host_donate_guest),
1940 HANDLE_FUNC(__pkvm_host_donate_guest_sglist),
1941 HANDLE_FUNC(__pkvm_host_share_guest),
1942 HANDLE_FUNC(__pkvm_host_unshare_guest),
1943 HANDLE_FUNC(__pkvm_host_relax_perms_guest),
1944 HANDLE_FUNC(__pkvm_host_wrprotect_guest),
1945 HANDLE_FUNC(__pkvm_host_test_clear_young_guest),
1946 HANDLE_FUNC(__pkvm_host_mkyoung_guest),
1947 HANDLE_FUNC(__pkvm_host_split_guest),
1948 HANDLE_FUNC(__kvm_adjust_pc),
1949 HANDLE_FUNC(__kvm_vcpu_run),
1950 HANDLE_FUNC(__kvm_timer_set_cntvoff),
1951 HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
1952 HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
1953 HANDLE_FUNC(__pkvm_init_vm),
1954 HANDLE_FUNC(__pkvm_init_vcpu),
1955 HANDLE_FUNC(__pkvm_start_teardown_vm),
1956 HANDLE_FUNC(__pkvm_finalize_teardown_vm),
1957 HANDLE_FUNC(__pkvm_reclaim_dying_guest_page),
1958 HANDLE_FUNC(__pkvm_reclaim_dying_guest_ffa_resources),
1959 HANDLE_FUNC(__pkvm_notify_guest_vm_avail),
1960 HANDLE_FUNC(__pkvm_vcpu_load),
1961 HANDLE_FUNC(__pkvm_vcpu_put),
1962 HANDLE_FUNC(__pkvm_vcpu_sync_state),
1963 HANDLE_FUNC(__pkvm_update_clock_tracing),
1964 HANDLE_FUNC(__pkvm_load_tracing),
1965 HANDLE_FUNC(__pkvm_teardown_tracing),
1966 HANDLE_FUNC(__pkvm_enable_tracing),
1967 HANDLE_FUNC(__pkvm_reset_tracing),
1968 HANDLE_FUNC(__pkvm_swap_reader_tracing),
1969 HANDLE_FUNC(__pkvm_enable_event),
1970 HANDLE_FUNC(__pkvm_selftest_event),
1971 HANDLE_FUNC(__pkvm_sync_ftrace),
1972 HANDLE_FUNC(__pkvm_disable_ftrace),
1973 HANDLE_FUNC(__pkvm_tlb_flush_vmid),
1974 HANDLE_FUNC(__pkvm_hyp_alloc_mgt_refill),
1975 HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaimable),
1976 HANDLE_FUNC(__pkvm_hyp_alloc_mgt_reclaim),
1977 HANDLE_FUNC(__pkvm_host_iommu_alloc_domain),
1978 HANDLE_FUNC(__pkvm_host_iommu_free_domain),
1979 HANDLE_FUNC(__pkvm_host_iommu_attach_dev),
1980 HANDLE_FUNC(__pkvm_host_iommu_detach_dev),
1981 HANDLE_FUNC(__pkvm_host_iommu_map_pages),
1982 HANDLE_FUNC(__pkvm_host_iommu_unmap_pages),
1983 HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys),
1984 HANDLE_FUNC(__pkvm_host_hvc_pd),
1985 HANDLE_FUNC(__pkvm_ptdump),
1986 HANDLE_FUNC(__pkvm_host_iommu_map_sg),
1987 HANDLE_FUNC(__pkvm_host_donate_hyp_mmio),
1988 HANDLE_FUNC(__pkvm_host_reclaim_hyp_mmio),
1989 HANDLE_FUNC(__pkvm_host_map_guest_mmio),
1990 HANDLE_FUNC(__pkvm_pviommu_attach),
1991 HANDLE_FUNC(__pkvm_pviommu_add_vsid),
1992 HANDLE_FUNC(__pkvm_host_get_ffa_version),
1993 };
1994
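/*
 * Decode and dispatch an HVC from the host: strip the SMCCC hint bits,
 * rebase the function id, give dynamically registered hypercalls first
 * refusal, then look the id up in host_hcall[]. Out-of-range or
 * unimplemented ids return SMCCC_RET_NOT_SUPPORTED.
 */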
1995 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
1996 {
1997 DECLARE_REG(unsigned long, id, host_ctxt, 0);
1998 unsigned long hcall_min = 0;
1999 hcall_t hfn;
2000
2001 /*
2002 * If pKVM has been initialised then reject any calls to the
2003 * early "privileged" hypercalls. Note that we cannot reject
2004 * calls to __pkvm_prot_finalize for two reasons: (1) The static
2005 * key used to determine initialisation must be toggled prior to
2006 * finalisation and (2) finalisation is performed on a per-CPU
2007 * basis. This is all fine, however, since __pkvm_prot_finalize
2008 * returns -EPERM after the first call for a given CPU.
2009 */
2010 if (static_branch_unlikely(&kvm_protected_mode_initialized))
2011 hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
2012
2013 id &= ~ARM_SMCCC_CALL_HINTS;
2014 id -= KVM_HOST_SMCCC_ID(0);
2015
2016 if (handle_host_dynamic_hcall(&host_ctxt->regs, id) == HCALL_HANDLED)
2017 goto end;
2018
2019 if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
2020 goto inval;
2021
2022 hfn = host_hcall[id];
2023 if (unlikely(!hfn))
2024 goto inval;
2025
2026 cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
2027 hfn(host_ctxt);
2028 end:
2029 trace_host_hcall(id, 0);
2030
2031 return;
2032 inval:
2033 trace_host_hcall(id, 1);
2034 cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
2035 }
2036
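/*
 * SMCs trapped from the host are offered, in turn, to the PSCI, FF-A, SCMI
 * and module handlers; anything left unhandled is forwarded on via
 * __kvm_hyp_host_forward_smc(). The trapped instruction is always skipped.
 */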
2037 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
2038 {
2039 DECLARE_REG(u64, func_id, host_ctxt, 0);
2040 bool handled;
2041
2042 func_id &= ~ARM_SMCCC_CALL_HINTS;
2043
2044 handled = kvm_host_psci_handler(host_ctxt, func_id);
2045 if (!handled)
2046 handled = kvm_host_ffa_handler(host_ctxt, func_id);
2047 if (!handled)
2048 handled = kvm_host_scmi_handler(host_ctxt);
2049 if (!handled)
2050 handled = module_handle_host_smc(&host_ctxt->regs);
2051 if (!handled) {
2052 __hyp_exit();
2053 __kvm_hyp_host_forward_smc(host_ctxt);
2054 __hyp_enter();
2055 }
2056
2057 trace_host_smc(func_id, !handled);
2058
2059 /* SMC was trapped, move ELR past the current PC. */
2060 kvm_skip_host_instr();
2061 }
2062
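/*
 * Top-level EL2 trap handler for the host: dispatch on the exception class,
 * treating anything other than an HVC, SMC or host memory abort as fatal
 * unless a registered default trap handler claims it.
 */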
2063 void handle_trap(struct kvm_cpu_context *host_ctxt)
2064 {
2065 u64 esr = read_sysreg_el2(SYS_ESR);
2066
2067 __hyp_enter();
2068
2069 switch (ESR_ELx_EC(esr)) {
2070 case ESR_ELx_EC_HVC64:
2071 handle_host_hcall(host_ctxt);
2072 break;
2073 case ESR_ELx_EC_SMC64:
2074 handle_host_smc(host_ctxt);
2075 break;
2076 case ESR_ELx_EC_IABT_LOW:
2077 case ESR_ELx_EC_DABT_LOW:
2078 handle_host_mem_abort(host_ctxt);
2079 break;
2080 default:
2081 BUG_ON(!READ_ONCE(default_trap_handler) || !default_trap_handler(&host_ctxt->regs));
2082 }
2083
2084 __hyp_exit();
2085 }
2086