/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Misconfiguration events that can undermine pKVM security.
 */
enum pkvm_system_misconfiguration {
	NO_DMA_ISOLATION,
};

/*
 * Holds the relevant data for maintaining the vCPU state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us; otherwise NULL.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;

	/* Tracks the exit code for the protected guest. */
	u32 exit_code;

	/*
	 * Tracks the power state transition of a protected vCPU.
	 * Can be in one of three states:
	 * PSCI_0_2_AFFINITY_LEVEL_ON
	 * PSCI_0_2_AFFINITY_LEVEL_OFF
	 * PSCI_0_2_AFFINITY_LEVEL_PENDING
	 */
	int power_state;
};
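
/*
 * Because 'vcpu' is embedded in struct pkvm_hyp_vcpu, the enclosing hyp
 * vCPU can be recovered from a plain kvm_vcpu pointer with container_of(),
 * mirroring pkvm_hyp_vcpu_to_hyp_vm() below. A minimal sketch
 * (illustrative only; this helper is not declared in this header):
 *
 *	static inline struct pkvm_hyp_vcpu *to_hyp_vcpu(struct kvm_vcpu *vcpu)
 *	{
 *		return container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
 *	}
 */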

/*
 * Holds the relevant data for running a protected VM.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/* Primary vCPU pending entry to the pvmfw. */
	struct pkvm_hyp_vcpu *pvmfw_entry_vcpu;

	/*
	 * The number of vCPUs initialized and ready to run.
	 * Modifying this is protected by 'vm_table_lock'.
	 */
	unsigned int nr_vcpus;

	/*
	 * True when the guest is being torn down. When in this state, the
	 * guest's vCPUs can't be loaded anymore, but its pages can be
	 * reclaimed by the host.
	 */
	bool is_dying;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};
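
/*
 * A minimal sketch of walking the flexible vcpus[] array (illustrative
 * only; since 'vm_table_lock' protects modifications to nr_vcpus, the
 * caller must ensure the count is stable while iterating):
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < vm->nr_vcpus; i++) {
 *		struct pkvm_hyp_vcpu *hyp_vcpu = vm->vcpus[i];
 *
 *		...operate on hyp_vcpu...
 *	}
 */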

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
{
	if (!is_protected_kvm_enabled())
		return false;

	return vcpu->kvm->arch.pkvm.enabled;
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;

void pkvm_hyp_vm_table_init(void *tbl);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva, unsigned long last_ran_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_start_teardown_vm(pkvm_handle_t handle);
int __pkvm_finalize_teardown_vm(pkvm_handle_t handle);
int __pkvm_reclaim_dying_guest_page(pkvm_handle_t handle, u64 pfn, u64 ipa);
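
/*
 * A rough host-side ordering sketch for VM teardown (illustrative only;
 * 'handle' and the (pfn, ipa) pairs are hypothetical values the host is
 * assumed to track for its donated pages):
 *
 *	__pkvm_start_teardown_vm(handle);	- mark the VM as dying
 *	for each mapped (pfn, ipa):
 *		__pkvm_reclaim_dying_guest_page(handle, pfn, ipa);
 *	__pkvm_finalize_teardown_vm(handle);	- release remaining hyp state
 */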

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
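
/*
 * A minimal usage sketch for the load/put pair (illustrative only;
 * 'handle' and 'idx' are hypothetical values supplied by the host):
 *
 *	struct pkvm_hyp_vcpu *hyp_vcpu;
 *
 *	hyp_vcpu = pkvm_load_hyp_vcpu(handle, idx);
 *	if (!hyp_vcpu)
 *		return;
 *
 *	...use the vCPU; while it remains loaded on this physical CPU,
 *	pkvm_get_loaded_hyp_vcpu() returns the same pointer...
 *
 *	pkvm_put_hyp_vcpu(hyp_vcpu);
 */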

u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);

bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);

struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *vm, u64 mpidr);

static inline bool pkvm_hyp_vm_has_pvmfw(struct pkvm_hyp_vm *vm)
{
	return vm->kvm.arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR;
}

static inline bool pkvm_ipa_range_has_pvmfw(struct pkvm_hyp_vm *vm,
					    u64 ipa_start, u64 ipa_end)
{
	struct kvm_protected_vm *pkvm = &vm->kvm.arch.pkvm;
	u64 pvmfw_load_end = pkvm->pvmfw_load_addr + pvmfw_size;

	if (!pkvm_hyp_vm_has_pvmfw(vm))
		return false;

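	/* Overlap test on the half-open ranges [ipa_start, ipa_end) and [load_addr, load_end). */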
	return ipa_end > pkvm->pvmfw_load_addr && ipa_start < pvmfw_load_end;
}

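/*
 * Raise ZCR_EL2 to the maximum vector length usable by the host. The ZCR
 * LEN field encodes the vector length in 128-bit quadwords minus one,
 * hence the conversion through sve_vq_from_vl() and the -1.
 */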
static inline void pkvm_set_max_sve_vq(void)
{
	sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
			       SYS_ZCR_EL2);
}

int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
			  u64 size);
void pkvm_poison_pvmfw_pages(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */