/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>
#include <kvm/power_domain.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;
	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise it is NULL.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;

	/* Tracks the exit code for the protected guest. */
	u32 exit_code;

	/*
	 * Tracks the power state transition of a protected vCPU.
	 * Can be in one of three states:
	 * PSCI_0_2_AFFINITY_LEVEL_ON
	 * PSCI_0_2_AFFINITY_LEVEL_OFF
	 * PSCI_0_2_AFFINITY_LEVEL_ON_PENDING
	 */
	int power_state;
};
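
/*
 * Illustrative sketch only (not part of the pKVM interface): one way hyp code
 * could test the power state tracked above. It assumes the
 * PSCI_0_2_AFFINITY_LEVEL_* values from <uapi/linux/psci.h> are visible in
 * this translation unit, hence the #ifdef guard.
 */
#ifdef PSCI_0_2_AFFINITY_LEVEL_ON
static inline bool pkvm_hyp_vcpu_example_is_on(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	/* True only once the vCPU has completed its power-on transition. */
	return hyp_vcpu->power_state == PSCI_0_2_AFFINITY_LEVEL_ON;
}
#endif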

/* Holds the hyp addresses of the mapped RX/TX buffers inside the hypervisor. */
struct kvm_ffa_buffers {
	void *tx;
	u64 tx_ipa;
	void *rx;
	u64 rx_ipa;
	struct list_head xfer_list;
	u64 vm_avail_bitmap;
	u64 vm_creating_bitmap;
};

/*
 * Holds the relevant data for running a protected VM.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t pgtable_lock;

	/* pvIOMMUs attached. */
	struct list_head pviommus;
	struct hyp_pool iommu_pool;
	struct list_head domains;

	/* Primary vCPU pending entry to the pvmfw. */
	struct pkvm_hyp_vcpu *pvmfw_entry_vcpu;

	unsigned short refcount;

	hyp_spinlock_t vcpus_lock;

	/*
	 * True when the guest is being torn down. When in this state, the
	 * guest's vCPUs can't be loaded anymore, but its pages can be
	 * reclaimed by the host.
	 */
	bool is_dying;

	struct kvm_ffa_buffers ffa_buf;
	struct list_head vm_list;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};

struct ffa_mem_transfer *__pkvm_get_vm_ffa_transfer(u16 handle);

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_vm_is_protected(&hyp_vm->kvm);
}

extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;

void pkvm_hyp_vm_table_init(void *tbl);

struct kvm_hyp_req *
pkvm_hyp_req_reserve(struct pkvm_hyp_vcpu *hyp_vcpu, u8 type);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu);
int __pkvm_start_teardown_vm(pkvm_handle_t handle);
int __pkvm_finalize_teardown_vm(pkvm_handle_t handle);
int __pkvm_reclaim_dying_guest_page(pkvm_handle_t handle, u64 pfn, u64 gfn, u8 order);
int __pkvm_reclaim_dying_guest_ffa_resources(pkvm_handle_t handle);
int __pkvm_notify_guest_vm_avail(pkvm_handle_t handle);
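
/*
 * Illustrative sketch only, not a real helper: a rough guess at the order in
 * which the host drives the teardown hypercalls above. Return values, the
 * per-page pfn/gfn/order arguments and error handling are elided, and the
 * exact placement of the FF-A reclaim step is an assumption.
 */
static inline void pkvm_example_teardown_order(pkvm_handle_t handle)
{
	/* Mark the guest as dying; its vCPUs can no longer be loaded. */
	__pkvm_start_teardown_vm(handle);

	/* Reclaim FF-A resources and, page by page, the guest's memory. */
	__pkvm_reclaim_dying_guest_ffa_resources(handle);
	/* ... __pkvm_reclaim_dying_guest_page() for each donated page ... */

	/* Release the hypervisor's VM state once everything is reclaimed. */
	__pkvm_finalize_teardown_vm(handle);
}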

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
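
/*
 * Illustrative sketch only (hypothetical helper): the expected pairing of
 * pkvm_load_hyp_vcpu() and pkvm_put_hyp_vcpu() around work on a guest vCPU.
 * The -EINVAL return on a failed load is an assumption for the example.
 */
static inline int pkvm_example_with_loaded_vcpu(pkvm_handle_t handle,
						unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;

	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return -EINVAL;

	/* ... operate on hyp_vcpu->vcpu while it is loaded ... */

	pkvm_put_hyp_vcpu(hyp_vcpu);
	return 0;
}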

struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
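
/*
 * Illustrative sketch only (hypothetical helper), assuming get_pkvm_hyp_vm()
 * returns the VM with a reference held, or NULL if the handle does not
 * resolve; the reference is then dropped with put_pkvm_hyp_vm().
 */
static inline bool pkvm_example_handle_is_protected(pkvm_handle_t handle)
{
	struct pkvm_hyp_vm *hyp_vm = get_pkvm_hyp_vm(handle);
	bool protected_vm;

	if (!hyp_vm)
		return false;

	protected_vm = pkvm_hyp_vm_is_protected(hyp_vm);
	put_pkvm_hyp_vm(hyp_vm);

	return protected_vm;
}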

bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);

bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);

bool kvm_handle_pvm_smc64(struct kvm_vcpu *vcpu, u64 *exit_code);

struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *vm, u64 mpidr);

static inline bool pkvm_hyp_vm_has_pvmfw(struct pkvm_hyp_vm *vm)
{
	return vm->kvm.arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR;
}

static inline bool pkvm_ipa_range_has_pvmfw(struct pkvm_hyp_vm *vm,
					    u64 ipa_start, u64 ipa_end)
{
	struct kvm_protected_vm *pkvm = &vm->kvm.arch.pkvm;
	u64 pvmfw_load_end = pkvm->pvmfw_load_addr + pvmfw_size;

	if (!pkvm_hyp_vm_has_pvmfw(vm))
		return false;

	return ipa_end > pkvm->pvmfw_load_addr && ipa_start < pvmfw_load_end;
}

int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
			  u64 size);
void pkvm_poison_pvmfw_pages(void);

int pkvm_timer_init(void);
void pkvm_udelay(unsigned long usecs);

#define MAX_POWER_DOMAINS 40

struct kvm_power_domain_ops {
	int (*power_on)(struct kvm_power_domain *pd);
	int (*power_off)(struct kvm_power_domain *pd);
};

int pkvm_init_hvc_pd(struct kvm_power_domain *pd,
		     const struct kvm_power_domain_ops *ops);

int pkvm_host_hvc_pd(u64 device_id, u64 on);
int pkvm_init_scmi_pd(struct kvm_power_domain *pd,
		      const struct kvm_power_domain_ops *ops);

bool pkvm_device_request_mmio(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code);
bool pkvm_device_request_dma(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code);
void pkvm_devices_teardown(struct pkvm_hyp_vm *vm);
int pkvm_devices_get_context(u64 iommu_id, u32 endpoint_id, struct pkvm_hyp_vm *vm);
void pkvm_devices_put_context(u64 iommu_id, u32 endpoint_id);

/*
 * Register a power domain. When the hypervisor intercepts power requests from
 * the host for this power domain, it calls the power ops with @pd as the
 * argument.
 */
static inline int pkvm_init_power_domain(struct kvm_power_domain *pd,
					 const struct kvm_power_domain_ops *ops)
{
	switch (pd->type) {
	case KVM_POWER_DOMAIN_NONE:
		return 0;
	case KVM_POWER_DOMAIN_HOST_HVC:
		return pkvm_init_hvc_pd(pd, ops);
	case KVM_POWER_DOMAIN_ARM_SCMI:
		return pkvm_init_scmi_pd(pd, ops);
	default:
		return -EOPNOTSUPP;
	}
}
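
/*
 * Illustrative sketch only (hypothetical callbacks): how a hyp-side user might
 * plug its power_on/power_off handlers into pkvm_init_power_domain(). The
 * example_pd_* names and the empty handler bodies are made up for the sketch.
 */
static inline int example_pd_power_on(struct kvm_power_domain *pd)
{
	/* Restore any hyp-owned state for the domain here. */
	return 0;
}

static inline int example_pd_power_off(struct kvm_power_domain *pd)
{
	/* Quiesce hyp-owned state before the domain loses power. */
	return 0;
}

static inline int pkvm_example_register_pd(struct kvm_power_domain *pd)
{
	static const struct kvm_power_domain_ops example_pd_ops = {
		.power_on	= example_pd_power_on,
		.power_off	= example_pd_power_off,
	};

	return pkvm_init_power_domain(pd, &example_pd_ops);
}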

int pkvm_init_devices(void);
int pkvm_device_hyp_assign_mmio(u64 pfn, u64 nr_pages);
int pkvm_device_reclaim_mmio(u64 pfn, u64 nr_pages);
int pkvm_host_map_guest_mmio(struct pkvm_hyp_vcpu *hyp_vcpu, u64 pfn, u64 gfn);
int pkvm_device_register_reset(u64 phys, void *cookie,
			       int (*cb)(void *cookie, bool host_to_guest));
int pkvm_handle_empty_memcache(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code);
u32 hyp_vcpu_to_ffa_handle(struct pkvm_hyp_vcpu *hyp_vcpu);
u32 vm_handle_to_ffa_handle(pkvm_handle_t vm_handle);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */