/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>
#include <kvm/power_domain.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;

	/* Tracks exit code for the protected guest. */
	u32 exit_code;

	/*
	 * Tracks the power state transition of a protected vCPU.
	 * Can be in one of three states:
	 * PSCI_0_2_AFFINITY_LEVEL_ON
	 * PSCI_0_2_AFFINITY_LEVEL_OFF
	 * PSCI_0_2_AFFINITY_LEVEL_PENDING
	 */
	int power_state;
};

/*
 * Holds the relevant data for running a protected vm.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t pgtable_lock;

	/* Primary vCPU pending entry to the pvmfw */
	struct pkvm_hyp_vcpu *pvmfw_entry_vcpu;

	unsigned short refcount;

	/*
	 * The number of vcpus initialized and ready to run.
	 */
	unsigned int nr_vcpus;
	hyp_spinlock_t vcpus_lock;

	/*
	 * True when the guest is being torn down. When in this state, the
	 * guest's vCPUs can't be loaded anymore, but its pages can be
	 * reclaimed by the host.
	 */
	bool is_dying;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool kvm_is_protected(struct kvm *kvm)
{
	if (!is_protected_kvm_enabled())
		return false;

	return kvm->arch.pkvm.enabled;
}

static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
{
	return kvm_is_protected(vcpu->kvm);
}

static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
{
	return kvm_is_protected(&hyp_vm->kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

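/* Physical address and size of the protected VM firmware (pvmfw) image. */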
extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;

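/* Initialize the hyp-side table used to track VM instances by handle. */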
void pkvm_hyp_vm_table_init(void *tbl);

struct kvm_hyp_req *
pkvm_hyp_req_reserve(struct pkvm_hyp_vcpu *hyp_vcpu, u8 type);

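/*
 * Hyp-side handlers for the host hypercalls that create, tear down and
 * reclaim hypervisor-managed VMs.
 */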
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu);
int __pkvm_start_teardown_vm(pkvm_handle_t handle);
int __pkvm_finalize_teardown_vm(pkvm_handle_t handle);
int __pkvm_reclaim_dying_guest_page(pkvm_handle_t handle, u64 pfn, u64 gfn, u8 order);

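/* Load/put a hyp vCPU on the current physical CPU, or get the loaded one. */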
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);

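/* Get/put a reference to the hyp VM associated with a handle. */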
struct pkvm_hyp_vm *pkvm_get_hyp_vm(pkvm_handle_t handle);
void pkvm_put_hyp_vm(struct pkvm_hyp_vm *hyp_vm);

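/* System register trap handling and sanitised ID registers for protected VMs. */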
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

void pkvm_reset_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);

bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_hyp_handle_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);

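/* Find the hyp vCPU of @vm whose MPIDR matches @mpidr, if any. */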
struct pkvm_hyp_vcpu *pkvm_mpidr_to_hyp_vcpu(struct pkvm_hyp_vm *vm, u64 mpidr);

static inline bool pkvm_hyp_vm_has_pvmfw(struct pkvm_hyp_vm *vm)
{
	return vm->kvm.arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR;
}

static inline bool pkvm_ipa_range_has_pvmfw(struct pkvm_hyp_vm *vm,
					    u64 ipa_start, u64 ipa_end)
{
	struct kvm_protected_vm *pkvm = &vm->kvm.arch.pkvm;
	u64 pvmfw_load_end = pkvm->pvmfw_load_addr + pvmfw_size;

	if (!pkvm_hyp_vm_has_pvmfw(vm))
		return false;

	return ipa_end > pkvm->pvmfw_load_addr && ipa_start < pvmfw_load_end;
}

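/* Loading and poisoning of pvmfw pages for protected guests. */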
int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
			  u64 size);
void pkvm_poison_pvmfw_pages(void);

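/* EL2 timer support: pkvm_udelay() delays for the given number of microseconds at hyp. */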
int pkvm_timer_init(void);
void pkvm_udelay(unsigned long usecs);

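/* Maximum number of power domains that can be registered with the hypervisor. */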
#define MAX_POWER_DOMAINS		32

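/*
 * Callbacks invoked by the hypervisor when the host requests a power state
 * change for a registered power domain.
 */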
struct kvm_power_domain_ops {
	int (*power_on)(struct kvm_power_domain *pd);
	int (*power_off)(struct kvm_power_domain *pd);
};

int pkvm_init_hvc_pd(struct kvm_power_domain *pd,
		     const struct kvm_power_domain_ops *ops);

int pkvm_host_hvc_pd(u64 device_id, u64 on);

/*
 * Register a power domain. When the hypervisor catches power requests from the
 * host for this power domain, it calls the power ops with @pd as argument.
 */
static inline int pkvm_init_power_domain(struct kvm_power_domain *pd,
					 const struct kvm_power_domain_ops *ops)
{
	switch (pd->type) {
	case KVM_POWER_DOMAIN_NONE:
		return 0;
	case KVM_POWER_DOMAIN_HOST_HVC:
		return pkvm_init_hvc_pd(pd, ops);
	default:
		return -EOPNOTSUPP;
	}
}
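
/*
 * A minimal, illustrative sketch of how a hyp-side user might register a
 * power domain with the ops above (the example_* names are hypothetical and
 * not part of this header):
 *
 *	static int example_pd_power_on(struct kvm_power_domain *pd)
 *	{
 *		return 0;
 *	}
 *
 *	static int example_pd_power_off(struct kvm_power_domain *pd)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct kvm_power_domain_ops example_pd_ops = {
 *		.power_on	= example_pd_power_on,
 *		.power_off	= example_pd_power_off,
 *	};
 *
 *	static struct kvm_power_domain example_pd = {
 *		.type = KVM_POWER_DOMAIN_HOST_HVC,
 *	};
 *
 *	int ret = pkvm_init_power_domain(&example_pd, &example_pd_ops);
 */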

#ifdef CONFIG_NVHE_EL2_DEBUG
int pkvm_stage2_snapshot_by_handle(struct kvm_pgtable_snapshot *snap,
				   pkvm_handle_t handle);
#endif /* CONFIG_NVHE_EL2_DEBUG */
#endif /* __ARM64_KVM_NVHE_PKVM_H__ */