// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 * Author: Fuad Tabba <tabba@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>
#include <asm/sysreg.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128
#define PVMFW_INVALID_LOAD_ADDR	(-1)

int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap);
int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
bool pkvm_is_hyp_created(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa);
int pkvm_enable_smc_forwarding(struct file *kvm_file);
/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static inline bool kvm_pvm_ext_allowed(long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
	case KVM_CAP_ARM_PROTECTED_VM:
		return true;
	default:
		return false;
	}
}
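
/*
 * Illustrative use only (not defined by this header): the generic ioctl
 * paths are expected to consult this helper before reporting or enabling a
 * capability for a protected guest, e.g.:
 *
 *	if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
 *		return 0;
 */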

static inline unsigned long pvm_supported_vcpu_features(void)
{
	unsigned long features = 0;

	set_bit(KVM_ARM_VCPU_POWER_OFF, &features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_EL1_32BIT))
		set_bit(KVM_ARM_VCPU_EL1_32BIT, &features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PSCI_0_2))
		set_bit(KVM_ARM_VCPU_PSCI_0_2, &features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PMU_V3))
		set_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_SVE))
		set_bit(KVM_ARM_VCPU_SVE, &features);

	if (kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
	    kvm_pvm_ext_allowed(KVM_CAP_ARM_PTRAUTH_GENERIC)) {
		set_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		set_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
	}

	return features;
}

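/*
 * Fine-grained trap configuration for protected VMs. As used below, the
 * PVM_*_SET masks hold "positive" trap bits (trap when set) that are enabled
 * for features the VM does not have, while the PVM_*_CLR masks hold the
 * "negative" nXXX bits (trap when clear) that are cleared for missing
 * features.
 */
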
/* All HAFGRTR_EL2 bits are AMU */
#define HAFGRTR_AMU	__HAFGRTR_EL2_MASK

#define PVM_HAFGRTR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP) ? 0ULL : HAFGRTR_AMU)

#define PVM_HAFGRTR_EL2_CLR (0ULL)

/* No support for debug, trace, or PMU for protected VMs */
#define PVM_HDFGRTR_EL2_SET __HDFGRTR_EL2_MASK
#define PVM_HDFGRTR_EL2_CLR __HDFGRTR_EL2_nMASK

#define PVM_HDFGWTR_EL2_SET __HDFGWTR_EL2_MASK
#define PVM_HDFGWTR_EL2_CLR __HDFGWTR_EL2_nMASK

#define HFGxTR_RAS_IMP (\
		HFGxTR_EL2_ERXADDR_EL1 | \
		HFGxTR_EL2_ERXPFGF_EL1 | \
		HFGxTR_EL2_ERXMISCn_EL1 | \
		HFGxTR_EL2_ERXSTATUS_EL1 | \
		HFGxTR_EL2_ERXCTLR_EL1 | \
		HFGxTR_EL2_ERXFR_EL1 | \
		HFGxTR_EL2_ERRSELR_EL1 | \
		HFGxTR_EL2_ERRIDR_EL1 \
	)
#define HFGxTR_RAS_V1P1 (\
		HFGxTR_EL2_ERXPFGCDN_EL1 | \
		HFGxTR_EL2_ERXPFGCTL_EL1 \
	)
#define HFGxTR_GIC HFGxTR_EL2_ICC_IGRPENn_EL1
#define HFGxTR_CSV2 (\
		HFGxTR_EL2_SCXTNUM_EL0 | \
		HFGxTR_EL2_SCXTNUM_EL1 \
	)
#define HFGxTR_LOR (\
		HFGxTR_EL2_LORSA_EL1 | \
		HFGxTR_EL2_LORN_EL1 | \
		HFGxTR_EL2_LORID_EL1 | \
		HFGxTR_EL2_LOREA_EL1 | \
		HFGxTR_EL2_LORC_EL1 \
	)
#define HFGxTR_PAUTH (\
		HFGxTR_EL2_APIBKey | \
		HFGxTR_EL2_APIAKey | \
		HFGxTR_EL2_APGAKey | \
		HFGxTR_EL2_APDBKey | \
		HFGxTR_EL2_APDAKey \
	)
#define HFGxTR_nAIE (\
		HFGxTR_EL2_nAMAIR2_EL1 | \
		HFGxTR_EL2_nMAIR2_EL1 \
	)
#define HFGxTR_nS2POE HFGxTR_EL2_nS2POR_EL1
#define HFGxTR_nS1POE (\
		HFGxTR_EL2_nPOR_EL1 | \
		HFGxTR_EL2_nPOR_EL0 \
	)
#define HFGxTR_nS1PIE (\
		HFGxTR_EL2_nPIR_EL1 | \
		HFGxTR_EL2_nPIRE0_EL1 \
	)
#define HFGxTR_nTHE HFGxTR_EL2_nRCWMASK_EL1
#define HFGxTR_nSME (\
		HFGxTR_EL2_nTPIDR2_EL0 | \
		HFGxTR_EL2_nSMPRI_EL1 \
	)
#define HFGxTR_nGCS (\
		HFGxTR_EL2_nGCS_EL1 | \
		HFGxTR_EL2_nGCS_EL0 \
	)
#define HFGxTR_nLS64 HFGxTR_EL2_nACCDATA_EL1

#define PVM_HFGXTR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP) ? 0ULL : HFGxTR_RAS_IMP) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ? 0ULL : HFGxTR_RAS_V1P1) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP) ? 0ULL : HFGxTR_GIC) | \
	(kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, IMP) ? 0ULL : HFGxTR_CSV2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP) ? 0ULL : HFGxTR_LOR) | \
	(vcpu_has_ptrauth(vcpu) ? 0ULL : HFGxTR_PAUTH) | \
	0

#define PVM_HFGXTR_EL2_CLR \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP) ? 0ULL : HFGxTR_nAIE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP) ? 0ULL : HFGxTR_nS2POE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP) ? 0ULL : HFGxTR_nS1POE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP) ? 0ULL : HFGxTR_nS1PIE) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP) ? 0ULL : HFGxTR_nTHE) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ? 0ULL : HFGxTR_nSME) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HFGxTR_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64) ? 0ULL : HFGxTR_nLS64) | \
	0

#define PVM_HFGRTR_EL2_SET PVM_HFGXTR_EL2_SET
#define PVM_HFGWTR_EL2_SET PVM_HFGXTR_EL2_SET
#define PVM_HFGRTR_EL2_CLR PVM_HFGXTR_EL2_CLR
#define PVM_HFGWTR_EL2_CLR PVM_HFGXTR_EL2_CLR

#define HFGITR_SPECRES (\
		HFGITR_EL2_CPPRCTX | \
		HFGITR_EL2_DVPRCTX | \
		HFGITR_EL2_CFPRCTX \
	)
#define HFGITR_TLBIOS (\
		HFGITR_EL2_TLBIVAALE1OS | \
		HFGITR_EL2_TLBIVALE1OS | \
		HFGITR_EL2_TLBIVAAE1OS | \
		HFGITR_EL2_TLBIASIDE1OS | \
		HFGITR_EL2_TLBIVAE1OS | \
		HFGITR_EL2_TLBIVMALLE1OS \
	)
#define HFGITR_TLBIRANGE \
	(\
		HFGITR_TLBIOS | \
		HFGITR_EL2_TLBIRVAALE1 | \
		HFGITR_EL2_TLBIRVALE1 | \
		HFGITR_EL2_TLBIRVAAE1 | \
		HFGITR_EL2_TLBIRVAE1 | \
		HFGITR_EL2_TLBIRVAALE1IS | \
		HFGITR_EL2_TLBIRVALE1IS | \
		HFGITR_EL2_TLBIRVAAE1IS | \
		HFGITR_EL2_TLBIRVAE1IS | \
		HFGITR_EL2_TLBIVAALE1IS | \
		HFGITR_EL2_TLBIVALE1IS | \
		HFGITR_EL2_TLBIVAAE1IS | \
		HFGITR_EL2_TLBIASIDE1IS | \
		HFGITR_EL2_TLBIVAE1IS | \
		HFGITR_EL2_TLBIVMALLE1IS | \
		HFGITR_EL2_TLBIRVAALE1OS | \
		HFGITR_EL2_TLBIRVALE1OS | \
		HFGITR_EL2_TLBIRVAAE1OS | \
		HFGITR_EL2_TLBIRVAE1OS \
	)
#define HFGITR_TLB HFGITR_TLBIRANGE
#define HFGITR_PAN2 (\
		HFGITR_EL2_ATS1E1WP | \
		HFGITR_EL2_ATS1E1RP | \
		HFGITR_EL2_ATS1E0W | \
		HFGITR_EL2_ATS1E0R | \
		HFGITR_EL2_ATS1E1W | \
		HFGITR_EL2_ATS1E1R \
	)
#define HFGITR_PAN HFGITR_PAN2
#define HFGITR_DPB2 HFGITR_EL2_DCCVADP
#define HFGITR_DPB_IMP HFGITR_EL2_DCCVAP
#define HFGITR_DPB (HFGITR_DPB_IMP | HFGITR_DPB2)
#define HFGITR_nGCS (\
		HFGITR_EL2_nGCSEPP | \
		HFGITR_EL2_nGCSSTR_EL1 | \
		HFGITR_EL2_nGCSPUSHM_EL1 \
	)
#define HFGITR_nBRBE (\
		HFGITR_EL2_nBRBIALL | \
		HFGITR_EL2_nBRBINJ \
	)

#define PVM_HFGITR_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP) ? 0ULL : HFGITR_EL2_ATS1E1A) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP) ? 0ULL : HFGITR_SPECRES) | \
	(kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS) ? 0ULL : HFGITR_TLB) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, IMP) ? 0ULL : HFGITR_PAN) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, IMP) ? 0ULL : HFGITR_DPB) | \
	0

#define PVM_HFGITR_EL2_CLR \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HFGITR_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP) ? 0ULL : HFGITR_nBRBE) | \
	0

#define HCRX_NMI	HCRX_EL2_TALLINT

#define HCRX_nPAuth_LR		HCRX_EL2_PACMEn
#define HCRX_nFPMR		HCRX_EL2_EnFPM
#define HCRX_nGCS		HCRX_EL2_GCSEn
#define HCRX_nSYSREG128		HCRX_EL2_EnIDCP128
#define HCRX_nADERR		HCRX_EL2_EnSDERR
#define HCRX_nDoubleFault2	HCRX_EL2_TMEA
#define HCRX_nANERR		HCRX_EL2_EnSNERR
#define HCRX_nD128		HCRX_EL2_D128En
#define HCRX_nTHE		HCRX_EL2_PTTWI
#define HCRX_nSCTLR2		HCRX_EL2_SCTLR2En
#define HCRX_nTCR2		HCRX_EL2_TCR2En
#define HCRX_nMOPS		(HCRX_EL2_MSCEn | HCRX_EL2_MCE2)
#define HCRX_nCMOW		HCRX_EL2_CMOW
#define HCRX_nNMI		(HCRX_EL2_VFNMI | HCRX_EL2_VINMI)
#define HCRX_SME		HCRX_EL2_SMPME
#define HCRX_nXS		(HCRX_EL2_FGTnXS | HCRX_EL2_FnXS)
#define HCRX_nLS64		(HCRX_EL2_EnASR | HCRX_EL2_EnALS | HCRX_EL2_EnAS0)

#define PVM_HCRX_EL2_SET \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP) ? 0ULL : HCRX_NMI) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ? 0ULL : HCRX_SME) | \
	0

#define PVM_HCRX_EL2_CLR \
	(vcpu_has_ptrauth(vcpu) ? HCRX_nPAuth_LR : 0ULL) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP) ? 0ULL : HCRX_nGCS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP) ? 0ULL : HCRX_nSYSREG128) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, FEAT_ADERR) ? 0ULL : HCRX_nADERR) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP) ? 0ULL : HCRX_nDoubleFault2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ANERR, FEAT_ANERR) ? 0ULL : HCRX_nANERR) | \
	(true /* trap unless ID_AA64MMFR0_EL1 PARANGE == 0b111 */ ? 0ULL : HCRX_nD128) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP) ? 0ULL : HCRX_nTHE) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP) ? 0ULL : HCRX_nSCTLR2) | \
	(kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP) ? 0ULL : HCRX_nTCR2) | \
	(kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP) ? 0ULL : HCRX_nMOPS) | \
	(kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP) ? 0ULL : HCRX_nCMOW) | \
	(kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP) ? 0ULL : HCRX_nNMI) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP) ? 0ULL : HCRX_nXS) | \
	(kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64) ? 0ULL : HCRX_nLS64) | \
	0

enum pkvm_moveable_reg_type {
	PKVM_MREG_MEMORY,
	PKVM_MREG_PROTECTED_RANGE,
	PKVM_MREG_ASSIGN_MMIO,
};

struct pkvm_moveable_reg {
	phys_addr_t start;
	u64 size;
	enum pkvm_moveable_reg_type type;
};

#define PKVM_NR_MOVEABLE_REGS 512
extern struct pkvm_moveable_reg kvm_nvhe_sym(pkvm_moveable_regs)[];
extern unsigned int kvm_nvhe_sym(pkvm_moveable_regs_nr);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

extern phys_addr_t kvm_nvhe_sym(pvmfw_base);
extern phys_addr_t kvm_nvhe_sym(pvmfw_size);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
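
/*
 * Worked example (illustrative numbers, not taken from this header): with
 * 4 KiB pages and a 16-byte vmemmap entry, a 512 MiB memblock spans
 * 131072 pages, so its slice of the vmemmap is 131072 * 16 bytes = 2 MiB,
 * i.e. 512 backing pages after the page alignment above.
 */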

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
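
/*
 * Worked example (illustrative, assuming 4 KiB pages, PTRS_PER_PTE == 512 and
 * four levels of lookup): mapping 1 GiB (262144 pages) needs at most 512
 * last-level table pages, then 1 page at each of the three upper levels,
 * i.e. 515 pages of page-table memory in the worst case.
 */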

static inline unsigned long __hyp_pgtable_moveable_regs_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all moveable regions at page granularity */
	for (i = 0; i < kvm_nvhe_sym(pkvm_moveable_regs_nr); i++) {
		struct pkvm_moveable_reg *reg = &kvm_nvhe_sym(pkvm_moveable_regs)[i];

		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

extern u64 kvm_nvhe_sym(hyp_lm_size_mb);

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	if (!kvm_nvhe_sym(hyp_lm_size_mb))
		res = __hyp_pgtable_moveable_regs_pages();
	else
		res = __hyp_pgtable_max_pages(kvm_nvhe_sym(hyp_lm_size_mb) * SZ_1M / PAGE_SIZE);

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_moveable_regs_pages() + 16;

	/* Allow 1 GiB for non-moveable regions */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

#ifdef CONFIG_PKVM_SELFTESTS
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

#define KVM_FFA_MBOX_NR_PAGES		1
#define KVM_FFA_SPM_HANDLE_NR_PAGES	2

/*
 * Maximum number of constituents allowed in a descriptor. This number is
 * arbitrary, see comment below on SG_MAX_SEGMENTS in hyp_ffa_proxy_pages().
 */
#define KVM_FFA_MAX_NR_CONSTITUENTS	4096

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;
	unsigned long num_pages;

	/*
	 * SG_MAX_SEGMENTS is supposed to bound the number of elements in an
	 * sglist, which should match the number of constituents in the
	 * corresponding FFA descriptor. As such, the EL2 buffer needs to be
	 * large enough to hold a descriptor with SG_MAX_SEGMENTS constituents
	 * at least. But the kernel's DMA code doesn't enforce the limit, and
	 * it is sometimes abused, so let's allow larger descriptors and hope
	 * for the best.
	 */
	BUILD_BUG_ON(KVM_FFA_MAX_NR_CONSTITUENTS < SG_MAX_SEGMENTS);

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   KVM_FFA_MAX_NR_CONSTITUENTS * sizeof(struct ffa_mem_region_addr_range);
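
	/*
	 * Illustrative sizing (assuming 16-byte address ranges and 4 KiB
	 * pages): desc_max comes to a little over 64 KiB, i.e. 17 buffer
	 * pages, for a total of 19 pages once the two mailboxes below are
	 * added.
	 */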

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	num_pages = (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);

	return num_pages;
}

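/*
 * Size of the buffer needed to save a host CPU's SVE state at hyp, based on
 * the maximum host vector length; zero when SVE is unsupported.
 */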
static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

int __pkvm_topup_hyp_alloc(unsigned long nr_pages);

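/*
 * Issue a hypercall and, if the hypervisor fails it with -ENOMEM while
 * reporting a non-zero page count in a3, top up the hyp allocator with that
 * many pages and retry. A sketch of the intended use (hypothetical hypercall
 * name, for illustration only):
 *
 *	ret = kvm_call_refill_hyp_nvhe(__pkvm_example_call, pfn, gfn);
 *	if (ret)
 *		return ret;
 */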
#define kvm_call_refill_hyp_nvhe(f, ...)				\
({									\
	struct arm_smccc_res res;					\
	int __ret;							\
	do {								\
		__ret = -1;						\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		if (WARN_ON(res.a0 != SMCCC_RET_SUCCESS))		\
			break;						\
									\
		__ret = res.a1;						\
		if (__ret == -ENOMEM && res.a3) {			\
			__ret = __pkvm_topup_hyp_alloc(res.a3);		\
		} else {						\
			break;						\
		}							\
	} while (!__ret);						\
	__ret;								\
})

enum pkvm_ptdump_ops {
	PKVM_PTDUMP_GET_LEVEL,
	PKVM_PTDUMP_GET_RANGE,
	PKVM_PTDUMP_WALK_RANGE,
};

struct pkvm_ptdump_log {
	/* VA_BITS - PAGE_SHIFT + 1 (INVALID_PTDUMP_PFN) */
	u64 pfn: 41;
	bool valid: 1;
	bool r: 1;
	bool w: 1;
	char xn: 2;
	bool table: 1;
	u16 page_state: 2;
	u16 level: 8;
} __packed;

#define INVALID_PTDUMP_PFN	(BIT(41) - 1)

struct pkvm_ptdump_log_hdr {
	/* The next page */
	u64 pfn_next: 48;
	/* The write index in the log page */
	u64 w_index: 16;
};

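/*
 * Record of one contiguous gfn -> pfn mapping, kept in an interval tree
 * keyed by gfn (hence the __subtree_last member) so that ranges can be
 * looked up and torn down by guest frame number.
 */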
struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last;	/* Internal member for interval tree */
};

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops, struct kvm_pgtable_pte_ops *pte_ops);
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
kvm_pte_t pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				      enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops,
				       struct kvm_pgtable_pte_ops *pte_ops,
				       void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */