// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	    (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	if (parange > parange_max)
		parange = parange_max;

	return parange;
}
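
/*
 * Example (illustrative, not part of this header's API): deriving the
 * effective PARange from the system-wide sanitised ID_AA64MMFR0_EL1
 * value, much as the VTCR_EL2 setup code does. read_sanitised_ftr_reg()
 * is the arm64 cpufeature accessor; 'vtcr' is a local sketch variable.
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	u64 vtcr = kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
 */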

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		\
	(GENMASK(11, 2) & ~(kvm_lpa2_is_enabled() ?	\
			    KVM_PTE_LEAF_ATTR_LO_S2_SH : 0))

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN	1
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN	3
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_XN	2
#define KVM_PTE_LEAF_ATTR_HI_S2_XN	GENMASK(54, 53)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}
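
/*
 * Illustrative round-trip, assuming 4K pages without LPA2: the address
 * bits of a PTE and the physical address map 1:1, so for PA 0x40000000:
 *
 *	kvm_pte_t pte = kvm_phys_to_pte(0x40000000);
 *
 *	kvm_pte_to_phys(pte);	returns 0x40000000
 *	kvm_pte_to_pfn(pte);	returns 0x40000 (PA >> PAGE_SHIFT)
 */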

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) checks !(size & (size - 1)): zero or a power of two. */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
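
/*
 * For example, with 4K pages the supported sizes are 1GB (level 1),
 * 2MB (level 2) and 4KB (level 3):
 *
 *	kvm_is_block_size_supported(SZ_2M);	true
 *	kvm_is_block_size_supported(SZ_1M);	false: no level maps 1MB
 */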

static inline bool kvm_pte_table(kvm_pte_t pte, s8 level)
{
	if (level == KVM_PGTABLE_LAST_LEVEL)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
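
/*
 * A minimal sketch (hypothetical names, not the in-tree callbacks) of
 * mm_ops backed by the kernel page allocator; the real implementations
 * live with the stage-1/stage-2 owners of the page-table:
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *	};
 */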

static inline kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 * @KVM_PGTABLE_S2_PREFAULT_BLOCK:
 *				Prefault the table entries when a block
 *				mapping is broken down.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
	KVM_PGTABLE_S2_PREFAULT_BLOCK		= BIT(2),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_PXN:	Privileged execute-never.
 * @KVM_PGTABLE_PROT_UXN:	Unprivileged execute-never.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(4),
	KVM_PGTABLE_PROT_PXN			= BIT(5),
	KVM_PGTABLE_PROT_UXN			= BIT(6),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

/*
 * Stage-2 invalid-PTE annotations. These are non-overlapping bitfields which
 * define mutually-exclusive PTE states.
 */

/* (Host S2) The owner of the corresponding physical page. */
#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		FIELD_MAX(KVM_INVALID_PTE_OWNER_MASK)

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

/* Indicates a valid MMIO mapping registered via ioguard. */
#define KVM_INVALID_PTE_MMIO_NOTE	BIT(11)

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define KVM_HOST_S2_DEFAULT_MASK	(KVM_PTE_LEAF_ATTR_HI | \
					 KVM_PTE_LEAF_ATTR_LO)

#define KVM_HOST_S2_DEFAULT_MEM_PTE		\
	(PTE_S2_MEMATTR(MT_S2_NORMAL) |		\
	 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R |	\
	 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W |	\
	 KVM_PTE_LEAF_ATTR_LO_S2_AF |		\
	 (kvm_lpa2_is_enabled() ? 0 :		\
	  FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, KVM_PTE_LEAF_ATTR_LO_S2_SH_IS)))

#define KVM_HOST_S2_DEFAULT_MMIO_PTE		\
	(KVM_HOST_S2_DEFAULT_MEM_PTE |		\
	 FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, KVM_PTE_LEAF_ATTR_HI_S2_XN_XN))

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

typedef bool (*kvm_pgtable_pte_is_counted_cb_t)(kvm_pte_t pte, u32 level);

/**
 * struct kvm_pgtable_pte_ops - PTE callbacks.
 * @force_pte_cb:	Return true if the mapping granularity must be
 *			forced to pages for this range, rather than
 *			allowing block mappings.
 * @pte_is_counted_cb:	Verify the attributes of the @pte argument and
 *			return true if the descriptor needs to be
 *			refcounted, otherwise return false.
 */
struct kvm_pgtable_pte_ops {
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
	kvm_pgtable_pte_is_counted_cb_t		pte_is_counted_cb;
};
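
/*
 * Illustrative sketch (hypothetical names): force page granularity for
 * device mappings and refcount only valid descriptors.
 *
 *	static bool example_force_pte(u64 addr, u64 end,
 *				      enum kvm_pgtable_prot prot)
 *	{
 *		return prot & KVM_PGTABLE_PROT_DEVICE;
 *	}
 *
 *	static bool example_pte_is_counted(kvm_pte_t pte, u32 level)
 *	{
 *		return kvm_pte_valid(pte);
 *	}
 *
 *	static struct kvm_pgtable_pte_ops example_pte_ops = {
 *		.force_pte_cb		= example_force_pte,
 *		.pte_is_counted_cb	= example_pte_is_counted,
 *	};
 */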

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations that would otherwise
 *					be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	struct kvm_pgtable_pte_ops		*pte_ops;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
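
/*
 * Illustrative sketch (hypothetical names): a walker that counts the
 * valid leaf entries in a range, driven by kvm_pgtable_walk() below.
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			++*(u64 *)ctx->arg;
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 */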

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function to decide whether a range must be mapped
 *			at page granularity. Stage-2 only.
 * @pte_ops:		PTE callbacks.
 */
struct kvm_pgtable {
	union {
		struct rb_root_cached			pkvm_mappings;
		struct {
			u32				ia_bits;
			s8				start_level;
			kvm_pteref_t			pgd;
			struct kvm_pgtable_mm_ops	*mm_ops;

			/* Stage-2 only */
			enum kvm_pgtable_stage2_flags	flags;
			kvm_pgtable_force_pte_cb_t	force_pte_cb;
			struct kvm_pgtable_pte_ops	*pte_ops;
		};
	};
	struct kvm_s2_mmu				*mmu;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
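
/*
 * Example usage (illustrative; 'va' and 'pa' are local sketch
 * variables): map one page of normal memory read-write at EL2.
 *
 *	ret = kvm_pgtable_hyp_map(pgt, va, PAGE_SIZE, pa, PAGE_HYP);
 */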

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD.
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @pte_ops:	PTE callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      struct kvm_pgtable_pte_ops *pte_ops);

static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
					  struct kvm_pgtable_mm_ops *mm_ops,
					  struct kvm_pgtable_pte_ops *pte_ops)
{
	return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, pte_ops);
}

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
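
/*
 * Example usage (illustrative): typical stage-2 lifetime, assuming
 * 'mmu', 'mm_ops' and 'pte_ops' have already been set up.
 *
 *	ret = kvm_pgtable_stage2_init(pgt, mmu, mm_ops, pte_ops);
 *	if (ret)
 *		return ret;
 *
 *	(map, walk and unmap as needed)
 *
 *	kvm_pgtable_stage2_destroy(pgt);
 */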

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pte_ops:	Page-table entry management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops,
				      struct kvm_pgtable_pte_ops *pte_ops,
				      void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:	Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it is
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it attempts to recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
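
/*
 * Example usage (illustrative; 'ipa', 'pa' and 'memcache' are local
 * sketch variables): install a 2MB read-write-execute block mapping,
 * assuming the memcache holds enough pre-allocated pages.
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, pa,
 *				     KVM_PGTABLE_PROT_RWX, memcache, 0);
 */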

/**
 * kvm_pgtable_stage2_annotate() - Unmap and annotate pages in the IPA space
 *				   to track ownership (and more).
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @annotation:	A 63-bit value that will be stored in the page tables.
 *		@annotation[0] must be 0, and @annotation[63:1] is stored
 *		in the page tables.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
				void *mc, kvm_pte_t annotation);
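
/*
 * Example (illustrative): mark a range as owned by identifier 1, using
 * the owner field of an invalid PTE as the annotation. Bit 0 of the
 * annotation is 0, as required.
 *
 *	ret = kvm_pgtable_stage2_annotate(pgt, addr, size, mc,
 *			FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, 1));
 */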

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_reclaim_leaves() - Attempt to reclaim leaf page-table
 *					 pages by coalescing table entries into
 *					 block mappings.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to reclaim leaves.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				     enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot,
				   enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to split.
 * @size:	Size of the range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);
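
/*
 * Example usage (illustrative; 'ipa' and 'phys' are local sketch
 * variables): resolve the physical address backing an IPA.
 *
 *	kvm_pte_t pte;
 *	s8 level;
 *
 *	ret = kvm_pgtable_get_leaf(pgt, ipa, &pte, &level);
 *	if (!ret && kvm_pte_valid(pte))
 *		phys = kvm_pte_to_phys(pte) +
 *		       (ipa & (kvm_granule_size(level) - 1));
 */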

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries.
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
			      phys_addr_t addr, size_t size);

#endif	/* __ARM64_KVM_PGTABLE_H__ */