// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1U
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
#endif

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PHYS_INVALID		(-1ULL)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 50)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN	1
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN	3
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_XN	2
#define KVM_PTE_LEAF_ATTR_HI_S2_XN	GENMASK(54, 53)

#define KVM_PTE_LEAF_ATTR_HI_S1_GP	BIT(50)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16) {
		pa &= GENMASK(51, 48);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
	}

	return pte;
}
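
/*
 * Worked example (illustrative, not part of the header): with 64K pages
 * (PAGE_SHIFT == 16), bits [47:16] of a physical address live in the
 * descriptor's address field and bits [51:48] are folded into descriptor
 * bits [15:12], so a round-trip through the helpers above looks like:
 *
 *	kvm_pte_t pte = kvm_phys_to_pte(0x000f800000010000UL);
 *	// pte[47:16] == 0x80000001, pte[15:12] == 0xf
 *	u64 pa = kvm_pte_to_phys(pte);	// pa == 0x000f800000010000UL
 */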

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}

static inline bool kvm_is_block_size_supported(u64 size)
{
	/* IS_ALIGNED(size, size) checks size & (size - 1) == 0 */
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}
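
/*
 * For example (illustrative only): with 4K pages the helpers above yield
 * BIT(30) | BIT(21) | BIT(12), i.e. levels 1-3 map 1GB, 2MB and 4KB
 * respectively, so:
 *
 *	kvm_is_block_size_supported(SZ_2M);	// true
 *	kvm_is_block_size_supported(SZ_16M);	// false: not a granule size
 */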

static inline bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, u32 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
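
/*
 * A minimal sketch (illustrative only; all names below are hypothetical) of
 * how a kernel-side user might populate these callbacks. Real
 * implementations additionally track page refcounts and draw from memcaches:
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *	};
 */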

static inline kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 * @KVM_PGTABLE_S2_PREFAULT_BLOCK:
 *				Prefault a table when a block mapping is
 *				broken down.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
	KVM_PGTABLE_S2_PREFAULT_BLOCK		= BIT(2),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NC:	Normal non-cacheable attributes.
 * @KVM_PGTABLE_PROT_PXN:	Privileged execute-never.
 * @KVM_PGTABLE_PROT_UXN:	Unprivileged execute-never.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
	KVM_PGTABLE_PROT_NC			= BIT(4),
	KVM_PGTABLE_PROT_PXN			= BIT(5),
	KVM_PGTABLE_PROT_UXN			= BIT(6),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

/*
 * Stage-2 invalid-PTE annotations. These are non-overlapping bitfields which
 * define mutually-exclusive PTE states.
 */

/* (Host S2) The owner of the corresponding physical page. */
#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)

/*
 * Used to indicate a pte for which a 'break-before-make' sequence is in
 * progress.
 */
#define KVM_INVALID_PTE_LOCKED		BIT(10)

/* This corresponds to page-table locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_FFA,
	PKVM_ID_GUEST,
	PKVM_ID_PROTECTED,
	PKVM_ID_MAX = PKVM_ID_PROTECTED,
};

/* Indicates a valid MMIO mapping registered via ioguard. */
#define KVM_INVALID_PTE_MMIO_NOTE	BIT(11)

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define KVM_HOST_S2_DEFAULT_MASK   (KVM_PTE_LEAF_ATTR_HI |	\
				    KVM_PTE_LEAF_ATTR_LO)

#define KVM_HOST_S2_DEFAULT_MEM_PTE		\
	(PTE_S2_MEMATTR(MT_S2_NORMAL) |		\
	KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R |	\
	KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W |	\
	KVM_PTE_LEAF_ATTR_LO_S2_AF |		\
	FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, KVM_PTE_LEAF_ATTR_LO_S2_SH_IS))

#define KVM_HOST_S2_DEFAULT_MMIO_PTE		\
	(KVM_HOST_S2_DEFAULT_MEM_PTE |		\
	FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, KVM_PTE_LEAF_ATTR_HI_S2_XN_XN))

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

typedef bool (*kvm_pgtable_pte_is_counted_cb_t)(kvm_pte_t pte, u32 level);

/**
 * struct kvm_pgtable_pte_ops - PTE callbacks.
 * @force_pte_cb:		Callback returning true if the mapping
 *				granularity for the given range must be
 *				forced to pages rather than block mappings.
 * @pte_is_counted_cb:		Verify the attributes of the @pte argument
 *				and return true if the descriptor needs to be
 *				refcounted, otherwise return false.
 */
struct kvm_pgtable_pte_ops {
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
	kvm_pgtable_pte_is_counted_cb_t		pte_is_counted_cb;
};

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without the cache maintenance
 *					operations that would normally
 *					be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	struct kvm_pgtable_pte_ops		*pte_ops;
	u64					addr;
	u64					end;
	u32					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @pte_ops:		PTE callbacks.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	struct kvm_pgtable_pte_ops		*pte_ops;
};

/**
 * struct kvm_pgtable_snapshot - Snapshot page-table.
 * @pgtable:		The page-table configuration.
 * @mc:			Memcache used for pagetable pages allocation.
 * @pgd_hva:		Host virtual address of a physically contiguous buffer
 *			used for storing the PGD.
 * @pgd_pages:		The size of the physically contiguous buffer in pages.
 * @used_pages_hva:	Host virtual address of a physically contiguous buffer
 *			used for storing the consumed pages from the memcache.
 * @num_used_pages:	The size of the used buffer in pages.
 * @used_pages_idx:	The current index of the used pages array.
 */
struct kvm_pgtable_snapshot {
	struct kvm_pgtable			pgtable;
	struct kvm_hyp_memcache			mc;
	void					*pgd_hva;
	size_t					pgd_pages;
	phys_addr_t				*used_pages_hva;
	size_t					num_used_pages;
	size_t					used_pages_idx;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
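
/*
 * Typical usage (illustrative sketch; error handling is elided and
 * example_mm_ops/hyp_va_bits are stand-ins for real values):
 *
 *	struct kvm_pgtable pgt;
 *
 *	kvm_pgtable_hyp_init(&pgt, hyp_va_bits, &example_mm_ops);
 *	kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, __pa(ptr), PAGE_HYP);
 */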

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute the size of a stage-2 PGD.
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @pte_ops:	PTE callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      struct kvm_pgtable_pte_ops *pte_ops);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops, pte_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, pte_ops)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pte_ops:	Pagetable entries management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops,
				      struct kvm_pgtable_pte_ops *pte_ops,
				      void *pgtable, u32 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:  Force mappings to PAGE_SIZE granularity.
 *
 * Returns an unlinked page-table tree. This new page-table tree is
 * not reachable (i.e., it is unlinked) from the root pgd and it's
 * therefore unreachable by the hardware page-table walker. No TLB
 * invalidation or CMOs are performed.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, u32 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (ex. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it tries to recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
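
/*
 * Illustrative sketch of resolving a guest fault by mapping a single page
 * read/write (@memcache is a stand-in; real callers also hold the MMU lock
 * and pick the prot/flags to match the fault):
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ALIGN_DOWN(ipa, PAGE_SIZE),
 *				     PAGE_SIZE, pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_RW, memcache,
 *				     KVM_PGTABLE_WALK_HANDLE_FAULT |
 *				     KVM_PGTABLE_WALK_SHARED);
 */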

/**
 * kvm_pgtable_stage2_annotate() - Unmap and annotate pages in the IPA space
 *				   to track ownership (and more).
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @annotation:	A 63 bit value that will be stored in the page tables.
 *		@annotation[0] must be 0, and @annotation[63:1] is stored
 *		in the page tables.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
				void *mc, kvm_pte_t annotation);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_reclaim_leaves() - Attempt to reclaim leaf page-table
 *					 pages by coalescing table entries into
 *					 block mappings.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to reclaim leaves.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);
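
/*
 * Illustrative sketch: a page-aging pass can clear the access flag across a
 * range and report whether it was young, deferring TLB invalidation to the
 * caller:
 *
 *	bool young = kvm_pgtable_stage2_test_clear_young(pgt, ipa, size,
 *							 true);
 */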

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * __kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				        page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 * @flags:	Flags to control the page-table walk.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				     enum kvm_pgtable_prot prot,
				     enum kvm_pgtable_walk_flags flags);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate the data cache to the
 *				Point of Coherency for a guest stage-2 address
 *				range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	 Page-table structure initialised by kvm_pgtable_stage2_init().
 * @addr:	 Intermediate physical address from which to split.
 * @size:	 Size of the range.
 * @mc:		 Cache of pre-allocated and zeroed memory from which to allocate
 *		 page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
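
/*
 * Illustrative sketch of a walker that counts valid leaf entries (callback
 * and variable names are examples only):
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			(*(u64 *)ctx->arg)++;
 *
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 */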

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);
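
/*
 * Illustrative sketch: look up the leaf entry translating an IPA and derive
 * its mapping granule:
 *
 *	kvm_pte_t pte;
 *	u32 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		granule = kvm_granule_size(level);
 */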

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries.
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t addr, size_t size);

#ifdef CONFIG_NVHE_EL2_DEBUG
/**
 * kvm_pgtable_stage2_snapshot() - Snapshot the pagetable.
 * @dest_pgt:	Destination pagetable
 * @src_pgt:	Source pagetable
 * @pgd_len:	The size of the PGD
 */
int kvm_pgtable_stage2_snapshot(struct kvm_pgtable_snapshot *dest_pgt,
				struct kvm_pgtable *src_pgt,
				size_t pgd_len);
#else
static inline int kvm_pgtable_stage2_snapshot(struct kvm_pgtable_snapshot *dest_pgt,
					      struct kvm_pgtable *src_pgt,
					      size_t pgd_len)
{
	return -EPERM;
}
#endif	/* CONFIG_NVHE_EL2_DEBUG */
#endif	/* __ARM64_KVM_PGTABLE_H__ */