/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_MEMORY_H
#define __KVM_HYP_MEMORY_H

#include <asm/kvm_mmu.h>
#include <asm/page.h>

#include <linux/types.h>
#include <nvhe/refcount.h>

/*
 * Bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by, the page-table owner.
 *   11: This is an MMIO page that is mapped in the host IOMMU.
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= BIT(0),
	PKVM_PAGE_SHARED_BORROWED	= BIT(1),
	PKVM_PAGE_MMIO_DMA		= BIT(0) | BIT(1),

	/*
	 * Special non-meta states that only apply to host pages; these are
	 * never encoded in the PTE SW bits.
	 */
	PKVM_MODULE_OWNED_PAGE		= BIT(2),
	PKVM_NOPAGE			= BIT(3),

	/*
	 * Meta-states which aren't encoded directly in the PTE's SW bits (or
	 * the hyp_vmemmap entry for the host).
	 */
	PKVM_PAGE_RESTRICTED_PROT	= BIT(4),
	PKVM_MMIO			= BIT(5),
};
#define PKVM_PAGE_META_STATES_MASK	(~(BIT(0) | BIT(1)))

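/*
 * Illustrative sketch, not part of this header: only the two low bits of a
 * pkvm_page_state fit in the PTE SW bits, so a check such as the
 * hypothetical helper below mirrors the BUG_ON() in pkvm_mkstate():
 *
 *	static inline bool pkvm_state_is_pte_encodable(enum pkvm_page_state s)
 *	{
 *		return !(s & PKVM_PAGE_META_STATES_MASK);
 *	}
 */
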
#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	BUG_ON(state & PKVM_PAGE_META_STATES_MASK);
	prot &= ~PKVM_PAGE_STATE_PROT_MASK;
	prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
	return prot;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
}

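/*
 * Illustrative usage sketch (assumed caller context, not part of this
 * header): pkvm_mkstate() and pkvm_getstate() round-trip a state through
 * the SW bits of a stage-2 prot value:
 *
 *	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 *
 *	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
 *	WARN_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);
 */
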
struct hyp_page {
	unsigned short refcount;
	u8 order;

	/* Host (non-meta) state. Guarded by the host stage-2 lock. */
	enum pkvm_page_state host_state : 8;
};

extern u64 __hyp_vmemmap;
#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)

#define __hyp_va(phys)	((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))

static inline void *hyp_phys_to_virt(phys_addr_t phys)
{
	return __hyp_va(phys);
}

static inline phys_addr_t hyp_virt_to_phys(void *addr)
{
	return __hyp_pa(addr);
}

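/*
 * Sketch of the conversion arithmetic, assuming hyp_physvirt_offset has
 * already been set up by the early init code: the hypervisor linear map
 * sits at a fixed offset from the physical address space, so the two
 * helpers above are exact inverses:
 *
 *	void *va = hyp_phys_to_virt(phys);
 *
 *	WARN_ON(hyp_virt_to_phys(va) != phys);
 */
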
#define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn)	((phys_addr_t)((pfn) << PAGE_SHIFT))
#define hyp_phys_to_page(phys)	(&hyp_vmemmap[hyp_phys_to_pfn(phys)])
#define hyp_virt_to_page(virt)	hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt)	hyp_phys_to_pfn(__hyp_pa(virt))

#define hyp_page_to_pfn(page)	((struct hyp_page *)(page) - hyp_vmemmap)
#define hyp_page_to_phys(page)	hyp_pfn_to_phys((hyp_page_to_pfn(page)))
#define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))
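/*
 * Illustrative round-trip, assuming 'virt' lies in a region backed by
 * hyp_vmemmap: both directions are plain array arithmetic on hyp_vmemmap
 * plus the fixed hyp_physvirt_offset, cheap enough for hot paths:
 *
 *	struct hyp_page *p = hyp_virt_to_page(virt);
 *
 *	WARN_ON(hyp_page_to_virt(p) != virt);
 */
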
/*
 * Refcounting wrappers for 'struct hyp_page'.
 */
static inline int hyp_page_count(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	return hyp_refcount_get(p->refcount);
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	hyp_refcount_inc(p->refcount);
}

static inline void hyp_page_ref_dec(struct hyp_page *p)
{
	hyp_refcount_dec(p->refcount);
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	return hyp_refcount_dec(p->refcount) == 0;
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	hyp_refcount_set(p->refcount, 1);
}
#endif /* __KVM_HYP_MEMORY_H */