/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_MEMORY_H
#define __KVM_HYP_MEMORY_H

#include <asm/kvm_mmu.h>
#include <asm/page.h>

#include <linux/types.h>
#include <nvhe/refcount.h>

/*
 * Bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by, the page-table owner.
 *   11: The page has been tainted by the host and cannot transition
 *       to another state.
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= BIT(0),
	PKVM_PAGE_SHARED_BORROWED	= BIT(1),
	PKVM_PAGE_TAINTED		= BIT(0) | BIT(1),

	/*
	 * Special non-meta states that only apply to host pages. These are
	 * never stored in the PTE SW bits.
	 */
	PKVM_MODULE_OWNED_PAGE		= BIT(2),
	PKVM_NOPAGE			= BIT(3),

	/*
	 * Meta-states which aren't encoded directly in the PTE's SW bits (or
	 * in the hyp_vmemmap entry for the host).
	 */
	PKVM_PAGE_RESTRICTED_PROT	= BIT(4),
	PKVM_MMIO			= BIT(5),
};
#define PKVM_PAGE_META_STATES_MASK	(~(BIT(0) | BIT(1)))
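
/*
 * Only the four low-order states (OWNED, SHARED_OWNED, SHARED_BORROWED,
 * TAINTED) fit in the two PTE software bits; everything above BIT(1) is a
 * meta-state and is rejected by pkvm_mkstate() below. A minimal sketch of
 * compile-time checks documenting that invariant (illustrative only, not
 * part of this header):
 *
 *	static_assert(!(PKVM_PAGE_TAINTED & PKVM_PAGE_META_STATES_MASK));
 *	static_assert(PKVM_NOPAGE & PKVM_PAGE_META_STATES_MASK);
 */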

#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	BUG_ON(state & PKVM_PAGE_META_STATES_MASK);
	prot &= ~PKVM_PAGE_STATE_PROT_MASK;
	prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
	return prot;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
}
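
/*
 * Illustrative round-trip (a sketch, not part of this header): encode a
 * page state into a stage-2 prot value and read it back. The initial prot
 * value below is an arbitrary example.
 *
 *	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
 *
 *	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
 *	WARN_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);
 */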

struct hyp_page {
	unsigned short refcount;
	u8 order;

	/* Host (non-meta) state. Guarded by the host stage-2 lock. */
	enum pkvm_page_state host_state : 8;

	u32 host_share_guest_count;
};

extern u64 __hyp_vmemmap;
#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)

#define __hyp_va(phys)	((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))

static inline void *hyp_phys_to_virt(phys_addr_t phys)
{
	return __hyp_va(phys);
}

static inline phys_addr_t hyp_virt_to_phys(void *addr)
{
	return __hyp_pa(addr);
}
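
/*
 * Sketch of the linear-map round-trip these helpers provide (illustrative
 * only, assuming 'phys' lies within the hyp linear map):
 *
 *	void *va = hyp_phys_to_virt(phys);
 *
 *	WARN_ON(hyp_virt_to_phys(va) != phys);
 */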

#define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn)	((phys_addr_t)((pfn) << PAGE_SHIFT))
#define hyp_phys_to_page(phys)	(&hyp_vmemmap[hyp_phys_to_pfn(phys)])
#define hyp_virt_to_page(virt)	hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt)	hyp_phys_to_pfn(__hyp_pa(virt))

#define hyp_page_to_pfn(page)	((struct hyp_page *)(page) - hyp_vmemmap)
#define hyp_page_to_phys(page)	hyp_pfn_to_phys((hyp_page_to_pfn(page)))
#define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))
#define hyp_page_to_pool(page)	(((struct hyp_page *)(page))->pool)
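
/*
 * Example walk through the conversion chain (illustrative only): for a
 * page-aligned hyp VA, all of these resolve to the same vmemmap entry.
 *
 *	struct hyp_page *p = hyp_virt_to_page(va);
 *
 *	WARN_ON(p != hyp_phys_to_page(hyp_virt_to_phys(va)));
 *	WARN_ON(hyp_page_to_virt(p) != va);
 */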

/*
 * Refcounting wrappers for 'struct hyp_page'.
 */
static inline int hyp_page_count(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	return hyp_refcount_get(p->refcount);
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	hyp_refcount_inc(p->refcount);
}

static inline void hyp_page_ref_dec(struct hyp_page *p)
{
	hyp_refcount_dec(p->refcount);
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	return hyp_refcount_dec(p->refcount) == 0;
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	hyp_refcount_set(p->refcount, 1);
}
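
/*
 * Typical refcount lifecycle (a sketch, not taken from this file): a page
 * starts refcounted at 1 and the holder dropping the last reference frees
 * it. 'my_free_page' is a hypothetical callback standing in for the real
 * release path.
 *
 *	struct hyp_page *p = hyp_virt_to_page(addr);
 *
 *	hyp_set_page_refcounted(p);		refcount == 1
 *	hyp_page_ref_inc(p);			refcount == 2
 *	hyp_page_ref_dec(p);			refcount == 1
 *	if (hyp_page_ref_dec_and_test(p))	refcount == 0, so...
 *		my_free_page(p);		...free the page
 */
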
#endif /* __KVM_HYP_MEMORY_H */