/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

/*
 * SW bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by the page-table owner.
 *   11: Reserved for future use (lending).
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= KVM_PGTABLE_PROT_SW0,
	PKVM_PAGE_SHARED_BORROWED	= KVM_PGTABLE_PROT_SW1,
	__PKVM_PAGE_RESERVED		= KVM_PGTABLE_PROT_SW0 |
					  KVM_PGTABLE_PROT_SW1,

	/* Meta-states which aren't encoded directly in the PTE's SW bits */
	PKVM_NOPAGE			= BIT(0),
	PKVM_PAGE_RESTRICTED_PROT	= BIT(1),
	PKVM_MODULE_DONT_TOUCH		= BIT(2),
};

#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return prot & PKVM_PAGE_STATE_PROT_MASK;
}
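
/*
 * Illustrative sketch (not part of the original header): how the two
 * helpers above compose. pkvm_mkstate() stamps an ownership state into
 * the SW bits of a protection value without disturbing the access
 * permissions, and pkvm_getstate() recovers it from the prot bits
 * alone. KVM_PGTABLE_PROT_RWX is assumed to come from
 * <asm/kvm_pgtable.h>.
 */
static inline void __pkvm_page_state_example(void)
{
	/* Build an RWX protection value marked as shared-and-borrowed... */
	enum kvm_pgtable_prot prot =
		pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_SHARED_BORROWED);

	/* ...the state round-trips through the SW bits of the PTE prot. */
	BUG_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_BORROWED);
}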

struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/* This corresponds to page-table locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_GUEST,
	PKVM_ID_FFA,
	PKVM_ID_PROTECTED,
	PKVM_ID_MAX = PKVM_ID_PROTECTED,
};
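
/*
 * Illustrative sketch (not part of the original header): the enum order
 * above dictates which page-table lock is taken first when an ownership
 * transition spans two components. For a host -> guest transition that
 * looks roughly as follows, assuming struct pkvm_hyp_vm exposes a
 * 'lock' hyp_spinlock_t for its stage-2 page-table, as the pKVM EL2
 * code does.
 */
static inline void __pkvm_lock_order_example(struct pkvm_hyp_vm *vm)
{
	/* PKVM_ID_HOST sorts before PKVM_ID_GUEST: host lock first... */
	hyp_spin_lock(&host_mmu.lock);
	hyp_spin_lock(&vm->lock);

	/* ...perform the transition on both page-tables... */

	/* ...and release in the reverse order. */
	hyp_spin_unlock(&vm->lock);
	hyp_spin_unlock(&host_mmu.lock);
}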

extern unsigned long hyp_nr_cpus;

int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
				    u64 ipa, u64 *ppa);
int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
bool __pkvm_check_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);

bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot,
			     bool update_iommu);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, enum pkvm_component_id owner_id);
int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

int hyp_register_host_perm_fault_handler(int (*cb)(struct kvm_cpu_context *ctxt, u64 esr, u64 addr));
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
int host_stage2_get_leaf(phys_addr_t phys, kvm_pte_t *ptep, u32 *level);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);

int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
int module_change_host_page_prot_range(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages);

void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
void drain_hyp_pool(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);

void psci_mem_protect_inc(u64 n);
void psci_mem_protect_dec(u64 n);

static __always_inline void __load_host_stage2(void)
{
	/*
	 * Once protected mode is initialized, the host always runs with
	 * its stage-2 page-table loaded. Before that point, leave
	 * VTTBR_EL2 at its default (zero) value.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}
#endif /* __KVM_NVHE_MEM_PROTECT__ */