/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm_module.h>
#include <asm/virt.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

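/*
 * Host stage-2 MMU state; operations on the host stage-2 page-table
 * are serialized by @lock.
 */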
struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/* This corresponds to page-table locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_FFA,
	PKVM_ID_GUEST,
	PKVM_ID_PROTECTED,
	PKVM_ID_MAX = PKVM_ID_PROTECTED,
};

extern unsigned long hyp_nr_cpus;

extern struct kvm_hyp_pinned_page *hyp_ppages;

int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa, u8 order);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
int ___pkvm_host_donate_hyp_prot(u64 pfn, u64 nr_pages,
				 bool accept_mmio, enum kvm_pgtable_prot prot);
int __pkvm_host_donate_sglist_hyp(struct pkvm_sglist_page *sglist, size_t nr_pages);
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages, enum kvm_pgtable_prot prot);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_guest_share_hyp_page(struct pkvm_hyp_vcpu *vcpu, u64 ipa, u64 *hyp_va);
int __pkvm_guest_unshare_hyp_page(struct pkvm_hyp_vcpu *vcpu, u64 ipa);
int __pkvm_guest_share_ffa_page(struct pkvm_hyp_vcpu *vcpu, u64 ipa, phys_addr_t *phys);
int __pkvm_guest_unshare_ffa_page(struct pkvm_hyp_vcpu *vcpu, u64 ipa);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu, u64 nr_pages);
int __pkvm_host_donate_sglist_guest(struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot, u64 nr_pages);
int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm, u64 nr_pages);
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm, u64 size);
int __pkvm_host_test_clear_young_guest(u64 gfn, u64 size, bool mkold, struct pkvm_hyp_vm *vm);
kvm_pte_t __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_split_guest(u64 gfn, u64 size, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_donate_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_reclaim_ffa(u64 pfn, u64 nr_pages);
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
			    u64 nr_pages, u64 *nr_shared);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
			      u64 nr_pages, u64 *nr_unshared);
int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
				u64 nr_pages, u64 *nr_guarded);
bool __pkvm_check_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
				    u64 ipa, u64 *ppa);
int __pkvm_use_dma(u64 phys_addr, size_t size, struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_unuse_dma(u64 phys_addr, size_t size, struct pkvm_hyp_vcpu *hyp_vcpu);
u64 __pkvm_ptdump_get_config(pkvm_handle_t handle, enum pkvm_ptdump_ops op);
u64 __pkvm_ptdump_walk_range(pkvm_handle_t handle, struct pkvm_ptdump_log_hdr *log_hva);

int hyp_check_range_owned(u64 addr, u64 size);
int __pkvm_install_guest_mmio(struct pkvm_hyp_vcpu *hyp_vcpu, u64 pfn, u64 gfn);

int pkvm_get_guest_pa_request(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
			      size_t ipa_size_request, u64 *out_pa, s8 *out_level);
int pkvm_get_guest_pa_request_use_dma(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
				      size_t ipa_size_request, u64 *out_pa, s8 *level);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot,
			     bool update_iommu);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
int host_stage2_get_leaf(phys_addr_t phys, kvm_pte_t *ptep, s8 *level);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);

int refill_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc);
int reclaim_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *host_mc,
		     int nr_pages);

void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
void drain_hyp_pool(struct hyp_pool *pool, struct kvm_hyp_memcache *mc);

int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages, bool update_iommu);

void psci_mem_protect_inc(u64 n);
void psci_mem_protect_dec(u64 n);

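/*
 * Switch to the host's stage-2 configuration, or clear VTTBR_EL2 while
 * protected mode is not yet initialized.
 */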
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}

#ifdef CONFIG_PKVM_SELFTESTS
void pkvm_ownership_selftest(void *base);
#else
static inline void pkvm_ownership_selftest(void *base) { }
#endif
#endif /* __KVM_NVHE_MEM_PROTECT__ */