/* SPDX-License-Identifier: GPL-2.0 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef __ARM64_KVM_PKVM_MODULE_H__
4 #define __ARM64_KVM_PKVM_MODULE_H__
5 
6 #include <asm/kvm_pgtable.h>
7 #include <linux/android_kabi.h>
8 #include <linux/export.h>
9 
10 typedef void (*dyn_hcall_t)(struct kvm_cpu_context *);
11 
/*
 * PSCI events a module can subscribe to via @register_psci_notifier in
 * struct pkvm_module_ops. Names suggest suspend/entry transitions; exact
 * trigger points are defined by the pKVM PSCI relay (not visible here).
 */
enum pkvm_psci_notification {
	PKVM_PSCI_CPU_SUSPEND,
	PKVM_PSCI_SYSTEM_SUSPEND,
	PKVM_PSCI_CPU_ENTRY,
};
17 
18 #ifdef CONFIG_MODULES
19 /**
20  * struct pkvm_module_ops - pKVM modules callbacks
21  * @create_private_mapping:	Map a memory region into the hypervisor private
22  *				range. @haddr returns the virtual address where
23  *				the mapping starts. It can't be unmapped. Host
24  *				access permissions are unaffected.
25  * @alloc_module_va:		Reserve a range of VA space in the hypervisor
26  *				private range. This is handy for modules that
27  *				need to map plugin code in a similar fashion to
28  *				how pKVM maps module code. That space could also
29  *				be used to map memory temporarily, when the
30  *				fixmap granularity (PAGE_SIZE) is too small.
31  * @map_module_page:		Used in conjunction with @alloc_module_va. When
32  *				@is_protected is not set, the page is also
33  *				unmapped from the host stage-2.
34  * @register_serial_driver:	Register a driver for a serial interface. The
35  *				framework only needs a single callback
36  *				@hyp_putc_cb which is expected to print a single
37  *				character.
38  * @puts:			If a serial interface is registered, print a
39  *				string, else does nothing.
40  * @putx64:			If a serial interface is registered, print a
41  *				64-bit number, else does nothing.
42  * @fixmap_map:			Map a page in the per-CPU hypervisor fixmap.
43  *				This is intended to be used for temporary
44  *				mappings in the hypervisor VA space.
45  *				@fixmap_unmap must be called between each
46  *				mapping to do cache maintenance and ensure the
47  *				new mapping is visible.
48  * @fixmap_unmap:		Unmap a page from the hypervisor fixmap. This
49  * 				call is required between each @fixmap_map().
50  * @linear_map_early:		Map a large portion of memory into the
51  *				hypervisor linear VA space. This is intended to
52  *				be used only for module bootstrap and must be
53  *				unmapped before the host is deprivilged.
54  * @linear_unmap_early:		See @linear_map_early.
55  * @flush_dcache_to_poc:	Clean the data cache to the point of coherency.
56  *				This is not a requirement for any other of the
57  *				pkvm_module_ops callbacks.
58  * @update_hcr_el2:		Modify the running value of HCR_EL2. pKVM will
59  *				save/restore the new value across power
60  *				management transitions.
61  * @update_hfgwtr_el2:		Modify the running value of HFGWTR_EL2. pKVM
62  *				will save/restore the new value across power
63  *				management transitions.
64  * @register_host_perm_fault_handler:
65  *				@cb is called whenever the host generates an
66  *				abort with the fault status code Permission
67  *				Fault. Returning -EPERM lets pKVM handle the
68  *				abort. This is useful when a module changes the
69  *				host stage-2 permissions for certain pages.
70  * @host_stage2_mod_prot:	Apply @prot to the page @pfn. This requires a
71  *				permission fault handler to be registered (see
72  *				@register_host_perm_fault_handler), otherwise
73  *				pKVM will be unable to handle this fault and the
74  *				CPU will be stuck in an infinite loop.
75  * @host_stage2_mod_prot_range:	Similar to @host_stage2_mod_prot, but takes a
76  *				range as an argument (@nr_pages). This
77  *				considerably speeds up the process for a
78  *				contiguous memory region, compared to the
79  *				per-page @host_stage2_mod_prot.
80  * @host_stage2_get_leaf:	Query the host's stage2 page-table entry for
81  *				the page @phys.
82  * @register_host_smc_handler:	@cb is called whenever the host issues an SMC
83  *				pKVM couldn't handle. If @cb returns false, the
84  *				SMC will be forwarded to EL3.
85  * @register_default_trap_handler:
86  *				@cb is called whenever EL2 traps EL1 and pKVM
87  *				has not handled it. If @cb returns false, the
88  *				hypervisor will panic. This trap handler must be
89  *				registered whenever changes are made to HCR
90  *				(@update_hcr_el2) or HFGWTR
91  *				(@update_hfgwtr_el2).
92  * @register_illegal_abt_notifier:
93  *				To notify the module of a pending illegal abort
94  *				from the host. On @cb return, the abort will be
95  *				injected back into the host.
96  * @register_psci_notifier:	To notify the module of a pending PSCI event.
97  * @register_hyp_panic_notifier:
98  *				To notify the module of a pending hypervisor
99  *				panic. On return from @cb, the panic will occur.
100  * @host_donate_hyp:		The page @pfn is unmapped from the host and
101  *				full control is given to the hypervisor.
102  * @hyp_donate_host:		The page @pfn whom control has previously been
103  *				given to the hypervisor (@host_donate_hyp) is
104  *				given back to the host.
105  * @host_share_hyp:		The page @pfn will be shared between the host
106  *				and the hypervisor. Must be followed by
107  *				@pin_shared_mem.
108  * @host_unshare_hyp:		The page @pfn will be unshared and unmapped from
109  *				the hypervisor. Must be called after
110  *				@unpin_shared_mem.
111  * @pin_shared_mem:		After @host_share_hyp, the newly shared page is
112  *				still owned by the host. @pin_shared_mem will
113  *				prevent the host from reclaiming that page until
114  *				the hypervisor releases it (@unpin_shared_mem)
115  * @unpin_shared_mem:		Enable the host to reclaim the shared memory
116  *				(@host_unshare_hyp).
117  * @memcpy:			Same as kernel memcpy, but use hypervisor VAs.
118  * @memset:			Same as kernel memset, but use a hypervisor VA.
119  * @hyp_pa:			Return the physical address for a hypervisor
120  *				virtual address in the linear range.
121  * @hyp_va:			Convert a physical address into a virtual one.
122  * @kern_hyp_va:		Convert a kernel virtual address into an
123  *				hypervisor virtual one.
124  */
125 struct pkvm_module_ops {
126 	int (*create_private_mapping)(phys_addr_t phys, size_t size,
127 				      enum kvm_pgtable_prot prot,
128 				      unsigned long *haddr);
129 	void *(*alloc_module_va)(u64 nr_pages);
130 	int (*map_module_page)(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool is_protected);
131 	int (*register_serial_driver)(void (*hyp_putc_cb)(char));
132 	void (*puts)(const char *str);
133 	void (*putx64)(u64 num);
134 	void *(*fixmap_map)(phys_addr_t phys);
135 	void (*fixmap_unmap)(void);
136 	void *(*linear_map_early)(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot);
137 	void (*linear_unmap_early)(void *addr, size_t size);
138 	void (*flush_dcache_to_poc)(void *addr, size_t size);
139 	void (*update_hcr_el2)(unsigned long set_mask, unsigned long clear_mask);
140 	void (*update_hfgwtr_el2)(unsigned long set_mask, unsigned long clear_mask);
141 	int (*register_host_perm_fault_handler)(int (*cb)(struct kvm_cpu_context *ctxt, u64 esr, u64 addr));
142 	int (*host_stage2_mod_prot)(u64 pfn, enum kvm_pgtable_prot prot);
143 	int (*host_stage2_get_leaf)(phys_addr_t phys, kvm_pte_t *ptep, u32 *level);
144 	int (*register_host_smc_handler)(bool (*cb)(struct kvm_cpu_context *));
145 	int (*register_default_trap_handler)(bool (*cb)(struct kvm_cpu_context *));
146 	int (*register_illegal_abt_notifier)(void (*cb)(struct kvm_cpu_context *));
147 	int (*register_psci_notifier)(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *));
148 	int (*register_hyp_panic_notifier)(void (*cb)(struct kvm_cpu_context *host_ctxt));
149 	int (*host_donate_hyp)(u64 pfn, u64 nr_pages);
150 	int (*hyp_donate_host)(u64 pfn, u64 nr_pages);
151 	int (*host_share_hyp)(u64 pfn);
152 	int (*host_unshare_hyp)(u64 pfn);
153 	int (*pin_shared_mem)(void *from, void *to);
154 	void (*unpin_shared_mem)(void *from, void *to);
155 	void* (*memcpy)(void *to, const void *from, size_t count);
156 	void* (*memset)(void *dst, int c, size_t count);
157 	phys_addr_t (*hyp_pa)(void *x);
158 	void* (*hyp_va)(phys_addr_t phys);
159 	unsigned long (*kern_hyp_va)(unsigned long x);
160 
161 	ANDROID_KABI_USE(1, int (*host_stage2_mod_prot_range)(
162 				 u64 pfn, enum kvm_pgtable_prot prot,
163 				 u64 nr_pages));
164 
165 	ANDROID_KABI_RESERVE(2);
166 	ANDROID_KABI_RESERVE(3);
167 	ANDROID_KABI_RESERVE(4);
168 	ANDROID_KABI_RESERVE(5);
169 	ANDROID_KABI_RESERVE(6);
170 	ANDROID_KABI_RESERVE(7);
171 	ANDROID_KABI_RESERVE(8);
172 	ANDROID_KABI_RESERVE(9);
173 	ANDROID_KABI_RESERVE(10);
174 	ANDROID_KABI_RESERVE(11);
175 	ANDROID_KABI_RESERVE(12);
176 	ANDROID_KABI_RESERVE(13);
177 	ANDROID_KABI_RESERVE(14);
178 	ANDROID_KABI_RESERVE(15);
179 	ANDROID_KABI_RESERVE(16);
180 	ANDROID_KABI_RESERVE(17);
181 	ANDROID_KABI_RESERVE(18);
182 	ANDROID_KABI_RESERVE(19);
183 	ANDROID_KABI_RESERVE(20);
184 	ANDROID_KABI_RESERVE(21);
185 	ANDROID_KABI_RESERVE(22);
186 	ANDROID_KABI_RESERVE(23);
187 	ANDROID_KABI_RESERVE(24);
188 	ANDROID_KABI_RESERVE(25);
189 	ANDROID_KABI_RESERVE(26);
190 	ANDROID_KABI_RESERVE(27);
191 	ANDROID_KABI_RESERVE(28);
192 	ANDROID_KABI_RESERVE(29);
193 	ANDROID_KABI_RESERVE(30);
194 	ANDROID_KABI_RESERVE(31);
195 	ANDROID_KABI_RESERVE(32);
196 };
197 
198 int __pkvm_load_el2_module(struct module *this, unsigned long *token);
199 
200 int __pkvm_register_el2_call(unsigned long hfn_hyp_va);
201 #else
__pkvm_load_el2_module(struct module * this,unsigned long * token)202 static inline int __pkvm_load_el2_module(struct module *this,
203 					 unsigned long *token)
204 {
205 	return -ENOSYS;
206 }
207 
__pkvm_register_el2_call(unsigned long hfn_hyp_va)208 static inline int __pkvm_register_el2_call(unsigned long hfn_hyp_va)
209 {
210 	return -ENOSYS;
211 }
212 #endif /* CONFIG_MODULES */
213 
214 int pkvm_load_early_modules(void);
215 
216 #ifdef MODULE
/*
 * Convert an EL2 module address from the kernel VA to the hyp VA.
 *
 * @kern_va: address inside this module's hyp .text section (kernel VA).
 * @token:   hyp-side base for the module's hyp text — presumably the value
 *           returned through __pkvm_load_el2_module()'s @token; confirm
 *           against callers.
 */
#define pkvm_el2_mod_va(kern_va, token)					\
({									\
	unsigned long hyp_text_kern_va =				\
		(unsigned long)THIS_MODULE->arch.hyp.text.start;	\
	unsigned long offset;						\
									\
	offset = (unsigned long)kern_va - hyp_text_kern_va;		\
	token + offset;							\
})
229 
/*
 * Load this module's EL2 code, entering it at @init_fn.
 *
 * function_nocfi() does not work with function pointers, hence the macro in
 * lieu of a function.
 */
#define pkvm_load_el2_module(init_fn, token)				\
({									\
	THIS_MODULE->arch.hyp.init = function_nocfi(init_fn);		\
	__pkvm_load_el2_module(THIS_MODULE, token);			\
})
239 
/*
 * Register @hfn as a dynamic hypercall, translating its kernel VA to the
 * hyp VA with pkvm_el2_mod_va(). Macro for the same function_nocfi()
 * reason as pkvm_load_el2_module().
 */
#define pkvm_register_el2_mod_call(hfn, token)				\
({									\
	__pkvm_register_el2_call(pkvm_el2_mod_va(function_nocfi(hfn),	\
						 token));		\
})
245 
/*
 * Invoke a previously registered EL2 module hypercall @id via HVC.
 * WARNs if the call does not return SMCCC_RET_SUCCESS; evaluates to the
 * hypercall's first result register (res.a1).
 */
#define pkvm_el2_mod_call(id, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_ID(id),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})
256 #endif
257 #endif
258