/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ARM64_KVM_PKVM_MODULE_H__
#define __ARM64_KVM_PKVM_MODULE_H__

#include <asm/kvm_pgtable.h>
#include <linux/android_kabi.h>
#include <linux/export.h>

typedef void (*dyn_hcall_t)(struct user_pt_regs *);
struct kvm_hyp_iommu;
struct iommu_iotlb_gather;
struct kvm_hyp_iommu_domain;
struct kvm_hyp_req;
struct kvm_iommu_paddr_cache;

#ifdef CONFIG_MODULES
enum pkvm_psci_notification {
	PKVM_PSCI_CPU_SUSPEND,
	PKVM_PSCI_SYSTEM_SUSPEND,
	PKVM_PSCI_CPU_ENTRY,
};

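/*
 * Illustrative sketch only (hypothetical names): a module can observe PSCI
 * transitions by passing a callback to the @register_psci_notifier op of
 * struct pkvm_module_ops below, e.g. to quiesce module-owned hardware
 * before a system suspend:
 *
 *	static void my_psci_notifier(enum pkvm_psci_notification notif,
 *				     struct user_pt_regs *regs)
 *	{
 *		if (notif == PKVM_PSCI_SYSTEM_SUSPEND)
 *			my_device_quiesce();	// hypothetical helper
 *	}
 *
 *	// From the module's hyp init:
 *	// ops->register_psci_notifier(my_psci_notifier);
 */
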
/**
 * struct pkvm_module_ops - pKVM module callbacks
 * @create_private_mapping:	Map a memory region into the hypervisor private
 *				range. @haddr returns the virtual address where
 *				the mapping starts. It can't be unmapped. Host
 *				access permissions are unaffected.
 * @alloc_module_va:		Reserve a range of VA space in the hypervisor
 *				private range. This is handy for modules that
 *				need to map plugin code in a similar fashion to
 *				how pKVM maps module code. That space could also
 *				be used to map memory temporarily, when the
 *				fixmap granularity (PAGE_SIZE) is too small.
 * @map_module_page:		Used in conjunction with @alloc_module_va. When
 *				@is_protected is not set, the page is also
 *				unmapped from the host stage-2.
 * @register_serial_driver:	Register a driver for a serial interface. The
 *				framework only needs a single callback,
 *				@hyp_putc_cb, which is expected to print a
 *				single character.
 * @putc:			If a serial interface is registered, print a
 *				single character, otherwise do nothing.
 * @puts:			If a serial interface is registered, print a
 *				string, otherwise do nothing.
 * @putx64:			If a serial interface is registered, print a
 *				64-bit number, otherwise do nothing.
 * @fixmap_map:			Map a page in the per-CPU hypervisor fixmap.
 *				This is intended to be used for temporary
 *				mappings in the hypervisor VA space.
 *				@fixmap_unmap must be called between each
 *				mapping to do cache maintenance and ensure the
 *				new mapping is visible.
 * @fixmap_unmap:		Unmap a page from the hypervisor fixmap. This
 *				call is required between each @fixmap_map().
 * @linear_map_early:		Map a large portion of memory into the
 *				hypervisor linear VA space. This is intended to
 *				be used only for module bootstrap and must be
 *				unmapped before the host is deprivileged.
 * @linear_unmap_early:		See @linear_map_early.
 * @flush_dcache_to_poc:	Clean the data cache to the point of coherency.
 *				This is not required by any other
 *				pkvm_module_ops callback.
 * @update_hcr_el2:		Modify the running value of HCR_EL2. pKVM will
 *				save/restore the new value across power
 *				management transitions.
 * @update_hfgwtr_el2:		Modify the running value of HFGWTR_EL2. pKVM
 *				will save/restore the new value across power
 *				management transitions.
 * @register_host_perm_fault_handler:
 *				@cb is called whenever the host generates an
 *				abort with the fault status code Permission
 *				Fault. This is useful when a module changes the
 *				host stage-2 permissions for certain pages.
 *				Up to 16 handlers can be registered. Returning
 *				-EPERM lets pKVM handle the abort, while
 *				returning 0 causes the next handler to be
 *				called. Handlers are called in registration
 *				order.
 * @host_stage2_mod_prot:	Apply @prot to the page @pfn. This requires a
 *				permission fault handler to be registered (see
 *				@register_host_perm_fault_handler), otherwise
 *				pKVM will be unable to handle this fault and the
 *				CPU will be stuck in an infinite loop. @nr_pages
 *				allows this prot to be applied to a range of
 *				contiguous memory.
 * @host_stage2_get_leaf:	Query the host's stage2 page-table entry for
 *				the page @phys.
 * @register_host_smc_handler:	@cb is called whenever the host issues an SMC
 *				that pKVM couldn't handle. If @cb returns
 *				false, the SMC will be forwarded to EL3.
 * @register_default_trap_handler:
 *				@cb is called whenever EL2 traps EL1 and pKVM
 *				has not handled it. If @cb returns false, the
 *				hypervisor will panic. This trap handler must be
 *				registered whenever changes are made to HCR
 *				(@update_hcr_el2) or HFGWTR
 *				(@update_hfgwtr_el2).
 * @register_illegal_abt_notifier:
 *				To notify the module of a pending illegal abort
 *				from the host. On @cb return, the abort will be
 *				injected back into the host.
 * @register_psci_notifier:	To notify the module of a pending PSCI event.
 * @register_hyp_panic_notifier:
 *				To notify the module of a pending hypervisor
 *				panic. On return from @cb, the panic will occur.
 * @register_unmask_serror:	When @unmask returns true, the hypervisor will
 *				unmask SErrors at EL2. Although the hypervisor
 *				cannot recover from an SError (and will panic if
 *				one occurs), they can be useful for debugging in
 *				some situations. @mask is the counterpart of
 *				@unmask and is called before SErrors are masked
 *				again.
 * @host_donate_hyp:		The page @pfn is unmapped from the host and
 *				full control is given to the hypervisor.
 * @host_donate_hyp_prot:	As @host_donate_hyp, but this variant also sets
 *				the prot of the hypervisor mapping.
 * @hyp_donate_host:		The page @pfn, whose control has previously
 *				been given to the hypervisor
 *				(@host_donate_hyp), is given back to the host.
 * @host_share_hyp:		The page @pfn will be shared between the host
 *				and the hypervisor. Must be followed by
 *				@pin_shared_mem.
 * @host_unshare_hyp:		The page @pfn will be unshared and unmapped
 *				from the hypervisor. Must be called after
 *				@unpin_shared_mem.
 * @pin_shared_mem:		After @host_share_hyp, the newly shared page is
 *				still owned by the host. @pin_shared_mem will
 *				prevent the host from reclaiming that page
 *				until the hypervisor releases it
 *				(@unpin_shared_mem).
 * @unpin_shared_mem:		Enable the host to reclaim the shared memory
 *				(@host_unshare_hyp).
 * @memcpy:			Same as the kernel memcpy(), but uses
 *				hypervisor VAs.
 * @memset:			Same as the kernel memset(), but uses a
 *				hypervisor VA.
 * @hyp_pa:			Return the physical address for a hypervisor
 *				virtual address in the linear range.
 * @hyp_va:			Convert a physical address into a virtual one.
 * @kern_hyp_va:		Convert a kernel virtual address into a
 *				hypervisor virtual one.
 * @hyp_alloc:			Allocate memory in hyp VA space.
 * @hyp_alloc_errno:		Error code when @hyp_alloc() returns NULL.
 * @hyp_free:			Free memory allocated with @hyp_alloc().
 * @iommu_donate_pages:		Allocate memory from the IOMMU pool.
 * @iommu_reclaim_pages:	Reclaim memory allocated with
 *				@iommu_donate_pages().
 * @iommu_request:		Fill a request that is returned from the entry
 *				HVC (see hyp-main.c).
 * @iommu_init_device:		Initialize common IOMMU fields.
 * @udelay:			Delay in microseconds.
 * @hyp_alloc_missing_donations:
 *				Number of missing donations if the allocator
 *				returns NULL.
 * @__list_add_valid_or_report:	Needed if the code uses linked lists.
 * @__list_del_entry_valid_or_report:
 *				Needed if the code uses linked lists.
 * @iommu_iotlb_gather_add_page:
 *				Add a page to the iotlb_gather during IOMMU
 *				unmap.
 * @iommu_donate_pages_atomic:	Allocate memory from the IOMMU identity pool.
 * @iommu_reclaim_pages_atomic:
 *				Reclaim memory allocated with
 *				@iommu_donate_pages_atomic().
 * @hyp_smp_processor_id:	Current CPU ID.
 */
struct pkvm_module_ops {
	int (*create_private_mapping)(phys_addr_t phys, size_t size,
				      enum kvm_pgtable_prot prot,
				      unsigned long *haddr);
	void *(*alloc_module_va)(u64 nr_pages);
	int (*map_module_page)(u64 pfn, void *va, enum kvm_pgtable_prot prot, bool is_protected);
	int (*register_serial_driver)(void (*hyp_putc_cb)(char));
	void (*putc)(char c);
	void (*puts)(const char *s);
	void (*putx64)(u64 x);
	void *(*fixmap_map)(phys_addr_t phys);
	void (*fixmap_unmap)(void);
	void *(*linear_map_early)(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot);
	void (*linear_unmap_early)(void *addr, size_t size);
	void (*flush_dcache_to_poc)(void *addr, size_t size);
	void (*update_hcr_el2)(unsigned long set_mask, unsigned long clear_mask);
	void (*update_hfgwtr_el2)(unsigned long set_mask, unsigned long clear_mask);
	int (*register_host_perm_fault_handler)(int (*cb)(struct user_pt_regs *regs, u64 esr, u64 addr));
	int (*host_stage2_mod_prot)(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages, bool update_iommu);
	int (*host_stage2_get_leaf)(phys_addr_t phys, kvm_pte_t *ptep, u32 *level);
	int (*register_host_smc_handler)(bool (*cb)(struct user_pt_regs *));
	int (*register_default_trap_handler)(bool (*cb)(struct user_pt_regs *));
	int (*register_illegal_abt_notifier)(void (*cb)(struct user_pt_regs *));
	int (*register_psci_notifier)(void (*cb)(enum pkvm_psci_notification, struct user_pt_regs *));
	int (*register_hyp_panic_notifier)(void (*cb)(struct user_pt_regs *));
	int (*register_unmask_serror)(bool (*unmask)(void), void (*mask)(void));
	int (*host_donate_hyp)(u64 pfn, u64 nr_pages, bool accept_mmio);
	int (*host_donate_hyp_prot)(u64 pfn, u64 nr_pages, bool accept_mmio, enum kvm_pgtable_prot prot);
	int (*hyp_donate_host)(u64 pfn, u64 nr_pages);
	int (*host_share_hyp)(u64 pfn);
	int (*host_unshare_hyp)(u64 pfn);
	int (*pin_shared_mem)(void *from, void *to);
	void (*unpin_shared_mem)(void *from, void *to);
	void *(*memcpy)(void *to, const void *from, size_t count);
	void *(*memset)(void *dst, int c, size_t count);
	phys_addr_t (*hyp_pa)(void *x);
	void *(*hyp_va)(phys_addr_t phys);
	unsigned long (*kern_hyp_va)(unsigned long x);
	void *(*hyp_alloc)(size_t size);
	int (*hyp_alloc_errno)(void);
	void (*hyp_free)(void *addr);
	void *(*iommu_donate_pages)(u8 order, bool request);
	void (*iommu_reclaim_pages)(void *p, u8 order);
	int (*iommu_request)(struct kvm_hyp_req *req);
	int (*iommu_init_device)(struct kvm_hyp_iommu *iommu);
	void (*udelay)(unsigned long usecs);
	u8 (*hyp_alloc_missing_donations)(void);
#ifdef CONFIG_LIST_HARDENED
	/* These 2 functions change calling convention based on CONFIG_DEBUG_LIST. */
	typeof(__list_add_valid_or_report) *list_add_valid_or_report;
	typeof(__list_del_entry_valid_or_report) *list_del_entry_valid_or_report;
#endif
	void (*iommu_iotlb_gather_add_page)(struct kvm_hyp_iommu_domain *domain,
					    struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t size);
	int (*register_hyp_event_ids)(unsigned long start, unsigned long end);
	void *(*tracing_reserve_entry)(unsigned long length);
	void (*tracing_commit_entry)(void);
	void *(*iommu_donate_pages_atomic)(u8 order);
	void (*iommu_reclaim_pages_atomic)(void *p, u8 order);
	int (*iommu_snapshot_host_stage2)(struct kvm_hyp_iommu_domain *domain);
	int (*hyp_smp_processor_id)(void);
	ANDROID_KABI_USE(1, void (*iommu_flush_unmap_cache)(struct kvm_iommu_paddr_cache *cache));
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
	ANDROID_KABI_RESERVE(5);
	ANDROID_KABI_RESERVE(6);
	ANDROID_KABI_RESERVE(7);
	ANDROID_KABI_RESERVE(8);
	ANDROID_KABI_RESERVE(9);
	ANDROID_KABI_RESERVE(10);
	ANDROID_KABI_RESERVE(11);
	ANDROID_KABI_RESERVE(12);
	ANDROID_KABI_RESERVE(13);
	ANDROID_KABI_RESERVE(14);
	ANDROID_KABI_RESERVE(15);
	ANDROID_KABI_RESERVE(16);
	ANDROID_KABI_RESERVE(17);
	ANDROID_KABI_RESERVE(18);
	ANDROID_KABI_RESERVE(19);
	ANDROID_KABI_RESERVE(20);
	ANDROID_KABI_RESERVE(21);
	ANDROID_KABI_RESERVE(22);
	ANDROID_KABI_RESERVE(23);
	ANDROID_KABI_RESERVE(24);
	ANDROID_KABI_RESERVE(25);
	ANDROID_KABI_RESERVE(26);
	ANDROID_KABI_RESERVE(27);
	ANDROID_KABI_RESERVE(28);
	ANDROID_KABI_RESERVE(29);
	ANDROID_KABI_RESERVE(30);
	ANDROID_KABI_RESERVE(31);
	ANDROID_KABI_RESERVE(32);
};
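
/*
 * Illustrative sketch only (hypothetical names), assuming the module's EL2
 * init callback receives the ops table: register a putc driver, then print
 * a banner through it.
 *
 *	static void my_hyp_putc(char c)
 *	{
 *		// write @c to a UART previously mapped with
 *		// ops->create_private_mapping()
 *	}
 *
 *	int my_hyp_init(const struct pkvm_module_ops *ops)
 *	{
 *		int ret;
 *
 *		ret = ops->register_serial_driver(my_hyp_putc);
 *		if (ret)
 *			return ret;
 *
 *		ops->puts("my module: up\n");
 *		return 0;
 *	}
 */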

int __pkvm_load_el2_module(struct module *this, unsigned long *token);

int __pkvm_register_el2_call(unsigned long hfn_hyp_va);

unsigned long pkvm_el2_mod_kern_va(unsigned long addr);
#else
static inline int __pkvm_load_el2_module(struct module *this,
					 unsigned long *token)
{
	return -ENOSYS;
}

static inline int __pkvm_register_el2_call(unsigned long hfn_hyp_va)
{
	return -ENOSYS;
}

static inline unsigned long pkvm_el2_mod_kern_va(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_MODULES */

int pkvm_load_early_modules(void);

#ifdef MODULE
/*
 * Convert an EL2 module addr from the kernel VA to the hyp VA.
 */
#define pkvm_el2_mod_va(kern_va, token)					\
({									\
	unsigned long hyp_mod_kern_va =					\
		(unsigned long)THIS_MODULE->arch.hyp.sections.start;	\
	unsigned long offset;						\
									\
	offset = (unsigned long)kern_va - hyp_mod_kern_va;		\
	token + offset;							\
})
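
/*
 * For instance (hypothetical symbol), with @token returned by
 * __pkvm_load_el2_module():
 *
 *	unsigned long hyp_va = pkvm_el2_mod_va(my_el2_handler, token);
 */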

#define pkvm_load_el2_module(init_fn, token)				\
({									\
	THIS_MODULE->arch.hyp.init = init_fn;				\
	__pkvm_load_el2_module(THIS_MODULE, token);			\
})
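
/*
 * Illustrative sketch only (hypothetical names): loading the EL2 payload
 * from the kernel module's init function.
 *
 *	static int __init my_module_init(void)
 *	{
 *		unsigned long token;
 *
 *		return pkvm_load_el2_module(my_hyp_init, &token);
 *	}
 *	module_init(my_module_init);
 */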

static inline int pkvm_register_el2_mod_call(dyn_hcall_t hfn,
					     unsigned long token)
{
	return __pkvm_register_el2_call(pkvm_el2_mod_va(hfn, token));
}

#define pkvm_el2_mod_call(id, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_ID(id),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})
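
/*
 * Illustrative sketch only (hypothetical names), assuming
 * pkvm_register_el2_mod_call() hands back the dynamic HVC number on
 * success, which is then used as @id:
 *
 *	static void my_el2_handler(struct user_pt_regs *regs);	// EL2 code
 *
 *	int hvc_nr = pkvm_register_el2_mod_call(my_el2_handler, token);
 *
 *	if (hvc_nr >= 0)
 *		pkvm_el2_mod_call(hvc_nr);	// runs my_el2_handler at EL2
 */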
#endif /* MODULE */
#endif /* __ARM64_KVM_PKVM_MODULE_H__ */