// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/arm-smccc.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/geniezone.h>
#include <linux/gzvm.h>
#include <linux/soc/mediatek/gzvm_drv.h>
#include <clocksource/arm_arch_timer.h>
#include "gzvm_arch_common.h"

#define PAR_PA47_MASK GENMASK_ULL(47, 12)

static struct timecycle clock_scale_factor;

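/**
 * gzvm_vtimer_get_clock_mult() - Get the multiplier for converting timer
 *                                cycles to nanoseconds
 *
 * Return: the mult factor computed in gzvm_arch_drv_init()
 */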
u32 gzvm_vtimer_get_clock_mult(void)
{
	return clock_scale_factor.mult;
}

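/**
 * gzvm_vtimer_get_clock_shift() - Get the shift for converting timer
 *                                 cycles to nanoseconds
 *
 * Return: the shift factor computed in gzvm_arch_drv_init()
 */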
u32 gzvm_vtimer_get_clock_shift(void)
{
	return clock_scale_factor.shift;
}

/**
 * gzvm_hypcall_wrapper() - the wrapper for hvc calls
 * @a0: argument passed in register 0
 * @a1: argument passed in register 1
 * @a2: argument passed in register 2
 * @a3: argument passed in register 3
 * @a4: argument passed in register 4
 * @a5: argument passed in register 5
 * @a6: argument passed in register 6
 * @a7: argument passed in register 7
 * @res: result values from registers 0 to 3
 *
 * Return: 0 on success, or the GenieZone errno converted to a Linux errno.
 */
int gzvm_hypcall_wrapper(unsigned long a0, unsigned long a1,
			 unsigned long a2, unsigned long a3,
			 unsigned long a4, unsigned long a5,
			 unsigned long a6, unsigned long a7,
			 struct arm_smccc_res *res)
{
	struct arm_smccc_1_2_regs res_1_2;
	struct arm_smccc_1_2_regs args = {
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.a4 = a4,
		.a5 = a5,
		.a6 = a6,
		.a7 = a7,
	};

	trace_mtk_hypcall_enter(a0);
	arm_smccc_1_2_hvc(&args, &res_1_2);
	res->a0 = res_1_2.a0;
	res->a1 = res_1_2.a1;
	res->a2 = res_1_2.a2;
	res->a3 = res_1_2.a3;
	trace_mtk_hypcall_leave(a0, (res->a0 != ERR_NOT_SUPPORTED) ? 0 : 1);

	return gzvm_err_to_errno(res->a0);
}

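/**
 * gzvm_arch_inform_exit() - Inform the hypervisor that the VM is exiting
 * @vm_id: VM ID of the guest
 *
 * Return: 0 on success, -ENXIO if the hypercall fails
 */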
int gzvm_arch_inform_exit(u16 vm_id)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_INFORM_EXIT, vm_id, 0, 0, 0, 0, 0, 0, &res);
	if (ret)
		return -ENXIO;

	return 0;
}

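/**
 * gzvm_arch_probe() - Probe for the GenieZone hypervisor
 *
 * Return: 0 if the hypervisor answers the probe hypercall, -ENXIO otherwise
 */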
int gzvm_arch_probe(void)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_PROBE, 0, 0, 0, 0, 0, 0, 0, &res);
	if (ret)
		return -ENXIO;

	return 0;
}

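/**
 * gzvm_arch_drv_init() - Architecture-specific driver initialization
 *
 * Compute the mult/shift pair used to convert arch timer cycles to
 * nanoseconds for the guest vtimer.
 *
 * Return: always 0
 */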
int gzvm_arch_drv_init(void)
{
	/* Initialize mult/shift for converting timer cycles to nanoseconds */
	clocks_calc_mult_shift(&clock_scale_factor.mult,
			       &clock_scale_factor.shift,
			       arch_timer_get_cntfrq(),
			       NSEC_PER_SEC,
			       30);

	return 0;
}

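/**
 * gzvm_arch_set_memregion() - Pass a populated memory region buffer to the
 *                             hypervisor
 * @vm_id: VM ID of the guest
 * @buf_size: Size of the region buffer in bytes
 * @region: Physical address of the region buffer
 *
 * Return: 0 on success, a Linux errno otherwise
 */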
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_MEMREGION, vm_id,
				    buf_size, region, 0, 0, 0, 0, &res);
}

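/**
 * gzvm_cap_vm_gpa_size() - Report the guest physical address size in bits
 * @argp: Pointer to a __u64 result buffer in user space
 *
 * Return: 0 on success, -EFAULT if the copy to userspace fails
 */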
static int gzvm_cap_vm_gpa_size(void __user *argp)
{
	__u64 value = CONFIG_ARM64_PA_BITS;

	if (copy_to_user(argp, &value, sizeof(__u64)))
		return -EFAULT;

	return 0;
}

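/**
 * gzvm_arch_check_extension() - Query support for a capability
 * @gzvm: Pointer to struct gzvm
 * @cap: Capability to query
 * @argp: Pointer to the result buffer in user space
 *
 * Return: 0 on success, -EFAULT on copy failure, -EOPNOTSUPP for
 *         unsupported capabilities
 */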
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp)
{
	int ret;

	switch (cap) {
	case GZVM_CAP_PROTECTED_VM: {
		__u64 success = 1;

		if (copy_to_user(argp, &success, sizeof(__u64)))
			return -EFAULT;

		return 0;
	}
	case GZVM_CAP_VM_GPA_SIZE: {
		ret = gzvm_cap_vm_gpa_size(argp);
		return ret;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/**
 * gzvm_arch_create_vm() - create vm
 * @vm_type: VM type. Only Linux VMs are supported at the moment.
 *
 * Return:
 * * positive value - VM ID
 * * -ENOMEM - Not enough memory for storing VM data
 */
int gzvm_arch_create_vm(unsigned long vm_type)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VM, vm_type, 0, 0, 0, 0,
				   0, 0, &res);
	return ret ? ret : res.a1;
}

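/**
 * gzvm_arch_destroy_vm() - Ask the hypervisor to destroy the VM
 * @vm_id: VM ID of the guest to destroy
 *
 * Return: 0 on success, a Linux errno otherwise
 */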
int gzvm_arch_destroy_vm(u16 vm_id)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VM, vm_id, 0, 0, 0, 0,
				    0, 0, &res);
}

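/**
 * gzvm_arch_memregion_purpose() - Tell the hypervisor the purpose of a
 *                                 memory region
 * @gzvm: Pointer to struct gzvm
 * @mem: Pointer to the userspace region; its flags encode the purpose
 *
 * Return: 0 on success, a Linux errno otherwise
 */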
int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MEMREGION_PURPOSE, gzvm->vm_id,
				    mem->guest_phys_addr, mem->memory_size,
				    mem->flags, 0, 0, 0, &res);
}

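/**
 * gzvm_arch_set_dtb_config() - Pass the guest DTB address and size to the
 *                              hypervisor
 * @gzvm: Pointer to struct gzvm
 * @cfg: Pointer to struct gzvm_dtb_config
 *
 * Return: 0 on success, a Linux errno otherwise
 */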
int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *cfg)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_DTB_CONFIG, gzvm->vm_id,
				    cfg->dtb_addr, cfg->dtb_size, 0, 0, 0, 0,
				    &res);
}

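/**
 * gzvm_vm_arch_enable_cap() - Enable a capability for the VM via hypercall
 * @gzvm: Pointer to struct gzvm
 * @cap: Pointer to the capability and its arguments
 * @res: Pointer to the hypercall result registers
 *
 * Return: 0 on success, a Linux errno otherwise
 */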
static int gzvm_vm_arch_enable_cap(struct gzvm *gzvm,
				   struct gzvm_enable_cap *cap,
				   struct arm_smccc_res *res)
{
	return gzvm_hypcall_wrapper(MT_HVC_GZVM_ENABLE_CAP, gzvm->vm_id,
				    cap->cap, cap->args[0], cap->args[1],
				    cap->args[2], cap->args[3], cap->args[4],
				    res);
}

/**
 * gzvm_vm_ioctl_get_pvmfw_size() - Get the pvmfw size from the hypervisor.
 * The size is returned in x1 and copied back to userspace in @cap's args.
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - The hypervisor returned invalid results
 * * -EFAULT - Failed to copy back to the userspace buffer
 */
static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
					struct gzvm_enable_cap *cap,
					void __user *argp)
{
	struct arm_smccc_res res = {0};

	if (gzvm_vm_arch_enable_cap(gzvm, cap, &res) != 0)
		return -EINVAL;

	cap->args[1] = res.a1;
	if (copy_to_user(argp, cap, sizeof(*cap)))
		return -EFAULT;

	return 0;
}

/**
 * fill_constituents() - Populate physical addresses into the buffer until full
 * @consti: Pointer to struct mem_region_addr_range.
 * @consti_cnt: Pointer to the constituent count.
 * @max_nr_consti: Maximum number of constituents.
 * @gfn: Guest frame number.
 * @total_pages: Total number of pages.
 * @slot: Pointer to struct gzvm_memslot.
 * @gzvm: Pointer to struct gzvm.
 *
 * Return: the number of pages filled in, or a negative errno on error
 */
static int fill_constituents(struct mem_region_addr_range *consti,
			     int *consti_cnt, int max_nr_consti, u64 gfn,
			     u32 total_pages, struct gzvm_memslot *slot,
			     struct gzvm *gzvm)
{
	u64 pfn = 0, prev_pfn = 0, gfn_end = 0;
	int nr_pages = 0;
	int i = -1;

	if (unlikely(total_pages == 0))
		return -EINVAL;
	gfn_end = gfn + total_pages;

	while (i < max_nr_consti && gfn < gfn_end) {
		if (gzvm_vm_allocate_guest_page(gzvm, slot, gfn, &pfn) != 0)
			return -EFAULT;
		/* Only extend the current constituent once one exists (i >= 0) */
		if (i >= 0 && pfn == (prev_pfn + 1)) {
			consti[i].pg_cnt++;
		} else {
			i++;
			if (i >= max_nr_consti)
				break;
			consti[i].address = PFN_PHYS(pfn);
			consti[i].pg_cnt = 1;
		}
		prev_pfn = pfn;
		gfn++;
		nr_pages++;
	}
	if (i != max_nr_consti)
		i++;
	*consti_cnt = i;

	return nr_pages;
}

/**
 * gzvm_vm_populate_mem_region() - Iterate over the given memory slot and
 * populate physical addresses into the buffer until it is full
 * @gzvm: Pointer to struct gzvm.
 * @slot_id: Memory slot id to be populated.
 *
 * Return: 0 on success, negative errno on error
 */
int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id)
{
	struct gzvm_memslot *memslot = &gzvm->memslot[slot_id];
	struct gzvm_memory_region_ranges *region;
	int max_nr_consti, remain_pages;
	u64 gfn, gfn_end;
	u32 buf_size;

	buf_size = PAGE_SIZE * 2;
	region = alloc_pages_exact(buf_size, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	max_nr_consti = (buf_size - sizeof(*region)) /
			sizeof(struct mem_region_addr_range);

	region->slot = memslot->slot_id;
	remain_pages = memslot->npages;
	gfn = memslot->base_gfn;
	gfn_end = gfn + remain_pages;

	while (gfn < gfn_end) {
		int nr_pages;

		nr_pages = fill_constituents(region->constituents,
					     &region->constituent_cnt,
					     max_nr_consti, gfn,
					     remain_pages, memslot, gzvm);

		if (nr_pages < 0) {
			pr_err("Failed to fill constituents\n");
			free_pages_exact(region, buf_size);
			return -EFAULT;
		}

		region->gpa = PFN_PHYS(gfn);
		region->total_pages = nr_pages;
		remain_pages -= nr_pages;
		gfn += nr_pages;

		if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
					    virt_to_phys(region))) {
			pr_err("Failed to register memregion to hypervisor\n");
			free_pages_exact(region, buf_size);
			return -EFAULT;
		}
	}
	free_pages_exact(region, buf_size);

	return 0;
}

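/**
 * populate_all_mem_regions() - Populate every non-empty memory slot
 * @gzvm: Pointer to struct gzvm
 *
 * Return: 0 on success, the errno of the first failing slot otherwise
 */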
static int populate_all_mem_regions(struct gzvm *gzvm)
{
	int ret, i;

	for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
		if (gzvm->memslot[i].npages == 0)
			continue;

		ret = gzvm_vm_populate_mem_region(gzvm, i);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * gzvm_vm_ioctl_cap_pvm() - Process GZVM_CAP_PROTECTED_VM's subcommands
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - Invalid subcommand or arguments
 */
static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
				 struct gzvm_enable_cap *cap,
				 void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->args[0]) {
	case GZVM_CAP_PVM_SET_PVMFW_GPA:
		fallthrough;
	case GZVM_CAP_PVM_SET_PROTECTED_VM:
		/*
		 * If the hypervisor doesn't support block-based demand paging,
		 * populate memory in advance to improve performance for
		 * protected VMs.
		 */
		if (gzvm->demand_page_gran == PAGE_SIZE)
			populate_all_mem_regions(gzvm);
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	case GZVM_CAP_PVM_GET_PVMFW_SIZE:
		ret = gzvm_vm_ioctl_get_pvmfw_size(gzvm, cap, argp);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}

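/**
 * gzvm_vm_ioctl_arch_enable_cap() - Dispatch GZVM_ENABLE_CAP to its handler
 * @gzvm: Pointer to struct gzvm
 * @cap: Pointer to struct gzvm_enable_cap
 * @argp: Pointer to struct gzvm_enable_cap in user space
 *
 * Return: 0 on success, a negative errno otherwise; -EINVAL for
 *         unsupported capabilities
 */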
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->cap) {
	case GZVM_CAP_PROTECTED_VM:
		ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
		return ret;

	case GZVM_CAP_ENABLE_DEMAND_PAGING:
		fallthrough;
	case GZVM_CAP_BLOCK_BASED_DEMAND_PAGING:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	case GZVM_CAP_ENABLE_IDLE:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}

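/**
 * gzvm_arch_map_guest() - Map a contiguous range of host pages into the guest
 * @vm_id: VM ID of the guest
 * @memslot_id: Memory slot the mapping belongs to
 * @pfn: Start of the host page frame range
 * @gfn: Start of the guest frame range
 * @nr_pages: Number of pages to map
 *
 * Return: 0 on success, a Linux errno otherwise
 */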
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
			u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
				    pfn, gfn, nr_pages, 0, 0, &res);
}

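/**
 * gzvm_arch_map_guest_block() - Map a demand-paging block into the guest
 * @vm_id: VM ID of the guest
 * @memslot_id: Memory slot the mapping belongs to
 * @gfn: Start of the guest frame range
 * @nr_pages: Number of pages in the block
 *
 * Return: 0 on success, a Linux errno otherwise
 */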
int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST_BLOCK, vm_id,
				    memslot_id, gfn, nr_pages, 0, 0, 0, &res);
}

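/**
 * gzvm_arch_get_statistics() - Fetch hypervisor memory statistics for the VM
 * @gzvm: Pointer to struct gzvm
 *
 * On success, gzvm->stat is filled with the protected hypervisor and shared
 * memory usage reported by the hypervisor; on failure, both are zeroed.
 *
 * Return: 0 on success, a Linux errno otherwise
 */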
int gzvm_arch_get_statistics(struct gzvm *gzvm)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_GET_STATISTICS, gzvm->vm_id,
				   0, 0, 0, 0, 0, 0, &res);

	gzvm->stat.protected_hyp_mem = ((ret == 0) ? res.a1 : 0);
	gzvm->stat.protected_shared_mem = ((ret == 0) ? res.a2 : 0);
	return ret;
}