// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/arm-smccc.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/geniezone.h>
#include <linux/gzvm.h>
#include <linux/soc/mediatek/gzvm_drv.h>
#include "gzvm_arch_common.h"

#define PAR_PA47_MASK GENMASK_ULL(47, 12)

static struct timecycle clock_scale_factor;

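/* Accessors for the vtimer clock scale factor computed in gzvm_arch_drv_init() */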
u32 gzvm_vtimer_get_clock_mult(void)
{
	return clock_scale_factor.mult;
}

u32 gzvm_vtimer_get_clock_shift(void)
{
	return clock_scale_factor.shift;
}

/**
 * gzvm_hypcall_wrapper() - the wrapper for hvc calls
 * @a0: argument passed in register 0
 * @a1: argument passed in register 1
 * @a2: argument passed in register 2
 * @a3: argument passed in register 3
 * @a4: argument passed in register 4
 * @a5: argument passed in register 5
 * @a6: argument passed in register 6
 * @a7: argument passed in register 7
 * @res: result values from registers 0 to 3
 *
 * Return: The geniezone errno returned by the hypercall, converted to a Linux errno.
 */
int gzvm_hypcall_wrapper(unsigned long a0, unsigned long a1,
			 unsigned long a2, unsigned long a3,
			 unsigned long a4, unsigned long a5,
			 unsigned long a6, unsigned long a7,
			 struct arm_smccc_res *res)
{
	struct arm_smccc_1_2_regs res_1_2;
	struct arm_smccc_1_2_regs args = {
		.a0 = a0,
		.a1 = a1,
		.a2 = a2,
		.a3 = a3,
		.a4 = a4,
		.a5 = a5,
		.a6 = a6,
		.a7 = a7,
	};
	trace_mtk_hypcall_enter(a0);
	arm_smccc_1_2_hvc(&args, &res_1_2);
	res->a0 = res_1_2.a0;
	res->a1 = res_1_2.a1;
	res->a2 = res_1_2.a2;
	res->a3 = res_1_2.a3;
	trace_mtk_hypcall_leave(a0, (res->a0 != ERR_NOT_SUPPORTED) ? 0 : 1);

	return gzvm_err_to_errno(res->a0);
}

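/* Issue MT_HVC_GZVM_INFORM_EXIT to notify the hypervisor about a VM exit for @vm_id */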
int gzvm_arch_inform_exit(u16 vm_id)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_INFORM_EXIT, vm_id, 0, 0, 0, 0, 0, 0, &res);
	if (ret)
		return -ENXIO;

	return 0;
}

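/*
 * Probe the hypervisor with the driver version and read back the
 * hypervisor's major/minor/sub version from the result registers.
 */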
int gzvm_arch_probe(struct gzvm_version drv_version,
		    struct gzvm_version *hyp_version)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_PROBE,
				   drv_version.major,
				   drv_version.minor,
				   drv_version.sub,
				   0, 0, 0, 0, &res);
	if (ret)
		return -ENXIO;

	hyp_version->major = (u32)res.a1;
	hyp_version->minor = (u32)res.a2;
	hyp_version->sub = res.a3;

	return 0;
}

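/*
 * Precompute the mult/shift pair used to convert arch timer cycles
 * (at cntfrq) to nanoseconds for the vtimer accessors above.
 */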
int gzvm_arch_drv_init(void)
{
	/* timecycle init mult shift */
	clocks_calc_mult_shift(&clock_scale_factor.mult,
			       &clock_scale_factor.shift,
			       arch_timer_get_cntfrq(),
			       NSEC_PER_SEC,
			       30);

	return 0;
}

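/* Hand a memory-region descriptor buffer (@buf_size bytes at @region) to the hypervisor */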
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_MEMREGION, vm_id,
				    buf_size, region, 0, 0, 0, 0, &res);
}

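/* Report the supported guest physical address width (CONFIG_ARM64_PA_BITS) to userspace */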
static int gzvm_cap_vm_gpa_size(void __user *argp)
{
	__u64 value = CONFIG_ARM64_PA_BITS;

	if (copy_to_user(argp, &value, sizeof(__u64)))
		return -EFAULT;

	return 0;
}

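/* Check whether @cap is supported; some capabilities copy extra data back through @argp */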
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp)
{
	int ret;

	switch (cap) {
	case GZVM_CAP_PROTECTED_VM: {
		__u64 success = 1;

		if (copy_to_user(argp, &success, sizeof(__u64)))
			return -EFAULT;

		return 0;
	}
	case GZVM_CAP_VM_GPA_SIZE: {
		ret = gzvm_cap_vm_gpa_size(argp);
		return ret;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/**
 * gzvm_arch_create_vm() - create vm
 * @vm_type: VM type. Only Linux VMs are supported for now.
 *
 * Return:
 * * positive value	- VM ID
 * * -ENOMEM		- Not enough memory to store VM data
 */
int gzvm_arch_create_vm(unsigned long vm_type)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_CREATE_VM, vm_type, 0, 0, 0, 0,
				   0, 0, &res);
	return ret ? ret : res.a1;
}

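/* Destroy the VM with @destroy_page_gran granularity, retrying while the hypercall returns -EAGAIN */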
int gzvm_arch_destroy_vm(u16 vm_id, u64 destroy_page_gran)
{
	struct arm_smccc_res res;
	int ret;

	do {
		ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_DESTROY_VM, vm_id,
					   destroy_page_gran, 0, 0,
					   0, 0, 0, &res);
	} while (ret == -EAGAIN);

	return ret;
}

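/* Pass a userspace memory region's guest address, size, and purpose flags to the hypervisor */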
int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MEMREGION_PURPOSE, gzvm->vm_id,
				    mem->guest_phys_addr, mem->memory_size,
				    mem->flags, 0, 0, 0, &res);
}

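/* Pass the guest DTB location and size to the hypervisor */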
int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *cfg)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_SET_DTB_CONFIG, gzvm->vm_id,
				    cfg->dtb_addr, cfg->dtb_size, 0, 0, 0, 0,
				    &res);
}

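/* Enable a capability on @gzvm via the MT_HVC_GZVM_ENABLE_CAP hypercall */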
static int gzvm_vm_arch_enable_cap(struct gzvm *gzvm,
				   struct gzvm_enable_cap *cap,
				   struct arm_smccc_res *res)
{
	return gzvm_hypcall_wrapper(MT_HVC_GZVM_ENABLE_CAP, gzvm->vm_id,
				    cap->cap, cap->args[0], cap->args[1],
				    cap->args[2], cap->args[3], cap->args[4],
				    res);
}

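/* As above, but not tied to a specific VM (the vm_id argument is 0) */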
static int gzvm_arch_enable_cap(struct gzvm_enable_cap *cap,
				struct arm_smccc_res *res)
{
	return gzvm_hypcall_wrapper(MT_HVC_GZVM_ENABLE_CAP, 0,
				    cap->cap, cap->args[0], cap->args[1],
				    cap->args[2], cap->args[3], cap->args[4],
				    res);
}

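/*
 * Query how many pages the hypervisor handles per demand-paging batch and
 * check that the block-based demand page size is a whole multiple of it.
 */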
int gzvm_arch_query_hyp_batch_pages(struct gzvm_enable_cap *cap,
				    void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	ret = gzvm_arch_enable_cap(cap, &res);
	if (ret)
		return ret;

	if (res.a1 == 0 ||
	    GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE % (PAGE_SIZE * res.a1) != 0)
		return -EFAULT;

	cap->args[0] = res.a1;

	return ret;
}

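/* Query the hypervisor's destroy-page batch size, which must be a power of 2 */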
int gzvm_arch_query_destroy_batch_pages(struct gzvm_enable_cap *cap,
					void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	ret = gzvm_arch_enable_cap(cap, &res);
	/* destroy page batch size should be a power of 2 */
	if (ret || ((res.a1 & (res.a1 - 1)) != 0))
		return -EINVAL;

	cap->args[0] = res.a1;
	return ret;
}

/**
 * gzvm_vm_ioctl_get_pvmfw_size() - Get the pvmfw size from the hypervisor
 *				    (returned in x1) and copy it back to
 *				    userspace in args
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0			- Success
 * * -EINVAL		- The hypervisor returned an invalid result
 * * -EFAULT		- Failed to copy back to the userspace buffer
 */
static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
					struct gzvm_enable_cap *cap,
					void __user *argp)
{
	struct arm_smccc_res res = {0};

	if (gzvm_vm_arch_enable_cap(gzvm, cap, &res) != 0)
		return -EINVAL;

	cap->args[1] = res.a1;
	if (copy_to_user(argp, cap, sizeof(*cap)))
		return -EFAULT;

	return 0;
}

/**
 * fill_constituents() - Populate physical addresses into the buffer until full
 * @consti: Pointer to struct mem_region_addr_range.
 * @consti_cnt: Constituent count.
 * @max_nr_consti: Maximum number of constituents.
 * @gfn: Guest frame number.
 * @total_pages: Total number of pages.
 * @slot: Pointer to struct gzvm_memslot.
 * @gzvm: Pointer to struct gzvm.
 *
 * Return: the number of pages filled in, negative on error
 */
static int fill_constituents(struct mem_region_addr_range *consti,
			     int *consti_cnt, int max_nr_consti, u64 gfn,
			     u32 total_pages, struct gzvm_memslot *slot,
			     struct gzvm *gzvm)
{
	u64 pfn = 0, prev_pfn = 0, gfn_end = 0;
	int nr_pages = 0;
	int i = -1;

	if (unlikely(total_pages == 0))
		return -EINVAL;
	gfn_end = gfn + total_pages;

	while (i < max_nr_consti && gfn < gfn_end) {
		if (gzvm_vm_allocate_guest_page(gzvm, slot, gfn, &pfn) != 0)
			return -EFAULT;
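		/* Merge physically contiguous pages into the current constituent */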
		if (i >= 0 && pfn == (prev_pfn + 1)) {
			consti[i].pg_cnt++;
		} else {
			i++;
			if (i >= max_nr_consti)
				break;
			consti[i].address = PFN_PHYS(pfn);
			consti[i].pg_cnt = 1;
		}
		prev_pfn = pfn;
		gfn++;
		nr_pages++;
	}
	if (i != max_nr_consti)
		i++;
	*consti_cnt = i;

	return nr_pages;
}

/**
 * gzvm_vm_populate_mem_region() - Iterate over a memory slot and populate
 * its pages' physical addresses to the hypervisor, one buffer at a time
 * @gzvm: Pointer to struct gzvm.
 * @slot_id: Memory slot id to be populated.
 *
 * Return: 0 on success, negative on error
 */
int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id)
{
	struct gzvm_memslot *memslot = &gzvm->memslot[slot_id];
	struct gzvm_memory_region_ranges *region;
	int max_nr_consti, remain_pages;
	u64 gfn, gfn_end;
	u32 buf_size;

	buf_size = PAGE_SIZE * 2;
	region = alloc_pages_exact(buf_size, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	max_nr_consti = (buf_size - sizeof(*region)) /
			sizeof(struct mem_region_addr_range);

	region->slot = memslot->slot_id;
	remain_pages = memslot->npages;
	gfn = memslot->base_gfn;
	gfn_end = gfn + remain_pages;

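	/* Fill the constituent buffer and register it with the hypervisor until all pages are covered */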
	while (gfn < gfn_end) {
		int nr_pages;

		nr_pages = fill_constituents(region->constituents,
					     &region->constituent_cnt,
					     max_nr_consti, gfn,
					     remain_pages, memslot, gzvm);

		if (nr_pages < 0) {
			pr_err("Failed to fill constituents\n");
			free_pages_exact(region, buf_size);
			return -EFAULT;
		}

		region->gpa = PFN_PHYS(gfn);
		region->total_pages = nr_pages;
		remain_pages -= nr_pages;
		gfn += nr_pages;

		if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
					    virt_to_phys(region))) {
			pr_err("Failed to register memregion to hypervisor\n");
			free_pages_exact(region, buf_size);
			return -EFAULT;
		}
	}
	free_pages_exact(region, buf_size);

	return 0;
}

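/* Populate every non-empty memory slot of the VM */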
static int populate_all_mem_regions(struct gzvm *gzvm)
{
	int ret, i;

	for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
		if (gzvm->memslot[i].npages == 0)
			continue;

		ret = gzvm_vm_populate_mem_region(gzvm, i);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * gzvm_vm_ioctl_cap_pvm() - Handle GZVM_CAP_PROTECTED_VM's subcommands
 * @gzvm: Pointer to struct gzvm.
 * @cap: Pointer to struct gzvm_enable_cap.
 * @argp: Pointer to struct gzvm_enable_cap in user space.
 *
 * Return:
 * * 0			- Success
 * * -EINVAL		- Invalid subcommand or arguments
 */
static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
				 struct gzvm_enable_cap *cap,
				 void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->args[0]) {
	case GZVM_CAP_PVM_SET_PVMFW_GPA:
		fallthrough;
	case GZVM_CAP_PVM_SET_PROTECTED_VM:
		/*
		 * If the hypervisor doesn't support block-based demand paging,
		 * populate memory in advance to improve performance for
		 * protected VMs.
		 */
		if (gzvm->demand_page_gran == PAGE_SIZE)
			populate_all_mem_regions(gzvm);
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	case GZVM_CAP_PVM_GET_PVMFW_SIZE:
		ret = gzvm_vm_ioctl_get_pvmfw_size(gzvm, cap, argp);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}

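/* Dispatch GZVM_ENABLE_CAP ioctl requests to the corresponding capability handlers */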
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp)
{
	struct arm_smccc_res res = {0};
	int ret;

	switch (cap->cap) {
	case GZVM_CAP_PROTECTED_VM:
		ret = gzvm_vm_ioctl_cap_pvm(gzvm, cap, argp);
		return ret;

	case GZVM_CAP_ENABLE_DEMAND_PAGING:
		fallthrough;
	case GZVM_CAP_BLOCK_BASED_DEMAND_PAGING:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	case GZVM_CAP_ENABLE_IDLE:
		ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
		return ret;
	default:
		break;
	}

	return -EINVAL;
}

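/* Map @nr_pages pages starting at host @pfn to guest frame @gfn in memslot @memslot_id */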
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
			u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST, vm_id, memslot_id,
				    pfn, gfn, nr_pages, 0, 0, &res);
}

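/* Ask the hypervisor to map a block of @nr_pages guest pages starting at @gfn (no host pfn is supplied) */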
int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages)
{
	struct arm_smccc_res res;

	return gzvm_hypcall_wrapper(MT_HVC_GZVM_MAP_GUEST_BLOCK, vm_id,
				    memslot_id, gfn, nr_pages, 0, 0, 0, &res);
}

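/* Fetch the VM's protected and shared hypervisor memory usage into gzvm->stat */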
int gzvm_arch_get_statistics(struct gzvm *gzvm)
{
	struct arm_smccc_res res;
	int ret;

	ret = gzvm_hypcall_wrapper(MT_HVC_GZVM_GET_STATISTICS, gzvm->vm_id,
				   0, 0, 0, 0, 0, 0, &res);

	gzvm->stat.protected_hyp_mem = ((ret == 0) ? res.a1 : 0);
	gzvm->stat.protected_shared_mem = ((ret == 0) ? res.a2 : 0);
	return ret;
}