1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023 MediaTek Inc.
4  */
5 
6 #include <linux/anon_inodes.h>
7 #include <linux/debugfs.h>
8 #include <linux/file.h>
9 #include <linux/kdev_t.h>
10 #include <linux/mm.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/slab.h>
14 #include <linux/soc/mediatek/gzvm_drv.h>
15 #include <trace/hooks/gzvm.h>
16 #include "gzvm_common.h"
17 
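/* All live VMs are linked on gzvm_list, protected by gzvm_list_lock. */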
18 static DEFINE_MUTEX(gzvm_list_lock);
19 static LIST_HEAD(gzvm_list);
20 
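/**
 * gzvm_gfn_to_hva_memslot() - Translate a gfn within @memslot to a host virtual address
 * @memslot: Pointer to the memslot that contains @gfn
 * @gfn: Guest frame number to translate
 * @hva_memslot: Output host virtual address
 *
 * Return: 0 for success, -EINVAL if @gfn is below the memslot's base gfn
 */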
21 int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
22 			    u64 *hva_memslot)
23 {
24 	u64 offset;
25 
26 	if (gfn < memslot->base_gfn)
27 		return -EINVAL;
28 
29 	offset = gfn - memslot->base_gfn;
30 	*hva_memslot = memslot->userspace_addr + offset * PAGE_SIZE;
31 	return 0;
32 }
33 
34 /**
35  * gzvm_find_memslot() - Find memslot containing this @gfn
36  * @vm: Pointer to struct gzvm
37  * @gfn: Guest frame number
38  *
39  * Return:
40  * * >=0		- Index of memslot
41  * * -EFAULT		- Not found
42  */
43 int gzvm_find_memslot(struct gzvm *vm, u64 gfn)
44 {
45 	int i;
46 
47 	for (i = 0; i < GZVM_MAX_MEM_REGION; i++) {
48 		if (vm->memslot[i].npages == 0)
49 			continue;
50 
51 		if (gfn >= vm->memslot[i].base_gfn &&
52 		    gfn < vm->memslot[i].base_gfn + vm->memslot[i].npages)
53 			return i;
54 	}
55 
56 	return -EFAULT;
57 }
58 
59 /**
60  * register_memslot_addr_range() - Register memory region to GenieZone
61  * @gzvm: Pointer to struct gzvm
62  * @memslot: Pointer to struct gzvm_memslot
63  *
64  * Return: 0 for success, negative number for error
65  */
66 static int
67 register_memslot_addr_range(struct gzvm *gzvm, struct gzvm_memslot *memslot)
68 {
69 	struct gzvm_memory_region_ranges *region;
70 	u32 buf_size = PAGE_SIZE * 2;
71 	u64 gfn;
72 
73 	region = alloc_pages_exact(buf_size, GFP_KERNEL);
74 	if (!region)
75 		return -ENOMEM;
76 
77 	region->slot = memslot->slot_id;
78 	region->total_pages = memslot->npages;
79 	gfn = memslot->base_gfn;
80 	region->gpa = PFN_PHYS(gfn);
81 
82 	if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
83 				    virt_to_phys(region))) {
84 		pr_err("Failed to register memregion to hypervisor\n");
85 		free_pages_exact(region, buf_size);
86 		return -EFAULT;
87 	}
88 
89 	free_pages_exact(region, buf_size);
90 
91 	if (gzvm->mem_alloc_mode == GZVM_DEMAND_PAGING)
92 		return 0;
93 	return gzvm_vm_populate_mem_region(gzvm, memslot->slot_id);
94 }
95 
96 /**
97  * memory_region_pre_check() - Preliminary check for userspace memory region
98  * @gzvm: Pointer to struct gzvm.
99  * @mem: Input memory region from user.
100  *
101  * Return: true for check passed, false for invalid input.
102  */
103 static bool
104 memory_region_pre_check(struct gzvm *gzvm,
105 			struct gzvm_userspace_memory_region *mem)
106 {
107 	if (mem->slot >= GZVM_MAX_MEM_REGION)
108 		return false;
109 
110 	if (!PAGE_ALIGNED(mem->guest_phys_addr) ||
111 	    !PAGE_ALIGNED(mem->memory_size))
112 		return false;
113 
114 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
115 		return false;
116 
117 	if ((mem->memory_size >> PAGE_SHIFT) > GZVM_MEM_MAX_NR_PAGES)
118 		return false;
119 
120 	return true;
121 }
122 
123 /**
124  * gzvm_vm_ioctl_set_memory_region() - Set memory region of guest
125  * @gzvm: Pointer to struct gzvm.
126  * @mem: Input memory region from user.
127  *
128  * Return: 0 for success, negative number for error
129  *
130  * -EINVAL		- The memslot is out-of-range
131  * -EFAULT		- Cannot find corresponding vma
132  * -EINVAL		- Region size and VMA size mismatch
133  */
134 static int
135 gzvm_vm_ioctl_set_memory_region(struct gzvm *gzvm,
136 				struct gzvm_userspace_memory_region *mem)
137 {
138 	int ret;
139 	struct vm_area_struct *vma;
140 	struct gzvm_memslot *memslot;
141 	unsigned long size;
142 
143 	if (!memory_region_pre_check(gzvm, mem))
144 		return -EINVAL;
145 
146 	memslot = &gzvm->memslot[mem->slot];
147 
148 	vma = vma_lookup(gzvm->mm, mem->userspace_addr);
149 	if (!vma)
150 		return -EFAULT;
151 
152 	size = vma->vm_end - vma->vm_start;
153 	if (size != mem->memory_size)
154 		return -EINVAL;
155 
156 	memslot->base_gfn = __phys_to_pfn(mem->guest_phys_addr);
157 	memslot->npages = size >> PAGE_SHIFT;
158 	memslot->userspace_addr = mem->userspace_addr;
159 	memslot->vma = vma;
160 	memslot->flags = mem->flags;
161 	memslot->slot_id = mem->slot;
162 
163 	ret = gzvm_arch_memregion_purpose(gzvm, mem);
164 	if (ret) {
165 		pr_err("Failed to config memory region for the specified purpose\n");
166 		return -EFAULT;
167 	}
168 	return register_memslot_addr_range(gzvm, memslot);
169 }
170 
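/*
 * gzvm_irqchip_inject_irq() - Wake up all vCPUs of the VM and ask the
 * hypervisor to inject @irq with the given @level for vCPU @vcpu_idx.
 */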
171 int gzvm_irqchip_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
172 			    u32 irq, bool level)
173 {
174 	gzvm_vcpu_wakeup_all(gzvm);
175 	return gzvm_arch_inject_irq(gzvm, vcpu_idx, irq, level);
176 }
177 
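/*
 * Decode the packed GZVM_IRQ_LINE payload: the low vCPU index field, the
 * extended vCPU2 field (in units of GZVM_IRQ_VCPU_MASK + 1) and the
 * interrupt number, then inject the interrupt at the requested level.
 */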
178 static int gzvm_vm_ioctl_irq_line(struct gzvm *gzvm,
179 				  struct gzvm_irq_level *irq_level)
180 {
181 	u32 irq = irq_level->irq;
182 	u32 vcpu_idx, vcpu2_idx, irq_num;
183 	bool level = irq_level->level;
184 
185 	vcpu_idx = FIELD_GET(GZVM_IRQ_LINE_VCPU, irq);
186 	vcpu2_idx = FIELD_GET(GZVM_IRQ_LINE_VCPU2, irq) * (GZVM_IRQ_VCPU_MASK + 1);
187 	irq_num = FIELD_GET(GZVM_IRQ_LINE_NUM, irq);
188 
189 	return gzvm_irqchip_inject_irq(gzvm, vcpu_idx + vcpu2_idx, irq_num,
190 				       level);
191 }
192 
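/*
 * gzvm_vm_ioctl_create_device() - Handle GZVM_CREATE_DEVICE: copy the request
 * from userspace and, if device-specific attribute data is supplied, copy it
 * into a kernel buffer and pass its physical address to the hypervisor.
 */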
193 static int gzvm_vm_ioctl_create_device(struct gzvm *gzvm, void __user *argp)
194 {
195 	struct gzvm_create_device *gzvm_dev;
196 	void *dev_data = NULL;
197 	int ret;
198 
199 	gzvm_dev = (struct gzvm_create_device *)alloc_pages_exact(PAGE_SIZE,
200 								  GFP_KERNEL);
201 	if (!gzvm_dev)
202 		return -ENOMEM;
203 	if (copy_from_user(gzvm_dev, argp, sizeof(*gzvm_dev))) {
204 		ret = -EFAULT;
205 		goto err_free_dev;
206 	}
207 
208 	if (gzvm_dev->attr_addr != 0 && gzvm_dev->attr_size != 0) {
209 		size_t attr_size = gzvm_dev->attr_size;
210 		void __user *attr_addr = u64_to_user_ptr(gzvm_dev->attr_addr);
211 
212 		/* Size of device specific data should not be over a page. */
213 		if (attr_size > PAGE_SIZE) {
214 			ret = -EINVAL;
			goto err_free_dev;
		}
215 
216 		dev_data = alloc_pages_exact(attr_size, GFP_KERNEL);
217 		if (!dev_data) {
218 			ret = -ENOMEM;
219 			goto err_free_dev;
220 		}
221 
222 		if (copy_from_user(dev_data, attr_addr, attr_size)) {
223 			ret = -EFAULT;
224 			goto err_free_dev_data;
225 		}
226 		gzvm_dev->attr_addr = virt_to_phys(dev_data);
227 	}
228 
229 	ret = gzvm_arch_create_device(gzvm->vm_id, gzvm_dev);
230 err_free_dev_data:
231 	if (dev_data)
232 		free_pages_exact(dev_data, gzvm_dev->attr_size);
233 err_free_dev:
234 	free_pages_exact(gzvm_dev, PAGE_SIZE);
235 	return ret;
236 }
237 
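/* Forward GZVM_ENABLE_CAP requests to the architecture-specific handler. */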
238 static int gzvm_vm_ioctl_enable_cap(struct gzvm *gzvm,
239 				    struct gzvm_enable_cap *cap,
240 				    void __user *argp)
241 {
242 	return gzvm_vm_ioctl_arch_enable_cap(gzvm, cap, argp);
243 }
244 
245 /* gzvm_vm_ioctl() - Ioctl handler of VM FD */
246 static long gzvm_vm_ioctl(struct file *filp, unsigned int ioctl,
247 			  unsigned long arg)
248 {
249 	long ret;
250 	void __user *argp = (void __user *)arg;
251 	struct gzvm *gzvm = filp->private_data;
252 
253 	switch (ioctl) {
254 	case GZVM_CHECK_EXTENSION: {
255 		ret = gzvm_dev_ioctl_check_extension(gzvm, arg);
256 		break;
257 	}
258 	case GZVM_CREATE_VCPU: {
259 		ret = gzvm_vm_ioctl_create_vcpu(gzvm, arg);
260 		break;
261 	}
262 	case GZVM_SET_USER_MEMORY_REGION: {
263 		struct gzvm_userspace_memory_region userspace_mem;
264 
265 		if (copy_from_user(&userspace_mem, argp, sizeof(userspace_mem)))
266 			return -EFAULT;
267 
268 		ret = gzvm_vm_ioctl_set_memory_region(gzvm, &userspace_mem);
269 		break;
270 	}
271 	case GZVM_IRQ_LINE: {
272 		struct gzvm_irq_level irq_event;
273 
274 		if (copy_from_user(&irq_event, argp, sizeof(irq_event))) {
275 			ret = -EFAULT;
276 			goto out;
277 		}
278 		ret = gzvm_vm_ioctl_irq_line(gzvm, &irq_event);
279 		break;
280 	}
281 	case GZVM_CREATE_DEVICE: {
282 		ret = gzvm_vm_ioctl_create_device(gzvm, argp);
283 		break;
284 	}
285 	case GZVM_IRQFD: {
286 		struct gzvm_irqfd data;
287 
288 		if (copy_from_user(&data, argp, sizeof(data))) {
289 			ret = -EFAULT;
290 			goto out;
291 		}
292 		ret = gzvm_irqfd(gzvm, &data);
293 		break;
294 	}
295 	case GZVM_IOEVENTFD: {
296 		struct gzvm_ioeventfd data;
297 
298 		if (copy_from_user(&data, argp, sizeof(data))) {
299 			ret = -EFAULT;
300 			goto out;
301 		}
302 		ret = gzvm_ioeventfd(gzvm, &data);
303 		break;
304 	}
305 	case GZVM_ENABLE_CAP: {
306 		struct gzvm_enable_cap cap;
307 
308 		if (copy_from_user(&cap, argp, sizeof(cap))) {
309 			ret = -EFAULT;
310 			goto out;
311 		}
312 		ret = gzvm_vm_ioctl_enable_cap(gzvm, &cap, argp);
313 		break;
314 	}
315 	case GZVM_SET_DTB_CONFIG: {
316 		struct gzvm_dtb_config cfg;
317 
318 		if (copy_from_user(&cfg, argp, sizeof(cfg))) {
319 			ret = -EFAULT;
320 			goto out;
321 		}
322 		ret = gzvm_arch_set_dtb_config(gzvm, &cfg);
323 		break;
324 	}
325 	default:
326 		ret = -ENOTTY;
327 	}
328 out:
329 	return ret;
330 }
331 
332 /* Invoker of this function is responsible for locking */
333 static void gzvm_destroy_all_ppage(struct gzvm *gzvm)
334 {
335 	struct gzvm_pinned_page *ppage;
336 	struct rb_node *node;
337 
338 	node = rb_first(&gzvm->pinned_pages);
339 	while (node) {
340 		ppage = rb_entry(node, struct gzvm_pinned_page, node);
341 		unpin_user_pages_dirty_lock(&ppage->page, 1, true);
342 		node = rb_next(node);
343 		rb_erase(&ppage->node, &gzvm->pinned_pages);
344 		kfree(ppage);
345 	}
346 }
347 
348 static int gzvm_destroy_vm_debugfs(struct gzvm *vm)
349 {
350 	debugfs_remove_recursive(vm->debug_dir);
351 	return 0;
352 }
353 
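/*
 * gzvm_destroy_vm() - Final VM teardown: release irqfd/ioeventfd resources,
 * destroy the vCPUs, ask the hypervisor to destroy the VM, unlink the VM from
 * gzvm_list, free the demand paging buffer, unpin all pinned pages, remove
 * the debugfs directory and free the gzvm structure.
 */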
354 static void gzvm_destroy_vm(struct gzvm *gzvm)
355 {
356 	size_t allocated_size;
357 
358 	pr_debug("VM-%u is going to be destroyed\n", gzvm->vm_id);
359 
360 	mutex_lock(&gzvm->lock);
361 
362 	gzvm_vm_irqfd_release(gzvm);
363 	gzvm_vm_ioeventfd_release(gzvm);
364 	gzvm_destroy_vcpus(gzvm);
365 	gzvm_arch_destroy_vm(gzvm->vm_id, gzvm->gzvm_drv->destroy_batch_pages);
366 
367 	mutex_lock(&gzvm_list_lock);
368 	list_del(&gzvm->vm_list);
369 	mutex_unlock(&gzvm_list_lock);
370 
371 	if (gzvm->demand_page_buffer) {
372 		allocated_size = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE *
373 				 sizeof(u64);
374 		free_pages_exact(gzvm->demand_page_buffer, allocated_size);
375 	}
376 
377 	mutex_unlock(&gzvm->lock);
378 
379 	trace_android_vh_gzvm_destroy_vm_post_process(gzvm);
380 
381 	/* No need to lock here because it's single-threaded execution */
382 	gzvm_destroy_all_ppage(gzvm);
383 
384 	gzvm_destroy_vm_debugfs(gzvm);
385 
386 	kfree(gzvm);
387 }
388 
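/* kref release callback: invoked when the last reference to the VM is dropped. */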
389 static void __gzvm_vm_put(struct kref *kref)
390 {
391 	struct gzvm *gzvm = container_of(kref, struct gzvm, kref);
392 
393 	gzvm_destroy_vm(gzvm);
394 }
395 
396 void gzvm_vm_put(struct gzvm *gzvm)
397 {
398 	kref_put(&gzvm->kref, __gzvm_vm_put);
399 }
400 
401 void gzvm_vm_get(struct gzvm *gzvm)
402 {
403 	kref_get(&gzvm->kref);
404 }
405 
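/* Release callback of the VM fd: drop the file's reference to the VM. */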
406 static int gzvm_vm_release(struct inode *inode, struct file *filp)
407 {
408 	struct gzvm *gzvm = filp->private_data;
409 
410 	gzvm_vm_put(gzvm);
411 
412 	return 0;
413 }
414 
415 static const struct file_operations gzvm_vm_fops = {
416 	.release        = gzvm_vm_release,
417 	.unlocked_ioctl = gzvm_vm_ioctl,
418 };
419 
420 /**
421  * setup_vm_demand_paging() - Query the hypervisor for a suitable demand page size and set it up
422  * @vm: gzvm instance for setting up demand page size
423  *
424  * Return: void
425  */
426 static void setup_vm_demand_paging(struct gzvm *vm)
427 {
428 	struct gzvm_enable_cap cap = {0};
429 	u32 buf_size;
430 	void *buffer;
431 	int ret;
432 
433 	buf_size = (GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE) *
434 		    sizeof(pte_t);
435 	mutex_init(&vm->demand_paging_lock);
436 	buffer = alloc_pages_exact(buf_size, GFP_KERNEL);
437 	if (!buffer) {
438 		/* Fall back to using the default page size for demand paging */
439 		vm->demand_page_gran = PAGE_SIZE;
440 		vm->demand_page_buffer = NULL;
441 		return;
442 	}
443 
444 	cap.cap = GZVM_CAP_BLOCK_BASED_DEMAND_PAGING;
445 	cap.args[0] = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
446 	cap.args[1] = (__u64)virt_to_phys(buffer);
447 	/* demand_page_buffer is freed when the VM is destroyed */
448 	vm->demand_page_buffer = buffer;
449 
450 	ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
451 	if (ret == 0) {
452 		vm->demand_page_gran = GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE;
453 		/* freed when the VM is destroyed */
454 		vm->demand_page_buffer = buffer;
455 	} else {
456 		vm->demand_page_gran = PAGE_SIZE;
457 		vm->demand_page_buffer = NULL;
458 		free_pages_exact(buffer, buf_size);
459 	}
460 }
461 
462 /**
463  * hyp_mem_read() - Get size of hypervisor-allocated memory and stage 2 table
464  * @file: Pointer to struct file
465  * @buf: User space buffer for storing the return value
466  * @len: Size of @buf, in bytes
467  * @offset: Pointer to loff_t
468  *
469  * Return: Size of hypervisor-allocated memory and stage 2 table, in bytes
470  */
471 static ssize_t hyp_mem_read(struct file *file, char __user *buf, size_t len,
472 			    loff_t *offset)
473 {
474 	char tmp_buffer[GZVM_MAX_DEBUGFS_VALUE_SIZE] = {0};
475 	struct gzvm *vm = file->private_data;
476 	int ret;
477 
478 	if (*offset == 0) {
479 		ret = gzvm_arch_get_statistics(vm);
480 		if (ret)
481 			return ret;
482 		snprintf(tmp_buffer, sizeof(tmp_buffer), "%llu\n",
483 			 vm->stat.protected_hyp_mem);
484 		if (copy_to_user(buf, tmp_buffer, sizeof(tmp_buffer)))
485 			return -EFAULT;
486 		*offset += sizeof(tmp_buffer);
487 		return sizeof(tmp_buffer);
488 	}
489 	return 0;
490 }
491 
492 /**
493  * shared_mem_read() - Get size of memory shared between host and guest
494  * @file: Pointer to struct file
495  * @buf: User space buffer for storing the return value
496  * @len: Size of @buf, in bytes
497  * @offset: Pointer to loff_t
498  *
499  * Return: Size of memory shared between host and guest, in bytes
500  */
501 static ssize_t shared_mem_read(struct file *file, char __user *buf, size_t len,
502 			       loff_t *offset)
503 {
504 	char tmp_buffer[GZVM_MAX_DEBUGFS_VALUE_SIZE] = {0};
505 	struct gzvm *vm = file->private_data;
506 	int ret;
507 
508 	if (*offset == 0) {
509 		ret = gzvm_arch_get_statistics(vm);
510 		if (ret)
511 			return ret;
512 		snprintf(tmp_buffer, sizeof(tmp_buffer), "%llu\n",
513 			 vm->stat.protected_shared_mem);
514 		if (copy_to_user(buf, tmp_buffer, sizeof(tmp_buffer)))
515 			return -EFAULT;
516 		*offset += sizeof(tmp_buffer);
517 		return sizeof(tmp_buffer);
518 	}
519 	return 0;
520 }
521 
522 static const struct file_operations hyp_mem_fops = {
523 	.open = simple_open,
524 	.read = hyp_mem_read,
525 };
526 
527 static const struct file_operations shared_mem_fops = {
528 	.open = simple_open,
529 	.read = shared_mem_read,
530 };
531 
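/*
 * Create the per-VM debugfs directory, named "<pid>-<vm_id>", containing
 * read-only files that expose the protected hypervisor and shared memory
 * statistics.
 */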
532 static int gzvm_create_vm_debugfs(struct gzvm *vm)
533 {
534 	struct dentry *dent;
535 	char dir_name[GZVM_MAX_DEBUGFS_DIR_NAME_SIZE];
536 
537 	if (!vm->gzvm_drv->gzvm_debugfs_dir) {
538 		pr_warn("VM debugfs directory does not exist\n");
539 		return -EFAULT;
540 	}
541 
542 	if (vm->debug_dir) {
543 		pr_warn("VM debugfs directory is duplicated\n");
544 		return 0;
545 	}
546 
547 	snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), vm->vm_id);
548 
549 	dent = debugfs_lookup(dir_name, vm->gzvm_drv->gzvm_debugfs_dir);
550 	if (dent) {
551 		pr_warn("Debugfs directory is duplicated\n");
552 		dput(dent);
553 		return 0;
554 	}
555 	dent = debugfs_create_dir(dir_name, vm->gzvm_drv->gzvm_debugfs_dir);
556 	vm->debug_dir = dent;
557 
558 	debugfs_create_file("protected_shared_mem", 0444, dent, vm, &shared_mem_fops);
559 	debugfs_create_file("protected_hyp_mem", 0444, dent, vm, &hyp_mem_fops);
560 
561 	return 0;
562 }
563 
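/*
 * Probe GZVM_CAP_ENABLE_DEMAND_PAGING: use demand paging if the hypervisor
 * supports it, otherwise fall back to fully populated memory regions.
 */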
564 static int setup_mem_alloc_mode(struct gzvm *vm)
565 {
566 	int ret;
567 	struct gzvm_enable_cap cap = {0};
568 
569 	cap.cap = GZVM_CAP_ENABLE_DEMAND_PAGING;
570 
571 	ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
572 	if (!ret) {
573 		vm->mem_alloc_mode = GZVM_DEMAND_PAGING;
574 		setup_vm_demand_paging(vm);
575 	} else {
576 		vm->mem_alloc_mode = GZVM_FULLY_POPULATED;
577 	}
578 
579 	return 0;
580 }
581 
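/* Enable GZVM_CAP_ENABLE_IDLE if the hypervisor supports it. */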
582 static int enable_idle_support(struct gzvm *vm)
583 {
584 	int ret;
585 	struct gzvm_enable_cap cap = {0};
586 
587 	cap.cap = GZVM_CAP_ENABLE_IDLE;
588 	ret = gzvm_vm_ioctl_enable_cap(vm, &cap, NULL);
589 	if (ret)
590 		pr_info("Hypervisor doesn't support idle\n");
591 	return ret;
592 }
593 
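/*
 * gzvm_create_vm() - Allocate and initialize a struct gzvm: create the VM in
 * the hypervisor, set up locks, the reference count, irqfd/ioeventfd, the
 * memory allocation mode, the per-VM debugfs directory and idle support, and
 * add the VM to the global list.
 */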
594 static struct gzvm *gzvm_create_vm(struct gzvm_driver *drv, unsigned long vm_type)
595 {
596 	int ret;
597 	struct gzvm *gzvm;
598 
599 	gzvm = kzalloc(sizeof(*gzvm), GFP_KERNEL);
600 	if (!gzvm)
601 		return ERR_PTR(-ENOMEM);
602 
603 	ret = gzvm_arch_create_vm(vm_type);
604 	if (ret < 0) {
605 		kfree(gzvm);
606 		return ERR_PTR(ret);
607 	}
608 
609 	gzvm->gzvm_drv = drv;
610 	gzvm->vm_id = ret;
611 	gzvm->mm = current->mm;
612 	mutex_init(&gzvm->lock);
613 	mutex_init(&gzvm->mem_lock);
614 	gzvm->pinned_pages = RB_ROOT;
615 
616 	kref_init(&gzvm->kref);
617 
618 	ret = gzvm_vm_irqfd_init(gzvm);
619 	if (ret) {
620 		pr_err("Failed to initialize irqfd\n");
621 		kfree(gzvm);
622 		return ERR_PTR(ret);
623 	}
624 
625 	ret = gzvm_init_ioeventfd(gzvm);
626 	if (ret) {
627 		pr_err("Failed to initialize ioeventfd\n");
628 		kfree(gzvm);
629 		return ERR_PTR(ret);
630 	}
631 
632 	setup_mem_alloc_mode(gzvm);
633 
634 	mutex_lock(&gzvm_list_lock);
635 	list_add(&gzvm->vm_list, &gzvm_list);
636 	mutex_unlock(&gzvm_list_lock);
637 
638 	ret = gzvm_create_vm_debugfs(gzvm);
639 	if (ret)
640 		pr_debug("Failed to create debugfs for VM-%u\n", gzvm->vm_id);
641 
642 	pr_debug("VM-%u is created\n", gzvm->vm_id);
643 
644 	enable_idle_support(gzvm);
645 
646 	return gzvm;
647 }
648 
649 /**
650  * gzvm_dev_ioctl_create_vm() - Create vm fd
651  * @drv: GenieZone driver info to be stored in struct gzvm for future usage
652  * @vm_type: VM type. Only supports Linux VM now
653  *
654  * Return: fd of vm, negative if error
655  */
656 int gzvm_dev_ioctl_create_vm(struct gzvm_driver *drv, unsigned long vm_type)
657 {
658 	struct gzvm *gzvm;
659 
660 	gzvm = gzvm_create_vm(drv, vm_type);
661 	if (IS_ERR(gzvm))
662 		return PTR_ERR(gzvm);
663 
664 	return anon_inode_getfd("gzvm-vm", &gzvm_vm_fops, gzvm,
665 			       O_RDWR | O_CLOEXEC);
666 }
667