• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ACRN Hypervisor Service Module (HSM)
4  *
5  * Copyright (C) 2020 Intel Corporation. All rights reserved.
6  *
7  * Authors:
8  *	Fengwei Yin <fengwei.yin@intel.com>
9  *	Yakui Zhao <yakui.zhao@intel.com>
10  */
11 
12 #include <linux/cpu.h>
13 #include <linux/io.h>
14 #include <linux/mm.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 
18 #include <asm/acrn.h>
19 #include <asm/hypervisor.h>
20 
21 #include "acrn_drv.h"
22 
23 /*
24  * When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
25  * represent a VM instance and continues to be associated with the opened file
26  * descriptor. All ioctl operations on this file descriptor will be targeted to
27  * the VM instance. Release of this file descriptor will destroy the object.
28  */
acrn_dev_open(struct inode * inode,struct file * filp)29 static int acrn_dev_open(struct inode *inode, struct file *filp)
30 {
31 	struct acrn_vm *vm;
32 
33 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
34 	if (!vm)
35 		return -ENOMEM;
36 
37 	vm->vmid = ACRN_INVALID_VMID;
38 	filp->private_data = vm;
39 	return 0;
40 }
41 
/*
 * Fetch one power-management state object of @size bytes from the
 * hypervisor and copy it to userspace.
 *
 * A kernel bounce buffer is allocated because hcall_get_cpu_state()
 * takes a physical address; userspace memory cannot be handed to the
 * hypervisor directly.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT if the
 * copy-out fails, or a negative value from the hypercall.
 */
static int pmcmd_get_state(u64 cmd, void __user *uptr, size_t size)
{
	void *buf;
	int ret;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hcall_get_cpu_state(cmd, virt_to_phys(buf));
	if (ret < 0)
		goto out;

	if (copy_to_user(uptr, buf, size))
		ret = -EFAULT;
out:
	kfree(buf);
	return ret;
}

/*
 * Handle ACRN_IOCTL_PM_GET_CPU_STATE sub-commands.
 *
 * The command type (masked with PMCMD_TYPE_MASK) selects the size of
 * the object the hypervisor returns: a bare u64 count for the *_CNT
 * queries, or a full Px/Cx state structure for the *_DATA queries.
 * The common allocate/hypercall/copy-out/free sequence lives in
 * pmcmd_get_state().
 *
 * Return: 0 on success (and for unknown sub-commands, matching the
 * original behavior), negative errno otherwise.
 */
static int pmcmd_ioctl(u64 cmd, void __user *uptr)
{
	int ret = 0;

	switch (cmd & PMCMD_TYPE_MASK) {
	case ACRN_PMCMD_GET_PX_CNT:
	case ACRN_PMCMD_GET_CX_CNT:
		ret = pmcmd_get_state(cmd, uptr, sizeof(u64));
		break;
	case ACRN_PMCMD_GET_PX_DATA:
		ret = pmcmd_get_state(cmd, uptr,
				      sizeof(struct acrn_pstate_data));
		break;
	case ACRN_PMCMD_GET_CX_DATA:
		ret = pmcmd_get_state(cmd, uptr,
				      sizeof(struct acrn_cstate_data));
		break;
	default:
		break;
	}

	return ret;
}
102 
/*
 * HSM relies on hypercall layer of the ACRN hypervisor to do the
 * sanity check against the input parameters.
 *
 * ioctl dispatcher for /dev/acrn_hsm.  Except for ACRN_IOCTL_CREATE_VM,
 * every command requires that a VM has already been created on this file
 * descriptor (vm->vmid != ACRN_INVALID_VMID).
 *
 * Buffers passed to hypercalls are bounced through kernel memory
 * (memdup_user() or a pinned page) because the hypervisor is given
 * physical addresses; userspace memory cannot be handed over directly.
 */
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long ioctl_param)
{
	struct acrn_vm *vm = filp->private_data;
	struct acrn_vm_creation *vm_param;
	struct acrn_vcpu_regs *cpu_regs;
	struct acrn_ioreq_notify notify;
	struct acrn_ptdev_irq *irq_info;
	struct acrn_ioeventfd ioeventfd;
	struct acrn_vm_memmap memmap;
	struct acrn_msi_entry *msi;
	struct acrn_pcidev *pcidev;
	struct acrn_irqfd irqfd;
	struct page *page;
	u64 cstate_cmd;
	int i, ret = 0;

	/* Only VM creation is allowed before a VM is bound to this fd. */
	if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
		dev_dbg(acrn_dev.this_device,
			"ioctl 0x%x: Invalid VM state!\n", cmd);
		return -EINVAL;
	}

	switch (cmd) {
	case ACRN_IOCTL_CREATE_VM:
		vm_param = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vm_creation));
		if (IS_ERR(vm_param))
			return PTR_ERR(vm_param);

		/* Reserved fields must be zero so they can be reused later. */
		if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
			kfree(vm_param);
			return -EINVAL;
		}

		vm = acrn_vm_create(vm, vm_param);
		if (!vm) {
			ret = -EINVAL;
			kfree(vm_param);
			break;
		}

		/*
		 * Copy the creation info back so userspace sees the fields
		 * filled in during creation; undo the creation if that
		 * copy-out fails.
		 */
		if (copy_to_user((void __user *)ioctl_param, vm_param,
				 sizeof(struct acrn_vm_creation))) {
			acrn_vm_destroy(vm);
			ret = -EFAULT;
		}

		kfree(vm_param);
		break;
	case ACRN_IOCTL_START_VM:
		ret = hcall_start_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to start VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_PAUSE_VM:
		ret = hcall_pause_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to pause VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_RESET_VM:
		ret = hcall_reset_vm(vm->vmid);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to restart VM %u!\n", vm->vmid);
		break;
	case ACRN_IOCTL_DESTROY_VM:
		ret = acrn_vm_destroy(vm);
		break;
	case ACRN_IOCTL_SET_VCPU_REGS:
		cpu_regs = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_vcpu_regs));
		if (IS_ERR(cpu_regs))
			return PTR_ERR(cpu_regs);

		/* All reserved fields must be zero before the hypercall. */
		for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
			if (cpu_regs->reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
			if (cpu_regs->vcpu_regs.reserved_32[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
			if (cpu_regs->vcpu_regs.reserved_64[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
			if (cpu_regs->vcpu_regs.gdt.reserved[i] |
			    cpu_regs->vcpu_regs.idt.reserved[i]) {
				kfree(cpu_regs);
				return -EINVAL;
			}

		ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set regs state of VM%u!\n",
				vm->vmid);
		kfree(cpu_regs);
		break;
	case ACRN_IOCTL_SET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_map(vm, &memmap);
		break;
	case ACRN_IOCTL_UNSET_MEMSEG:
		if (copy_from_user(&memmap, (void __user *)ioctl_param,
				   sizeof(memmap)))
			return -EFAULT;

		ret = acrn_vm_memseg_unmap(vm, &memmap);
		break;
	case ACRN_IOCTL_ASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to assign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_DEASSIGN_PCIDEV:
		pcidev = memdup_user((void __user *)ioctl_param,
				     sizeof(struct acrn_pcidev));
		if (IS_ERR(pcidev))
			return PTR_ERR(pcidev);

		ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to deassign pci device!\n");
		kfree(pcidev);
		break;
	case ACRN_IOCTL_SET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to configure intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_RESET_PTDEV_INTR:
		irq_info = memdup_user((void __user *)ioctl_param,
				       sizeof(struct acrn_ptdev_irq));
		if (IS_ERR(irq_info))
			return PTR_ERR(irq_info);

		ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to reset intr for ptdev!\n");
		kfree(irq_info);
		break;
	case ACRN_IOCTL_SET_IRQLINE:
		/* ioctl_param itself encodes the irqline operation here. */
		ret = hcall_set_irqline(vm->vmid, ioctl_param);
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to set interrupt line!\n");
		break;
	case ACRN_IOCTL_INJECT_MSI:
		msi = memdup_user((void __user *)ioctl_param,
				  sizeof(struct acrn_msi_entry));
		if (IS_ERR(msi))
			return PTR_ERR(msi);

		ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
		if (ret < 0)
			dev_dbg(acrn_dev.this_device,
				"Failed to inject MSI!\n");
		kfree(msi);
		break;
	case ACRN_IOCTL_VM_INTR_MONITOR:
		/*
		 * Pin one user page long-term as the interrupt-monitoring
		 * buffer shared with the hypervisor.  A previously pinned
		 * page, if any, is released after the new one is accepted.
		 */
		ret = pin_user_pages_fast(ioctl_param, 1,
					  FOLL_WRITE | FOLL_LONGTERM, &page);
		if (unlikely(ret != 1)) {
			dev_dbg(acrn_dev.this_device,
				"Failed to pin intr hdr buffer!\n");
			return -EFAULT;
		}

		ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
		if (ret < 0) {
			unpin_user_page(page);
			dev_dbg(acrn_dev.this_device,
				"Failed to monitor intr data!\n");
			return ret;
		}
		if (vm->monitor_page)
			unpin_user_page(vm->monitor_page);
		vm->monitor_page = page;
		break;
	case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
		/* Only one default (fallback) I/O request client per VM. */
		if (vm->default_client)
			return -EEXIST;
		if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
			ret = -EINVAL;
		break;
	case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
		if (vm->default_client)
			acrn_ioreq_client_destroy(vm->default_client);
		break;
	case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
		if (vm->default_client)
			ret = acrn_ioreq_client_wait(vm->default_client);
		else
			ret = -ENODEV;
		break;
	case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
		if (copy_from_user(&notify, (void __user *)ioctl_param,
				   sizeof(struct acrn_ioreq_notify)))
			return -EFAULT;

		if (notify.reserved != 0)
			return -EINVAL;

		ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
		break;
	case ACRN_IOCTL_CLEAR_VM_IOREQ:
		acrn_ioreq_request_clear(vm);
		break;
	case ACRN_IOCTL_PM_GET_CPU_STATE:
		/* ioctl_param doubles as input command and output buffer. */
		if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
				   sizeof(cstate_cmd)))
			return -EFAULT;

		ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
		break;
	case ACRN_IOCTL_IOEVENTFD:
		if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
				   sizeof(ioeventfd)))
			return -EFAULT;

		if (ioeventfd.reserved != 0)
			return -EINVAL;

		ret = acrn_ioeventfd_config(vm, &ioeventfd);
		break;
	case ACRN_IOCTL_IRQFD:
		if (copy_from_user(&irqfd, (void __user *)ioctl_param,
				   sizeof(irqfd)))
			return -EFAULT;
		ret = acrn_irqfd_config(vm, &irqfd);
		break;
	default:
		dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
		ret = -ENOTTY;
	}

	return ret;
}
375 
acrn_dev_release(struct inode * inode,struct file * filp)376 static int acrn_dev_release(struct inode *inode, struct file *filp)
377 {
378 	struct acrn_vm *vm = filp->private_data;
379 
380 	acrn_vm_destroy(vm);
381 	kfree(vm);
382 	return 0;
383 }
384 
remove_cpu_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)385 static ssize_t remove_cpu_store(struct device *dev,
386 				struct device_attribute *attr,
387 				const char *buf, size_t count)
388 {
389 	u64 cpu, lapicid;
390 	int ret;
391 
392 	if (kstrtoull(buf, 0, &cpu) < 0)
393 		return -EINVAL;
394 
395 	if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
396 		return -EINVAL;
397 
398 	if (cpu_online(cpu))
399 		remove_cpu(cpu);
400 
401 	lapicid = cpu_data(cpu).apicid;
402 	dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
403 	ret = hcall_sos_remove_cpu(lapicid);
404 	if (ret < 0) {
405 		dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
406 		goto fail_remove;
407 	}
408 
409 	return count;
410 
411 fail_remove:
412 	add_cpu(cpu);
413 	return ret;
414 }
415 static DEVICE_ATTR_WO(remove_cpu);
416 
acrn_attr_visible(struct kobject * kobj,struct attribute * a,int n)417 static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
418 {
419        if (a == &dev_attr_remove_cpu.attr)
420                return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;
421 
422        return a->mode;
423 }
424 
/* NULL-terminated list of sysfs attributes exposed by the HSM device. */
static struct attribute *acrn_attrs[] = {
	&dev_attr_remove_cpu.attr,
	NULL
};
429 
/* Attribute group; per-attribute visibility decided by acrn_attr_visible(). */
static struct attribute_group acrn_attr_group = {
	.attrs = acrn_attrs,
	.is_visible = acrn_attr_visible,
};
434 
/* NULL-terminated group list handed to the misc device (.groups). */
static const struct attribute_group *acrn_attr_groups[] = {
	&acrn_attr_group,
	NULL
};
439 
/* File operations backing /dev/acrn_hsm. */
static const struct file_operations acrn_fops = {
	.owner		= THIS_MODULE,
	.open		= acrn_dev_open,
	.release	= acrn_dev_release,
	.unlocked_ioctl = acrn_dev_ioctl,
};
446 
/*
 * The HSM misc device (/dev/acrn_hsm, dynamic minor).  Non-static:
 * this file itself logs through acrn_dev.this_device, and the name is
 * presumably declared in acrn_drv.h for use elsewhere — confirm there.
 */
struct miscdevice acrn_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "acrn_hsm",
	.fops	= &acrn_fops,
	.groups	= acrn_attr_groups,
};
453 
hsm_init(void)454 static int __init hsm_init(void)
455 {
456 	int ret;
457 
458 	if (x86_hyper_type != X86_HYPER_ACRN)
459 		return -ENODEV;
460 
461 	if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
462 		return -EPERM;
463 
464 	ret = misc_register(&acrn_dev);
465 	if (ret) {
466 		pr_err("Create misc dev failed!\n");
467 		return ret;
468 	}
469 
470 	ret = acrn_ioreq_intr_setup();
471 	if (ret) {
472 		pr_err("Setup I/O request handler failed!\n");
473 		misc_deregister(&acrn_dev);
474 		return ret;
475 	}
476 	return 0;
477 }
478 
/* Module exit: tear down in the reverse order of hsm_init(). */
static void __exit hsm_exit(void)
{
	acrn_ioreq_intr_remove();
	misc_deregister(&acrn_dev);
}
module_init(hsm_init);
module_exit(hsm_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");
490