// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google LLC
 * Author: Mostafa Saleh <smostafa@google.com>
 *
 * pKVM enforces mutual distrust between the host kernel and protected
 * VMs (pVMs). One way to provide DMA isolation in this model is to move
 * IOMMU control into the hypervisor and para-virtualize the IOMMU
 * interface for the host and guest kernels, so that neither of them has
 * direct access to the IOMMU programming interface.
 * In the case of device assignment, the host can't map memory for the
 * guest in the IOMMU (as the host is not trusted). Instead, the host
 * kernel attaches the device to a blocking domain when VFIO assigns it
 * to user space, so the device can't issue any DMA; once the guest takes
 * control, it programs the IOMMU through the hypervisor.
 * This looks similar to noiommu, with one main difference: group->type is
 * VFIO_IOMMU, so the group gets attached to a blocking domain.
 */
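
/*
 * A rough sketch of the expected user-space flow (not part of this driver;
 * error handling and API-version checks are omitted, "<group>" stands for
 * the actual group number, and this assumes VFIO_PKVM_IOMMU is exported
 * through the VFIO uapi headers):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/<group>", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_PKVM_IOMMU))
 *		ioctl(container, VFIO_SET_IOMMU, VFIO_PKVM_IOMMU);
 *
 * No DMA mapping ioctls are issued on the container afterwards: mappings
 * are owned by the hypervisor, so this backend rejects them with -ENOTTY.
 */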

#include <linux/module.h>
#include <linux/vfio.h>
#include "vfio.h"

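/*
 * This backend keeps no per-container state: open() only validates the
 * requested IOMMU type, and the NULL returned here is what the core hands
 * back as iommu_data to the remaining callbacks.
 */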
static void *pkvm_iommu_open(unsigned long arg)
{
	if (arg != VFIO_PKVM_IOMMU)
		return ERR_PTR(-EINVAL);

	return NULL;
}

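/* Nothing to free: open() allocated no state. */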
static void pkvm_iommu_release(void *iommu_data)
{
}

static long pkvm_iommu_ioctl(void *iommu_data,
			     unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return arg == VFIO_PKVM_IOMMU;

	return -ENOTTY;
}

static int pkvm_iommu_attach_group(void *iommu_data,
				   struct iommu_group *iommu_group,
				   enum vfio_group_type type)
{
	/*
	 * VFIO already calls iommu_group_claim_dma_owner() which attaches
	 * the group to a blocking domain.
	 */

	return 0;
}

static void pkvm_iommu_detach_group(void *iommu_data,
				    struct iommu_group *iommu_group)
{
	/*
	 * VFIO calls iommu_group_release_dma_owner().
	 */
}

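/*
 * Mark the device as protected: per the model described at the top of this
 * file, DMA mappings for it are set up by the guest through the hypervisor,
 * not by the host.
 */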
static void pkvm_iommu_register_device(void *iommu_data,
				       struct vfio_device *vdev)
{
	vdev->protected = true;
}

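/* No per-device teardown needed. */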
static void pkvm_iommu_unregister_device(void *iommu_data,
					 struct vfio_device *vdev)
{
}

static const struct vfio_iommu_driver_ops pkvm_iommu_ops = {
	.name			= "vfio-pkvm-iommu",
	.owner			= THIS_MODULE,
	.open			= pkvm_iommu_open,
	.release		= pkvm_iommu_release,
	.ioctl			= pkvm_iommu_ioctl,
	.attach_group		= pkvm_iommu_attach_group,
	.detach_group		= pkvm_iommu_detach_group,
	.register_device	= pkvm_iommu_register_device,
	.unregister_device	= pkvm_iommu_unregister_device,
};

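/*
 * Register the backend with the VFIO core so user space can select it via
 * VFIO_SET_IOMMU (see the usage sketch above).
 */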
static int __init pkvm_iommu_init(void)
{
	return vfio_register_iommu_driver(&pkvm_iommu_ops);
}

static void __exit pkvm_iommu_exit(void)
{
	vfio_unregister_iommu_driver(&pkvm_iommu_ops);
}

module_init(pkvm_iommu_init);
module_exit(pkvm_iommu_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("smostafa@google.com");
MODULE_DESCRIPTION("VFIO IOMMU for pKVM pvIOMMU");