// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx)
{
	u32 ioas_id;

	return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
}

int vfio_df_iommufd_bind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;
	struct iommufd_ctx *ictx = df->iommufd;

	lockdep_assert_held(&vdev->dev_set->lock);

	/* Returns 0 to permit device opening under noiommu mode */
	if (vfio_device_is_noiommu(vdev))
		return 0;

	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
}

int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	/* compat noiommu does not need to do ioas attach */
	if (vfio_device_is_noiommu(vdev))
		return 0;

	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		return ret;

	/* The legacy path has no way to return the selected pt_id */
	return vdev->ops->attach_ioas(vdev, &ioas_id);
}

void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev))
		return;

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_ictx(vdev->iommufd_device);
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);

static int vfio_iommufd_device_id(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_id(vdev->iommufd_device);
	return -EINVAL;
}

/*
 * Return devid for a device:
 *   valid ID = the device is owned by the ictx
 *   -ENOENT = device is owned but there is no ID
 *   -ENODEV or other error = device is not owned
 */
int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	struct iommu_group *group;
	int devid;

	if (vfio_iommufd_device_ictx(vdev) == ictx)
		return vfio_iommufd_device_id(vdev);

	group = iommu_group_get(vdev->dev);
	if (!group)
		return -ENODEV;

	if (iommufd_ctx_has_group(ictx, group))
		devid = -ENOENT;
	else
		devid = -ENODEV;

	iommu_group_put(group);

	return devid;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);
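
/*
 * Example of how a caller can interpret vfio_iommufd_get_dev_id(). This
 * is an illustrative sketch only, not a real in-tree caller:
 *
 *	devid = vfio_iommufd_get_dev_id(vdev, ictx);
 *	if (devid >= 0)
 *		...			device owned by ictx, valid devid
 *	else if (devid == -ENOENT)
 *		...			owned via its group, but no devid
 *	else
 *		...			-ENODEV etc: not owned by ictx
 */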

/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev().
 * Drivers using this ops set should call vfio_register_group_dev().
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	if (vdev->iommufd_attached)
		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
	else
		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);

void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
		return;

	iommufd_device_detach(vdev->iommufd_device);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);
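
/*
 * Example of wiring the physical helpers into a driver's vfio_device_ops.
 * A minimal sketch; the "my_" names are hypothetical and the ops set is
 * trimmed to the iommufd callbacks:
 *
 *	static const struct vfio_device_ops my_physical_ops = {
 *		.name		= "my-physical-driver",
 *		.bind_iommufd	= vfio_iommufd_physical_bind,
 *		.unbind_iommufd	= vfio_iommufd_physical_unbind,
 *		.attach_ioas	= vfio_iommufd_physical_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_physical_detach_ioas,
 *		... open_device/close_device/ioctl/etc ...
 *	};
 *
 * Such a driver then registers with vfio_register_group_dev().
 */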

/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using
 * this ops set should call vfio_register_emulated_iommu_dev(). Drivers
 * that do not call vfio_pin_pages()/vfio_dma_rw() have no need to provide
 * dma_unmap.
 */

static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	if (vdev->ops->dma_unmap)
		vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached)
		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
	else
		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);

void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_access) || !vdev->iommufd_attached)
		return;

	iommufd_access_detach(vdev->iommufd_access);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);
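
/*
 * Example of wiring the emulated helpers into an mdev-style driver's
 * vfio_device_ops. Because such a driver calls vfio_pin_pages(), it also
 * supplies dma_unmap. A minimal sketch; the "my_" names are hypothetical:
 *
 *	static void my_dma_unmap(struct vfio_device *vdev,
 *				 u64 iova, u64 length)
 *	{
 *		... unpin any pages pinned in [iova, iova + length) ...
 *	}
 *
 *	static const struct vfio_device_ops my_emulated_ops = {
 *		.name		= "my-emulated-driver",
 *		.bind_iommufd	= vfio_iommufd_emulated_bind,
 *		.unbind_iommufd	= vfio_iommufd_emulated_unbind,
 *		.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_emulated_detach_ioas,
 *		.dma_unmap	= my_dma_unmap,
 *	};
 *
 * Such a driver registers with vfio_register_emulated_iommu_dev() rather
 * than vfio_register_group_dev().
 */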