// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include "vfio.h"

#ifdef CONFIG_SPAPR_TCE_IOMMU
#include <asm/kvm_ppc.h>
#endif

struct kvm_vfio_group {
	struct list_head node;
	struct vfio_group *vfio_group;
};

struct kvm_vfio {
	struct list_head group_list;
	struct mutex lock;
	bool noncoherent;
};

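/*
 * The vfio module may not be loaded (or may be unloaded) while kvm is
 * running, so each call into vfio below resolves the symbol at runtime
 * with symbol_get()/symbol_put() rather than taking a hard module
 * dependency.
 */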
static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *vfio_group;
	struct vfio_group *(*fn)(struct file *);

	fn = symbol_get(vfio_group_get_external_user);
	if (!fn)
		return ERR_PTR(-EINVAL);

	vfio_group = fn(filep);

	symbol_put(vfio_group_get_external_user);

	return vfio_group;
}

static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
					       struct file *filep)
{
	bool ret, (*fn)(struct vfio_group *, struct file *);

	fn = symbol_get(vfio_external_group_match_file);
	if (!fn)
		return false;

	ret = fn(group, filep);

	symbol_put(vfio_external_group_match_file);

	return ret;
}

static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
	void (*fn)(struct vfio_group *);

	fn = symbol_get(vfio_group_put_external_user);
	if (!fn)
		return;

	fn(vfio_group);

	symbol_put(vfio_group_put_external_user);
}

static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	void (*fn)(struct vfio_group *, struct kvm *);

	fn = symbol_get(vfio_group_set_kvm);
	if (!fn)
		return;

	fn(group, kvm);

	symbol_put(vfio_group_set_kvm);
}

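/*
 * Returns true only when the vfio IOMMU backing this group enforces DMA
 * cache coherency (the VFIO_DMA_CC_IOMMU extension); an unavailable vfio
 * module is treated as noncoherent.
 */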
static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
{
	long (*fn)(struct vfio_group *, unsigned long);
	long ret;

	fn = symbol_get(vfio_external_check_extension);
	if (!fn)
		return false;

	ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);

	symbol_put(vfio_external_check_extension);

	return ret > 0;
}

#ifdef CONFIG_SPAPR_TCE_IOMMU
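/*
 * Helpers to translate a vfio group into the iommu_group that the sPAPR
 * TCE code attaches to and releases from this KVM instance.
 */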
static int kvm_vfio_external_user_iommu_id(struct vfio_group *vfio_group)
{
	int (*fn)(struct vfio_group *);
	int ret = -EINVAL;

	fn = symbol_get(vfio_external_user_iommu_id);
	if (!fn)
		return ret;

	ret = fn(vfio_group);

	symbol_put(vfio_external_user_iommu_id);

	return ret;
}

static struct iommu_group *kvm_vfio_group_get_iommu_group(
		struct vfio_group *group)
{
	int group_id = kvm_vfio_external_user_iommu_id(group);

	if (group_id < 0)
		return NULL;

	return iommu_group_get_by_id(group_id);
}

static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
					     struct vfio_group *vfio_group)
{
	struct iommu_group *grp = kvm_vfio_group_get_iommu_group(vfio_group);

	if (WARN_ON_ONCE(!grp))
		return;

	kvm_spapr_tce_release_iommu_group(kvm, grp);
	iommu_group_put(grp);
}
#endif

/*
 * Groups can use the same or different IOMMU domains. If the same then
 * adding a new group may change the coherency of groups we've previously
 * been told about. We don't want to care about any of that so we retest
 * each group and bail as soon as we find one that's noncoherent. This
 * means we only ever [un]register_noncoherent_dma once for the whole device.
 */
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	bool noncoherent = false;
	struct kvm_vfio_group *kvg;

	mutex_lock(&kv->lock);

	list_for_each_entry(kvg, &kv->group_list, node) {
		if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
			noncoherent = true;
			break;
		}
	}

	if (noncoherent != kv->noncoherent) {
		kv->noncoherent = noncoherent;

		if (kv->noncoherent)
			kvm_arch_register_noncoherent_dma(dev->kvm);
		else
			kvm_arch_unregister_noncoherent_dma(dev->kvm);
	}

	mutex_unlock(&kv->lock);
}

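/*
 * Handler for the KVM_DEV_VFIO_GROUP attribute group. Illustrative
 * userspace flow (a sketch, not part of this file):
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_VFIO_GROUP,
 *		.attr  = KVM_DEV_VFIO_GROUP_ADD,
 *		.addr  = (__u64)(unsigned long)&group_fd,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */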
static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
{
	struct kvm_vfio *kv = dev->private;
	struct vfio_group *vfio_group;
	struct kvm_vfio_group *kvg;
	int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
	struct fd f;
	int32_t fd;
	int ret;

	switch (attr) {
	case KVM_DEV_VFIO_GROUP_ADD:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group == vfio_group) {
				mutex_unlock(&kv->lock);
				kvm_vfio_group_put_external_user(vfio_group);
				return -EEXIST;
			}
		}

		kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
		if (!kvg) {
			mutex_unlock(&kv->lock);
			kvm_vfio_group_put_external_user(vfio_group);
			return -ENOMEM;
		}

		list_add_tail(&kvg->node, &kv->group_list);
		kvg->vfio_group = vfio_group;

		kvm_arch_start_assignment(dev->kvm);

		mutex_unlock(&kv->lock);

		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);

		kvm_vfio_update_coherency(dev);

		return 0;

	case KVM_DEV_VFIO_GROUP_DEL:
		if (get_user(fd, argp))
			return -EFAULT;

		f = fdget(fd);
		if (!f.file)
			return -EBADF;

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
								f.file))
				continue;

			list_del(&kvg->node);
			kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
			kvm_spapr_tce_release_vfio_group(dev->kvm,
							 kvg->vfio_group);
#endif
			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
			kvm_vfio_group_put_external_user(kvg->vfio_group);
			kfree(kvg);
			ret = 0;
			break;
		}

		mutex_unlock(&kv->lock);

		fdput(f);

		kvm_vfio_update_coherency(dev);

		return ret;

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: {
		struct kvm_vfio_spapr_tce param;
		struct kvm_vfio *kv = dev->private;
		struct vfio_group *vfio_group;
		struct kvm_vfio_group *kvg;
		struct fd f;
		struct iommu_group *grp;

		if (copy_from_user(&param, (void __user *)arg,
				   sizeof(struct kvm_vfio_spapr_tce)))
			return -EFAULT;

		f = fdget(param.groupfd);
		if (!f.file)
			return -EBADF;

		vfio_group = kvm_vfio_group_get_external_user(f.file);
		fdput(f);

		if (IS_ERR(vfio_group))
			return PTR_ERR(vfio_group);

		grp = kvm_vfio_group_get_iommu_group(vfio_group);
		if (WARN_ON_ONCE(!grp)) {
			kvm_vfio_group_put_external_user(vfio_group);
			return -EIO;
		}

		ret = -ENOENT;

		mutex_lock(&kv->lock);

		list_for_each_entry(kvg, &kv->group_list, node) {
			if (kvg->vfio_group != vfio_group)
				continue;

			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
							       param.tablefd, grp);
			break;
		}

		mutex_unlock(&kv->lock);

		iommu_group_put(grp);
		kvm_vfio_group_put_external_user(vfio_group);

		return ret;
	}
#endif /* CONFIG_SPAPR_TCE_IOMMU */
	}

	return -ENXIO;
}

static int kvm_vfio_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		return kvm_vfio_set_group(dev, attr->attr, attr->addr);
	}

	return -ENXIO;
}

static int kvm_vfio_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_VFIO_GROUP:
		switch (attr->attr) {
		case KVM_DEV_VFIO_GROUP_ADD:
		case KVM_DEV_VFIO_GROUP_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
		case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
			return 0;
		}

		break;
	}

	return -ENXIO;
}

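/*
 * Device teardown: drop every group reference we hold, detach the groups
 * from this kvm, and balance kvm_arch_start_assignment().
 */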
static void kvm_vfio_destroy(struct kvm_device *dev)
{
	struct kvm_vfio *kv = dev->private;
	struct kvm_vfio_group *kvg, *tmp;

	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
#endif
		kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
		kvm_vfio_group_put_external_user(kvg->vfio_group);
		list_del(&kvg->node);
		kfree(kvg);
		kvm_arch_end_assignment(dev->kvm);
	}

	kvm_vfio_update_coherency(dev);

	kfree(kv);
	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};

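/* Called for KVM_CREATE_DEVICE with type KVM_DEV_TYPE_VFIO. */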
static int kvm_vfio_create(struct kvm_device *dev, u32 type)
{
	struct kvm_device *tmp;
	struct kvm_vfio *kv;

	/* Only one VFIO "device" per VM */
	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
		if (tmp->ops == &kvm_vfio_ops)
			return -EBUSY;

	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
	if (!kv)
		return -ENOMEM;

	INIT_LIST_HEAD(&kv->group_list);
	mutex_init(&kv->lock);

	dev->private = kv;

	return 0;
}

int kvm_vfio_ops_init(void)
{
	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
}

void kvm_vfio_ops_exit(void)
{
	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
}