// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>

#include "vfio_ccw_private.h"

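/*
 * Reset the device: quiesce any running I/O and re-enable the
 * subchannel. On success the device is put back into the IDLE state.
 * Used by the VFIO_DEVICE_RESET ioctl and by the DMA unmap notifier.
 */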
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions that need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}

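/*
 * IOMMU notifier callback: if a DMA unmap affects an IOVA currently
 * pinned by the channel program, reset the device so the pinned pages
 * can be released via cp_free().
 */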
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

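/*
 * sysfs attributes for the supported mdev type: a human-readable name,
 * the device API string, and the number of instances still available.
 */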
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

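/*
 * Create an mdev for the subchannel. Only one instance per subchannel
 * is possible; this is enforced via the 'avail' counter.
 */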
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: create\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	return 0;
}

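/*
 * Tear down the mdev: quiesce the subchannel if needed, free any
 * channel program resources and make the instance available again.
 */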
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
			   mdev_uuid(mdev), private->sch->schid.cssid,
			   private->sch->schid.ssid,
			   private->sch->schid.sch_no);

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_sch_quiesce(private->sch))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}

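/*
 * Open callback: register the IOMMU notifier for DMA unmap events and
 * set up the optional capability regions (async, schib, crw).
 */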
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	int ret;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				     &events, &private->nb);
	if (ret)
		return ret;

	ret = vfio_ccw_register_async_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_schib_dev_regions(private);
	if (ret)
		goto out_unregister;

	ret = vfio_ccw_register_crw_dev_regions(private);
	if (ret)
		goto out_unregister;

	return ret;

out_unregister:
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
	return ret;
}

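/*
 * Release callback: reset the device if necessary, then drop the
 * channel program, the capability regions and the IOMMU notifier.
 */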
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	cp_free(&private->cp);
	vfio_ccw_unregister_dev_regions(private);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}

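/*
 * Read from the I/O region, serialized against concurrent I/O requests
 * by io_mutex.
 */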
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
					    char __user *buf, size_t count,
					    loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->read(private, buf, count,
							ppos);
	}

	return -EINVAL;
}

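/*
 * Write to the I/O region and kick off the FSM I/O request. io_mutex
 * is taken with trylock so that userspace gets -EAGAIN instead of
 * blocking while a request is already being processed.
 */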
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
		return -EINVAL;

	switch (index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
	default:
		index -= VFIO_CCW_NUM_REGIONS;
		return private->region[index].ops->write(private, buf, count,
							 ppos);
	}

	return -EINVAL;
}

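/* Fill in the device info for VFIO_DEVICE_GET_INFO. */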
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
					 struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

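/*
 * Fill in the region info for VFIO_DEVICE_GET_REGION_INFO. The config
 * region is reported directly; all other regions are described through
 * the capability chain.
 */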
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);

	}
	}
	return 0;
}

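/* Report one eventfd-capable interrupt per supported index. */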
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	switch (info->index) {
	case VFIO_CCW_IO_IRQ_INDEX:
	case VFIO_CCW_CRW_IRQ_INDEX:
		info->count = 1;
		info->flags = VFIO_IRQ_INFO_EVENTFD;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

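/*
 * Wire up or trigger the I/O or CRW eventfd, depending on the data
 * type passed with VFIO_DEVICE_SET_IRQS. Only the TRIGGER action is
 * supported.
 */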
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  uint32_t index,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));

	switch (index) {
	case VFIO_CCW_IO_IRQ_INDEX:
		ctx = &private->io_trigger;
		break;
	case VFIO_CCW_CRW_IRQ_INDEX:
		ctx = &private->crw_trigger;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

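/*
 * Register an additional capability region with the given subtype,
 * region ops and size, growing the region array by one entry.
 */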
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;
	private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
	private->region[private->num_regions].subtype = subtype;
	private->region[private->num_regions].ops = ops;
	private->region[private->num_regions].size = size;
	private->region[private->num_regions].flags = flags;
	private->region[private->num_regions].data = data;

	private->num_regions++;

	return 0;
}

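/* Release all registered capability regions and free the array. */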
void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
	int i;

	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);
	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}

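/*
 * ioctl dispatcher for the mdev: device/region/irq info queries,
 * VFIO_DEVICE_SET_IRQS and VFIO_DEVICE_RESET.
 */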
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}

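/* mdev callbacks wired up for registration with the mdev core. */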
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}