/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_private.h"

/*
 * INTx
 */
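/*
 * Signal the INTx trigger eventfd to notify userspace, unless the
 * virtual interrupt is disabled (virq_disabled) or the device is no
 * longer in INTx mode.
 */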
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

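/*
 * Mask INTx for the device.  PCI 2.3 compliant devices use the
 * DisINTx command register bit; older devices fall back to masking
 * at the IRQ line with disable_irq_nosync().
 */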
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using INTx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

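/*
 * Unmask INTx and, if an interrupt was pending while masked, re-send
 * it to the user via the trigger eventfd.
 */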
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

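/*
 * Physical interrupt handler.  Mask the line before signaling the
 * eventfd so a level-triggered interrupt can't re-fire until userspace
 * unmasks it.  On shared (PCI 2.3) interrupts, only claim the IRQ if
 * this device actually asserted it.
 */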
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

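/*
 * Transition the device from no interrupt to INTx mode.  This only
 * allocates the interrupt context and syncs the hardware mask state;
 * the IRQ itself is requested later by vfio_intx_set_signal().
 */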
static int vfio_intx_enable(struct vfio_pci_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here; non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}

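/*
 * Connect (or, for fd < 0, disconnect) the eventfd that userspace
 * receives INTx notifications through, tearing down any previous
 * trigger first.  Non-DisINTx devices can't share the IRQ, since we
 * have no way to mask only our own device on the line.
 */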
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

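/*
 * Tear down INTx: release the trigger and any user-registered
 * mask/unmask virqfds, then free the interrupt context.
 */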
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	vfio_intx_set_signal(vdev, -1);
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
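/* Per-vector handler: simply signal the eventfd bound to the vector */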
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

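/*
 * Enable MSI or MSI-X with exactly nvec vectors and allocate a
 * per-vector interrupt context array.  A smaller grant from the PCI
 * core is treated as failure and backed out.
 */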
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	if (msix) {
		int i;

		vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
				     GFP_KERNEL);
		if (!vdev->msix) {
			kfree(vdev->ctx);
			return -ENOMEM;
		}

		for (i = 0; i < nvec; i++)
			vdev->msix[i].entry = i;

		ret = pci_enable_msix_range(pdev, vdev->msix, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msix(pdev);
			kfree(vdev->msix);
			kfree(vdev->ctx);
			return ret;
		}
	} else {
		ret = pci_enable_msi_range(pdev, 1, nvec);
		if (ret < nvec) {
			if (ret > 0)
				pci_disable_msi(pdev);
			kfree(vdev->ctx);
			return ret;
		}
	}

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max MSI vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

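/*
 * Bind (fd >= 0) or unbind (fd < 0) an eventfd trigger to a single
 * MSI/MSI-X vector.  The eventfd context doubles as the dev_id for
 * request_irq() and as the token for the IRQ bypass producer, which
 * lets a bypass consumer such as KVM wire the vector up directly.
 */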
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
	char *name = msix ? "vfio-msix" : "vfio-msi";
	struct eventfd_ctx *trigger;
	int ret;

	if (vector >= vdev->num_ctx)
		return -EINVAL;

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
		free_irq(irq, vdev->ctx[vector].trigger);
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
					   name, vector, pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSI-X vector table resides in device memory which may be
	 * cleared via backdoor resets.  We don't allow direct access to
	 * the vector table, so even if a userspace driver attempts to
	 * save/restore around such a reset it would be unsuccessful.
	 * To avoid this, restore the cached value of the message prior
	 * to enabling.
	 */
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}

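/*
 * Apply an array of trigger eventfds to a contiguous block of
 * vectors, unwinding all vectors already set if one of them fails.
 */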
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

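/*
 * Tear down MSI/MSI-X: unbind every vector's trigger and virqfds,
 * disable the capability on the device, and free the contexts.
 */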
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	if (msix) {
		pci_disable_msix(vdev->pdev);
		kfree(vdev->msix);
	} else
		pci_disable_msi(pdev);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
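/*
 * Backends for VFIO_DEVICE_SET_IRQS; vfio_pci_set_irqs_ioctl() below
 * dispatches to one of these based on index and action.  For UNMASK,
 * DATA_EVENTFD registers a virqfd so userspace can unmask by writing
 * an eventfd rather than calling the ioctl each time.
 */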
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);

		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

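/*
 * TRIGGER on the INTx index does double duty: DATA_EVENTFD enables
 * INTx and/or replaces the trigger, DATA_NONE with count == 0 disables
 * it, and DATA_NONE/DATA_BOOL with count == 1 loops back a test
 * interrupt to the user.
 */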
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

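/*
 * TRIGGER on the MSI/MSI-X indexes follows the same pattern:
 * DATA_EVENTFD enables the capability and binds vectors, DATA_NONE
 * with count == 0 disables it, and DATA_NONE/DATA_BOOL manually fire
 * vectors that already have a trigger.
 */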
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

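/*
 * Common helper for the single-eventfd error and request indexes:
 * DATA_EVENTFD swaps or releases the stored context, while DATA_NONE
 * and DATA_BOOL signal it for loopback testing.
 */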
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

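/*
 * Entry point for VFIO_DEVICE_SET_IRQS: select the handler matching
 * the requested index and action and hand the arguments through.
 */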
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}