// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"

struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}

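/*
 * Per-vector interrupt context lives in the vdev->ctx xarray, indexed by
 * vector number (index 0 for INTx).  The helpers below only wrap the xarray
 * lookup, insertion and removal; entry lifetime is managed by the INTx and
 * MSI/MSI-X enable and disable paths.
 */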
static
struct vfio_pci_irq_ctx *vfio_irq_ctx_get(struct vfio_pci_core_device *vdev,
					  unsigned long index)
{
	return xa_load(&vdev->ctx, index);
}

static void vfio_irq_ctx_free(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_irq_ctx *ctx, unsigned long index)
{
	xa_erase(&vdev->ctx, index);
	kfree(ctx);
}

static struct vfio_pci_irq_ctx *
vfio_irq_ctx_alloc(struct vfio_pci_core_device *vdev, unsigned long index)
{
	struct vfio_pci_irq_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	ret = xa_insert(&vdev->ctx, index, ctx, GFP_KERNEL_ACCOUNT);
	if (ret) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

/*
 * INTx
 */
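/*
 * Signal the user's INTx eventfd, provided INTx is the current interrupt
 * mode and the interrupt is not virtually disabled.  The READ_ONCE() of the
 * trigger pairs with the WRITE_ONCE() in vfio_intx_set_signal() so a
 * concurrent trigger swap is observed safely without taking a lock here.
 */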
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct vfio_pci_irq_ctx *ctx;
		struct eventfd_ctx *trigger;

		ctx = vfio_irq_ctx_get(vdev, 0);
		if (WARN_ON_ONCE(!ctx))
			return;

		trigger = READ_ONCE(ctx->trigger);
		if (likely(trigger))
			eventfd_signal(trigger, 1);
	}
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	bool masked_changed = false;

	lockdep_assert_held(&vdev->igate);

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable.  The latter means this can get called
	 * even when not using intx delivery.  In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (!ctx->masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		ctx->masked = true;
		masked_changed = true;
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);
	return masked_changed;
}

bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	bool mask_changed;

	mutex_lock(&vdev->igate);
	mask_changed = __vfio_pci_intx_mask(vdev);
	mutex_unlock(&vdev->igate);

	return mask_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
		goto out_unlock;
	}

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		goto out_unlock;

	if (ctx->masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		ctx->masked = (ret > 0);
	}

out_unlock:
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	lockdep_assert_held(&vdev->igate);

	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	mutex_lock(&vdev->igate);
	__vfio_pci_intx_unmask(vdev);
	mutex_unlock(&vdev->igate);
}

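/*
 * Hard IRQ handler for INTx.  Devices without DisINTx support
 * (!vdev->pci_2_3) own the line exclusively, so mask at the IRQ chip and
 * claim the interrupt.  PCI 2.3 capable devices may share the line; only
 * claim it if the device is actually asserting INTx, masking via the
 * command register DisINTx bit.  A claimed interrupt is forwarded to the
 * user through the eventfd.
 */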
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long flags;
	int ret = IRQ_NONE;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return ret;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		ctx->masked = true;
		ret = IRQ_HANDLED;
	} else if (!ctx->masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		ctx->masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}

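/*
 * Enable INTx: allocate the vector 0 context, record the user's eventfd
 * trigger, seed the masked state from virq_disabled and register the IRQ
 * handler (shared for PCI 2.3 capable devices, exclusive otherwise).
 */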
static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!pdev->irq)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ctx = vfio_irq_ctx_alloc(vdev, 0);
	if (!ctx) {
		kfree(name);
		return -ENOMEM;
	}

	ctx->name = name;
	ctx->trigger = trigger;

	/*
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup, once running masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	ctx->masked = vdev->virq_disabled;
	if (vdev->pci_2_3) {
		pci_intx(pdev, !ctx->masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, ctx->name, vdev);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vfio_irq_ctx_free(vdev, ctx, 0);
		return ret;
	}

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *old;

	ctx = vfio_irq_ctx_get(vdev, 0);
	if (WARN_ON_ONCE(!ctx))
		return -EINVAL;

	old = ctx->trigger;

	WRITE_ONCE(ctx->trigger, trigger);

	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&ctx->unmask);
		eventfd_ctx_put(old);
	}

	return 0;
}

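/*
 * Tear down INTx in roughly the reverse order of setup: stop the
 * mask/unmask irqfds, free the IRQ (free_irq() waits for a running
 * handler), release the trigger eventfd and finally drop the vector 0
 * context.
 */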
static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;

	ctx = vfio_irq_ctx_get(vdev, 0);
	WARN_ON_ONCE(!ctx);
	if (ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		free_irq(pdev->irq, vdev);
		if (ctx->trigger)
			eventfd_ctx_put(ctx->trigger);
		kfree(ctx->name);
		vfio_irq_ctx_free(vdev, ctx, 0);
	}
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}

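/*
 * Allocate nvec MSI or MSI-X vectors.  If fewer than nvec can be allocated,
 * fail and return the supported count so the caller can retry.  On success
 * record the new irq_type and, for MSI, cache the log2-rounded vector count
 * in msi_qmax, which is presumably consumed by the virtual MSI capability
 * emulation.
 */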
static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}

/*
 * vfio_msi_alloc_irq() returns the Linux IRQ number of an MSI or MSI-X device
 * interrupt vector. If a Linux IRQ number is not available then a new
 * interrupt is allocated if dynamic MSI-X is supported.
 *
 * Where is vfio_msi_free_irq()? Allocated interrupts are maintained,
 * essentially forming a cache that subsequent allocations can draw from.
 * Interrupts are freed using pci_free_irq_vectors() when MSI/MSI-X is
 * disabled.
 */
static int vfio_msi_alloc_irq(struct vfio_pci_core_device *vdev,
			      unsigned int vector, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct msi_map map;
	int irq;
	u16 cmd;

	irq = pci_irq_vector(pdev, vector);
	if (WARN_ON_ONCE(irq == 0))
		return -EINVAL;
	if (irq > 0 || !msix || !vdev->has_dyn_msix)
		return irq;

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	map = pci_msix_alloc_irq_at(pdev, vector, NULL);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	return map.index < 0 ? map.index : map.virq;
}

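/*
 * (Re)configure the signalling eventfd for a single vector.  Any existing
 * registration is torn down first; fd < 0 simply leaves the vector
 * unconfigured.  Otherwise look up or allocate the Linux IRQ, request it
 * with the eventfd as dev_id, and register an irqbypass producer so a
 * bypass consumer (e.g. KVM) may inject the interrupt directly; bypass
 * registration failure is reported but not fatal.
 */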
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      unsigned int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	struct eventfd_ctx *trigger;
	int irq = -EINVAL, ret;
	u16 cmd;

	ctx = vfio_irq_ctx_get(vdev, vector);

	if (ctx) {
		irq_bypass_unregister_producer(&ctx->producer);
		irq = pci_irq_vector(pdev, vector);
		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, ctx->trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		kfree(ctx->name);
		eventfd_ctx_put(ctx->trigger);
		vfio_irq_ctx_free(vdev, ctx, vector);
	}

	if (fd < 0)
		return 0;

	if (irq == -EINVAL) {
		/* Interrupt stays allocated, will be freed at MSI-X disable. */
		irq = vfio_msi_alloc_irq(vdev, vector, msix);
		if (irq < 0)
			return irq;
	}

	ctx = vfio_irq_ctx_alloc(vdev, vector);
	if (!ctx)
		return -ENOMEM;

	ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-msi%s[%d](%s)",
			      msix ? "x" : "", vector, pci_name(pdev));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto out_free_ctx;
	}

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto out_free_name;
	}

	/*
	 * If the vector was previously allocated, refresh the on-device
	 * message data before enabling in case it had been cleared or
	 * corrupted (e.g. due to backdoor resets) since writing.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0, ctx->name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret)
		goto out_put_eventfd_ctx;

	ctx->producer.token = trigger;
	ctx->producer.irq = irq;
	ret = irq_bypass_register_producer(&ctx->producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
		"irq bypass producer (token %p) registration fails: %d\n",
		ctx->producer.token, ret);

		ctx->producer.token = NULL;
	}
	ctx->trigger = trigger;

	return 0;

out_put_eventfd_ctx:
	eventfd_ctx_put(trigger);
out_free_name:
	kfree(ctx->name);
out_free_ctx:
	vfio_irq_ctx_free(vdev, ctx, vector);
	return ret;
}

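/*
 * Apply an array of eventfds to a contiguous block of vectors, unwinding
 * every vector configured so far if any individual vector fails.
 */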
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	unsigned int i, j;
	int ret = 0;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (i = start; i < j; i++)
			vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	return ret;
}

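/*
 * Disable MSI/MSI-X: stop per-vector mask/unmask irqfds, release every
 * configured vector signal, then free the allocated IRQ vectors and
 * restore the command register state.
 */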
static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct vfio_pci_irq_ctx *ctx;
	unsigned long i;
	u16 cmd;

	xa_for_each(&vdev->ctx, i, ctx) {
		vfio_virqfd_disable(&ctx->unmask);
		vfio_virqfd_disable(&ctx->mask);
		vfio_msi_set_vector_signal(vdev, i, -1, msix);
	}

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths.  Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
}

/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			__vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
		int32_t fd = *(int32_t *)data;

		if (WARN_ON_ONCE(!ctx))
			return -EINVAL;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &ctx->unmask, fd);

		vfio_virqfd_disable(&ctx->unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			__vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

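/*
 * VFIO_DEVICE_SET_IRQS trigger backend for MSI and MSI-X.  DATA_EVENTFD
 * enables the interrupt mode on first use and (re)assigns eventfds to the
 * requested vectors; DATA_NONE/DATA_BOOL loop a signal back to already
 * configured vectors; count == 0 with DATA_NONE disables the index.
 */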
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	struct vfio_pci_irq_ctx *ctx;
	unsigned int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index))
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		ctx = vfio_irq_ctx_get(vdev, i);
		if (!ctx)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(ctx->trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(ctx->trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}

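/*
 * Entry point for VFIO_DEVICE_SET_IRQS: pick the handler matching the IRQ
 * index and requested action, then pass through the already copied data
 * payload.
 *
 * A rough sketch of the corresponding userspace call, assuming device_fd
 * is an open VFIO device fd and a single eventfd is to be used as the MSI
 * vector 0 trigger (illustrative only, error handling omitted):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(__s32)];
 *	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *
 *	set->argsz = sizeof(buf);
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(efd));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */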
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}