/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI. We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in the assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue. If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irqfd->consumer.token = (void *)irqfd->eventfd;
	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
	irqfd->consumer.start = kvm_arch_irq_bypass_start;
	ret = irq_bypass_register_consumer(&irqfd->consumer);
	if (ret)
		pr_info("irq bypass consumer (token %p) registration fails: %d\n",
			irqfd->consumer.token, ret);
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

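/*
 * Illustrative sketch, not part of the original file: roughly how userspace
 * drives the KVM_IRQFD path handled above. "vm_fd" is an assumed,
 * already-created VM file descriptor with an in-kernel irqchip, GSI 5 is an
 * arbitrary example, and error handling is omitted (needs <linux/kvm.h>,
 * <sys/eventfd.h>, <sys/ioctl.h>).
 *
 *	int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = {
 *		.fd  = efd,
 *		.gsi = 5,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// assign
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));		// injects an edge on GSI 5
 *						// via irqfd_inject()
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// tear the irqfd down again
 *
 * With KVM_IRQFD_FLAG_RESAMPLE and .resamplefd set, the GSI is instead
 * treated as level-triggered: it stays asserted until the guest EOIs it, at
 * which point irqfd_resampler_ack() de-asserts it and signals the resample
 * eventfd so userspace can re-evaluate the device's line state.
 */
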
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

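/*
 * Illustrative sketch, not part of the original file: roughly how userspace
 * registers an ioeventfd via the KVM_IOEVENTFD ioctl implemented below.
 * "vm_fd" is an assumed, already-created VM file descriptor; the address,
 * length and datamatch values are arbitrary examples and error handling is
 * omitted (needs <linux/kvm.h>, <sys/eventfd.h>, <sys/ioctl.h>).
 *
 *	int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 *	struct kvm_ioeventfd ioefd = {
 *		.addr      = 0xfe003000,	// guest-physical MMIO address
 *		.len       = 4,			// access size to match
 *		.fd        = efd,
 *		.datamatch = 0x1,		// only fire on writes of this value
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioefd);
 *
 *	// A matching guest write is now consumed in the kernel by
 *	// ioeventfd_write(), which signals efd instead of exiting to
 *	// userspace; poll()/read() the eventfd to pick up notifications.
 *
 *	ioefd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioefd);	// unregister
 */
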
struct _ioeventfd {
	struct list_head list;
	u64 addr;
	int length;
	struct eventfd_ctx *eventfd;
	u64 datamatch;
	struct kvm_io_device dev;
	u8 bus_idx;
	bool wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down. We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr = args->addr;
	p->bus_idx = bus_idx;
	p->length = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (kvm->buses[bus_idx])
			kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}