/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest.
 * --------------------------------------------------------------------
 */

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted when the
 * eventfd is signaled.  On acknowledgement through the irq ack
 * notifier, the interrupt is de-asserted and userspace is notified
 * through the resamplefd.  All resamplers on the same gsi are
 * de-asserted together, so we don't need to track the state of each
 * individual user, and they can all share the same irq source ID.
 */
struct _irqfd_resampler {
        struct kvm *kvm;
        /*
         * List of resampling struct _irqfd objects sharing this gsi.
         * RCU list, modified under kvm->irqfds.resampler_lock.
         */
        struct list_head list;
        struct kvm_irq_ack_notifier notifier;
        /*
         * Entry in the list kvm->irqfds.resampler_list, used to share
         * one resampler among all irqfds on the same gsi.  Accessed
         * and modified under kvm->irqfds.resampler_lock.
         */
        struct list_head link;
};

struct _irqfd {
        /* Used for MSI fast-path */
        struct kvm *kvm;
        wait_queue_t wait;
        /* Update side is protected by irqfds.lock */
        struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
        /* Used for level IRQ fast-path */
        int gsi;
        struct work_struct inject;
        /* The resampler used by this irqfd (resampler-only) */
        struct _irqfd_resampler *resampler;
        /* Eventfd notified on resample (resampler-only) */
        struct eventfd_ctx *resamplefd;
        /* Entry in list of irqfds for a resampler (resampler-only) */
        struct list_head resampler_link;
        /* Used for setup/shutdown */
        struct eventfd_ctx *eventfd;
        struct list_head list;
        poll_table pt;
        struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

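/*
 * Inject an interrupt on behalf of an irqfd.  For an edge-triggered
 * (non-resampling) irqfd we pulse the GSI: assert, then immediately
 * de-assert.  For a resampling irqfd we only assert; the line stays
 * asserted until the irq ack notifier de-asserts it.
 */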
static void
irqfd_inject(struct work_struct *work)
{
        struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts
 * from the guest.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct _irqfd_resampler *resampler;
        struct _irqfd *irqfd;

        resampler = container_of(kian, struct _irqfd_resampler, notifier);

        kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        rcu_read_lock();

        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);

        rcu_read_unlock();
}

static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
        struct _irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_rcu();

        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical).
 */
static void
irqfd_shutdown(struct work_struct *work)
{
        struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
        u64 cnt;

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /*
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed.
         */
        flush_work(&irqfd->inject);

        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }

        /*
         * It is now safe to release the object's resources.
         */
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
        return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal.
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled.
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
        unsigned long flags = (unsigned long)key;
        struct kvm_kernel_irq_routing_entry *irq;
        struct kvm *kvm = irqfd->kvm;

        if (flags & POLLIN) {
                rcu_read_lock();
                irq = rcu_dereference(irqfd->irq_entry);
                /* An event has been signaled, inject an interrupt */
                if (irq)
                        kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                    false);
                else
                        schedule_work(&irqfd->inject);
                rcu_read_unlock();
        }

        if (flags & POLLHUP) {
                /* The eventfd is closing, detach from KVM */
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);

                /*
                 * We must check if someone deactivated the irqfd before
                 * we could acquire the irqfds.lock since the item is
                 * deactivated from the KVM side before it is unhooked from
                 * the wait-queue.  If it is already deactivated, we can
                 * simply return knowing the other side will cleanup for us.
                 * We cannot race against the irqfd going away since the
                 * other side is required to acquire wqh->lock, which we hold.
                 */
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);

                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}

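/*
 * Callback supplied to the eventfd's poll method; it hooks our custom
 * wait-queue entry onto the eventfd's wait queue so that irqfd_wakeup()
 * runs whenever the eventfd is signaled or closed.
 */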
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
{
        struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
                         struct kvm_irq_routing_table *irq_rt)
{
        struct kvm_kernel_irq_routing_entry *e;

        if (irqfd->gsi >= irq_rt->nr_rt_entries) {
                rcu_assign_pointer(irqfd->irq_entry, NULL);
                return;
        }

        hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
                /* Only fast-path MSI. */
                if (e->type == KVM_IRQ_ROUTING_MSI)
                        rcu_assign_pointer(irqfd->irq_entry, e);
                else
                        rcu_assign_pointer(irqfd->irq_entry, NULL);
        }
}

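/*
 * Attach an eventfd to a gsi: allocate the irqfd, hook it into the
 * eventfd's wait queue, and (optionally) bind it to a resampler shared
 * by all irqfds on the same gsi.
 */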
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
        struct fd f;
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        int ret;
        unsigned int events;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
                struct _irqfd_resampler *resampler;

                resamplefd = eventfd_ctx_fdget(args->resamplefd);
                if (IS_ERR(resamplefd)) {
                        ret = PTR_ERR(resamplefd);
                        goto fail;
                }

                irqfd->resamplefd = resamplefd;
                INIT_LIST_HEAD(&irqfd->resampler_link);

                mutex_lock(&kvm->irqfds.resampler_lock);

                list_for_each_entry(resampler,
                                    &kvm->irqfds.resampler_list, link) {
                        if (resampler->notifier.gsi == irqfd->gsi) {
                                irqfd->resampler = resampler;
                                break;
                        }
                }

                if (!irqfd->resampler) {
                        resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
                        if (!resampler) {
                                ret = -ENOMEM;
                                mutex_unlock(&kvm->irqfds.resampler_lock);
                                goto fail;
                        }

                        resampler->kvm = kvm;
                        INIT_LIST_HEAD(&resampler->list);
                        resampler->notifier.gsi = irqfd->gsi;
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);

                        list_add(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
                }

                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
                synchronize_rcu();

                mutex_unlock(&kvm->irqfds.resampler_lock);
        }

        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        ret = 0;
        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        irq_rt = rcu_dereference_protected(kvm->irq_routing,
                                           lockdep_is_held(&kvm->irqfds.lock));
        irqfd_update(kvm, irqfd, irq_rt);

        events = f.file->f_op->poll(f.file, &irqfd->pt);

        list_add_tail(&irqfd->list, &kvm->irqfds.items);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered, and trigger it as if we didn't miss it.
         */
        if (events & POLLIN)
                schedule_work(&irqfd->inject);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Do not drop the file until the irqfd is fully initialized,
         * otherwise we might race against the POLLHUP.
         */
        fdput(f);

        return 0;

fail:
        if (irqfd->resampler)
                irqfd_resampler_shutdown(irqfd);

        if (resamplefd && !IS_ERR(resamplefd))
                eventfd_ctx_put(resamplefd);

        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);

out:
        kfree(irqfd);
        return ret;
}
#endif

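/*
 * Initialize the eventfd bookkeeping (irqfd lists/locks and the
 * ioeventfd list) for a newly created VM.
 */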
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
#endif
        INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

/*
 * Shut down any irqfd that matches the supplied fd+gsi pair.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This rcu_assign_pointer is needed for when
                         * another thread calls kvm_irq_routing_update before
                         * we flush the workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).  It is
                         * paired with the synchronize_rcu done by the caller
                         * of that function.
                         */
                        rcu_assign_pointer(irqfd->irq_entry, NULL);
                        irqfd_deactivate(irqfd);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed,
         * so that we guarantee there will not be any more interrupts on
         * this gsi once this deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}

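/*
 * Entry point for the KVM_IRQFD ioctl: validate the flags, then
 * dispatch to assign or deassign.
 */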
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
}
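
/*
 * Minimal userspace sketch of driving this ioctl (illustrative only,
 * not part of this file; assumes a VM fd obtained via KVM_CREATE_VM):
 *
 *      struct kvm_irqfd irqfd = {
 *              .fd  = eventfd(0, 0),
 *              .gsi = 5,
 *      };
 *      ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *      ...
 *      write(irqfd.fd, &(uint64_t){1}, 8);     // injects GSI 5
 */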

/*
 * This function is called as the kvm VM fd is being released.
 * Shut down all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
        struct _irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
        flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_rcu afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm,
                            struct kvm_irq_routing_table *irq_rt)
{
        struct _irqfd *irqfd;

        spin_lock_irq(&kvm->irqfds.lock);

        rcu_assign_pointer(kvm->irq_routing, irq_rt);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list)
                irqfd_update(kvm, irqfd, irq_rt);

        spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * single-thread queue to prevent deadlock against flushing the normal
 * work-queue.
 */
int kvm_irqfd_init(void)
{
        irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

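/*
 * Tear down the cleanup workqueue created by kvm_irqfd_init().
 */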
void kvm_irqfd_exit(void)
{
        destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for
 * receiving notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
        struct list_head list;
        u64 addr;
        int length;
        struct eventfd_ctx *eventfd;
        u64 datamatch;
        struct kvm_io_device dev;
        u8 bus_idx;
        bool wildcard;
};

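/*
 * Map a kvm_io_device back to its containing _ioeventfd.
 */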
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
        return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
        kfree(p);
}

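/*
 * Decide whether a guest write hits this ioeventfd: the address and
 * length must match exactly, and unless the ioeventfd is a wildcard,
 * the written value must equal the registered datamatch.
 */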
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
        u64 _val;

        if (!(addr == p->addr && len == p->length))
                /* address-range must be precise for a hit */
                return false;

        if (p->wildcard)
                /* all else equal, wildcard is always a hit */
                return true;

        /* otherwise, we have to actually compare the data */

        BUG_ON(!IS_ALIGNED((unsigned long)val, len));

        switch (len) {
        case 1:
                _val = *(u8 *)val;
                break;
        case 2:
                _val = *(u16 *)val;
                break;
        case 4:
                _val = *(u32 *)val;
                break;
        case 8:
                _val = *(u64 *)val;
                break;
        default:
                return false;
        }

        return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
                const void *val)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;

        eventfd_signal(p->eventfd, 1);
        return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
        .write      = ioeventfd_write,
        .destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr && _p->length == p->length &&
                    (_p->wildcard || p->wildcard ||
                     _p->datamatch == p->datamatch))
                        return true;

        return false;
}

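/*
 * Select the kvm_io_bus that an ioeventfd should be registered on,
 * based on the KVM_IOEVENTFD flags (PIO, virtio-ccw notify, or MMIO
 * by default).
 */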
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
        if (flags & KVM_IOEVENTFD_FLAG_PIO)
                return KVM_PIO_BUS;
        if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
                return KVM_VIRTIO_CCW_NOTIFY_BUS;
        return KVM_MMIO_BUS;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx;
        struct _ioeventfd *p;
        struct eventfd_ctx *eventfd;
        int ret;

        bus_idx = ioeventfd_bus_from_flags(args->flags);

        /* must be natural-word sized */
        switch (args->len) {
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                return -EINVAL;
        }

        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr = args->addr;
        p->bus_idx = bus_idx;
        p->length = args->len;
        p->eventfd = eventfd;

        /* The datamatch feature is optional, otherwise this is a wildcard */
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm->buses[bus_idx]->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);

fail:
        kfree(p);
        eventfd_ctx_put(eventfd);

        return ret;
}

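/*
 * Remove the ioeventfd that matches the bus, address, length, and
 * (for non-wildcard registrations) datamatch of the request.
 */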
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx;
        struct _ioeventfd *p, *tmp;
        struct eventfd_ctx *eventfd;
        int ret = -ENOENT;

        bus_idx = ioeventfd_bus_from_flags(args->flags);
        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd ||
                    p->addr != args->addr ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                kvm->buses[bus_idx]->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}

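/*
 * Entry point for the KVM_IOEVENTFD ioctl: dispatch to assign or
 * deassign based on the DEASSIGN flag.
 */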
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
}
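
/*
 * Minimal userspace sketch of registering an ioeventfd (illustrative
 * only, not part of this file; the address and value are arbitrary):
 *
 *      struct kvm_ioeventfd args = {
 *              .addr      = 0xc000,            // guest MMIO address
 *              .len       = 4,
 *              .fd        = eventfd(0, 0),
 *              .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *              .datamatch = 0x1,
 *      };
 *      ioctl(vm_fd, KVM_IOEVENTFD, &args);
 *
 * A 4-byte guest write of 0x1 to 0xc000 then signals the eventfd
 * instead of exiting to userspace.
 */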