// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haughton &lt;ghaughton@novell.com&gt;
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQCHIP

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

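/*
 * An ordinary irqfd injects an edge: assert and then immediately de-assert
 * the GSI.  A resampling irqfd models a level-triggered interrupt: it only
 * asserts here, and the line is de-asserted from irqfd_resampler_ack() once
 * the guest acknowledges the interrupt.
 */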
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

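/*
 * Signal every resampler irqfd attached to this GSI.  Callers hold
 * kvm->irq_srcu, so the list walk is safe against concurrent removal.
 */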
static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
{
	struct kvm_kernel_irqfd *irqfd;

	list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
				 srcu_read_lock_held(&resampler->kvm->irq_srcu))
		eventfd_signal(irqfd->resamplefd);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming ACKs.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_resampler_notify(resampler);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);

	if (list_empty(&resampler->list)) {
		list_del_rcu(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		/*
		 * synchronize_srcu_expedited(&kvm->irq_srcu) already called
		 * in kvm_unregister_irq_ack_notifier().
		 */
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	} else {
		synchronize_srcu_expedited(&kvm->irq_srcu);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu_expedited(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources.
	 */
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* Assumes kvm->irqfds.lock is held. */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal.
 *
 * Assumes kvm->irqfds.lock is held.
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled.
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;
	int ret = 0;

	if (flags & EPOLLIN) {
		u64 cnt;
		eventfd_ctx_do_read(irqfd->eventfd, &cnt);

		idx = srcu_read_lock(&kvm->irq_srcu);
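		/*
		 * Pairs with the seqcount write side in irqfd_update():
		 * retry until we read a snapshot of the routing entry that
		 * was not torn by a concurrent update.
		 */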
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));

		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
		ret = 1;
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return ret;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}

bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
				struct kvm_kernel_irq_routing_entry *old,
				struct kvm_kernel_irq_routing_entry *new)
{
	return true;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

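	/*
	 * CLASS(fd, f) takes a reference on the file via fdget() and drops
	 * it automatically when f goes out of scope, which covers every
	 * return path below.
	 */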
	CLASS(fd, f)(args->fd);
	if (fd_empty(f)) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(fd_file(f));
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto out;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu_expedited(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(fd_file(f), &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
					  link, srcu_read_lock_held(&kvm->irq_srcu))
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
				  link, srcu_read_lock_held(&kvm->irq_srcu))
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu_expedited(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

/*
 * Shutdown any irqfd's that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee we will not race with the release path.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

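/*
 * Entry point for the KVM_IRQFD ioctl.  A minimal userspace sketch
 * (assuming vm_fd is a KVM VM fd and efd a fresh eventfd; illustrative
 * only, error handling omitted):
 *
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 5 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *	write(efd, &(uint64_t){ 1 }, 8);	// injects GSI 5
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// tears it down again
 */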
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu_expedited(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
		/* Under irqfds.lock */
		struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
#endif

		irqfd_update(kvm, irqfd);

#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
		if (irqfd->producer &&
		    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

bool kvm_notify_irqfd_resampler(struct kvm *kvm,
				unsigned int irqchip,
				unsigned int pin)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1) {
		list_for_each_entry_srcu(resampler,
					 &kvm->irqfds.resampler_list, link,
					 srcu_read_lock_held(&kvm->irq_srcu)) {
			if (resampler->notifier.gsi == gsi) {
				irqfd_resampler_notify(resampler);
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}
		}
	}
	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking, just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				    enum kvm_bus bus_idx,
				    struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there is not a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);
	kfree(p);

fail:
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p;
	struct eventfd_ctx *eventfd;
	struct kvm_io_bus *bus;
	int ret = -ENOENT;
	bool wildcard;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry(p, &kvm->ioeventfds, list) {
		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);

	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

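/*
 * Entry point for the KVM_IOEVENTFD ioctl.  A minimal userspace sketch
 * (illustrative only; the addr/len values are arbitrary):
 *
 *	struct kvm_ioeventfd ioeventfd = {
 *		.addr      = 0x1000,	// guest-physical MMIO address
 *		.len       = 4,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 0x1,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
 *	// a guest 4-byte write of 0x1 to 0x1000 now signals efd
 */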
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}