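/*
 * Core KVM ARM/arm64 VGIC code: virtual interrupt lookup and reference
 * counting, injection and queueing onto the per-VCPU active/pending
 * lists, and flushing/syncing that state to the hardware list registers.
 */
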
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};
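
/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *         kvm->lpi_list_lock		must be taken with IRQs disabled
 *           vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it with the
 * higher-ranking lock held (see vgic_queue_irq_unlock() for an example).
 *
 * When taking more than one ap_list_lock at the same time, always take
 * the lowest numbered VCPU's ap_list_lock first (as done in
 * vgic_prune_ap_list()), so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */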
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the
		 * IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount for LPIs, so the
 * caller has to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		/* The range check is inclusive, hence the + 1 bound. */
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid <= VGIC_MAX_SPI) {
		intid = array_index_nospec(intid, VGIC_MAX_SPI + 1);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * call vgic_put_irq. The actual freeing is done by vgic_put_irq()
 * itself, once the refcount has dropped to zero under the
 * lpi_list_lock.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	/* Only LPIs are refcounted and dynamically allocated. */
	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	kfree(irq);
}

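/*
 * Propagate the pending state of a hardware-mapped interrupt to the
 * physical interrupt controller.
 */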
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->get_input_level)
		return irq->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should
	 * not be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, positive to
 * sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

		spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to do here */
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    bool (*get_input_level)(int vintid))
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->get_input_level = get_input_level;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}

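/*
 * Link the virtual interrupt @vintid to the physical interrupt @host_irq,
 * so that the active and pending state of the virtual interrupt can be
 * propagated to and from the physical one.
 */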
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid))
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.  Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

retry:
	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}
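
/*
 * The GICv2 and GICv3 list register formats differ; dispatch the LR
 * manipulation helpers to the backend selected at probe time.
 */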
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one LR */
		w = vgic_irq_get_lr_count(irq);
		spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(vcpu);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	WARN_ON(vgic_v4_sync_hwstate(vcpu));

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_flush_hwstate(vcpu));

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);
}

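/*
 * vcpu_load/vcpu_put hooks: switch the GIC CPU interface state in and
 * out when the VCPU is scheduled onto or off a physical CPU.
 */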
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * the good news.
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

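/*
 * Report whether the interrupt identified by @vintid is hardware-mapped
 * and currently marked active in the distributor emulation.
 */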
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}