/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <trace/events/kvm.h>
#include <asm/kvm.h>
#include <kvm/iodev.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

/*
 * How the VGIC emulation works, in broad strokes:
 *
 * - The guest's view of the GIC distributor is kept entirely in software:
 *   the per-IRQ enabled/pending/active/level/config state lives in the
 *   vgic_dist bitmaps and is updated by the MMIO handlers below.
 *
 * - Whenever the distributor state changes, vgic_update_state() recomputes
 *   which VCPUs have pending or active interrupts and records the result
 *   in dist->irq_pending_on_cpu and dist->irq_active_on_cpu.
 *
 * - Before entering the guest, kvm_vgic_flush_hwstate() transfers pending
 *   and active interrupts into the hardware list registers (LRs).
 *
 * - After the guest exits, kvm_vgic_sync_hwstate() inspects the LRs and
 *   the maintenance interrupt status and folds the result back into the
 *   software distributor state.
 */

#include "vgic.h"

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu);
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq);
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;

static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
	vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source);
}

static bool queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq);
}

int kvm_vgic_map_resources(struct kvm *kvm)
{
	return kvm->arch.vgic.vm_ops.map_resources(kvm, vgic);
}

/*
 * struct vgic_bitmap stores the per-IRQ state both as an array of u32
 * "registers" and as a bitmap of unsigned longs. On a 64-bit big-endian
 * system the two views disagree on which u32 inside each long comes
 * first, so register accesses must XOR the word index with
 * REG_OFFSET_SWIZZLE.
 */
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif

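/*
 * One unsigned long per VCPU is enough to hold the 32 private interrupts;
 * the shared (SPI) bits follow directly after the private banks.
 */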
static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs;

	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
	if (!b->private)
		return -ENOMEM;

	b->shared = b->private + nr_cpus;

	return 0;
}

static void vgic_free_bitmap(struct vgic_bitmap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

/*
 * Present a u64 as a bitmap of unsigned longs. On 32-bit big-endian
 * hosts the two 32-bit halves must be swapped so that bit 0 of the u64
 * ends up as bit 0 of the bitmap.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}

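/*
 * Return a pointer to the u32 register at byte offset @offset. Offset 0
 * selects the per-VCPU private bank; anything else indexes the shared
 * registers, with the big-endian word swizzle applied.
 */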
u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
	else
		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->private + cpuid);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}

void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
			     int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private + cpuid;
	} else {
		reg = x->shared;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	return x->private + cpuid;
}

unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared;
}

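/*
 * Bytemaps store one byte of state per interrupt (such as the per-IRQ
 * priority), organised as u32 registers covering four IRQs each.
 */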
static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
{
	int size;

	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;

	x->private = kzalloc(size, GFP_KERNEL);
	if (!x->private)
		return -ENOMEM;

	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
	return 0;
}

static void vgic_free_bytemap(struct vgic_bytemap *b)
{
	kfree(b->private);
	b->private = NULL;
	b->shared = NULL;
}

u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	u32 *reg;

	if (offset < VGIC_NR_PRIVATE_IRQS) {
		reg = x->private;
		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
	} else {
		reg = x->shared;
		offset -= VGIC_NR_PRIVATE_IRQS;
	}

	return reg + (offset / sizeof(u32));
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

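/*
 * Accessors for the per-IRQ distributor state: configuration (edge vs.
 * level), enabled, queued, active, level, soft-pending and pending bits.
 */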
static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
	if (!vgic_dist_irq_get_level(vcpu, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		if (!compute_pending_for_cpu(vcpu))
			clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
	}
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
}

void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
}

void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
{
	return !vgic_irq_is_queued(vcpu, irq);
}

/**
 * vgic_reg_access - read, write or modify the value of a register
 * @mmio:   pointer to the data describing the MMIO access
 * @reg:    pointer to the virtual backing register (may be NULL for RAZ/WI)
 * @offset: the byte offset of the access within the register
 * @mode:   an ORed combination of ACCESS_READ_* and ACCESS_WRITE_* flags
 *
 * Performs the access described by @mmio on the emulated register @reg,
 * honouring the read/write semantics requested in @mode.
 */
void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
		     phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * A register without a backing store can only be RAZ/WI; any
	 * other access mode would have nothing to operate on.
	 */
	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}

bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset, int vcpu_id, int access)
{
	u32 *reg;
	int mode = ACCESS_READ_VALUE | access;
	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);

	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		if (access & ACCESS_WRITE_CLEARBIT) {
			if (offset < 4) /* Force SGI enabled */
				*reg |= 0xffff;
			vgic_retire_disabled_irqs(target_vcpu);
		}
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_set_pending_reg(struct kvm *kvm,
				 struct kvm_exit_mmio *mmio,
				 phys_addr_t offset, int vcpu_id)
{
	u32 *reg, orig;
	u32 level_mask;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
	level_mask = (~(*reg));

	/* Mark both level and edge triggered irqs as pending */
	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);

	if (mmio->is_write) {
		/* Set the soft-pending flag only for level-triggered irqs */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);
		*reg &= level_mask;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_pending_reg(struct kvm *kvm,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id)
{
	u32 *level_active;
	u32 *reg, orig;
	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
	orig = *reg;
	vgic_reg_access(mmio, reg, offset, mode);
	if (mmio->is_write) {
		/* Re-set level triggered level-active interrupts */
		level_active = vgic_bitmap_get_reg(&dist->irq_level,
						   vcpu_id, offset);
		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
		*reg |= *level_active;

		/* Ignore writes to SGIs */
		if (offset < 2) {
			*reg &= ~0xffff;
			*reg |= orig & 0xffff;
		}

		/* Clear soft-pending flags */
		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
					  vcpu_id, offset);
		vgic_reg_access(mmio, reg, offset, mode);

		vgic_update_state(kvm);
		return true;
	}
	return false;
}

bool vgic_handle_set_active_reg(struct kvm *kvm,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_active_reg(struct kvm *kvm,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 back into a 16bit value
	 * abcd...mnop, which is what we store internally.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. We therefore only store the upper bit and use the
 * two functions above to compress/expand it for MMIO accesses.
 */
bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset)
{
	u32 val;

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		/* Ignore writes to read-only SGI and PPI bits */
		if (offset < 8)
			return false;

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

/**
 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
 * @vcpu: the VCPU whose list registers should be drained
 *
 * Move any IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be
 * read from the main emulation structures without investigating the LRs.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 elrsr = vgic_get_elrsr(vcpu);
	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
	int i;

	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 */
		BUG_ON(!(lr.state & LR_STATE_MASK));

		/* Reestablish SGI source for pending and active IRQs */
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);

		/*
		 * If the LR holds an active (10) or a pending and active
		 * (11) interrupt then move the active state to the
		 * distributor tracking bit.
		 */
		if (lr.state & LR_STATE_ACTIVE)
			vgic_irq_set_active(vcpu, lr.irq);

		/*
		 * Reestablish the pending state on the distributor and
		 * the CPU interface and mark the LR as free for other use.
		 */
		vgic_retire_lr(i, vcpu);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

const struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
					    int len, gpa_t offset)
{
	while (ranges->len) {
		if (offset >= ranges->base &&
		    (offset + len) <= (ranges->base + ranges->len))
			return ranges;
		ranges++;
	}

	return NULL;
}

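/*
 * Reject accesses to per-IRQ registers that would address an interrupt
 * beyond the number of IRQs configured for this VM.
 */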
static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct vgic_io_range *range,
				 unsigned long offset)
{
	int irq;

	if (!range->bits_per_irq)
		return true;	/* Not an irq-based access */

	irq = offset * 8 / range->bits_per_irq;
	if (irq >= dist->nr_irqs)
		return false;

	return true;
}

/*
 * call_range_handler - call the MMIO handler for a given range
 *
 * All handlers operate on at most 32 bits at a time, so any access wider
 * than 4 bytes is split into two consecutive 32-bit handler calls.
 *
 * Returns true if the (emulated) distributor state changed.
 */
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct vgic_io_range *range)
{
	struct kvm_exit_mmio mmio32;
	bool ret;

	if (likely(mmio->len <= 4))
		return range->handle_mmio(vcpu, mmio, offset);

	/*
	 * Anything bigger than 4 bytes is assumed to be a 64-bit access:
	 * split it into two 32-bit accesses and handle them one by one.
	 */
	mmio32.len = 4;
	mmio32.is_write = mmio->is_write;
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	mmio32.data = &((u32 *)mmio->data)[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);

	mmio32.phys_addr = mmio->phys_addr;
	mmio32.data = &((u32 *)mmio->data)[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);

	return ret;
}

/*
 * vgic_handle_mmio_access - handle an in-kernel MMIO access to the VGIC
 *
 * Looks up the matching register range, dispatches the access to its
 * handler under the distributor lock, mirrors the result into the
 * kvm_run MMIO structure and kicks any VCPUs that now have work to do.
 * Returns 0 on success or -ENXIO if no handler covers the access.
 */
static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
				   struct kvm_io_device *this, gpa_t addr,
				   int len, void *val, bool is_write)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_io_device *iodev = container_of(this,
						    struct vgic_io_device, dev);
	struct kvm_run *run = vcpu->run;
	const struct vgic_io_range *range;
	struct kvm_exit_mmio mmio;
	bool updated_state;
	gpa_t offset;

	offset = addr - iodev->addr;
	range = vgic_find_range(iodev->reg_ranges, len, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
		return -ENXIO;
	}

	mmio.phys_addr = addr;
	mmio.len = len;
	mmio.is_write = is_write;
	mmio.data = val;
	mmio.private = iodev->redist_vcpu;

	spin_lock(&dist->lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, &mmio, offset, range);
	} else {
		if (!is_write)
			memset(val, 0, len);
		updated_state = false;
	}
	spin_unlock(&dist->lock);
	run->mmio.is_write = is_write;
	run->mmio.len = len;
	run->mmio.phys_addr = addr;
	memcpy(run->mmio.data, val, len);

	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return 0;
}

static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
				 struct kvm_io_device *this,
				 gpa_t addr, int len, void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
}

static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
				  struct kvm_io_device *this,
				  gpa_t addr, int len, const void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
				       true);
}

static struct kvm_io_device_ops vgic_io_ops = {
	.read	= vgic_handle_mmio_read,
	.write	= vgic_handle_mmio_write,
};

/**
 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
 * @kvm:    The VM structure pointer
 * @base:   The (guest) base address for the register frame
 * @len:    Length of the register frame window
 * @ranges: Describing the handler functions for each register
 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
 * @iodev:  Points to memory to be passed on to the handlers
 *
 * @iodev stores the parameters of this function to be usable by the
 * respective handler functions; it must stay valid and unique for the
 * whole lifetime of the VGIC. Pass -1 as @redist_vcpu_id if the frame
 * is not tied to a specific VCPU.
 */
int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
			     const struct vgic_io_range *ranges,
			     int redist_vcpu_id,
			     struct vgic_io_device *iodev)
{
	struct kvm_vcpu *vcpu = NULL;
	int ret;

	if (redist_vcpu_id >= 0)
		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);

	iodev->addr		= base;
	iodev->len		= len;
	iodev->reg_ranges	= ranges;
	iodev->redist_vcpu	= vcpu;

	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);

	mutex_lock(&kvm->slots_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
				      &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	/* Mark the iodev as invalid if registration fails. */
	if (ret)
		iodev->dev.ops = NULL;

	return ret;
}

static int vgic_nr_shared_irqs(struct vgic_dist *dist)
{
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

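/*
 * Compute the set of enabled, active interrupts targeting this VCPU;
 * returns true if at least one interrupt is active.
 */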
static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *active, *enabled, *act_percpu, *act_shared;
	unsigned long active_private, active_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
	act_shared = vcpu->arch.vgic_cpu.active_shared;

	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);

	active = vgic_bitmap_get_shared_map(&dist->irq_active);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(act_shared, active, enabled, nr_shared);
	bitmap_and(act_shared, act_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
	active_shared = find_first_bit(act_shared, nr_shared);

	return (active_private < VGIC_NR_PRIVATE_IRQS ||
		active_shared < nr_shared);
}

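/*
 * Compute the set of enabled, pending interrupts targeting this VCPU;
 * returns true if at least one interrupt should be delivered.
 */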
static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	if (!dist->enabled) {
		bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
		bitmap_zero(pend_shared, nr_shared);
		return 0;
	}

	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, nr_shared);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, nr_shared);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < nr_shared);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * or active interrupts. Must be called with the distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu))
			set_bit(c, dist->irq_pending_on_cpu);

		if (compute_active_for_cpu(vcpu))
			set_bit(c, dist->irq_active_on_cpu);
		else
			clear_bit(c, dist->irq_active_on_cpu);
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
{
	vgic_ops->clear_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
{
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vgic_irq_clear_queued(vcpu, vlr.irq);

	/*
	 * We must transfer the pending state back to the distributor
	 * before retiring the LR, otherwise we may lose it.
	 */
	if (vlr.state & LR_STATE_PENDING) {
		vgic_dist_irq_set_pending(vcpu, vlr.irq);
		vlr.hwirq = 0;
	}

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
}

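/*
 * Report whether the distributor sees any active interrupt for this VCPU,
 * as last computed by vgic_update_state().
 */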
static bool dist_active_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
{
	int i;

	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, i);

		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
			return true;
	}

	return vgic_irq_is_active(vcpu, map->virt_irq);
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	u64 elrsr = vgic_get_elrsr(vcpu);
	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
	int lr;

	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq))
			vgic_retire_lr(lr, vcpu);
	}
}

static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_irq_is_active(vcpu, irq)) {
		vlr.state |= LR_STATE_ACTIVE;
		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
		vgic_irq_clear_active(vcpu, irq);
		vgic_update_state(vcpu->kvm);
	} else {
		WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	if (vlr.irq >= VGIC_NR_SGIS) {
		struct irq_phys_map *map;
		map = vgic_irq_map_search(vcpu, irq);

		if (map) {
			vlr.hwirq = map->phys_irq;
			vlr.state |= LR_HW;
			vlr.state &= ~LR_EOI_INT;

			/*
			 * Make sure we're not going to sample this
			 * interrupt again: a HW-backed interrupt is
			 * deactivated by the guest without triggering
			 * a maintenance interrupt.
			 */
			vgic_irq_set_queued(vcpu, irq);
		}
	}

	vgic_set_lr(vcpu, lr_nr, vlr);
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 * sgi_source must be zero for any non-SGI interrupts.
 */
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr = vgic_get_elrsr(vcpu);
	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= dist->nr_irqs);

	kvm_debug("Queue IRQ%d\n", irq);

	/* Do we have an active interrupt for the same CPUID? */
	for_each_clear_bit(lr, elrsr_ptr, vgic->nr_lr) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.irq == irq && vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_bit(elrsr_ptr, vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

	return true;
}

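/*
 * Queue a hardware-signalled interrupt. Returns true if the interrupt was
 * already queued or could be placed in an LR. Edge interrupts are consumed
 * on queueing; level interrupts are marked as queued so they are not
 * sampled again until the guest EOIs them.
 */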
static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (!vgic_can_sample_irq(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear_pending(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_queued(vcpu, irq);
		}

		return true;
	}

	return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pa_percpu, *pa_shared;
	int i, vcpu_id;
	int overflow = 0;
	int nr_shared = vgic_nr_shared_irqs(dist);

	vcpu_id = vcpu->vcpu_id;

	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;

	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
		  VGIC_NR_PRIVATE_IRQS);
	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
		  nr_shared);

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
		goto epilog;

	/* SGIs */
	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, pa_shared, nr_shared) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for us. Claim
		 * we don't have anything pending; we'll adjust that if
		 * needed while exiting.
		 */
		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
	}
}

static int process_queued_irq(struct kvm_vcpu *vcpu,
			      int lr, struct vgic_lr vlr)
{
	int pending = 0;

	/*
	 * The interrupt has been EOIed (called from
	 * vgic_process_maintenance) or completed by the hardware (called
	 * from vgic_sync_hwirq), so it is no longer in flight. Any
	 * soft-pending state set by a guest write to GICD_ISPENDR has now
	 * been consumed.
	 */
	vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

	/*
	 * Tell the gic to start sampling this interrupt again.
	 */
	vgic_irq_clear_queued(vcpu, vlr.irq);

	/* Any additional pending interrupt? */
	if (vgic_irq_is_edge(vcpu, vlr.irq)) {
		BUG_ON(!(vlr.state & LR_HW));
		pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
	} else {
		if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
			vgic_cpu_irq_set(vcpu, vlr.irq);
			pending = 1;
		} else {
			vgic_dist_irq_clear_pending(vcpu, vlr.irq);
			vgic_cpu_irq_clear(vcpu, vlr.irq);
		}
	}

	/*
	 * Despite being EOIed, the LR may not have
	 * been marked as empty.
	 */
	vlr.state = 0;
	vlr.hwirq = 0;
	vgic_set_lr(vcpu, lr, vlr);

	return pending;
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct kvm *kvm = vcpu->kvm;
	int level_pending = 0;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
			WARN_ON(vlr.state & LR_STATE_MASK);

			/*
			 * kvm_notify_acked_irq calls kvm_set_irq()
			 * to reset the IRQ level, which grabs the
			 * dist->lock, so we call this before taking the
			 * dist->lock ourselves.
			 */
			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);

			spin_lock(&dist->lock);
			level_pending |= process_queued_irq(vcpu, lr, vlr);
			spin_unlock(&dist->lock);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	/*
	 * If we sync the vgic state after flushing it, but before
	 * entering the guest (this happens for pending signals and vmid
	 * rollovers), then make sure we don't pick up any old maintenance
	 * interrupts here.
	 */
	vgic_clear_eisr(vcpu);

	return level_pending;
}

/*
 * A hardware-mapped interrupt whose LR no longer carries the active state
 * has been completed by the guest: fold its state back into the
 * distributor. Returns true if a level interrupt is still pending.
 */
static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool level_pending;

	if (!(vlr.state & LR_HW))
		return false;

	if (vlr.state & LR_STATE_ACTIVE)
		return false;

	spin_lock(&dist->lock);
	level_pending = process_queued_irq(vcpu, lr, vlr);
	spin_unlock(&dist->lock);
	return level_pending;
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);

	/* Fold completed HW interrupts back into the distributor state */
	for (lr = 0; lr < vgic->nr_lr; lr++) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		level_pending |= vgic_sync_hwirq(vcpu, lr, vlr);
		BUG_ON(vlr.irq >= dist->nr_irqs);
	}

	/* Check if we still have something up our sleeve... */
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	__kvm_vgic_sync_hwstate(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int edge_triggered = vgic_irq_is_edge(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (edge_triggered) {
		int state = vgic_dist_irq_is_pending(vcpu, irq);
		return level > state;
	} else {
		int state = vgic_dist_irq_get_level(vcpu, irq);
		return level != state;
	}
}

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   struct irq_phys_map *map,
				   unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int edge_triggered, level_triggered;
	int enabled;
	bool ret = true, can_inject = true;

	trace_vgic_update_irq_pending(cpuid, irq_num, level);

	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
		return -EINVAL;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
	level_triggered = !edge_triggered;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		if (cpuid == VCPU_NOT_ALLOCATED) {
			/* Pretend we use CPU0, and prevent injection */
			cpuid = 0;
			can_inject = false;
		}
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level) {
		if (level_triggered)
			vgic_dist_irq_set_level(vcpu, irq_num);
		vgic_dist_irq_set_pending(vcpu, irq_num);
	} else {
		if (level_triggered) {
			vgic_dist_irq_clear_level(vcpu, irq_num);
			if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
				vgic_dist_irq_clear_pending(vcpu, irq_num);
				vgic_cpu_irq_clear(vcpu, irq_num);
				if (!compute_pending_for_cpu(vcpu))
					clear_bit(cpuid,
						  dist->irq_pending_on_cpu);
			}
		}

		ret = false;
		goto out;
	}

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled || !can_inject) {
		ret = false;
		goto out;
	}

	if (!vgic_can_sample_irq(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	if (ret) {
		/* kick the specified vcpu */
		kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
	}

	return 0;
}

static int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once set up with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->lock);
	}

	return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device. This IRQ
 *           must not be mapped to a HW interrupt.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  raise the input signal
 *                            false: lower the input signal
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH
 * for level-sensitive interrupts. You can think of the level parameter
 * as 1 being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	struct irq_phys_map *map;
	int ret;

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
	if (map)
		return -EINVAL;

	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
}

/**
 * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @map:     Pointer to the irq_phys_map structure describing the mapping
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive  true:  raise the input signal
 *                            false: lower the input signal
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH
 * for level-sensitive interrupts. You can think of the level parameter
 * as 1 being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
			       struct irq_phys_map *map, bool level)
{
	int ret;

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
						    int virt_irq)
{
	if (virt_irq < VGIC_NR_PRIVATE_IRQS)
		return &vcpu->arch.vgic_cpu.irq_phys_map_list;
	else
		return &vcpu->kvm->arch.vgic.irq_phys_map_list;
}

/**
 * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
 * @vcpu: The VCPU pointer
 * @virt_irq: The virtual IRQ number for the guest
 * @irq: The Linux IRQ number
 *
 * Establish a mapping between a guest-visible interrupt (@virt_irq) and a
 * Linux IRQ (@irq). On injection, @virt_irq will be associated with the
 * physical interrupt represented by @irq. This mapping can be established
 * multiple times as long as the parameters are the same.
 *
 * Returns a valid pointer on success, and an error pointer otherwise.
 */
struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
					   int virt_irq, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map *map;
	struct irq_phys_map_entry *entry;
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;

	desc = irq_to_desc(irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/* Create a new mapping */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dist->irq_phys_map_lock);

	/* Try to match an existing mapping */
	map = vgic_irq_map_search(vcpu, virt_irq);
	if (map) {
		/* Make sure this mapping matches */
		if (map->phys_irq != phys_irq ||
		    map->irq      != irq)
			map = ERR_PTR(-EINVAL);

		/* Found an existing, valid mapping */
		goto out;
	}

	map           = &entry->map;
	map->virt_irq = virt_irq;
	map->phys_irq = phys_irq;
	map->irq      = irq;

	list_add_tail_rcu(&entry->entry, root);

out:
	spin_unlock(&dist->irq_phys_map_lock);

	/*
	 * If we've found a hit in the existing list, free the useless
	 * entry.
	 */
	if (IS_ERR(map) || map != &entry->map)
		kfree(entry);
	return map;
}

static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
						int virt_irq)
{
	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
	struct irq_phys_map_entry *entry;
	struct irq_phys_map *map;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, root, entry) {
		map = &entry->map;
		if (map->virt_irq == virt_irq) {
			rcu_read_unlock();
			return map;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
{
	struct irq_phys_map_entry *entry;

	entry = container_of(rcu, struct irq_phys_map_entry, rcu);
	kfree(entry);
}

/**
 * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
 * @vcpu: The VCPU pointer
 * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
 *
 * Remove an existing mapping between virtual and physical interrupts.
 */
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct irq_phys_map_entry *entry;
	struct list_head *root;

	if (!map)
		return -EINVAL;

	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		if (&entry->map == map) {
			list_del_rcu(&entry->entry);
			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
			break;
		}
	}

	spin_unlock(&dist->irq_phys_map_lock);

	return 0;
}

static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct irq_phys_map_entry *entry;

	spin_lock(&dist->irq_phys_map_lock);

	list_for_each_entry(entry, root, entry) {
		list_del_rcu(&entry->entry);
		call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
	}

	spin_unlock(&dist->irq_phys_map_lock);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
}

static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
	int sz = nr_longs * sizeof(unsigned long);

	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);

	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	return 0;
}

/**
 * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
}

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC model imposes an upper bound on the number of VCPUs a
 * guest can use.
 */
int kvm_vgic_get_max_vcpus(void)
{
	return vgic->max_gic_vcpus;
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vgic_vcpu_destroy(vcpu);

	vgic_free_bitmap(&dist->irq_enabled);
	vgic_free_bitmap(&dist->irq_level);
	vgic_free_bitmap(&dist->irq_pending);
	vgic_free_bitmap(&dist->irq_soft_pend);
	vgic_free_bitmap(&dist->irq_queued);
	vgic_free_bitmap(&dist->irq_active);
	vgic_free_bitmap(&dist->irq_cfg);
	vgic_free_bytemap(&dist->irq_priority);
	if (dist->irq_spi_target) {
		for (i = 0; i < dist->nr_cpus; i++)
			vgic_free_bitmap(&dist->irq_spi_target[i]);
	}
	kfree(dist->irq_sgi_sources);
	kfree(dist->irq_spi_cpu);
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
	dist->nr_cpus = 0;
}

/*
 * Allocate and initialize the various data structures. Must be called
 * with kvm->lock held!
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_cpus, nr_irqs;
	int ret, i, vcpu_id;

	if (vgic_initialized(kvm))
		return 0;

	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
	if (!nr_cpus)		/* No vcpus? Can't be good... */
		return -ENODEV;

	/*
	 * If nobody configured the number of interrupts, use the
	 * legacy one.
	 */
	if (!dist->nr_irqs)
		dist->nr_irqs = VGIC_NR_IRQS_LEGACY;

	nr_irqs = dist->nr_irqs;

	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

	if (ret)
		goto out;

	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					  GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nr_cpus; i++)
		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
					nr_cpus, nr_irqs);

	if (ret)
		goto out;

	ret = kvm->arch.vgic.vm_ops.init_model(kvm);
	if (ret)
		goto out;

	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
		if (ret) {
			kvm_err("VGIC: Failed to allocate vcpu memory\n");
			break;
		}

		/*
		 * Enable and configure all SGIs to be edge-triggered and
		 * configure all PPIs as level-triggered.
		 */
		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
			if (i < VGIC_NR_SGIS) {
				/* SGIs */
				vgic_bitmap_set_irq_val(&dist->irq_enabled,
							vcpu->vcpu_id, i, 1);
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_EDGE);
			} else if (i < VGIC_NR_PRIVATE_IRQS) {
				/* PPIs */
				vgic_bitmap_set_irq_val(&dist->irq_cfg,
							vcpu->vcpu_id, i,
							VGIC_CFG_LEVEL);
			}
		}

		vgic_enable(vcpu);
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);

	return ret;
}

static int init_vgic_model(struct kvm *kvm, int type)
{
	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
#endif
	default:
		return -ENODEV;
	}

	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
		return -E2BIG;

	return 0;
}

/**
 * kvm_vgic_early_init - Earliest possible vgic initialization stage
 *
 * No memory allocation should be performed here, only static init.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->arch.vgic.lock);
	spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
	INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
}

int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	int i, vcpu_lock_idx = -1, ret;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && !vgic->can_emulate_gicv2) {
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab
	 * the vcpu->mutex. Grabbing the mutex of every VCPU makes sure
	 * that no other VCPUs are run while we create the vgic, and that
	 * none of them has run yet.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex))
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once)
			goto out_unlock;
	}

	ret = init_vgic_model(kvm, type);
	if (ret)
		goto out_unlock;

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

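/* Check that the GICv2 distributor and CPU interface windows don't overlap. */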
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

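/*
 * Record a guest-physical base address after checking that it is page
 * aligned, within the guest physical address range, not yet set and not
 * overlapping the other register window.
 */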
static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read
 *         the address
 *
 * Set or get the vgic base addresses for the distributor and the virtual
 * CPU interface in the VM physical address space. These addresses are
 * properties of the emulated core/SoC and therefore user space initially
 * knows this information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, block_size;
	phys_addr_t alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V2_DIST_SIZE;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		block_size = KVM_VGIC_V3_DIST_SIZE;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		block_size = KVM_VGIC_V3_REDIST_SIZE;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		if (!IS_ALIGNED(*addr, alignment))
			r = -EINVAL;
		else
			r = vgic_ioaddr_assign(kvm, addr_ptr, *addr,
					       block_size);
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_IRQS ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_irqs = val;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			r = vgic_init(dev->kvm);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_irqs, uaddr);
		break;
	}
	}

	return r;
}

int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
}

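/* Runs on each CPU to (re-)enable the GIC maintenance interrupt locally. */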
static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic",	.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400",		.data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3",		.data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
			  const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}

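/*
 * Minimal irqchip routing support: GSI numbers are taken to index the SPI
 * space directly, and MSI injection is not implemented.
 */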
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return 0;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	return kvm_vgic_inject_irq(kvm, 0, spi, level);
}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}