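/*
 * In-kernel emulation of the ARM Generic Interrupt Controller (GICv2)
 * distributor, plus save/restore plumbing for the virtual CPU
 * interface, as used by KVM/ARM guests.
 */
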
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII 'K' */
#define IMPLEMENTER_ARM		0x43b	/* ARM implementer code */
#define GICC_ARCH_VERSION_V2	0x2

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static const struct vgic_ops *vgic_ops;
static const struct vgic_params *vgic;
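/*
 * struct vgic_bitmap is accessed both as an array of u32 registers and
 * as a bitmap of unsigned longs. On 64-bit big-endian systems the two
 * 32-bit halves of each long sit in memory in the opposite order from
 * the u32 array view, so the register index has to be swizzled (XORed
 * with 1) to reach the intended 32-bit word.
 */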
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64
#define REG_OFFSET_SWIZZLE	1
#else
#define REG_OFFSET_SWIZZLE	0
#endif

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
	else
		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}

static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}
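/**
 * vgic_reg_access - emulate a read or write of a distributor register
 * @mmio:   data describing the MMIO access
 * @reg:    backing store for the emulated register (NULL for RAZ/WI)
 * @offset: register offset; the low two bits select the accessed word
 * @mode:   combination of ACCESS_READ_* and ACCESS_WRITE_* flags
 *
 * Applies the access mode to the backing register: reads return either
 * the stored value or zero, and writes either set bits, clear bits,
 * replace the value, or are ignored.
 */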
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * A NULL backing register is only legal for RAZ/WI emulation;
	 * anything else would silently lose state.
	 */
	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}

static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		/* SGIs cannot be disabled - force them back on */
		if (offset < 4)
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupts targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16-bit value with one config bit per interrupt into
	 * the 32-bit ICFGR layout with two bits per interrupt, where
	 * the edge/level bit is bit 1 of each pair.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * The reverse operation: keep only bit 1 of each two-bit pair
	 * and pack the result into 16 bits.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}
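/*
 * The distributor uses two bits per IRQ in GICD_ICFGR, but irq_cfg keeps
 * only one (the edge/level bit). Each 32-bit guest access therefore maps
 * to a 16-bit half of the backing register, converted with the two
 * helpers above.
 */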
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}
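/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: the VCPU whose list registers are scanned
 *
 * Move any pending IRQs that have already been assigned to LRs back to
 * the emulated distributor state, so that the complete emulated state
 * can be read from the main emulation structures without looking at
 * the LRs. IRQs that are active in an LR keep their active state there,
 * since the active state is not tracked on the distributor side.
 */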
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * An LR that holds only an active (not pending)
		 * interrupt is left alone: the active state cannot be
		 * represented on the distributor side.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor,
		 * remembering the source CPU for SGIs, and clear it
		 * from the LR.
		 */
		vgic_dist_irq_set(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If the LR holds no state anymore (it could still be
		 * active), mark it as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK))
			vgic_retire_lr(i, lr.irq, vcpu);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear the source bits for each written SGI byte */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
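/*
 * Dispatch table entry for a distributor register range. A handler
 * returns true when it changed emulated state in a way that may require
 * kicking other VCPUs.
 */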
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base		= GIC_DIST_CTRL,
		.len		= 12,
		.handle_mmio	= handle_mmio_misc,
	},
	{
		.base		= GIC_DIST_IGROUP,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ENABLE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_enable_reg,
	},
	{
		.base		= GIC_DIST_ENABLE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_enable_reg,
	},
	{
		.base		= GIC_DIST_PENDING_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_set_pending_reg,
	},
	{
		.base		= GIC_DIST_PENDING_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_clear_pending_reg,
	},
	{
		.base		= GIC_DIST_ACTIVE_SET,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_ACTIVE_CLEAR,
		.len		= VGIC_NR_IRQS / 8,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_DIST_PRI,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_priority_reg,
	},
	{
		.base		= GIC_DIST_TARGET,
		.len		= VGIC_NR_IRQS,
		.handle_mmio	= handle_mmio_target_reg,
	},
	{
		.base		= GIC_DIST_CONFIG,
		.len		= VGIC_NR_IRQS / 4,
		.handle_mmio	= handle_mmio_cfg_reg,
	},
	{
		.base		= GIC_DIST_SOFTINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_sgi_reg,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_CLEAR,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_clear,
	},
	{
		.base		= GIC_DIST_SGI_PENDING_SET,
		.len		= VGIC_NR_SGIS,
		.handle_mmio	= handle_mmio_sgi_set,
	},
	{}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}
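/**
 * vgic_handle_mmio - handle an in-kernel MMIO access to the distributor
 * @vcpu: pointer to the vcpu performing the access
 * @run:  pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 *
 * Returns true if the access has been handled in kernel space, false if
 * it needs to be emulated in user space.
 */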
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* Accesses wider than 32 bits are not emulated */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:			/* use the target list */
		if (!target_cpus)
			return;
		break;

	case 1:			/* all CPUs but the requester */
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:			/* the requesting CPU only */
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}
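/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with the distributor lock held.
 */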
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops->get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops->set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops->disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops->set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops->enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
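/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences). So retire any LR
 * that holds a now-disabled interrupt.
 */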
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_active(vcpu, vlr.irq))
				vgic_irq_clear_active(vcpu, vlr.irq);
		}
	}
}
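/*
 * Queue an interrupt to a CPU virtual interface. Returns true on
 * success, or false if no free LR could be found to hold it.
 */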
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic->nr_lr);
	if (lr >= vgic->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_irq_is_active(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_active(vcpu, irq);
		}

		return true;
	}

	return false;
}
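/*
 * Fill the list registers with pending interrupts before running the
 * guest. Called with the distributor lock held.
 */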
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for us.
		 * Claim we don't have anything pending; we'll adjust
		 * that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

			vgic_irq_clear_active(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}
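/*
 * Sync back the VGIC state after a guest run: process the maintenance
 * status, free now-empty LRs, and recompute the pending state.
 */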
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int is_edge = vgic_irq_is_edge(vcpu, irq);
	int state = vgic_dist_irq_is_pending(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (is_edge)
		return level > state;
	else
		return level != state;
}

static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int is_edge, is_level;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	is_edge = vgic_irq_is_edge(vcpu, irq_num);
	is_level = !is_edge;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level)
		vgic_dist_irq_set(vcpu, irq_num);
	else
		vgic_dist_irq_clear(vcpu, irq_num);

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}
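/**
 * kvm_vgic_inject_irq - inject an IRQ from a device into the vgic
 * @kvm:     the VM structure pointer
 * @cpuid:   the CPU number for private (per-CPU) interrupts
 * @irq_num: the IRQ number that is assigned to the device
 * @level:   for edge-triggered interrupts, true triggers the interrupt;
 *           for level-sensitive interrupts, the new level of the line
 *
 * Updates the distributor state and kicks any VCPU that now has a
 * pending interrupt to deliver.
 */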
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}
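/**
 * kvm_vgic_vcpu_init - initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Enable the first 16 interrupts (the SGIs), default all private
 * interrupts to edge-triggered, clear the LR map and enable the VGIC
 * on this VCPU.
 */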
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	/*
	 * Store the number of LRs per vcpu, so we don't have to go
	 * all the way to the distributor structure to find out. Only
	 * assembly code should use this one.
	 */
	vgic_cpu->nr_lr = vgic->nr_lr;

	vgic_enable(vcpu);

	return 0;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic->maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic->maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};

int kvm_vgic_hyp_init(void)
{
	const struct of_device_id *matched_id;
	int (*vgic_probe)(struct device_node *, const struct vgic_ops **,
			  const struct vgic_params **);
	struct device_node *vgic_node;
	int ret;

	vgic_node = of_find_matching_node_and_match(NULL,
						    vgic_ids, &matched_id);
	if (!vgic_node) {
		kvm_err("error: no compatible GIC node found\n");
		return -ENODEV;
	}

	vgic_probe = matched_id->data;
	ret = vgic_probe(vgic_node, &vgic_ops, &vgic);
	if (ret)
		return ret;

	ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic->maint_irq);
		return ret;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	/* Callback into arch code for further setup */
	vgic_arch_setup(vgic);

	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);

	return 0;

out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}
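/**
 * kvm_vgic_init - initialize the vgic after the VGIC addresses are set
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs.
 * This cannot be done at creation time, because user space must first
 * set the VGIC CPU and distributor addresses.
 */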
int kvm_vgic_init(struct kvm *kvm)
{
	int ret = 0, i;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_initialized(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE);
	if (ret) {
		kvm_err("Unable to remap VGIC CPU to VCPU\n");
		goto out;
	}

	for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
		vgic_set_target_reg(kvm, 0, i);

	kvm->arch.vgic.ready = true;
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_vgic_create(struct kvm *kvm)
{
	int i, vcpu_lock_idx = -1, ret = 0;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->lock);

	if (kvm->arch.vgic.vctrl_base) {
		ret = -EEXIST;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run while we create the vgic.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!mutex_trylock(&vcpu->mutex)) {
			ret = -EBUSY;
			goto out_unlock;
		}
		vcpu_lock_idx = i;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.has_run_once) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}

	spin_lock_init(&kvm->arch.vgic.lock);
	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&vcpu->mutex);
	}

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_ioaddr_overlap(struct kvm *kvm)
{
	phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
	phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

	if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
		return 0;
	if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
	    (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
		return -EBUSY;
	return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
			      phys_addr_t addr, phys_addr_t size)
{
	int ret;

	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (addr & (SZ_4K - 1))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;
	if (addr + size < addr)
		return -EINVAL;

	*ioaddr = addr;
	ret = vgic_ioaddr_overlap(kvm);
	if (ret)
		*ioaddr = VGIC_ADDR_UNDEF;

	return ret;
}
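/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr:  pointer to the address value
 * @write: if true set the address in the VM address space, if false
 *         read it
 *
 * Set or get the vgic base addresses for the distributor and the
 * virtual CPU interface in the VM physical address space.
 */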
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
					       *addr, KVM_VGIC_V2_DIST_SIZE);
		} else {
			*addr = vgic->vgic_dist_base;
		}
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		if (write) {
			r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
					       *addr, KVM_VGIC_V2_CPU_SIZE);
		} else {
			*addr = vgic->vgic_cpu_base;
		}
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
				 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	bool updated = false;
	struct vgic_vmcr vmcr;
	u32 *vmcr_field;
	u32 reg;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (offset & ~0x3) {
	case GIC_CPU_CTRL:
		vmcr_field = &vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		vmcr_field = &vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		vmcr_field = &vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr_field = &vmcr.abpr;
		break;
	default:
		BUG();
	}

	if (!mmio->is_write) {
		reg = *vmcr_field;
		mmio_data_write(mmio, ~0, reg);
	} else {
		reg = mmio_data_read(mmio, ~0);
		if (reg != *vmcr_field) {
			*vmcr_field = reg;
			vgic_set_vmcr(vcpu, &vmcr);
			updated = true;
		}
	}
	return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	u32 reg;

	if (mmio->is_write)
		return false;

	/* GICC_IIDR */
	reg = (PRODUCT_ID_KVM << 20) |
	      (GICC_ARCH_VERSION_V2 << 16) |
	      (IMPLEMENTER_ARM << 0);
	mmio_data_write(mmio, ~0, reg);
	return false;
}
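/*
 * CPU interface register ranges. These are not accessed by the guest
 * (it uses the hardware CPU interface directly), but by user space for
 * saving and restoring the guest's state.
 */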
static const struct mmio_range vgic_cpu_ranges[] = {
	{
		.base		= GIC_CPU_CTRL,
		.len		= 12,
		.handle_mmio	= handle_cpu_mmio_misc,
	},
	{
		.base		= GIC_CPU_ALIAS_BINPOINT,
		.len		= 4,
		.handle_mmio	= handle_mmio_abpr,
	},
	{
		.base		= GIC_CPU_ACTIVEPRIO,
		.len		= 16,
		.handle_mmio	= handle_mmio_raz_wi,
	},
	{
		.base		= GIC_CPU_IDENT,
		.len		= 4,
		.handle_mmio	= handle_cpu_mmio_ident,
	},
};

static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	const struct mmio_range *r = NULL, *ranges;
	phys_addr_t offset;
	int ret, cpuid, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	struct vgic_dist *vgic;
	struct kvm_exit_mmio mmio;

	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		KVM_DEV_ARM_VGIC_CPUID_SHIFT;

	mutex_lock(&dev->kvm->lock);

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	vgic = &dev->kvm->arch.vgic;

	mmio.len = 4;
	mmio.is_write = is_write;
	if (is_write)
		mmio_data_write(&mmio, ~0, *reg);
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		mmio.phys_addr = vgic->vgic_dist_base + offset;
		ranges = vgic_dist_ranges;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		mmio.phys_addr = vgic->vgic_cpu_base + offset;
		ranges = vgic_cpu_ranges;
		break;
	default:
		BUG();
	}
	r = find_matching_range(ranges, &mmio, offset);

	if (unlikely(!r || !r->handle_mmio)) {
		ret = -ENXIO;
		goto out;
	}

	spin_lock(&vgic->lock);

	/*
	 * Ensure that no VCPU is running by checking the vcpu->cpu
	 * field; the VGIC state may only be accessed this way while
	 * all VCPUs are stopped.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (unlikely(tmp_vcpu->cpu != -1)) {
			ret = -EBUSY;
			goto out_vgic_unlock;
		}
	}

	/*
	 * Move all pending IRQs from the LRs on all VCPUs so the pending
	 * state can be properly represented in the register state accessible
	 * through this API.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
		vgic_unqueue_irqs(tmp_vcpu);

	offset -= r->base;
	r->handle_mmio(vcpu, &mmio, offset);

	if (!is_write)
		*reg = mmio_data_read(&mmio, ~0);

	ret = 0;
out_vgic_unlock:
	spin_unlock(&vgic->lock);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}

static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}

	}

	return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}

	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		r = vgic_attr_regs_access(dev, attr, &reg, false);
		if (r)
			return r;
		r = put_user(reg, uaddr);
		break;
	}

	}

	return r;
}

static int vgic_has_attr_regs(const struct mmio_range *ranges,
			      phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (find_matching_range(ranges, &dev_attr_mmio, offset))
		return 0;
	else
		return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	phys_addr_t offset;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_dist_ranges, offset);
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
		return vgic_has_attr_regs(vgic_cpu_ranges, offset);
	}
	return -ENXIO;
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm);
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_set_attr,
	.get_attr = vgic_get_attr,
	.has_attr = vgic_has_attr,
};