/*
 * VGIC MMIO handling functions
 */
#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
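
/*
 * Worked example of the offset arithmetic above: the enable registers use
 * one bit per interrupt, so a 32-bit read at byte offset 4 of the
 * GICD_ISENABLER range yields VGIC_ADDR_TO_INTID(4, 1) == 32, and the
 * loop walks len * 8 == 32 bits, reporting the enable state of
 * INTIDs 32..63.
 */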

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
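
/*
 * Worked example: a 32-bit guest write of 0x00000022 to GICD_ICENABLER0
 * has bits 1 and 5 set, so for_each_set_bit() visits i = 1 and i = 5 and
 * the loop above disables INTIDs 1 and 5 while leaving all other
 * interrupts alone, matching the write-1-to-clear semantics of the
 * register.
 */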

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection.  We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread
	 * in vgic_change_active_prepare) and still has to sync back this
	 * IRQ, so we release and re-acquire the lock and let the other
	 * thread sync back the IRQ.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;

		irq->active = active;
		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = requester_vcpu->vcpu_id;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the
 * IRQ is not queued on some running VCPU's LRs, because then the change to
 * the active state can be overwritten when the VCPU's state is synced
 * coming back from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts
 * can be migrated while we don't hold the IRQ locks and we don't want to
 * be chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state,
 * which guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
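
/*
 * Worked example: with VGIC_PRI_BITS == 5, GENMASK(7, 8 - VGIC_PRI_BITS)
 * is GENMASK(7, 3) == 0xf8, so a guest write of 0xff to a priority byte
 * is stored as 0xf8: the five most significant priority bits are kept
 * and the low three read back as zero, which the GIC architecture
 * permits for implementations with fewer priority bits.
 */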

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
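
/*
 * Worked example: each interrupt owns a 2-bit field in the config
 * registers, and only the upper bit of the field is meaningful (1 = edge,
 * 0 = level). An edge triggered interrupt at loop index i therefore
 * contributes 2U << (i * 2), i.e. field value 0b10, while a level
 * triggered one leaves its field as 0b00.
 */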

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}
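
/*
 * Worked example: for intid == 32 the returned mask describes INTIDs
 * 32..63, with bit i set iff INTID (32 + i) is configured level triggered
 * and its emulated line is currently asserted; SGIs and INTIDs beyond
 * the number of implemented interrupts are skipped and read as zero.
 */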

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}
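
/*
 * bsearch() requires the regions array to be sorted by ascending
 * reg_offset, so every vgic_register_region table passed in here must be
 * laid out that way. Hypothetical lookup: with one region covering
 * offsets [0x000, 0x080) and another covering [0x080, 0x100), an offset
 * of 0x084 compares greater than the first region and falls inside the
 * second, which match_region() signals by returning 0.
 */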

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as
 * the guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a
 * data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted
 * to a byte array it is observed as the guest would see it if it could
 * perform the load directly. Since the GIC is LE, and the guest knows
 * this, the guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}
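
/*
 * Worked example for the two helpers above: a guest stores the 32-bit
 * value 0x01020304 to a GIC register, so the MMIO buffer holds the bytes
 * 04 03 02 01 in memory order (the GIC is always little endian).
 * vgic_data_mmio_bus_to_host() then yields 0x01020304 on any host, since
 * le32_to_cpu() is a no-op on little endian CPUs and a byte swap on big
 * endian ones; vgic_data_host_to_mmio_bus() performs the mirror-image
 * conversion for data returned to the guest.
 */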

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}
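
/*
 * Worked example: a 2-byte access can never match, since only 8-, 32- and
 * 64-bit access flags exist, and a 4-byte access at offset 0x102 fails
 * the IS_ALIGNED() check. For a region with bits_per_irq == 1, a 32-bit
 * access at byte offset 8 starts at INTID 64; note that only this first
 * INTID is range-checked against nr_irqs, not the whole 64..95 span the
 * access covers.
 */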

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}
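
/*
 * Note that userspace accesses via this path are always 32 bits wide
 * (sizeof(u32) above), regardless of the width of the emulated register,
 * and prefer a region's uaccess_read/uaccess_write hooks over its normal
 * read/write handlers, which lets save/restore semantics differ from
 * guest-visible MMIO semantics.
 */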

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};
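
/*
 * These ops are what the kvm_io_bus layer invokes: a trapped guest MMIO
 * access that falls inside a registered VGIC range is routed to
 * dispatch_mmio_read()/dispatch_mmio_write() above, which locate the
 * register region for the faulting offset and forward to its handler.
 * Registration happens via kvm_io_bus_register_dev(), as in
 * vgic_register_dist_iodev() below.
 */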

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}