1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include "qemu/osdep.h"
23#include "qapi/error.h"
24#include "qemu-common.h"
25#include "cpu.h"
26#include "hw/sysbus.h"
27#include "migration/migration.h"
28#include "sysemu/kvm.h"
29#include "kvm_arm.h"
30#include "gic_internal.h"
31#include "vgic_common.h"
32
33
34
#ifdef DEBUG_GIC_KVM
static const int debug_gic_kvm = 1;
#else
static const int debug_gic_kvm = 0;
#endif

/* Debug printf, enabled by defining DEBUG_GIC_KVM.  Using a const int
 * guard rather than compiling the call out keeps the format string
 * type-checked in both configurations.
 */
#define DPRINTF(fmt, ...) do { \
        if (debug_gic_kvm) { \
            printf("arm_gic: " fmt , ## __VA_ARGS__); \
        } \
    } while (0)

#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
#define KVM_ARM_GIC(obj) \
     OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_CLASS(klass) \
     OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_GET_CLASS(obj) \
     OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC)

/**
 * KVMARMGICClass:
 * @parent_realize: preserved DeviceClass realize handler, chained to
 *                  from kvm_arm_gic_realize()
 * @parent_reset: preserved DeviceClass reset handler, chained to from
 *                kvm_arm_gic_reset()
 */
typedef struct KVMARMGICClass {
    ARMGICCommonClass parent_class;
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICClass;
60
61void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level)
62{
63
64
65
66
67
68
69
70
71
72 int kvm_irq, irqtype, cpu;
73
74 if (irq < (num_irq - GIC_INTERNAL)) {
75
76
77
78
79 irqtype = KVM_ARM_IRQ_TYPE_SPI;
80 cpu = 0;
81 irq += GIC_INTERNAL;
82 } else {
83
84 irqtype = KVM_ARM_IRQ_TYPE_PPI;
85 irq -= (num_irq - GIC_INTERNAL);
86 cpu = irq / GIC_INTERNAL;
87 irq %= GIC_INTERNAL;
88 }
89 kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT)
90 | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq;
91
92 kvm_set_irq(kvm_state, kvm_irq, !!level);
93}
94
95static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level)
96{
97 GICState *s = (GICState *)opaque;
98
99 kvm_arm_gic_set_irq(s->num_irq, irq, level);
100}
101
102static bool kvm_arm_gic_can_save_restore(GICState *s)
103{
104 return s->dev_fd >= 0;
105}
106
/* Build the 64-bit attr value for KVM_DEV_ARM_VGIC_GRP_{DIST,CPU}_REGS
 * accesses: the register offset, plus the CPU index for registers that
 * are banked per CPU interface.
 */
#define KVM_VGIC_ATTR(offset, cpu) \
    ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
      KVM_DEV_ARM_VGIC_CPUID_MASK) | \
     (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \
      KVM_DEV_ARM_VGIC_OFFSET_MASK))
112
/* Read or write one 32-bit distributor register through the kernel's
 * device attribute interface.  @write selects the direction.
 */
static void kvm_gicd_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write);
}
119
/* Read or write one 32-bit CPU interface register for CPU @cpu through
 * the kernel's device attribute interface.
 */
static void kvm_gicc_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write);
}
126
/* Loop over every 32-bit register needed to cover @_max_irq interrupts
 * when each interrupt occupies @_field_width bits of a register.
 */
#define for_each_irq_reg(_ctr, _max_irq, _field_width) \
    for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++)

/* Translation function between one per-irq register field and QEMU's
 * GICState.  @to_kernel selects the direction: true packs QEMU state
 * into *field for writing to the kernel, false applies *field (as read
 * from the kernel) to QEMU state.
 */
typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
                                  uint32_t *field, bool to_kernel);
136
137
138
139
140static void translate_clear(GICState *s, int irq, int cpu,
141 uint32_t *field, bool to_kernel)
142{
143 if (to_kernel) {
144 *field = ~0;
145 } else {
146
147 abort();
148 }
149}
150
151static void translate_group(GICState *s, int irq, int cpu,
152 uint32_t *field, bool to_kernel)
153{
154 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
155
156 if (to_kernel) {
157 *field = GIC_TEST_GROUP(irq, cm);
158 } else {
159 if (*field & 1) {
160 GIC_SET_GROUP(irq, cm);
161 }
162 }
163}
164
165static void translate_enabled(GICState *s, int irq, int cpu,
166 uint32_t *field, bool to_kernel)
167{
168 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
169
170 if (to_kernel) {
171 *field = GIC_TEST_ENABLED(irq, cm);
172 } else {
173 if (*field & 1) {
174 GIC_SET_ENABLED(irq, cm);
175 }
176 }
177}
178
179static void translate_pending(GICState *s, int irq, int cpu,
180 uint32_t *field, bool to_kernel)
181{
182 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
183
184 if (to_kernel) {
185 *field = gic_test_pending(s, irq, cm);
186 } else {
187 if (*field & 1) {
188 GIC_SET_PENDING(irq, cm);
189
190 }
191 }
192}
193
194static void translate_active(GICState *s, int irq, int cpu,
195 uint32_t *field, bool to_kernel)
196{
197 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
198
199 if (to_kernel) {
200 *field = GIC_TEST_ACTIVE(irq, cm);
201 } else {
202 if (*field & 1) {
203 GIC_SET_ACTIVE(irq, cm);
204 }
205 }
206}
207
208static void translate_trigger(GICState *s, int irq, int cpu,
209 uint32_t *field, bool to_kernel)
210{
211 if (to_kernel) {
212 *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
213 } else {
214 if (*field & 0x2) {
215 GIC_SET_EDGE_TRIGGER(irq);
216 }
217 }
218}
219
220static void translate_priority(GICState *s, int irq, int cpu,
221 uint32_t *field, bool to_kernel)
222{
223 if (to_kernel) {
224 *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
225 } else {
226 gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
227 }
228}
229
230static void translate_targets(GICState *s, int irq, int cpu,
231 uint32_t *field, bool to_kernel)
232{
233 if (to_kernel) {
234 *field = s->irq_target[irq] & 0xff;
235 } else {
236 s->irq_target[irq] = *field & 0xff;
237 }
238}
239
240static void translate_sgisource(GICState *s, int irq, int cpu,
241 uint32_t *field, bool to_kernel)
242{
243 if (to_kernel) {
244 *field = s->sgi_pending[irq][cpu] & 0xff;
245 } else {
246 s->sgi_pending[irq][cpu] = *field & 0xff;
247 }
248}
249
250
/* Read one distributor register group from the kernel and translate
 * each per-irq field into QEMU's GICState via @translate_fn.
 * @offset: byte offset of the register group in the distributor map
 * @width: bits per irq within a 32-bit register (1, 2 or 8)
 * @maxirq: number of irqs covered by this group
 */
static void kvm_dist_get(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per 32-bit register */
    uint32_t field;

    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
        /* Registers covering the private (irq < GIC_INTERNAL) range are
         * banked per CPU interface, so read them once per CPU; shared
         * irq registers exist only once and are read with cpu == 0.
         */
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            kvm_gicd_access(s, offset, cpu, &reg, false);
            for (j = 0; j < regsz; j++) {
                field = extract32(reg, j * width, width);
                translate_fn(s, irq + j, cpu, &field, false);
            }

            cpu++;
        }
        offset += 4;
    }
}
277
278
/* Pack QEMU GICState into one distributor register group and write it
 * to the kernel, one 32-bit register at a time.  Parameters mirror
 * kvm_dist_get(); @translate_fn is called with to_kernel == true and
 * must fully set *field for every irq.
 */
static void kvm_dist_put(GICState *s, uint32_t offset, int width,
                         int maxirq, vgic_translate_fn translate_fn)
{
    uint32_t reg;
    int i;
    int j;
    int irq;
    int cpu;
    int regsz = 32 / width; /* irqs per 32-bit register */
    uint32_t field;

    for_each_irq_reg(i, maxirq, width) {
        irq = i * regsz;
        cpu = 0;
        /* Private-irq registers are banked per CPU interface; shared
         * irq registers are written once with cpu == 0.
         */
        while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
            reg = 0;
            for (j = 0; j < regsz; j++) {
                translate_fn(s, irq + j, cpu, &field, true);
                reg = deposit32(reg, j * width, width, field);
            }
            kvm_gicd_access(s, offset, cpu, &reg, true);

            cpu++;
        }
        offset += 4;
    }
}
306
/* Push the complete QEMU GIC state into the kernel VGIC.  Used as the
 * migration post_load hook and after reset.  The restore order differs
 * from the save order: targets and trigger configuration are written
 * before pending/active state so the kernel latches them correctly.
 */
static void kvm_arm_gic_put(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;
    int num_cpu;
    int num_irq;

    /*****************************************************************
     * Distributor State
     */

    /* s->ctlr -> GICD_CTLR */
    reg = s->ctlr;
    kvm_gicd_access(s, 0x0, 0, &reg, true);

    /* Sanity check GICD_TYPER against s->num_irq and s->num_cpu */
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;
    num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (num_irq < s->num_irq) {
        fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
                s->num_irq, num_irq);
        abort();
    } else if (num_cpu != s->num_cpu) {
        fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
                s->num_cpu, num_cpu);
        /* Were the VCPUs not yet created in the kernel? */
        abort();
    }

    /* irq_state[n].enabled -> GICD_ISENABLERn
     * (clear everything first, then set the bits that should be 1)
     */
    kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);

    /* irq_state[n].group -> GICD_IGROUPRn */
    kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);

    /* s->irq_target[irq] -> GICD_ITARGETSRn
     * (restore targets before pending IRQs so they land on the right
     * CPU interfaces in the kernel)
     */
    kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);

    /* irq_state[n].trigger -> GICD_ICFGRn
     * (restore configuration before pending IRQs so edge/level is
     * interpreted correctly)
     */
    kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* irq_state[n].pending -> GICD_ISPENDRn, after clearing all */
    kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);

    /* irq_state[n].active -> GICD_ISACTIVERn, after clearing all */
    kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
    kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);

    /* per-irq priority -> GICD_IPRIORITYRn */
    kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);

    /* s->sgi_pending -> GICD_SPENDSGIRn, after clearing via CPENDSGIRn */
    kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
    kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* s->cpu_ctlr[cpu] -> GICC_CTLR */
        reg = s->cpu_ctlr[cpu];
        kvm_gicc_access(s, 0x00, cpu, &reg, true);

        /* s->priority_mask[cpu] -> GICC_PMR */
        reg = (s->priority_mask[cpu] & 0xff);
        kvm_gicc_access(s, 0x04, cpu, &reg, true);

        /* s->bpr[cpu] -> GICC_BPR */
        reg = (s->bpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x08, cpu, &reg, true);

        /* s->abpr[cpu] -> GICC_ABPR */
        reg = (s->abpr[cpu] & 0x7);
        kvm_gicc_access(s, 0x1c, cpu, &reg, true);

        /* s->apr[n][cpu] -> GICC_APRn */
        for (i = 0; i < 4; i++) {
            reg = s->apr[i][cpu];
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, true);
        }
    }
}
407
/* Pull the complete GIC state out of the kernel VGIC into QEMU's
 * GICState.  Used as the migration pre_save hook.
 */
static void kvm_arm_gic_get(GICState *s)
{
    uint32_t reg;
    int i;
    int cpu;

    /*****************************************************************
     * Distributor State
     */

    /* GICD_CTLR -> s->ctlr */
    kvm_gicd_access(s, 0x0, 0, &reg, false);
    s->ctlr = reg;

    /* GICD_TYPER -> s->num_irq, s->num_cpu */
    kvm_gicd_access(s, 0x4, 0, &reg, false);
    s->num_irq = ((reg & 0x1f) + 1) * 32;
    s->num_cpu = ((reg & 0xe0) >> 5) + 1;

    if (s->num_irq > GIC_MAXIRQ) {
        fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
                s->num_irq);
        abort();
    }

    /* GICD_IIDR -> discarded (read only for completeness) */
    kvm_gicd_access(s, 0x8, 0, &reg, false);

    /* Clear all per-irq state before repopulating it below */
    for (i = 0; i < s->num_irq; i++) {
        memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
    }

    /* GICD_IGROUPRn -> irq_state[n].group */
    kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);

    /* GICD_ISENABLERn -> irq_state[n].enabled */
    kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);

    /* GICD_ISPENDRn -> pending state */
    kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);

    /* GICD_ISACTIVERn -> irq_state[n].active */
    kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);

    /* GICD_ICFGRn -> irq_state[n].edge_trigger */
    kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);

    /* GICD_IPRIORITYRn -> per-irq priority */
    kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);

    /* GICD_ITARGETSRn -> s->irq_target[irq] */
    kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);

    /* GICD_CPENDSGIRn -> s->sgi_pending */
    kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        /* GICC_CTLR -> s->cpu_ctlr[cpu] */
        kvm_gicc_access(s, 0x00, cpu, &reg, false);
        s->cpu_ctlr[cpu] = reg;

        /* GICC_PMR -> s->priority_mask[cpu] */
        kvm_gicc_access(s, 0x04, cpu, &reg, false);
        s->priority_mask[cpu] = (reg & 0xff);

        /* GICC_BPR -> s->bpr[cpu] */
        kvm_gicc_access(s, 0x08, cpu, &reg, false);
        s->bpr[cpu] = (reg & 0x7);

        /* GICC_ABPR -> s->abpr[cpu] */
        kvm_gicc_access(s, 0x1c, cpu, &reg, false);
        s->abpr[cpu] = (reg & 0x7);

        /* GICC_APRn -> s->apr[n][cpu] */
        for (i = 0; i < 4; i++) {
            kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false);
            s->apr[i][cpu] = reg;
        }
    }
}
494
495static void kvm_arm_gic_reset(DeviceState *dev)
496{
497 GICState *s = ARM_GIC_COMMON(dev);
498 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
499
500 kgc->parent_reset(dev);
501
502 if (kvm_arm_gic_can_save_restore(s)) {
503 kvm_arm_gic_put(s);
504 }
505}
506
/* Realize the kvm-arm-gic device: run the common GIC realize, wire up
 * the GPIO lines to KVM GSIs, create the in-kernel VGIC (preferring
 * the device control API), and register its MMIO regions with KVM.
 */
static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
{
    int i;
    GICState *s = KVM_ARM_GIC(dev);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;
    int ret;

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGIC does not implement the "
                   "security extensions");
        return;
    }

    gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);

    /* Map each external interrupt GPIO line onto its KVM GSI number */
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_irq irq = qdev_get_gpio_in(dev, i);
        kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
    }

    /* Try to create the device via the device control API */
    s->dev_fd = -1;
    ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false);
    if (ret >= 0) {
        s->dev_fd = ret;

        /* Tell the kernel how many IRQs we want, if it supports that */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
            uint32_t numirqs = s->num_irq;
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
                              &numirqs, true);
        }
        /* Ask the kernel to complete VGIC initialization now */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                                  KVM_DEV_ARM_VGIC_CTRL_INIT)) {
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
        }
    } else if (ret != -ENODEV && ret != -ENOTSUP) {
        /* ENODEV/ENOTSUP just mean "no device control API"; anything
         * else is a real failure.
         */
        error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
        return;
    }

    /* Distributor MMIO region */
    kvm_arm_register_device(&s->iomem,
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_DIST,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_DIST,
                            s->dev_fd);

    /* CPU interface MMIO region.  Only cpuiomem[0] is registered: the
     * kernel banks the CPU interface per-vcpu at a single address.
     */
    kvm_arm_register_device(&s->cpuiomem[0],
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_CPU,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_CPU,
                            s->dev_fd);

    /* Without the device control API we cannot read/write VGIC state,
     * so migration must be blocked.
     */
    if (!kvm_arm_gic_can_save_restore(s)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                   "not support vGICv2 migration");
        migrate_add_blocker(s->migration_blocker);
    }
}
581
582static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
583{
584 DeviceClass *dc = DEVICE_CLASS(klass);
585 ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass);
586 KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass);
587
588 agcc->pre_save = kvm_arm_gic_get;
589 agcc->post_load = kvm_arm_gic_put;
590 kgc->parent_realize = dc->realize;
591 kgc->parent_reset = dc->reset;
592 dc->realize = kvm_arm_gic_realize;
593 dc->reset = kvm_arm_gic_reset;
594}
595
/* QOM registration: kvm-arm-gic subclasses the common GIC model,
 * overriding realize/reset to drive the in-kernel VGIC.
 */
static const TypeInfo kvm_arm_gic_info = {
    .name = TYPE_KVM_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = kvm_arm_gic_class_init,
    .class_size = sizeof(KVMARMGICClass),
};

static void kvm_arm_gic_register_types(void)
{
    type_register_static(&kvm_arm_gic_info);
}

type_init(kvm_arm_gic_register_types)
610