1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include "qemu/osdep.h"
23#include "qapi/error.h"
24#include "hw/sysbus.h"
25#include "migration/migration.h"
26#include "sysemu/kvm.h"
27#include "kvm_arm.h"
28#include "gic_internal.h"
29#include "vgic_common.h"
30
31
32
/* Debug logging: define DEBUG_GIC_KVM at build time to enable.
 * Using a const int flag (rather than #ifdef-ing out the call sites)
 * keeps DPRINTF's format string and arguments compile-checked even
 * when debugging is disabled; the dead branch is optimized away. */
#ifdef DEBUG_GIC_KVM
static const int debug_gic_kvm = 1;
#else
static const int debug_gic_kvm = 0;
#endif

#define DPRINTF(fmt, ...) do { \
        if (debug_gic_kvm) { \
            printf("arm_gic: " fmt , ## __VA_ARGS__); \
        } \
    } while (0)
44
#define TYPE_KVM_ARM_GIC "kvm-arm-gic"
/* QOM cast macros for the KVM in-kernel GIC type */
#define KVM_ARM_GIC(obj) \
     OBJECT_CHECK(GICState, (obj), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_CLASS(klass) \
     OBJECT_CLASS_CHECK(KVMARMGICClass, (klass), TYPE_KVM_ARM_GIC)
#define KVM_ARM_GIC_GET_CLASS(obj) \
     OBJECT_GET_CLASS(KVMARMGICClass, (obj), TYPE_KVM_ARM_GIC)

typedef struct KVMARMGICClass {
    ARMGICCommonClass parent_class;
    /* Parent-class hooks saved by class_init so our realize/reset
     * overrides can chain up to them. */
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICClass;
58
59void kvm_arm_gic_set_irq(uint32_t num_irq, int irq, int level)
60{
61
62
63
64
65
66
67
68
69
70 int kvm_irq, irqtype, cpu;
71
72 if (irq < (num_irq - GIC_INTERNAL)) {
73
74
75
76
77 irqtype = KVM_ARM_IRQ_TYPE_SPI;
78 cpu = 0;
79 irq += GIC_INTERNAL;
80 } else {
81
82 irqtype = KVM_ARM_IRQ_TYPE_PPI;
83 irq -= (num_irq - GIC_INTERNAL);
84 cpu = irq / GIC_INTERNAL;
85 irq %= GIC_INTERNAL;
86 }
87 kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT)
88 | (cpu << KVM_ARM_IRQ_VCPU_SHIFT) | irq;
89
90 kvm_set_irq(kvm_state, kvm_irq, !!level);
91}
92
93static void kvm_arm_gicv2_set_irq(void *opaque, int irq, int level)
94{
95 GICState *s = (GICState *)opaque;
96
97 kvm_arm_gic_set_irq(s->num_irq, irq, level);
98}
99
100static bool kvm_arm_gic_can_save_restore(GICState *s)
101{
102 return s->dev_fd >= 0;
103}
104
/* Build the 64-bit device attribute value for VGIC register access:
 * the target CPU id and the register byte offset packed into the
 * KVM_DEV_ARM_VGIC_{CPUID,OFFSET} fields. */
#define KVM_VGIC_ATTR(offset, cpu) \
    ((((uint64_t)(cpu) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) & \
      KVM_DEV_ARM_VGIC_CPUID_MASK) | \
     (((uint64_t)(offset) << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) & \
      KVM_DEV_ARM_VGIC_OFFSET_MASK))
110
/* Read (write == false) or write one 32-bit GIC distributor register
 * at byte offset 'offset', banked for CPU 'cpu', through the kernel
 * device control API. */
static void kvm_gicd_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write);
}
117
/* Read (write == false) or write one 32-bit GIC CPU-interface register
 * at byte offset 'offset' for CPU 'cpu', through the kernel device
 * control API. */
static void kvm_gicc_access(GICState *s, int offset, int cpu,
                            uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
                      KVM_VGIC_ATTR(offset, cpu), val, write);
}
124
/* Iterate over the 32-bit words of a distributor register group:
 * each word packs 32 / _field_width per-IRQ fields, so this visits
 * (_max_irq / (32 / _field_width)) words. */
#define for_each_irq_reg(_ctr, _max_irq, _field_width) \
    for (_ctr = 0; _ctr < ((_max_irq) / (32 / (_field_width))); _ctr++)

/* Translate a single IRQ's field between the kernel register encoding
 * and QEMU's internal GICState representation; the direction is chosen
 * by 'to_kernel' (true: QEMU state -> *field; false: *field -> state). */
typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
                                  uint32_t *field, bool to_kernel);
134
135
136
137
138static void translate_clear(GICState *s, int irq, int cpu,
139 uint32_t *field, bool to_kernel)
140{
141 if (to_kernel) {
142 *field = ~0;
143 } else {
144
145 abort();
146 }
147}
148
149static void translate_group(GICState *s, int irq, int cpu,
150 uint32_t *field, bool to_kernel)
151{
152 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
153
154 if (to_kernel) {
155 *field = GIC_TEST_GROUP(irq, cm);
156 } else {
157 if (*field & 1) {
158 GIC_SET_GROUP(irq, cm);
159 }
160 }
161}
162
163static void translate_enabled(GICState *s, int irq, int cpu,
164 uint32_t *field, bool to_kernel)
165{
166 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
167
168 if (to_kernel) {
169 *field = GIC_TEST_ENABLED(irq, cm);
170 } else {
171 if (*field & 1) {
172 GIC_SET_ENABLED(irq, cm);
173 }
174 }
175}
176
177static void translate_pending(GICState *s, int irq, int cpu,
178 uint32_t *field, bool to_kernel)
179{
180 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
181
182 if (to_kernel) {
183 *field = gic_test_pending(s, irq, cm);
184 } else {
185 if (*field & 1) {
186 GIC_SET_PENDING(irq, cm);
187
188 }
189 }
190}
191
192static void translate_active(GICState *s, int irq, int cpu,
193 uint32_t *field, bool to_kernel)
194{
195 int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
196
197 if (to_kernel) {
198 *field = GIC_TEST_ACTIVE(irq, cm);
199 } else {
200 if (*field & 1) {
201 GIC_SET_ACTIVE(irq, cm);
202 }
203 }
204}
205
206static void translate_trigger(GICState *s, int irq, int cpu,
207 uint32_t *field, bool to_kernel)
208{
209 if (to_kernel) {
210 *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
211 } else {
212 if (*field & 0x2) {
213 GIC_SET_EDGE_TRIGGER(irq);
214 }
215 }
216}
217
218static void translate_priority(GICState *s, int irq, int cpu,
219 uint32_t *field, bool to_kernel)
220{
221 if (to_kernel) {
222 *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
223 } else {
224 gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
225 }
226}
227
228static void translate_targets(GICState *s, int irq, int cpu,
229 uint32_t *field, bool to_kernel)
230{
231 if (to_kernel) {
232 *field = s->irq_target[irq] & 0xff;
233 } else {
234 s->irq_target[irq] = *field & 0xff;
235 }
236}
237
238static void translate_sgisource(GICState *s, int irq, int cpu,
239 uint32_t *field, bool to_kernel)
240{
241 if (to_kernel) {
242 *field = s->sgi_pending[irq][cpu] & 0xff;
243 } else {
244 s->sgi_pending[irq][cpu] = *field & 0xff;
245 }
246}
247
248
249static void kvm_dist_get(GICState *s, uint32_t offset, int width,
250 int maxirq, vgic_translate_fn translate_fn)
251{
252 uint32_t reg;
253 int i;
254 int j;
255 int irq;
256 int cpu;
257 int regsz = 32 / width;
258 uint32_t field;
259
260 for_each_irq_reg(i, maxirq, width) {
261 irq = i * regsz;
262 cpu = 0;
263 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
264 kvm_gicd_access(s, offset, cpu, ®, false);
265 for (j = 0; j < regsz; j++) {
266 field = extract32(reg, j * width, width);
267 translate_fn(s, irq + j, cpu, &field, false);
268 }
269
270 cpu++;
271 }
272 offset += 4;
273 }
274}
275
276
277static void kvm_dist_put(GICState *s, uint32_t offset, int width,
278 int maxirq, vgic_translate_fn translate_fn)
279{
280 uint32_t reg;
281 int i;
282 int j;
283 int irq;
284 int cpu;
285 int regsz = 32 / width;
286 uint32_t field;
287
288 for_each_irq_reg(i, maxirq, width) {
289 irq = i * regsz;
290 cpu = 0;
291 while ((cpu < s->num_cpu && irq < GIC_INTERNAL) || cpu == 0) {
292 reg = 0;
293 for (j = 0; j < regsz; j++) {
294 translate_fn(s, irq + j, cpu, &field, true);
295 reg = deposit32(reg, j * width, width, field);
296 }
297 kvm_gicd_access(s, offset, cpu, ®, true);
298
299 cpu++;
300 }
301 offset += 4;
302 }
303}
304
305static void kvm_arm_gic_put(GICState *s)
306{
307 uint32_t reg;
308 int i;
309 int cpu;
310 int num_cpu;
311 int num_irq;
312
313
314
315
316
317
318
319
320
321
322 reg = s->ctlr;
323 kvm_gicd_access(s, 0x0, 0, ®, true);
324
325
326 kvm_gicd_access(s, 0x4, 0, ®, false);
327 num_irq = ((reg & 0x1f) + 1) * 32;
328 num_cpu = ((reg & 0xe0) >> 5) + 1;
329
330 if (num_irq < s->num_irq) {
331 fprintf(stderr, "Restoring %u IRQs, but kernel supports max %d\n",
332 s->num_irq, num_irq);
333 abort();
334 } else if (num_cpu != s->num_cpu) {
335 fprintf(stderr, "Restoring %u CPU interfaces, kernel only has %d\n",
336 s->num_cpu, num_cpu);
337
338 abort();
339 }
340
341
342
343
344 kvm_dist_put(s, 0x180, 1, s->num_irq, translate_clear);
345 kvm_dist_put(s, 0x100, 1, s->num_irq, translate_enabled);
346
347
348 kvm_dist_put(s, 0x80, 1, s->num_irq, translate_group);
349
350
351
352
353 kvm_dist_put(s, 0x800, 8, s->num_irq, translate_targets);
354
355
356
357
358 kvm_dist_put(s, 0xc00, 2, s->num_irq, translate_trigger);
359
360
361 kvm_dist_put(s, 0x280, 1, s->num_irq, translate_clear);
362 kvm_dist_put(s, 0x200, 1, s->num_irq, translate_pending);
363
364
365 kvm_dist_put(s, 0x380, 1, s->num_irq, translate_clear);
366 kvm_dist_put(s, 0x300, 1, s->num_irq, translate_active);
367
368
369
370 kvm_dist_put(s, 0x400, 8, s->num_irq, translate_priority);
371
372
373 kvm_dist_put(s, 0xf10, 8, GIC_NR_SGIS, translate_clear);
374 kvm_dist_put(s, 0xf20, 8, GIC_NR_SGIS, translate_sgisource);
375
376
377
378
379
380
381 for (cpu = 0; cpu < s->num_cpu; cpu++) {
382
383 reg = s->cpu_ctlr[cpu];
384 kvm_gicc_access(s, 0x00, cpu, ®, true);
385
386
387 reg = (s->priority_mask[cpu] & 0xff);
388 kvm_gicc_access(s, 0x04, cpu, ®, true);
389
390
391 reg = (s->bpr[cpu] & 0x7);
392 kvm_gicc_access(s, 0x08, cpu, ®, true);
393
394
395 reg = (s->abpr[cpu] & 0x7);
396 kvm_gicc_access(s, 0x1c, cpu, ®, true);
397
398
399 for (i = 0; i < 4; i++) {
400 reg = s->apr[i][cpu];
401 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, true);
402 }
403 }
404}
405
406static void kvm_arm_gic_get(GICState *s)
407{
408 uint32_t reg;
409 int i;
410 int cpu;
411
412
413
414
415
416
417 kvm_gicd_access(s, 0x0, 0, ®, false);
418 s->ctlr = reg;
419
420
421 kvm_gicd_access(s, 0x4, 0, ®, false);
422 s->num_irq = ((reg & 0x1f) + 1) * 32;
423 s->num_cpu = ((reg & 0xe0) >> 5) + 1;
424
425 if (s->num_irq > GIC_MAXIRQ) {
426 fprintf(stderr, "Too many IRQs reported from the kernel: %d\n",
427 s->num_irq);
428 abort();
429 }
430
431
432 kvm_gicd_access(s, 0x8, 0, ®, false);
433
434
435 for (i = 0; i < s->num_irq; i++) {
436 memset(&s->irq_state[i], 0, sizeof(s->irq_state[0]));
437 }
438
439
440 kvm_dist_get(s, 0x80, 1, s->num_irq, translate_group);
441
442
443 kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled);
444
445
446 kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending);
447
448
449 kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active);
450
451
452 kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger);
453
454
455 kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority);
456
457
458 kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets);
459
460
461 kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource);
462
463
464
465
466
467
468 for (cpu = 0; cpu < s->num_cpu; cpu++) {
469
470 kvm_gicc_access(s, 0x00, cpu, ®, false);
471 s->cpu_ctlr[cpu] = reg;
472
473
474 kvm_gicc_access(s, 0x04, cpu, ®, false);
475 s->priority_mask[cpu] = (reg & 0xff);
476
477
478 kvm_gicc_access(s, 0x08, cpu, ®, false);
479 s->bpr[cpu] = (reg & 0x7);
480
481
482 kvm_gicc_access(s, 0x1c, cpu, ®, false);
483 s->abpr[cpu] = (reg & 0x7);
484
485
486 for (i = 0; i < 4; i++) {
487 kvm_gicc_access(s, 0xd0 + i * 4, cpu, ®, false);
488 s->apr[i][cpu] = reg;
489 }
490 }
491}
492
493static void kvm_arm_gic_reset(DeviceState *dev)
494{
495 GICState *s = ARM_GIC_COMMON(dev);
496 KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
497
498 kgc->parent_reset(dev);
499
500 if (kvm_arm_gic_can_save_restore(s)) {
501 kvm_arm_gic_put(s);
502 }
503}
504
/* Realize: chain up to the common GIC realize, then create and wire up
 * the in-kernel VGICv2 device.  The order of operations here matters:
 * GSI routing is set up before device creation, and the device addresses
 * are registered last. */
static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
{
    int i;
    GICState *s = KVM_ARM_GIC(dev);
    KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
    Error *local_err = NULL;
    int ret;

    /* Let the common GIC code validate properties and init base state */
    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGIC does not implement the "
                   "security extensions");
        return;
    }

    gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);

    /* Map our GPIO input lines (the SPIs) 1:1 onto KVM GSI numbers */
    for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
        qemu_irq irq = qdev_get_gpio_in(dev, i);
        kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, i);
    }

    /* Try to create the VGIC via the KVM device control API; on failure
     * with ENODEV/ENOTSUP we fall through with dev_fd == -1 (older
     * kernels without the device API), anything else is fatal. */
    s->dev_fd = -1;
    ret = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V2, false);
    if (ret >= 0) {
        s->dev_fd = ret;

        /* Newer kernels let us configure the number of IRQs */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0)) {
            uint32_t numirqs = s->num_irq;
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
                              &numirqs, true);
        }
        /* Ask the kernel to complete VGIC initialization, if supported */
        if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                                  KVM_DEV_ARM_VGIC_CTRL_INIT)) {
            kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
        }
    } else if (ret != -ENODEV && ret != -ENOTSUP) {
        error_setg_errno(errp, -ret, "error creating in-kernel VGIC");
        return;
    }

    /* Tell the kernel where the guest sees the distributor ... */
    kvm_arm_register_device(&s->iomem,
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_DIST,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_DIST,
                            s->dev_fd);
    /* ... and the CPU interface (region 0 only) */
    kvm_arm_register_device(&s->cpuiomem[0],
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_CPU,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_CPU,
                            s->dev_fd);

    /* Without a device fd we cannot read/write VGIC state, so migration
     * must be blocked on such kernels. */
    if (!kvm_arm_gic_can_save_restore(s)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv2 migration");
        migrate_add_blocker(s->migration_blocker);
    }
}
579
580static void kvm_arm_gic_class_init(ObjectClass *klass, void *data)
581{
582 DeviceClass *dc = DEVICE_CLASS(klass);
583 ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass);
584 KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass);
585
586 agcc->pre_save = kvm_arm_gic_get;
587 agcc->post_load = kvm_arm_gic_put;
588 kgc->parent_realize = dc->realize;
589 kgc->parent_reset = dc->reset;
590 dc->realize = kvm_arm_gic_realize;
591 dc->reset = kvm_arm_gic_reset;
592}
593
/* QOM type registration for the KVM in-kernel GICv2 model; it derives
 * from the common (TCG/KVM-shared) GIC type and reuses its instance
 * struct, extending only the class struct. */
static const TypeInfo kvm_arm_gic_info = {
    .name = TYPE_KVM_ARM_GIC,
    .parent = TYPE_ARM_GIC_COMMON,
    .instance_size = sizeof(GICState),
    .class_init = kvm_arm_gic_class_init,
    .class_size = sizeof(KVMARMGICClass),
};

static void kvm_arm_gic_register_types(void)
{
    type_register_static(&kvm_arm_gic_info);
}

type_init(kvm_arm_gic_register_types)
608