1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/module.h"
13#include "qapi/error.h"
14#include "qemu/error-report.h"
15#include "target/ppc/cpu.h"
16#include "sysemu/cpus.h"
17#include "monitor/monitor.h"
18#include "hw/ppc/fdt.h"
19#include "hw/ppc/spapr.h"
20#include "hw/ppc/spapr_cpu_core.h"
21#include "hw/ppc/spapr_xive.h"
22#include "hw/ppc/xive.h"
23#include "hw/ppc/xive_regs.h"
24
25
26
27
28
29#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
30#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
31
32
33
34
35
36
37
38
39
40
41
42#define SPAPR_XIVE_NVT_BASE 0x400
43
44
45
46
47static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
48{
49 return nvt_idx - SPAPR_XIVE_NVT_BASE;
50}
51
52static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
53 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
54{
55 assert(cpu);
56
57 if (out_nvt_blk) {
58 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
59 }
60
61 if (out_nvt_blk) {
62 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
63 }
64}
65
66static int spapr_xive_target_to_nvt(uint32_t target,
67 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
68{
69 PowerPCCPU *cpu = spapr_find_cpu(target);
70
71 if (!cpu) {
72 return -1;
73 }
74
75 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
76 return 0;
77}
78
79
80
81
82
83int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
84 uint32_t *out_server, uint8_t *out_prio)
85{
86
87 assert(end_blk == SPAPR_XIVE_BLOCK_ID);
88
89 if (out_server) {
90 *out_server = end_idx >> 3;
91 }
92
93 if (out_prio) {
94 *out_prio = end_idx & 0x7;
95 }
96 return 0;
97}
98
99static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
100 uint8_t *out_end_blk, uint32_t *out_end_idx)
101{
102 assert(cpu);
103
104 if (out_end_blk) {
105 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
106 }
107
108 if (out_end_idx) {
109 *out_end_idx = (cpu->vcpu_id << 3) + prio;
110 }
111}
112
113static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
114 uint8_t *out_end_blk, uint32_t *out_end_idx)
115{
116 PowerPCCPU *cpu = spapr_find_cpu(target);
117
118 if (!cpu) {
119 return -1;
120 }
121
122 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
123 return 0;
124}
125
126
127
128
129
/*
 * Print one END on the monitor: target CPU/priority, queue index,
 * entry count, queue base address, generation bit, then the queue
 * contents.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    /* 2^(qsize + 12) bytes of 4-byte entries -> 2^(qsize + 10) entries */
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    /* Block 0: a single block is in use, see spapr_xive_nvt_to_target() */
    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}
148
/*
 * Dump the whole XIVE state on the monitor: one line per valid LISN
 * with its PQ bits, EISN data and, when unmasked, the END it routes to.
 */
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    /* With an in-kernel XIVE, pull the state back from KVM first */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Only claimed interrupts are displayed */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
196
/*
 * Enable or disable the guest-visible XIVE MMIO regions: the source
 * ESB pages and the TIMA.
 */
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* The END ESB region is never exposed to the guest: it is kept
     * disabled regardless of @enable. */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}
205
206
207
208
209
210
/*
 * Program the OS CAM line of a thread interrupt context with the NVT
 * of the vCPU owning @tctx: the CAM value (valid bit TM_QW1W2_VO set)
 * is stored big-endian in word 2 of the TM_QW1_OS ring.
 */
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
{
    uint8_t  nvt_blk;
    uint32_t nvt_idx;
    uint32_t nvt_cam;

    spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);

    nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
}
222
/* Reset an END to its power-on state. */
static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* Park the escalation (ESe) and notification (ESn) ESBs in Q=1 */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}
230
/*
 * Machine reset handler (registered with qemu_register_reset).
 * Valid EAS entries stay claimed across reset and are simply
 * re-masked; ENDs are reset to their power-on state.
 */
static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * Keep claimed LISNs valid so the machine's interrupt allocation
     * survives reset, but mask them until the guest reconfigures the
     * routing.
     */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}
256
/* QOM instance_init: create the child source objects. */
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);

    /* No KVM device fd yet; set by the KVM backend when in use */
    xive->fd = -1;
}
271
272static void spapr_xive_realize(DeviceState *dev, Error **errp)
273{
274 SpaprXive *xive = SPAPR_XIVE(dev);
275 XiveSource *xsrc = &xive->source;
276 XiveENDSource *end_xsrc = &xive->end_source;
277 Error *local_err = NULL;
278
279 if (!xive->nr_irqs) {
280 error_setg(errp, "Number of interrupt needs to be greater 0");
281 return;
282 }
283
284 if (!xive->nr_ends) {
285 error_setg(errp, "Number of interrupt needs to be greater 0");
286 return;
287 }
288
289
290
291
292 object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
293 &error_fatal);
294 object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
295 &error_fatal);
296 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
297 if (local_err) {
298 error_propagate(errp, local_err);
299 return;
300 }
301 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
302
303
304
305
306 object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
307 &error_fatal);
308 object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
309 &error_fatal);
310 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
311 if (local_err) {
312 error_propagate(errp, local_err);
313 return;
314 }
315 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
316
317
318 xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
319
320
321
322
323 xive->eat = g_new0(XiveEAS, xive->nr_irqs);
324 xive->endt = g_new0(XiveEND, xive->nr_ends);
325
326 xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
327 xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
328
329 qemu_register_reset(spapr_xive_reset, dev);
330
331
332 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
333 "xive.tima", 4ull << TM_SHIFT);
334 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
335
336
337
338
339
340 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
341 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
342 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
343}
344
345static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
346 uint32_t eas_idx, XiveEAS *eas)
347{
348 SpaprXive *xive = SPAPR_XIVE(xrtr);
349
350 if (eas_idx >= xive->nr_irqs) {
351 return -1;
352 }
353
354 *eas = xive->eat[eas_idx];
355 return 0;
356}
357
358static int spapr_xive_get_end(XiveRouter *xrtr,
359 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
360{
361 SpaprXive *xive = SPAPR_XIVE(xrtr);
362
363 if (end_idx >= xive->nr_ends) {
364 return -1;
365 }
366
367 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
368 return 0;
369}
370
371static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
372 uint32_t end_idx, XiveEND *end,
373 uint8_t word_number)
374{
375 SpaprXive *xive = SPAPR_XIVE(xrtr);
376
377 if (end_idx >= xive->nr_ends) {
378 return -1;
379 }
380
381 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
382 return 0;
383}
384
/*
 * XiveRouter hook: sPAPR does not maintain an NVT table. Report a
 * valid NVT when the identifier maps to an existing vCPU, -1
 * otherwise.
 */
static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* No vCPU behind this NVT identifier */
        return -1;
    }

    /*
     * Only the valid bit is meaningful here; the rest of the NVT is
     * left zeroed since no backing table exists.
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}
403
/*
 * XiveRouter hook: writing back an NVT is not supported on sPAPR,
 * which has no NVT table (see spapr_xive_get_nvt). Reaching this is
 * a programming error.
 */
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    g_assert_not_reached();
}
415
416static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
417{
418 PowerPCCPU *cpu = POWERPC_CPU(cs);
419
420 return spapr_cpu_state(cpu)->tctx;
421}
422
/* Migration stream layout of one END: the eight raw 32-bit words. */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
439
/* Migration stream layout of one EAS: a single raw 64-bit word. */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
449
450static int vmstate_spapr_xive_pre_save(void *opaque)
451{
452 if (kvm_irqchip_in_kernel()) {
453 return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
454 }
455
456 return 0;
457}
458
459
460
461
462
463int spapr_xive_post_load(SpaprXive *xive, int version_id)
464{
465 if (kvm_irqchip_in_kernel()) {
466 return kvmppc_xive_post_load(xive, version_id);
467 }
468
469 return 0;
470}
471
/*
 * Top-level migration description: the EAS and END tables, keyed on a
 * matching nr_irqs.
 * NOTE(review): .post_load is deliberately NULL here — restore appears
 * to be driven through spapr_xive_post_load() by an external caller;
 * confirm against the machine-level IRQ backend.
 */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled outside of the vmstate, see above */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                     vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
487
/* Device properties; the MMIO bases default to the sPAPR layout. */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};
495
/* QOM class_init: device metadata and XiveRouter hook wiring. */
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    dc->props   = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd    = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;
}
513
/* QOM type registration data for the sPAPR XIVE device. */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
};
521
/* Register the QOM type (invoked via type_init below). */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}
526
527type_init(spapr_xive_register_types)
528
/*
 * Claim interrupt number @lisn for the machine: mark the EAS valid and
 * optionally flag the source as level-sensitive. With an in-kernel
 * XIVE, also reset the KVM-side source. Returns false on bad LISN or
 * KVM failure.
 */
bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_source_reset_one(xsrc, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return false;
        }
    }

    return true;
}
554
555bool spapr_xive_irq_free(SpaprXive *xive, uint32_t lisn)
556{
557 if (lisn >= xive->nr_irqs) {
558 return false;
559 }
560
561 xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
562 return true;
563}
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
/*
 * Priorities 0-6 are usable by the guest; priority 7 and anything
 * above the 3-bit range are reserved.
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    return priority > 6;
}
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60)
628#define SPAPR_XIVE_SRC_LSI PPC_BIT(61)
629#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62)
630
631#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63)
632
/*
 * H_INT_GET_SOURCE_INFO hcall. Input: args[0] flags (must be 0),
 * args[1] LISN. Output: args[0] source flags, args[1] EOI page
 * address, args[2] trigger page address, args[3] ESB page size
 * (log2).
 */
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * Build the source flags: with a single ESB page, writes to the
     * EOI page also trigger; report store EOI support when the source
     * has it.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * LSIs are directed through the H_INT_ESB hcall rather than MMIO.
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    /* EOI page: not exposed when H_INT_ESB must be used */
    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    /* Trigger page: only meaningful in 2-page mode and without H_INT_ESB */
    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* In 2-page mode, each of the two pages is half of esb_shift */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
741#define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
742
/*
 * H_INT_SET_SOURCE_CONFIG hcall: route a LISN to a (target, priority)
 * END and optionally set its EISN. Input: args[0] flags (SET_EISN,
 * MASK), args[1] LISN, args[2] target, args[3] priority, args[4] EISN.
 */
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags    = args[0];
    target_ulong lisn     = args[1];
    target_ulong target   = args[2];
    target_ulong priority = args[3];
    target_ulong eisn     = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Priority 0xff masks the interrupt, dropping any prior routing */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that the target is an existing vCPU and derive the END
     * identifier from (target, priority).
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    /* Mirror the configuration into the in-kernel XIVE, if any */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
/*
 * H_INT_GET_SOURCE_CONFIG hcall: report a LISN's routing. Input:
 * args[0] flags (must be 0), args[1] LISN. Output: args[0] target,
 * args[1] priority (0xff when masked), args[2] EISN.
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Look up the END this EAS routes to and recover its NVT target */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
/*
 * H_INT_GET_QUEUE_INFO hcall: report the END ESB address and queue
 * size for a (target, priority) pair. Input: args[0] flags (must be
 * 0), args[1] target, args[2] priority. Output: args[0] END ESB
 * address, args[1] queue size (log2 bytes, 0 when not enqueued).
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /* Reserved priorities (7 and above) cannot be queried */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END identifier */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Each END gets a pair of ESB pages: hence (esb_shift + 1) */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
1008
/*
 * H_INT_SET_QUEUE_CONFIG hcall: configure (or reset) the event queue
 * of a (target, priority) END. Input: args[0] flags
 * (ALWAYS_NOTIFY), args[1] target, args[2] priority, args[3] EQ guest
 * address, args[4] EQ size (log2 bytes: 12/16/21/24, or 0 to reset).
 */
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /* Reserved priorities (7 and above) cannot be configured */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END identifier */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    /* Work on a local copy; committed only after all checks pass */
    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        /* The EQ must be naturally aligned on its size */
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* Size 0 resets the END and disables queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    /*
     * Sanity-map the EQ page to verify the guest address is backed by
     * RAM. NOTE(review): qsize is always non-zero on this path (0 took
     * the goto above), so this 'if' is effectively unconditional.
     */
    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* Target was already validated above, so this cannot fail */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /* Route the END to the target's NVT with the requested priority */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /* Start with generation bit set and queue index 0, then validate */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

out:
    /* Mirror the configuration into the in-kernel XIVE, if any */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Commit the new END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
1177
/*
 * H_INT_GET_QUEUE_CONFIG hcall: report the queue configuration of a
 * (target, priority) END. Input: args[0] flags (DEBUG), args[1]
 * target, args[2] priority. Output: args[0] flags (+generation when
 * DEBUG), args[1] EQ address, args[2] EQ size, args[3] queue index
 * (DEBUG only).
 */
static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /* Reserved priorities (7 and above) cannot be queried */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END identifier */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /* Refresh the END state from the in-kernel XIVE, if any */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Debug mode additionally reports the generation bit and index */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the generation bit into bit 62 of the returned flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Current queue index */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
/*
 * H_INT_SET_OS_REPORTING_LINE hcall: not implemented. Returns
 * H_FUNCTION unconditionally (after the CAS-negotiation check, which
 * also returns H_FUNCTION).
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* OS reporting lines are not supported by this model */
    return H_FUNCTION;
}
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
/*
 * H_INT_GET_OS_REPORTING_LINE hcall: not implemented. Returns
 * H_FUNCTION unconditionally (after the CAS-negotiation check, which
 * also returns H_FUNCTION).
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* OS reporting lines are not supported by this model */
    return H_FUNCTION;
}
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
1357
/*
 * H_INT_ESB hcall: perform an ESB load or store on behalf of the
 * guest (used for LSIs, see h_int_get_source_info). Input: args[0]
 * flags (STORE), args[1] LISN, args[2] offset within the ESB page,
 * args[3] data for stores. Output: args[0] value read (-1 on store).
 */
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* The offset must lie within one ESB page */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        /* In-kernel XIVE: forward the access to KVM */
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
/*
 * H_INT_SYNC hcall: synchronize delivery for a LISN. Input: args[0]
 * flags (must be 0), args[1] LISN.
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * Nothing to do for the emulated path: event delivery is
     * synchronous. With an in-kernel XIVE, ask KVM to sync the source.
     */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
/*
 * H_INT_RESET hcall: reset the whole XIVE device. Input: args[0]
 * flags (must be 0).
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags   = args[0];

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_reset(DEVICE(xive));

    /* Also reset the in-kernel XIVE, if any */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
1527
/* Register all XIVE exploitation-mode hypercalls with the machine. */
void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}
1544
/*
 * Populate the device tree node advertising the XIVE controller to
 * the guest: TIMA addresses, LISN range, supported EQ sizes, reserved
 * priorities and phandles.
 */
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    SpaprXive *xive = spapr->xive;
    int node;
    uint64_t timas[2 * 2];
    /* LISNs are allocated from 0 up to the number of servers */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };
    /*
     * Single supported EQ size: 64K (2^16 bytes), matching the
     * 'ibm,xive-eq-sizes' encoding.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16),
    };
    /*
     * Reserved priority range: priority 7 and the 0xf8 count of
     * following values, see spapr_xive_priority_is_reserved().
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),
        cpu_to_be32(0xf8),
    };

    /* "reg": user page and OS page of the TIMA, address/size pairs */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* Interrupt controller marker and 2-cell interrupt specifier */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* Both 'phandle' spellings for client compatibility */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The reserved-priorities property belongs to the root node, not
     * the controller node.
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
1606