1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qapi/error.h"
13#include "qemu/error-report.h"
14#include "target/ppc/cpu.h"
15#include "sysemu/cpus.h"
16#include "monitor/monitor.h"
17#include "hw/ppc/fdt.h"
18#include "hw/ppc/spapr.h"
19#include "hw/ppc/spapr_cpu_core.h"
20#include "hw/ppc/spapr_xive.h"
21#include "hw/ppc/xive.h"
22#include "hw/ppc/xive_regs.h"
23
24
25
26
27
28#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
29#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
30
31
32
33
34
35
36
37
38
39
40
41#define SPAPR_XIVE_NVT_BASE 0x400
42
43
44
45
46
47
48#define SPAPR_XIVE_BLOCK_ID 0x0
49
50
51
52
53static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
54{
55 return nvt_idx - SPAPR_XIVE_NVT_BASE;
56}
57
58static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
59 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
60{
61 assert(cpu);
62
63 if (out_nvt_blk) {
64 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
65 }
66
67 if (out_nvt_blk) {
68 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
69 }
70}
71
72static int spapr_xive_target_to_nvt(uint32_t target,
73 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
74{
75 PowerPCCPU *cpu = spapr_find_cpu(target);
76
77 if (!cpu) {
78 return -1;
79 }
80
81 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
82 return 0;
83}
84
85
86
87
88
89static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
90 uint8_t *out_end_blk, uint32_t *out_end_idx)
91{
92 assert(cpu);
93
94 if (out_end_blk) {
95 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
96 }
97
98 if (out_end_idx) {
99 *out_end_idx = (cpu->vcpu_id << 3) + prio;
100 }
101}
102
103static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
104 uint8_t *out_end_blk, uint32_t *out_end_idx)
105{
106 PowerPCCPU *cpu = spapr_find_cpu(target);
107
108 if (!cpu) {
109 return -1;
110 }
111
112 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
113 return 0;
114}
115
116
117
118
119
/*
 * Print one END's state on the monitor: target CPU/priority, queue
 * producer index, queue capacity, generation bit, then the queue
 * entries themselves (via xive_end_queue_pic_print_info).
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    /* EQ entries are 4 bytes: a 2^(qsize + 12) byte queue holds 2^(qsize + 10) */
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qgen);

    /* Dump up to 6 queue entries around the producer index */
    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}
137
/*
 * Monitor dump of the interrupt controller state: one line per valid
 * EAS showing the source type (LSI/MSI), ESB PQ bits, assertion state,
 * mask state, END data, and the routed END when the EAS is unmasked.
 */
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    monitor_printf(mon, " LSIN PQ EISN CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Only claimed sources are of interest */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
175
176static void spapr_xive_map_mmio(SpaprXive *xive)
177{
178 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
179 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
180 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
181}
182
/*
 * Toggle guest visibility of the source ESB and TIMA MMIO regions.
 */
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* The END ESB region stays disabled regardless of 'enable' —
     * presumably it is never exposed to the sPAPR guest; confirm. */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}
191
192
193
194
195
196
/*
 * Push the vCPU's NVT identifier into the OS CAM line of its thread
 * interrupt context, with the VO (valid) bit set. This mirrors what
 * the hypervisor does when dispatching a virtual processor on a HW
 * thread.
 */
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
{
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    uint32_t nvt_cam;

    spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);

    /* CAM word is stored big-endian in the TIMA register file */
    nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
}
208
209static void spapr_xive_end_reset(XiveEND *end)
210{
211 memset(end, 0, sizeof(*end));
212
213
214 end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
215}
216
217static void spapr_xive_reset(void *dev)
218{
219 SpaprXive *xive = SPAPR_XIVE(dev);
220 int i;
221
222
223
224
225
226
227
228 for (i = 0; i < xive->nr_irqs; i++) {
229 XiveEAS *eas = &xive->eat[i];
230 if (xive_eas_is_valid(eas)) {
231 eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
232 } else {
233 eas->w = 0;
234 }
235 }
236
237
238 for (i = 0; i < xive->nr_ends; i++) {
239 spapr_xive_end_reset(&xive->endt[i]);
240 }
241}
242
/*
 * QOM instance init: embed the IRQ ESB source and the END ESB source
 * as child objects. They are configured and realized later, in
 * spapr_xive_realize().
 */
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    /* Interrupt source (IPIs and virtual device interrupts) */
    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    /* END source (event queue ESBs) */
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
254
255static void spapr_xive_realize(DeviceState *dev, Error **errp)
256{
257 SpaprXive *xive = SPAPR_XIVE(dev);
258 XiveSource *xsrc = &xive->source;
259 XiveENDSource *end_xsrc = &xive->end_source;
260 Error *local_err = NULL;
261
262 if (!xive->nr_irqs) {
263 error_setg(errp, "Number of interrupt needs to be greater 0");
264 return;
265 }
266
267 if (!xive->nr_ends) {
268 error_setg(errp, "Number of interrupt needs to be greater 0");
269 return;
270 }
271
272
273
274
275 object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
276 &error_fatal);
277 object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
278 &error_fatal);
279 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
280 if (local_err) {
281 error_propagate(errp, local_err);
282 return;
283 }
284
285
286
287
288 object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
289 &error_fatal);
290 object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
291 &error_fatal);
292 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
293 if (local_err) {
294 error_propagate(errp, local_err);
295 return;
296 }
297
298
299 xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
300
301
302
303
304 xive->eat = g_new0(XiveEAS, xive->nr_irqs);
305 xive->endt = g_new0(XiveEND, xive->nr_ends);
306
307
308 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
309 "xive.tima", 4ull << TM_SHIFT);
310
311
312 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
313 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
314 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
315
316
317 spapr_xive_map_mmio(xive);
318
319 xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
320 xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
321
322 qemu_register_reset(spapr_xive_reset, dev);
323}
324
325static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
326 uint32_t eas_idx, XiveEAS *eas)
327{
328 SpaprXive *xive = SPAPR_XIVE(xrtr);
329
330 if (eas_idx >= xive->nr_irqs) {
331 return -1;
332 }
333
334 *eas = xive->eat[eas_idx];
335 return 0;
336}
337
338static int spapr_xive_get_end(XiveRouter *xrtr,
339 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
340{
341 SpaprXive *xive = SPAPR_XIVE(xrtr);
342
343 if (end_idx >= xive->nr_ends) {
344 return -1;
345 }
346
347 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
348 return 0;
349}
350
351static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
352 uint32_t end_idx, XiveEND *end,
353 uint8_t word_number)
354{
355 SpaprXive *xive = SPAPR_XIVE(xrtr);
356
357 if (end_idx >= xive->nr_ends) {
358 return -1;
359 }
360
361 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
362 return 0;
363}
364
/*
 * XiveRouter callback: the sPAPR machine keeps no NVT table in memory;
 * NVT identifiers map directly onto vCPUs. Synthesize a minimal valid
 * NVT descriptor when the backing vCPU exists.
 */
static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* No vCPU backs this NVT index: report it as invalid */
        return -1;
    }

    /*
     * Only the VALID bit is populated; the rest of the descriptor is
     * left zeroed since there is no table state to return.
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}
383
/*
 * XiveRouter callback: there is no NVT table to update on sPAPR (see
 * spapr_xive_get_nvt), so the router must never attempt a write-back.
 */
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    g_assert_not_reached();
}
395
396static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
397{
398 PowerPCCPU *cpu = POWERPC_CPU(cs);
399
400 return spapr_cpu_state(cpu)->tctx;
401}
402
/* Migration stream layout for one END: the eight raw 32-bit words. */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
419
/* Migration stream layout for one EAS: a single raw 64-bit word. */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
429
/*
 * Top-level migration description: nr_irqs must match on both sides,
 * then the full EAS and END tables are streamed.
 */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                     vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
443
/* User-settable device properties; MMIO bases default to the sPAPR map. */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};
451
/*
 * QOM class init: wire up device properties, realize/vmstate hooks,
 * and the XiveRouter table-access callbacks.
 */
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    dc->props = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd = &vmstate_spapr_xive;

    /* EAS/END/NVT accessors used by the generic XIVE routing code */
    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;
}
469
/* QOM type registration data: SpaprXive derives from XiveRouter. */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
};
477
/* Register the SpaprXive QOM type at module load. */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
484
485bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi)
486{
487 XiveSource *xsrc = &xive->source;
488
489 if (lisn >= xive->nr_irqs) {
490 return false;
491 }
492
493 xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
494 if (lsi) {
495 xive_source_irq_set_lsi(xsrc, lisn);
496 }
497 return true;
498}
499
500bool spapr_xive_irq_free(SpaprXive *xive, uint32_t lisn)
501{
502 if (lisn >= xive->nr_irqs) {
503 return false;
504 }
505
506 xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
507 return true;
508}
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
/*
 * Priorities 0-6 are usable by the guest; priority 7 and above are
 * reserved for the platform (advertised to the guest via the
 * "ibm,plat-res-int-priorities" device-tree property).
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    return priority > 6;
}
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60)
573#define SPAPR_XIVE_SRC_LSI PPC_BIT(61)
574#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62)
575
576#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63)
577
/*
 * H_INT_GET_SOURCE_INFO: report the characteristics and ESB page
 * addresses of an interrupt source.
 *
 * in:  args[0] flags (must be 0), args[1] LISN
 * out: args[0] source flags, args[1] ESB management page address,
 *      args[2] ESB trigger page address, args[3] ESB page size shift
 */
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the same XIVE source object and
     * share the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * LSIs are forced to use the H_INT_ESB hcall: no ESB MMIO address
     * is returned for them below.
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* With 2-page ESBs, each page is half the total ESB window */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
686#define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
687
/*
 * H_INT_SET_SOURCE_CONFIG: route an interrupt source to the END
 * selected by (target, priority) and optionally record the EISN to be
 * logged in the event queue.
 *
 * in: args[0] flags (SET_EISN and/or MASK), args[1] LISN,
 *     args[2] target CPU number, args[3] priority (0xff = masked),
 *     args[4] EISN
 */
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Priority 0xff resets the EAS to its masked default */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that (target, priority) selects an existing END; the
     * EAS is then pointed at it.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
/*
 * H_INT_GET_SOURCE_CONFIG: return the current routing of an interrupt
 * source.
 *
 * in:  args[0] flags (must be 0), args[1] LISN
 * out: args[0] target CPU number, args[1] priority (0xff if masked),
 *      args[2] EISN
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Recover the target/priority from the END the EAS points at */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
/*
 * H_INT_GET_QUEUE_INFO: return the END ESB address and the EQ size
 * shift for the END selected by (target, priority).
 *
 * in:  args[0] flags (must be 0), args[1] target, args[2] priority
 * out: args[0] END ESB address, args[1] EQ size shift (0 if no queue)
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /* Reserved priorities have no guest-accessible END */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Each END owns a 2-page ESB window in the END ESB region */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        /* QSIZE encodes the page shift relative to 4K (2^12) */
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
943
/*
 * H_INT_SET_QUEUE_CONFIG: program (or tear down) the event queue of
 * the END selected by (target, priority).
 *
 * in: args[0] flags (ALWAYS_NOTIFY), args[1] target, args[2] priority,
 *     args[3] EQ page guest address, args[4] EQ size shift (0 to
 *     disable the queue)
 */
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /* Reserved priorities cannot be configured by the guest */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    /* Work on a local copy; it is written back at 'out' */
    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:    /* 4K  */
    case 16:    /* 64K */
    case 21:    /* 2M  */
    case 24:    /* 16M */
        /* EQ address is split across W2 (high 28 bits) and W3 (low 32) */
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* qsize 0 disables the queue: reset the END */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Sanity-check that the whole EQ page range is addressable in
         * the guest address space before accepting it.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* The target was already validated above; this cannot fail */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /* Record the routing target and priority in the END */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * Reconfiguring the queue resets the generation bit and the
     * producer index.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

out:
    /* Commit the updated END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
1096
1097static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
1098 SpaprMachineState *spapr,
1099 target_ulong opcode,
1100 target_ulong *args)
1101{
1102 SpaprXive *xive = spapr->xive;
1103 target_ulong flags = args[0];
1104 target_ulong target = args[1];
1105 target_ulong priority = args[2];
1106 XiveEND *end;
1107 uint8_t end_blk;
1108 uint32_t end_idx;
1109
1110 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1111 return H_FUNCTION;
1112 }
1113
1114 if (flags & ~SPAPR_XIVE_END_DEBUG) {
1115 return H_PARAMETER;
1116 }
1117
1118
1119
1120
1121
1122
1123 if (spapr_xive_priority_is_reserved(priority)) {
1124 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1125 " is reserved\n", priority);
1126 return H_P3;
1127 }
1128
1129
1130
1131
1132
1133
1134 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1135 return H_P2;
1136 }
1137
1138 assert(end_idx < xive->nr_ends);
1139 end = &xive->endt[end_idx];
1140
1141 args[0] = 0;
1142 if (xive_end_is_notify(end)) {
1143 args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
1144 }
1145
1146 if (xive_end_is_enqueue(end)) {
1147 args[1] = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
1148 | be32_to_cpu(end->w3);
1149 args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
1150 } else {
1151 args[1] = 0;
1152 args[2] = 0;
1153 }
1154
1155
1156 if (flags & SPAPR_XIVE_END_DEBUG) {
1157
1158 args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;
1159
1160
1161 args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1162 } else {
1163 args[3] = 0;
1164 }
1165
1166 return H_SUCCESS;
1167}
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/*
 * H_INT_SET_OS_REPORTING_LINE: not implemented by this emulation.
 * Returns H_FUNCTION once the XIVE exploitation mode gate passes.
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* TODO: H_INT_SET_OS_REPORTING_LINE is unsupported for now */
    return H_FUNCTION;
}
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/*
 * H_INT_GET_OS_REPORTING_LINE: not implemented by this emulation.
 * Returns H_FUNCTION once the XIVE exploitation mode gate passes.
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* TODO: H_INT_GET_OS_REPORTING_LINE is unsupported for now */
    return H_FUNCTION;
}
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
1267
/*
 * H_INT_ESB: perform an ESB load or store on behalf of the guest, for
 * sources that cannot use MMIO directly (LSIs in this emulation, see
 * h_int_get_source_info).
 *
 * in:  args[0] flags (STORE), args[1] LISN, args[2] offset into the
 *      ESB management page, args[3] data to store
 * out: args[0] value loaded (or -1 on a store)
 */
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* NOTE(review): '>' allows offset == page size — confirm intended */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    /* Forward the access to the ESB management page of the source */
    mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

    if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                      (flags & SPAPR_XIVE_ESB_STORE))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                      HWADDR_PRIx "\n", mmio_addr);
        return H_HARDWARE;
    }
    args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    return H_SUCCESS;
}
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
/*
 * H_INT_SYNC: synchronize the interrupt delivery pipeline for a
 * source. After validating the LISN, nothing needs to be done —
 * presumably because this emulation delivers events synchronously;
 * confirm against the generic XIVE routing code.
 *
 * in: args[0] flags (must be 0), args[1] LISN
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Nothing to flush in the emulated pipeline */
    return H_SUCCESS;
}
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390static target_ulong h_int_reset(PowerPCCPU *cpu,
1391 SpaprMachineState *spapr,
1392 target_ulong opcode,
1393 target_ulong *args)
1394{
1395 SpaprXive *xive = spapr->xive;
1396 target_ulong flags = args[0];
1397
1398 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1399 return H_FUNCTION;
1400 }
1401
1402 if (flags) {
1403 return H_PARAMETER;
1404 }
1405
1406 device_reset(DEVICE(xive));
1407 return H_SUCCESS;
1408}
1409
/* Register all XIVE exploitation-mode hypercalls with the hcall table. */
void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}
1426
/*
 * Populate the "interrupt-controller" device-tree node advertising the
 * XIVE exploitation mode to the guest: TIMA page addresses, LISN
 * ranges, supported EQ sizes and platform-reserved priorities.
 */
void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    SpaprXive *xive = spapr->xive;
    int node;
    /* Two (address, size) pairs: user TIMA page and OS TIMA page */
    uint64_t timas[2 * 2];

    /* LISNs 0 .. nr_servers-1, one IPI range per server */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };

    /*
     * Single supported EQ size, expressed as a page shift (16 = 64K);
     * matches the shifts handled by h_int_set_queue_config().
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16),
    };

    /*
     * Reserved priority range: (start, count) = (7, 0xf8), i.e.
     * priorities 7 and above are kept by the platform — see
     * spapr_xive_priority_is_reserved().
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),
        cpu_to_be32(0xf8),
    };

    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* Mark the node as an interrupt controller with 2-cell specifiers */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* Both phandle forms for compatibility with older kernels */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /* Reserved priorities go on the root node, not the controller node */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
1488