1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/module.h"
13#include "qapi/error.h"
14#include "qemu/error-report.h"
15#include "target/ppc/cpu.h"
16#include "sysemu/cpus.h"
17#include "sysemu/reset.h"
18#include "migration/vmstate.h"
19#include "monitor/monitor.h"
20#include "hw/ppc/fdt.h"
21#include "hw/ppc/spapr.h"
22#include "hw/ppc/spapr_cpu_core.h"
23#include "hw/ppc/spapr_xive.h"
24#include "hw/ppc/xive.h"
25#include "hw/ppc/xive_regs.h"
26#include "hw/qdev-properties.h"
27#include "trace.h"
28
29
30
31
32
33#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
34#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
35
36
37
38
39
40
41
42
43
44
45
/*
 * The sPAPR machine uses a simplified NVT model: a vCPU is mapped to
 * an NVT index with a fixed offset.
 */
#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * Convert an NVT identifier back to the vCPU id it designates. The
 * block number is ignored: a single XIVE block is modeled on sPAPR.
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    (void)nvt_blk; /* single-block model */

    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}
55
56static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
57 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
58{
59 assert(cpu);
60
61 if (out_nvt_blk) {
62 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
63 }
64
65 if (out_nvt_blk) {
66 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
67 }
68}
69
70static int spapr_xive_target_to_nvt(uint32_t target,
71 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
72{
73 PowerPCCPU *cpu = spapr_find_cpu(target);
74
75 if (!cpu) {
76 return -1;
77 }
78
79 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
80 return 0;
81}
82
83
84
85
86
87int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
88 uint32_t *out_server, uint8_t *out_prio)
89{
90
91 assert(end_blk == SPAPR_XIVE_BLOCK_ID);
92
93 if (out_server) {
94 *out_server = end_idx >> 3;
95 }
96
97 if (out_prio) {
98 *out_prio = end_idx & 0x7;
99 }
100 return 0;
101}
102
103static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
104 uint8_t *out_end_blk, uint32_t *out_end_idx)
105{
106 assert(cpu);
107
108 if (out_end_blk) {
109 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
110 }
111
112 if (out_end_idx) {
113 *out_end_idx = (cpu->vcpu_id << 3) + prio;
114 }
115}
116
117static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
118 uint8_t *out_end_blk, uint32_t *out_end_idx)
119{
120 PowerPCCPU *cpu = spapr_find_cpu(target);
121
122 if (!cpu) {
123 return -1;
124 }
125
126 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
127 return 0;
128}
129
130
131
132
133
/*
 * Print the state of an END on the monitor: target vCPU/priority,
 * queue index and size, queue base address and generation bit, then
 * the first queue entries.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10); /* QSIZE encodes log2(entries) - 10 */
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    /* Dump the first entries of the event queue */
    xive_end_queue_pic_print_info(end, 6, mon);
}
151
152
153
154
155
/*
 * The in-kernel (KVM) XIVE device is considered active once a file
 * descriptor has been opened for it ('fd' is -1 until then).
 */
#define spapr_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)
158
/*
 * Dump all valid interrupt sources (EAS table) and the END each one
 * routes to. When the KVM XIVE device is active, the QEMU tables are
 * first synchronized with the kernel state.
 */
static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, " LISN PQ EISN CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Only claimed IRQ numbers have their EAS valid bit set */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        /* LISN, source type, PQ bits, assertion, mask and END data */
        monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
206
/*
 * Enable or disable the MMIO regions exposed to the guest: the source
 * ESB pages and the TIMA. The END ESB region is always kept disabled.
 */
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* The END ESB region is not exposed on sPAPR machines */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}
215
/* TIMA MMIO write, performed on behalf of the current vCPU */
static void spapr_xive_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}
223
/* TIMA MMIO read, performed on behalf of the current vCPU */
static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}
230
/*
 * Memory region ops for the Thread Interrupt Management Area (TIMA).
 * Accesses of 1 to 8 bytes are accepted; the region is big-endian.
 */
const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
244
/* Reset an END to its default (invalid) state */
static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}
252
/*
 * Device reset handler. The EAS valid bits are preserved across reset
 * so the IRQ numbers claimed by the machine stay claimed; only the
 * routing configuration is dropped (valid entries go back to masked).
 * The ENDs are cleared entirely.
 */
static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}
278
/* QOM instance init: create the child source objects */
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device yet */
    xive->fd = -1;
}
291
/*
 * Realize the sPAPR XIVE device: realize the source and END-source
 * children, allocate the EAS/END routing tables, create the TIMA
 * region and map all MMIO regions.
 */
static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* Set by the machine before realize */
    g_assert(xive->nr_irqs);
    g_assert(xive->nr_ends);

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);

    /*
     * Initialize the END ESB source.
     * NOTE(review): "nr-ends" is set from nr_irqs, not nr_ends --
     * confirm this is intentional (ESB pages sized per IRQ).
     */
    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);

    /* The END ESB pages are mapped after the source ESB pages */
    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /*
     * Map all regions. They will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active.
     */
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}
360
361static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
362 uint32_t eas_idx, XiveEAS *eas)
363{
364 SpaprXive *xive = SPAPR_XIVE(xrtr);
365
366 if (eas_idx >= xive->nr_irqs) {
367 return -1;
368 }
369
370 *eas = xive->eat[eas_idx];
371 return 0;
372}
373
374static int spapr_xive_get_end(XiveRouter *xrtr,
375 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
376{
377 SpaprXive *xive = SPAPR_XIVE(xrtr);
378
379 if (end_idx >= xive->nr_ends) {
380 return -1;
381 }
382
383 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
384 return 0;
385}
386
387static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
388 uint32_t end_idx, XiveEND *end,
389 uint8_t word_number)
390{
391 SpaprXive *xive = SPAPR_XIVE(xrtr);
392
393 if (end_idx >= xive->nr_ends) {
394 return -1;
395 }
396
397 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
398 return 0;
399}
400
/*
 * XiveRouter accessor for NVTs. The sPAPR machine does not maintain a
 * real NVT table: an NVT is simply reported valid when the NVT index
 * maps back to an existing vCPU.
 */
static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can not find a NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU.
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}
419
/*
 * No need to write back the NVTs as the sPAPR machine does not
 * maintain a NVT table. This path is never expected to be taken.
 */
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    g_assert_not_reached();
}
431
/*
 * XivePresenter method: scan all vCPU thread contexts for one whose
 * CAM line matches the (nvt_blk, nvt_idx) pair. Records the match in
 * @match and returns the number of matches found, or -1 when more than
 * one thread context matches (which is invalid).
 */
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs (no thread context yet).
         * NOTE(review): presumably possible during vCPU hotplug --
         * confirm against spapr_xive_cpu_intc_create() callers.
         */
        if (!tctx) {
            continue;
        }

        /* Check the thread context CAM lines */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        /*
         * Save the matching thread interrupt context, and reject
         * duplicate matches, which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}
477
478static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
479{
480 return SPAPR_XIVE_BLOCK_ID;
481}
482
/* Migration state for one END: the eight 32-bit descriptor words */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
499
/* Migration state for one EAS: a single 64-bit word */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
509
/*
 * Migration pre-save: when the KVM XIVE device is active, capture its
 * state into the QEMU tables before they are transferred.
 */
static int vmstate_spapr_xive_pre_save(void *opaque)
{
    SpaprXive *xive = SPAPR_XIVE(opaque);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_pre_save(xive);
    }

    return 0;
}
520
521
522
523
524
/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level, once all XIVE states have been transferred and loaded. Pushes
 * the loaded state back into the KVM XIVE device when it is active.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_post_load(xive, version_id);
    }

    return 0;
}
535
/* Top-level migration state: the EAS and END tables */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level, see spapr_xive_post_load() */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
551
/*
 * Claim an IRQ number: mark its EAS valid (and masked by default) and
 * flag it as an LSI if requested. Returns -EBUSY when the IRQ is
 * already claimed; forwards the reset to KVM when the in-kernel device
 * is active.
 */
static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_claim_irq(lisn, lsi);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}
581
/* Release a claimed IRQ number by clearing its EAS valid bit */
static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_free_irq(lisn);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}
591
/* QOM properties, set by the machine at device creation time */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    /* Lowest priority reserved for the hypervisor, see spapr_xive_dt() */
    DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
    DEFINE_PROP_END_OF_LIST(),
};
600
/*
 * Create the XIVE thread interrupt context for a vCPU. Returns -1 (and
 * sets errp) on failure.
 */
static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}
616
/*
 * Push @os_cam, with the Valid bit set, into word 2 of the OS ring of
 * the thread context registers.
 */
static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}
622
/*
 * Reset a vCPU's thread interrupt context and push its NVT identifier
 * in the OS CAM line, emulating what the hypervisor does when a
 * Virtual Processor is scheduled to run on a HW thread.
 */
static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}
641
642static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
643 PowerPCCPU *cpu)
644{
645 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
646
647 xive_tctx_destroy(spapr_cpu->tctx);
648 spapr_cpu->tctx = NULL;
649}
650
/*
 * Set the level of an interrupt line, forwarding the trigger either to
 * the KVM XIVE device or to the emulated source.
 */
static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    trace_spapr_xive_set_irq(irq, val);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}
663
/*
 * Monitor dump: print each vCPU's thread interrupt context, then the
 * source/END tables.
 */
static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(xive, mon);
}
677
/*
 * Populate the "interrupt-controller" device tree node advertising the
 * XIVE exploitation mode to the guest: TIMA addresses, LISN ranges,
 * supported EQ page sizes and the priorities reserved by the
 * hypervisor.
 */
static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };
    /* Supported EQ sizes: only 64K pages are advertised (2^16) */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16),
    };
    /*
     * Single range reserving the hypervisor priorities:
     * [ hv_prio .. 0xff ], encoded as { start, count }.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(xive->hv_prio),
        cpu_to_be32(0xff - xive->hv_prio),
    };

    /* Thread Interrupt Management Areas: User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
740
/*
 * Activate the interrupt controller: connect the KVM XIVE device when
 * KVM is available, then enable the guest-visible MMIO regions.
 */
static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}
759
/*
 * Deactivate the interrupt controller: disable the MMIO regions and
 * disconnect from the KVM XIVE device if it was in use.
 */
static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_disconnect(intc);
    }
}
770
771static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
772{
773 return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
774}
775
/* QOM class init: wire up device, router, presenter and sPAPR intc ops */
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd = &vmstate_spapr_xive;

    /* XIVE routing model operations */
    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    /* sPAPR interrupt controller backend operations */
    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    /* XIVE presenter operations */
    xpc->match_nvt = spapr_xive_match_nvt;
    xpc->in_kernel = spapr_xive_in_kernel_xptr;
}
812
/* QOM type registration data for the sPAPR XIVE device */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};
825
/* Register the QOM type with the type system at module init */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}
830
831type_init(spapr_xive_register_types)
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
/*
 * Priorities in [ hv_prio .. 0xff ] are reserved for the hypervisor
 * (this matches the "ibm,plat-res-int-priorities" range advertised in
 * spapr_xive_dt()); the guest may only use the lower priorities.
 */
static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
{
    return priority >= xive->hv_prio;
}
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60)
889#define SPAPR_XIVE_SRC_LSI PPC_BIT(61)
890#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62)
891
892#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63)
893
/*
 * The H_INT_GET_SOURCE_INFO hcall() returns the characteristics of an
 * interrupt source "lisn": flags (trigger page, LSI, Store EOI,
 * H_INT_ESB usage) in R4, the ESB management page address in R5, the
 * trigger page address in R6 and the ESB page size (log2) in R7.
 */
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_get_source_info(flags, lisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    /* No ESB management page when H_INT_ESB must be used */
    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    /* Trigger page, only when the source has a separate one */
    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* Two pages per interrupt halves the per-page shift */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
1004#define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
1005
/*
 * The H_INT_SET_SOURCE_CONFIG hcall() assigns a Logical Interrupt
 * Source "lisn" to a "target"/"priority" pair, optionally setting the
 * Effective IRQ number (EISN) and/or masking the source.
 */
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    /* Forward the new configuration to the KVM XIVE device if active */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
/*
 * The H_INT_GET_SOURCE_CONFIG hcall() returns the target/priority pair
 * (and END data) currently assigned to the Logical Interrupt Source
 * "lisn": target in R4, priority (0xff when masked) in R5, EISN in R6.
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_get_source_config(flags, lisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unsupported */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
/*
 * The H_INT_GET_QUEUE_INFO hcall() returns the logical real address of
 * the notification management page (R4) and the EQ size (log2, R5) for
 * the specified "target"/"priority" pair.
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_info(flags, target, priority);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* All flag bits are reserved */
    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU.
     */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Each END has a pair of ESB pages, hence esb_shift + 1 */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        /* QSIZE encodes log2(entries) - 12 */
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
1277
/*
 * The H_INT_SET_QUEUE_CONFIG hcall() sets or resets the Event Queue
 * for a given "target"/"priority" pair, along with the notification
 * configuration of the END. A qsize of 0 resets the queue.
 */
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU.
     */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the queue write
     * pointer at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue an escalation interrupt if flagged
     */
out:
    /* Forward the new END configuration to the KVM XIVE device */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
1448
/*
 * The H_INT_GET_QUEUE_CONFIG hcall() returns the Event Queue settings
 * of the specified "target"/"priority" pair: notify flags (R4), EQ
 * address (R5), EQ size (R6) and, in debug mode, the queue offset
 * counter (R7) plus the generation bit folded into R4.
 */
static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_config(flags, target, priority);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU.
     */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        /* QSIZE encodes log2(entries) - 12 */
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /* Refresh the END state from the KVM XIVE device if active */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
1553 SpaprMachineState *spapr,
1554 target_ulong opcode,
1555 target_ulong *args)
1556{
1557 target_ulong flags = args[0];
1558
1559 trace_spapr_xive_set_os_reporting_line(flags);
1560
1561 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1562 return H_FUNCTION;
1563 }
1564
1565
1566
1567
1568
1569
1570
1571 return H_FUNCTION;
1572}
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
1593 SpaprMachineState *spapr,
1594 target_ulong opcode,
1595 target_ulong *args)
1596{
1597 target_ulong flags = args[0];
1598
1599 trace_spapr_xive_get_os_reporting_line(flags);
1600
1601 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1602 return H_FUNCTION;
1603 }
1604
1605
1606
1607
1608
1609
1610
1611 return H_FUNCTION;
1612}
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
/* Flag in args[0]: the ESB access is a store; otherwise it is a load */
#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

/*
 * The H_INT_ESB hcall() performs an ESB management page access on
 * behalf of the guest for the source identified by the LISN in
 * args[1], at the page offset in args[2].  The loaded value (or -1
 * for a store) is returned in args[0].
 */
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    /* Only valid once the guest has negotiated XIVE exploitation mode */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* SPAPR_XIVE_ESB_STORE is the only flag this hcall accepts */
    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    /* The LISN must index a valid entry of the EAS table */
    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * NOTE(review): '>' accepts offset == 1 << esb_shift, one past the
     * ESB window — looks like an off-by-one ('>=' expected); confirm
     * whether this is intended before changing guest-visible behavior.
     */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (spapr_xive_in_kernel(xive)) {
        /* KVM-backed device: let the kernel perform the ESB access */
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        /* Emulated device: access the ESB management page through DMA */
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        /* Stores return -1; loads return the value read from the ESB */
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714static target_ulong h_int_sync(PowerPCCPU *cpu,
1715 SpaprMachineState *spapr,
1716 target_ulong opcode,
1717 target_ulong *args)
1718{
1719 SpaprXive *xive = spapr->xive;
1720 XiveEAS eas;
1721 target_ulong flags = args[0];
1722 target_ulong lisn = args[1];
1723
1724 trace_spapr_xive_sync(flags, lisn);
1725
1726 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1727 return H_FUNCTION;
1728 }
1729
1730 if (flags) {
1731 return H_PARAMETER;
1732 }
1733
1734 if (lisn >= xive->nr_irqs) {
1735 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1736 lisn);
1737 return H_P2;
1738 }
1739
1740 eas = xive->eat[lisn];
1741 if (!xive_eas_is_valid(&eas)) {
1742 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1743 lisn);
1744 return H_P2;
1745 }
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757 if (spapr_xive_in_kernel(xive)) {
1758 Error *local_err = NULL;
1759
1760 kvmppc_xive_sync_source(xive, lisn, &local_err);
1761 if (local_err) {
1762 error_report_err(local_err);
1763 return H_HARDWARE;
1764 }
1765 }
1766 return H_SUCCESS;
1767}
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783static target_ulong h_int_reset(PowerPCCPU *cpu,
1784 SpaprMachineState *spapr,
1785 target_ulong opcode,
1786 target_ulong *args)
1787{
1788 SpaprXive *xive = spapr->xive;
1789 target_ulong flags = args[0];
1790
1791 trace_spapr_xive_reset(flags);
1792
1793 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1794 return H_FUNCTION;
1795 }
1796
1797 if (flags) {
1798 return H_PARAMETER;
1799 }
1800
1801 device_cold_reset(DEVICE(xive));
1802
1803 if (spapr_xive_in_kernel(xive)) {
1804 Error *local_err = NULL;
1805
1806 kvmppc_xive_reset(xive, &local_err);
1807 if (local_err) {
1808 error_report_err(local_err);
1809 return H_HARDWARE;
1810 }
1811 }
1812 return H_SUCCESS;
1813}
1814
1815void spapr_xive_hcall_init(SpaprMachineState *spapr)
1816{
1817 spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
1818 spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
1819 spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
1820 spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
1821 spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
1822 spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
1823 spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
1824 h_int_set_os_reporting_line);
1825 spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
1826 h_int_get_os_reporting_line);
1827 spapr_register_hypercall(H_INT_ESB, h_int_esb);
1828 spapr_register_hypercall(H_INT_SYNC, h_int_sync);
1829 spapr_register_hypercall(H_INT_RESET, h_int_reset);
1830}
1831