1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/module.h"
13#include "qapi/error.h"
14#include "qemu/error-report.h"
15#include "target/ppc/cpu.h"
16#include "sysemu/cpus.h"
17#include "sysemu/reset.h"
18#include "migration/vmstate.h"
19#include "monitor/monitor.h"
20#include "hw/ppc/fdt.h"
21#include "hw/ppc/spapr.h"
22#include "hw/ppc/spapr_cpu_core.h"
23#include "hw/ppc/spapr_xive.h"
24#include "hw/ppc/xive.h"
25#include "hw/ppc/xive_regs.h"
26#include "hw/qdev-properties.h"
27
28
29
30
31
32#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
33#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
34
35
36
37
38
39
40
41
42
43
44
45#define SPAPR_XIVE_NVT_BASE 0x400
46
47
48
49
50static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
51{
52 return nvt_idx - SPAPR_XIVE_NVT_BASE;
53}
54
55static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
56 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
57{
58 assert(cpu);
59
60 if (out_nvt_blk) {
61 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
62 }
63
64 if (out_nvt_blk) {
65 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
66 }
67}
68
69static int spapr_xive_target_to_nvt(uint32_t target,
70 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
71{
72 PowerPCCPU *cpu = spapr_find_cpu(target);
73
74 if (!cpu) {
75 return -1;
76 }
77
78 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
79 return 0;
80}
81
82
83
84
85
86int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
87 uint32_t *out_server, uint8_t *out_prio)
88{
89
90 assert(end_blk == SPAPR_XIVE_BLOCK_ID);
91
92 if (out_server) {
93 *out_server = end_idx >> 3;
94 }
95
96 if (out_prio) {
97 *out_prio = end_idx & 0x7;
98 }
99 return 0;
100}
101
102static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
103 uint8_t *out_end_blk, uint32_t *out_end_idx)
104{
105 assert(cpu);
106
107 if (out_end_blk) {
108 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
109 }
110
111 if (out_end_idx) {
112 *out_end_idx = (cpu->vcpu_id << 3) + prio;
113 }
114}
115
116static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
117 uint8_t *out_end_blk, uint32_t *out_end_idx)
118{
119 PowerPCCPU *cpu = spapr_find_cpu(target);
120
121 if (!cpu) {
122 return -1;
123 }
124
125 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
126 return 0;
127}
128
129
130
131
132
/*
 * Print one END on the monitor: owning target/priority, queue index,
 * number of entries, queue base address and generation bit, followed
 * by the queue contents.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    /* QSIZE is encoded as log2(entries) - 10, hence the +10 here */
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    /* Block number is irrelevant for the target lookup (single block) */
    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
}
150
/*
 * Dump the state of all valid interrupt sources on the monitor:
 * LISN, source type, ESB P/Q bits, assertion status, mask state,
 * END data, and the routed END if any.
 */
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        /* Pull the latest state out of the in-kernel irqchip first */
        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Unclaimed sources are skipped entirely */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
198
/*
 * Enable or disable the guest-visible MMIO regions of the controller:
 * the source ESB pages and the thread management area (TIMA).
 */
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /*
     * The END ESB region is kept disabled unconditionally -- presumably
     * it is not exposed to sPAPR guests; confirm before changing.
     */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}
207
/* Restore an END to its power-on state. */
static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* Set the Q bits of both internal ESBs (notification and escalation) */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}
215
216static void spapr_xive_reset(void *dev)
217{
218 SpaprXive *xive = SPAPR_XIVE(dev);
219 int i;
220
221
222
223
224
225
226
227 for (i = 0; i < xive->nr_irqs; i++) {
228 XiveEAS *eas = &xive->eat[i];
229 if (xive_eas_is_valid(eas)) {
230 eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
231 } else {
232 eas->w = 0;
233 }
234 }
235
236
237 for (i = 0; i < xive->nr_ends; i++) {
238 spapr_xive_end_reset(&xive->endt[i]);
239 }
240}
241
/*
 * QOM instance init: embed the IRQ source and END source child objects
 * and mark the device as not connected to KVM yet.
 */
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);

    /* Not connected to the KVM XIVE device (no fd) until activated */
    xive->fd = -1;
}
256
257static void spapr_xive_realize(DeviceState *dev, Error **errp)
258{
259 SpaprXive *xive = SPAPR_XIVE(dev);
260 XiveSource *xsrc = &xive->source;
261 XiveENDSource *end_xsrc = &xive->end_source;
262 Error *local_err = NULL;
263
264 if (!xive->nr_irqs) {
265 error_setg(errp, "Number of interrupt needs to be greater 0");
266 return;
267 }
268
269 if (!xive->nr_ends) {
270 error_setg(errp, "Number of interrupt needs to be greater 0");
271 return;
272 }
273
274
275
276
277 object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
278 &error_fatal);
279 object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
280 &error_fatal);
281 object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
282 if (local_err) {
283 error_propagate(errp, local_err);
284 return;
285 }
286 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
287
288
289
290
291 object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
292 &error_fatal);
293 object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
294 &error_fatal);
295 object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
296 if (local_err) {
297 error_propagate(errp, local_err);
298 return;
299 }
300 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
301
302
303 xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
304
305
306
307
308 xive->eat = g_new0(XiveEAS, xive->nr_irqs);
309 xive->endt = g_new0(XiveEND, xive->nr_ends);
310
311 xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
312 xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
313
314 qemu_register_reset(spapr_xive_reset, dev);
315
316
317 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
318 "xive.tima", 4ull << TM_SHIFT);
319 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
320
321
322
323
324
325 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
326 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
327 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
328}
329
330static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
331 uint32_t eas_idx, XiveEAS *eas)
332{
333 SpaprXive *xive = SPAPR_XIVE(xrtr);
334
335 if (eas_idx >= xive->nr_irqs) {
336 return -1;
337 }
338
339 *eas = xive->eat[eas_idx];
340 return 0;
341}
342
343static int spapr_xive_get_end(XiveRouter *xrtr,
344 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
345{
346 SpaprXive *xive = SPAPR_XIVE(xrtr);
347
348 if (end_idx >= xive->nr_ends) {
349 return -1;
350 }
351
352 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
353 return 0;
354}
355
356static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
357 uint32_t end_idx, XiveEND *end,
358 uint8_t word_number)
359{
360 SpaprXive *xive = SPAPR_XIVE(xrtr);
361
362 if (end_idx >= xive->nr_ends) {
363 return -1;
364 }
365
366 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
367 return 0;
368}
369
/*
 * XiveRouter method: synthesize an NVT entry for (nvt_blk, nvt_idx).
 * sPAPR keeps no NVT table -- an NVT is valid exactly when a vCPU with
 * the matching target exists, so only the VALID bit is reported.
 * Returns 0 on success, -1 when no vCPU matches.
 */
static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* No vCPU behind this NVT identifier */
        return -1;
    }

    /*
     * Only the VALID bit is meaningful here; the rest of the entry is
     * left zeroed since there is no backing table.
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}
388
/*
 * XiveRouter method: writing an NVT is not supported on sPAPR -- there
 * is no NVT table to update (see spapr_xive_get_nvt), so reaching this
 * would be a programming error in the router.
 */
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    g_assert_not_reached();
}
400
401static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
402{
403 PowerPCCPU *cpu = POWERPC_CPU(cs);
404
405 return spapr_cpu_state(cpu)->tctx;
406}
407
/* Migration stream layout of one END: the eight raw words w0..w7. */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
424
/* Migration stream layout of one EAS: a single raw 64-bit word. */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
434
/*
 * Migration pre-save hook: with an in-kernel irqchip, the live state
 * must first be pulled out of KVM into the QEMU tables.
 */
static int vmstate_spapr_xive_pre_save(void *opaque)
{
    if (!kvm_irqchip_in_kernel()) {
        /* Emulated mode: the QEMU tables are already authoritative */
        return 0;
    }

    return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
}
443
444
445
446
447
448static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
449{
450 if (kvm_irqchip_in_kernel()) {
451 return kvmppc_xive_post_load(SPAPR_XIVE(intc), version_id);
452 }
453
454 return 0;
455}
456
/*
 * Top-level migration description: nr_irqs must match on both sides,
 * then the full EAS and END tables follow. Restoring into KVM happens
 * via the SpaprInterruptController post_load hook, not here -- the
 * explicit ".post_load = NULL" below is redundant (zero-initialized
 * anyway) and kept only to make that choice visible.
 */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
472
473static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
474 bool lsi, Error **errp)
475{
476 SpaprXive *xive = SPAPR_XIVE(intc);
477 XiveSource *xsrc = &xive->source;
478
479 assert(lisn < xive->nr_irqs);
480
481 if (xive_eas_is_valid(&xive->eat[lisn])) {
482 error_setg(errp, "IRQ %d is not free", lisn);
483 return -EBUSY;
484 }
485
486
487
488
489 xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
490 if (lsi) {
491 xive_source_irq_set_lsi(xsrc, lisn);
492 }
493
494 if (kvm_irqchip_in_kernel()) {
495 return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
496 }
497
498 return 0;
499}
500
501static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
502{
503 SpaprXive *xive = SPAPR_XIVE(intc);
504 assert(lisn < xive->nr_irqs);
505
506 xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
507}
508
/*
 * Device properties: table sizes and the guest physical base addresses
 * of the ESB pages ("vc-base") and the TIMA ("tm-base").
 */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};
516
517static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
518 PowerPCCPU *cpu, Error **errp)
519{
520 SpaprXive *xive = SPAPR_XIVE(intc);
521 Object *obj;
522 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
523
524 obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(xive), errp);
525 if (!obj) {
526 return -1;
527 }
528
529 spapr_cpu->tctx = XIVE_TCTX(obj);
530 return 0;
531}
532
/*
 * Program the OS CAM line of a thread context: the value is stored
 * big-endian (via cpu_to_be32) into word 2 of the QW1 (OS) ring, with
 * the VO (valid) bit set.
 */
static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}
538
/*
 * SpaprInterruptController method: reset a vCPU's thread context and
 * re-program its OS CAM line with the NVT derived from the vCPU id,
 * so the router can match presented interrupts to this thread.
 */
static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * The OS CAM line must be set after the generic reset since the
     * reset clears the thread context registers.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}
557
558static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
559 PowerPCCPU *cpu)
560{
561 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
562
563 xive_tctx_destroy(spapr_cpu->tctx);
564 spapr_cpu->tctx = NULL;
565}
566
567static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
568{
569 SpaprXive *xive = SPAPR_XIVE(intc);
570
571 if (kvm_irqchip_in_kernel()) {
572 kvmppc_xive_source_set_irq(&xive->source, irq, val);
573 } else {
574 xive_source_set_irq(&xive->source, irq, val);
575 }
576}
577
578static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
579{
580 SpaprXive *xive = SPAPR_XIVE(intc);
581 CPUState *cs;
582
583 CPU_FOREACH(cs) {
584 PowerPCCPU *cpu = POWERPC_CPU(cs);
585
586 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
587 }
588
589 spapr_xive_pic_print_info(xive, mon);
590}
591
/*
 * SpaprInterruptController method: populate the device tree with the
 * "interrupt-controller" node describing the XIVE to the guest, plus
 * the machine-level reserved-priority property.
 */
static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    /* Two (address, size) pairs: the user-level and OS TIMA pages */
    uint64_t timas[2 * 2];

    /* LISNs are allocated from 0, one per server */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };

    /* Only one EQ size is advertised: 64K (2^16) */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16),
    };

    /*
     * Priorities 7 and up (0xf8 = 0b11111000 bitmask) are reserved;
     * keep this consistent with spapr_xive_priority_is_reserved().
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),
        cpu_to_be32(0xf8),
    };

    /* TIMA reg property: user page first, then OS page */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* Mark the node as an interrupt controller with 2-cell specifiers */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* "linux,phandle" is the legacy alias of "phandle" */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /* The reserved priorities property lives on the root node */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
653
654static int spapr_xive_activate(SpaprInterruptController *intc, Error **errp)
655{
656 SpaprXive *xive = SPAPR_XIVE(intc);
657
658 if (kvm_enabled()) {
659 int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, errp);
660 if (rc < 0) {
661 return rc;
662 }
663 }
664
665
666 spapr_xive_mmio_set_enabled(xive, true);
667
668 return 0;
669}
670
671static void spapr_xive_deactivate(SpaprInterruptController *intc)
672{
673 SpaprXive *xive = SPAPR_XIVE(intc);
674
675 spapr_xive_mmio_set_enabled(xive, false);
676
677 if (kvm_irqchip_in_kernel()) {
678 kvmppc_xive_disconnect(intc);
679 }
680}
681
/*
 * QOM class init: wire the device, XiveRouter and
 * SpaprInterruptController method tables.
 */
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    dc->props   = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd    = &vmstate_spapr_xive;

    /* Table accessors used by the generic XIVE routing engine */
    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;

    /* sPAPR machine interrupt-controller interface */
    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;
}
712
/*
 * QOM type: a XiveRouter subclass that also implements the sPAPR
 * interrupt-controller interface.
 */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};
724
/* Register the sPAPR XIVE type with the QEMU type system. */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
/*
 * Tell whether an interrupt priority is reserved from the guest's
 * point of view: only priorities 0 through 6 are available; 7 and
 * above are rejected by the hypercalls.
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    return priority > 6;
}
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60)
795#define SPAPR_XIVE_SRC_LSI PPC_BIT(61)
796#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62)
797
798#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63)
799
/*
 * H_INT_GET_SOURCE_INFO hypercall: report the characteristics of an
 * interrupt source to the guest.
 *
 * In:  args[0] = flags (must be 0), args[1] = LISN.
 * Out: args[0] = capability flags (trigger/store-EOI/LSI/H_INT_ESB),
 *      args[1] = ESB management page address (or -1),
 *      args[2] = ESB trigger page address (or -1),
 *      args[3] = ESB page size as a power of two.
 */
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];

    /* Only available once the guest negotiated XIVE exploitation mode */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Single-page ESBs trigger through the management page */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /* LSIs are accessed through the H_INT_ESB hypercall, not MMIO */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    /* Management page address, unless H_INT_ESB access is mandated */
    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    /* Separate trigger page address, when the source uses two pages */
    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* With two pages, each page is half of the ESB region */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
908#define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
909
910static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
911 SpaprMachineState *spapr,
912 target_ulong opcode,
913 target_ulong *args)
914{
915 SpaprXive *xive = spapr->xive;
916 XiveEAS eas, new_eas;
917 target_ulong flags = args[0];
918 target_ulong lisn = args[1];
919 target_ulong target = args[2];
920 target_ulong priority = args[3];
921 target_ulong eisn = args[4];
922 uint8_t end_blk;
923 uint32_t end_idx;
924
925 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
926 return H_FUNCTION;
927 }
928
929 if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
930 return H_PARAMETER;
931 }
932
933 if (lisn >= xive->nr_irqs) {
934 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
935 lisn);
936 return H_P2;
937 }
938
939 eas = xive->eat[lisn];
940 if (!xive_eas_is_valid(&eas)) {
941 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
942 lisn);
943 return H_P2;
944 }
945
946
947 if (priority == 0xff) {
948 new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
949 goto out;
950 }
951
952 if (flags & SPAPR_XIVE_SRC_MASK) {
953 new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
954 } else {
955 new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
956 }
957
958 if (spapr_xive_priority_is_reserved(priority)) {
959 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
960 " is reserved\n", priority);
961 return H_P4;
962 }
963
964
965
966
967
968
969 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
970 return H_P3;
971 }
972
973 new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
974 new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
975
976 if (flags & SPAPR_XIVE_SRC_SET_EISN) {
977 new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
978 }
979
980 if (kvm_irqchip_in_kernel()) {
981 Error *local_err = NULL;
982
983 kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
984 if (local_err) {
985 error_report_err(local_err);
986 return H_HARDWARE;
987 }
988 }
989
990out:
991 xive->eat[lisn] = new_eas;
992 return H_SUCCESS;
993}
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
/*
 * H_INT_GET_SOURCE_CONFIG hypercall: report the current routing of an
 * interrupt source.
 *
 * In:  args[0] = flags (must be 0), args[1] = LISN.
 * Out: args[0] = target, args[1] = priority (0xff when masked),
 *      args[2] = EISN data.
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* Follow the EAS to the END it routes to, then to the owning NVT */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    /* Masked sources report the reserved priority 0xff */
    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
/*
 * H_INT_GET_QUEUE_INFO hypercall: report the ESB address and queue
 * size of the END associated with a (target, priority) pair.
 *
 * In:  args[0] = flags (must be 0), args[1] = target,
 *      args[2] = priority.
 * Out: args[0] = END ESB management page address,
 *      args[1] = queue size as a power of two (0 when no queue).
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /* Reserved priorities have no guest-usable queue */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END it maps to */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Each END owns two ESB pages, hence the esb_shift + 1 stride */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        /* QSIZE is encoded as log2(size) - 12 */
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
1175
/*
 * H_INT_SET_QUEUE_CONFIG hypercall: configure (or tear down, with
 * qsize == 0) the event queue of the END associated with a
 * (target, priority) pair.
 *
 * In: args[0] = flags (ALWAYS_NOTIFY), args[1] = target,
 *     args[2] = priority, args[3] = queue page guest address,
 *     args[4] = queue size as a power of two (12/16/21/24, or 0).
 */
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /* Reserved priorities cannot be configured by the guest */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END it maps to */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    /* Work on a local copy; committed only at the end */
    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        /* The queue page must be naturally aligned to its size */
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        /* Split the queue address across w2 (high) and w3 (low) */
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        /* QSIZE is encoded as log2(size) - 12 */
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* qsize 0 resets the END to its power-on state */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Probe that the whole queue page is addressable guest RAM by
         * mapping and immediately unmapping it.
         * NOTE(review): on a short mapping we return without unmapping
         * the partial region -- looks like a leak; confirm against
         * address_space_map() semantics before changing.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* The target was already validated above, so this cannot fail */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /* Record the owning NVT and priority in the END */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /* Reconfiguration restarts the queue: generation 1, index 0 */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

out:
    /* Keep the in-kernel irqchip in sync before committing */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Commit the new END configuration */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
1344
/*
 * H_INT_GET_QUEUE_CONFIG hypercall: report the event queue
 * configuration of the END associated with a (target, priority) pair.
 *
 * In:  args[0] = flags (DEBUG), args[1] = target, args[2] = priority.
 * Out: args[0] = flags (ALWAYS_NOTIFY, plus generation bit in debug
 *      mode), args[1] = queue page address, args[2] = queue size as a
 *      power of two, args[3] = queue index (debug mode only).
 */
static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /* Reserved priorities have no guest-visible queue */
    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Validate the target and derive the END it maps to */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        /* QSIZE is encoded as log2(size) - 12 */
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /* Refresh the END from KVM so the debug fields below are current */
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Debug mode additionally reports the generation bit and index */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Generation bit goes in bit 62 of the returned flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Current queue index */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
/*
 * H_INT_SET_OS_REPORTING_LINE hypercall: not implemented. Returns
 * H_FUNCTION unconditionally, whether or not the guest negotiated
 * XIVE exploitation mode.
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* OS reporting lines are not supported by this implementation */
    return H_FUNCTION;
}
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
/*
 * H_INT_GET_OS_REPORTING_LINE hypercall: not implemented. Returns
 * H_FUNCTION unconditionally, whether or not the guest negotiated
 * XIVE exploitation mode.
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* OS reporting lines are not supported by this implementation */
    return H_FUNCTION;
}
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
1524
/*
 * H_INT_ESB hypercall: perform an ESB load or store on behalf of the
 * guest, for sources that must be accessed through the hypervisor
 * (e.g. LSIs, see h_int_get_source_info).
 *
 * In:  args[0] = flags (STORE), args[1] = LISN, args[2] = offset in
 *      the ESB page, args[3] = data (for stores).
 * Out: args[0] = loaded value, or -1 for a store.
 */
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * NOTE(review): ">" accepts offset == page size, which looks
     * off-by-one vs ">=" -- confirm against the PAPR spec before
     * changing.
     */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        /* The in-kernel device performs the ESB access directly */
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        /* Emulated mode: go through the ESB management MMIO region */
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598static target_ulong h_int_sync(PowerPCCPU *cpu,
1599 SpaprMachineState *spapr,
1600 target_ulong opcode,
1601 target_ulong *args)
1602{
1603 SpaprXive *xive = spapr->xive;
1604 XiveEAS eas;
1605 target_ulong flags = args[0];
1606 target_ulong lisn = args[1];
1607
1608 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1609 return H_FUNCTION;
1610 }
1611
1612 if (flags) {
1613 return H_PARAMETER;
1614 }
1615
1616 if (lisn >= xive->nr_irqs) {
1617 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1618 lisn);
1619 return H_P2;
1620 }
1621
1622 eas = xive->eat[lisn];
1623 if (!xive_eas_is_valid(&eas)) {
1624 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1625 lisn);
1626 return H_P2;
1627 }
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 if (kvm_irqchip_in_kernel()) {
1640 Error *local_err = NULL;
1641
1642 kvmppc_xive_sync_source(xive, lisn, &local_err);
1643 if (local_err) {
1644 error_report_err(local_err);
1645 return H_HARDWARE;
1646 }
1647 }
1648 return H_SUCCESS;
1649}
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665static target_ulong h_int_reset(PowerPCCPU *cpu,
1666 SpaprMachineState *spapr,
1667 target_ulong opcode,
1668 target_ulong *args)
1669{
1670 SpaprXive *xive = spapr->xive;
1671 target_ulong flags = args[0];
1672
1673 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1674 return H_FUNCTION;
1675 }
1676
1677 if (flags) {
1678 return H_PARAMETER;
1679 }
1680
1681 device_reset(DEVICE(xive));
1682
1683 if (kvm_irqchip_in_kernel()) {
1684 Error *local_err = NULL;
1685
1686 kvmppc_xive_reset(xive, &local_err);
1687 if (local_err) {
1688 error_report_err(local_err);
1689 return H_HARDWARE;
1690 }
1691 }
1692 return H_SUCCESS;
1693}
1694
/* Register all XIVE exploitation mode hypercalls with the machine. */
void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}
1711