1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/module.h"
13#include "qapi/error.h"
14#include "qemu/error-report.h"
15#include "target/ppc/cpu.h"
16#include "sysemu/cpus.h"
17#include "sysemu/reset.h"
18#include "migration/vmstate.h"
19#include "monitor/monitor.h"
20#include "hw/ppc/fdt.h"
21#include "hw/ppc/spapr.h"
22#include "hw/ppc/spapr_cpu_core.h"
23#include "hw/ppc/spapr_xive.h"
24#include "hw/ppc/xive.h"
25#include "hw/ppc/xive_regs.h"
26#include "hw/qdev-properties.h"
27#include "trace.h"
28
29
30
31
32
/* Default guest physical addresses of the ESB pages (VC) and of the TIMA */
#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
35
36
37
38
39
40
41
42
43
44
45
/* NVT indexes for vCPUs start at this offset in the NVT space */
#define SPAPR_XIVE_NVT_BASE 0x400

/*
 * Map an NVT identifier (block/index) back to a vCPU target id.  On
 * sPAPR the block is ignored and the index is simply de-offset by
 * SPAPR_XIVE_NVT_BASE.
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    uint32_t target = nvt_idx - SPAPR_XIVE_NVT_BASE;

    return target;
}
55
56static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
57 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
58{
59 assert(cpu);
60
61 if (out_nvt_blk) {
62 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
63 }
64
65 if (out_nvt_blk) {
66 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
67 }
68}
69
70static int spapr_xive_target_to_nvt(uint32_t target,
71 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
72{
73 PowerPCCPU *cpu = spapr_find_cpu(target);
74
75 if (!cpu) {
76 return -1;
77 }
78
79 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
80 return 0;
81}
82
83
84
85
86
87int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
88 uint32_t *out_server, uint8_t *out_prio)
89{
90
91 assert(end_blk == SPAPR_XIVE_BLOCK_ID);
92
93 if (out_server) {
94 *out_server = end_idx >> 3;
95 }
96
97 if (out_prio) {
98 *out_prio = end_idx & 0x7;
99 }
100 return 0;
101}
102
103static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
104 uint8_t *out_end_blk, uint32_t *out_end_idx)
105{
106 assert(cpu);
107
108 if (out_end_blk) {
109 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
110 }
111
112 if (out_end_idx) {
113 *out_end_idx = (cpu->vcpu_id << 3) + prio;
114 }
115}
116
117static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
118 uint8_t *out_end_blk, uint32_t *out_end_idx)
119{
120 PowerPCCPU *cpu = spapr_find_cpu(target);
121
122 if (!cpu) {
123 return -1;
124 }
125
126 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
127 return 0;
128}
129
130
131
132
133
/*
 * Print the state of one END on the monitor: owning target/priority,
 * queue index and entry count, queue base address and generation bit,
 * then the queue content itself.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    /* entry count: queue is (4K << qsize) bytes; presumably 4-byte entries */
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
}
151
152
153
154
155
/* True when the in-kernel (KVM) XIVE device is in use for this controller */
#define spapr_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)
158
/*
 * Dump the state of all valid interrupt sources (EAS table) on the
 * monitor, with the END each one routes to.  When KVM is in use, the
 * state is first synchronized from the kernel.
 */
static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN PQ EISN CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        /* Only configured LISNs are shown */
        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        /* Masked sources are not routed; skip the END dump */
        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}
206
/*
 * Enable/disable the guest-visible XIVE MMIO regions (source ESBs and
 * TIMA).  The END ESB region is always kept disabled: it is not exposed
 * to sPAPR guests.
 */
void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* The END ESB region is never exposed to the guest */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}
215
216static void spapr_xive_tm_write(void *opaque, hwaddr offset,
217 uint64_t value, unsigned size)
218{
219 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
220
221 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
222}
223
224static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
225{
226 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
227
228 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
229}
230
/* MMIO ops for the TIMA region; accesses of 1 to 8 bytes, big-endian */
const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
244
/*
 * Reset an END to its default state: all fields cleared, with the
 * notification (ESn) and escalation (ESe) ESBs set to Q=1, i.e. off.
 */
static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}
252
/*
 * Machine reset handler.  Valid EASes are kept valid but masked, so the
 * LISN allocation survives reset; invalid entries are cleared.  All
 * ENDs are returned to their default state.
 */
static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * Walk the EAT: a valid EAS marks an allocated LISN, keep it
     * allocated but mask it until the guest reconfigures it.
     */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}
278
/*
 * Instance init: create the child IRQ source and END source objects and
 * mark the KVM device as not connected (fd == -1).
 */
static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device yet */
    xive->fd = -1;
}
291
/*
 * Realize: configure and realize the child source objects, allocate the
 * EAS and END tables, create the TIMA region and map the three MMIO
 * windows (source ESBs, END ESBs, TIMA).
 */
static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* "nr-irqs" and "nr-ends" properties must have been set by the machine */
    g_assert(xive->nr_irqs);
    g_assert(xive->nr_ends);

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the internal source, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);

    /*
     * Initialize the END ESB source.  NOTE(review): "nr-ends" is sized
     * from nr_irqs, not nr_ends — looks intentional, confirm against
     * upstream history.
     */
    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);

    /* The END ESB window follows the source ESB window in the VC region */
    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    /* Device tree node name, based on the user TIMA page address */
    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization: 4 pages of (1 << TM_SHIFT) bytes each */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /*
     * Map all regions at their default guest physical addresses.
     */
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}
360
361static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
362 uint32_t eas_idx, XiveEAS *eas)
363{
364 SpaprXive *xive = SPAPR_XIVE(xrtr);
365
366 if (eas_idx >= xive->nr_irqs) {
367 return -1;
368 }
369
370 *eas = xive->eat[eas_idx];
371 return 0;
372}
373
374static int spapr_xive_get_end(XiveRouter *xrtr,
375 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
376{
377 SpaprXive *xive = SPAPR_XIVE(xrtr);
378
379 if (end_idx >= xive->nr_ends) {
380 return -1;
381 }
382
383 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
384 return 0;
385}
386
387static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
388 uint32_t end_idx, XiveEND *end,
389 uint8_t word_number)
390{
391 SpaprXive *xive = SPAPR_XIVE(xrtr);
392
393 if (end_idx >= xive->nr_ends) {
394 return -1;
395 }
396
397 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
398 return 0;
399}
400
/*
 * XiveRouter hook: fetch the NVT of a vCPU.  sPAPR has no NVT table;
 * a minimal valid NVT is synthesized on the fly when the target vCPU
 * exists.  Returns -1 for an unknown target.
 */
static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* No vCPU behind this NVT id */
        return -1;
    }

    /*
     * Only the VALID bit matters to the router on sPAPR; presentation
     * goes through the TIMA, not through NVT state.
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}
419
/*
 * XiveRouter hook: write back an NVT.  Never called on sPAPR since
 * there is no NVT table to update — reaching this is a programming
 * error.
 */
static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    g_assert_not_reached();
}
431
/*
 * XivePresenter hook: scan all vCPU thread contexts for one matching
 * the given NVT.  Returns the number of matches (at most 1 is filled in
 * *match), or -1 if more than one context matched — a duplicate that
 * the guest should not have configured.
 */
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool cam_ignore, uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * A vCPU may not have an interrupt context yet (e.g. during
         * hotplug); skip it.
         */
        if (!tctx) {
            continue;
        }

        /* Check the thread context CAM lines against the NVT */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        /*
         * ring == -1 means no match; otherwise record the context,
         * refusing duplicates.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}
477
/* XiveRouter hook: sPAPR uses a single, fixed block id */
static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
{
    return SPAPR_XIVE_BLOCK_ID;
}
482
483static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
484 uint8_t *pq)
485{
486 SpaprXive *xive = SPAPR_XIVE(xrtr);
487
488 assert(SPAPR_XIVE_BLOCK_ID == blk);
489
490 *pq = xive_source_esb_get(&xive->source, idx);
491 return 0;
492}
493
494static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
495 uint8_t *pq)
496{
497 SpaprXive *xive = SPAPR_XIVE(xrtr);
498
499 assert(SPAPR_XIVE_BLOCK_ID == blk);
500
501 *pq = xive_source_esb_set(&xive->source, idx, *pq);
502 return 0;
503}
504
505
/* Migration of one END: all eight 32-bit words */
static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
522
/* Migration of one EAS: a single 64-bit word */
static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};
532
533static int vmstate_spapr_xive_pre_save(void *opaque)
534{
535 SpaprXive *xive = SPAPR_XIVE(opaque);
536
537 if (spapr_xive_in_kernel(xive)) {
538 return kvmppc_xive_pre_save(xive);
539 }
540
541 return 0;
542}
543
544
545
546
547
548static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
549{
550 SpaprXive *xive = SPAPR_XIVE(intc);
551
552 if (spapr_xive_in_kernel(xive)) {
553 return kvmppc_xive_post_load(xive, version_id);
554 }
555
556 return 0;
557}
558
/* Top-level migration stream: the EAS and END tables */
static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level via sicc->post_load */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};
574
/*
 * SpaprInterruptController hook: allocate a LISN.  The EAS is marked
 * valid and masked; the source is flagged LSI if requested.  Returns
 * -EBUSY when the LISN is already claimed.  When KVM is in use, the
 * kernel source state is reset as well.
 */
static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_claim_irq(lisn, lsi);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number: valid but
     * masked until configured by the guest.
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}
604
/*
 * SpaprInterruptController hook: release a LISN by clearing the VALID
 * bit of its EAS.
 */
static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_free_irq(lisn);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}
614
/* Device properties; nr-irqs and nr-ends must be set by the machine */
static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
    DEFINE_PROP_END_OF_LIST(),
};
623
624static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
625 PowerPCCPU *cpu, Error **errp)
626{
627 SpaprXive *xive = SPAPR_XIVE(intc);
628 Object *obj;
629 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
630
631 obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
632 if (!obj) {
633 return -1;
634 }
635
636 spapr_cpu->tctx = XIVE_TCTX(obj);
637 return 0;
638}
639
/*
 * Program the OS CAM line of a thread context: set the VO (valid) bit
 * together with the CAM value in the QW1 word 2 register.
 */
static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}
645
/*
 * SpaprInterruptController hook: reset the thread interrupt context of
 * a vCPU and re-program its OS CAM line from the vCPU's NVT id, so the
 * presenter can match interrupts to this thread again.
 */
static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread, the
     * hypervisor pushes its identifier in the OS CAM line; emulate that
     * here at reset.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}
664
665static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
666 PowerPCCPU *cpu)
667{
668 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
669
670 xive_tctx_destroy(spapr_cpu->tctx);
671 spapr_cpu->tctx = NULL;
672}
673
674static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
675{
676 SpaprXive *xive = SPAPR_XIVE(intc);
677
678 trace_spapr_xive_set_irq(irq, val);
679
680 if (spapr_xive_in_kernel(xive)) {
681 kvmppc_xive_source_set_irq(&xive->source, irq, val);
682 } else {
683 xive_source_set_irq(&xive->source, irq, val);
684 }
685}
686
687static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
688{
689 SpaprXive *xive = SPAPR_XIVE(intc);
690 CPUState *cs;
691
692 CPU_FOREACH(cs) {
693 PowerPCCPU *cpu = POWERPC_CPU(cs);
694
695 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
696 }
697
698 spapr_xive_pic_print_info(xive, mon);
699}
700
/*
 * SpaprInterruptController hook: populate the device tree node for the
 * XIVE interrupt controller (TIMA addresses, LISN ranges, EQ sizes,
 * reserved priorities).
 */
static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];

    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };

    /* Single supported EQ size exposed to the guest: 64K (2^16) */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16),
    };

    /*
     * Priority range reserved for the hypervisor: [hv_prio, 0xff],
     * expressed as a (start, count) pair.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(xive->hv_prio),
        cpu_to_be32(0xff - xive->hv_prio),
    };

    /* Thread Interrupt Management Area: user and OS pages */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For SLOF and the guest interrupt mapping */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property lives under the root
     * node, not under the controller node.
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
763
764static int spapr_xive_activate(SpaprInterruptController *intc,
765 uint32_t nr_servers, Error **errp)
766{
767 SpaprXive *xive = SPAPR_XIVE(intc);
768
769 if (kvm_enabled()) {
770 int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
771 errp);
772 if (rc < 0) {
773 return rc;
774 }
775 }
776
777
778 spapr_xive_mmio_set_enabled(xive, true);
779
780 return 0;
781}
782
783static void spapr_xive_deactivate(SpaprInterruptController *intc)
784{
785 SpaprXive *xive = SPAPR_XIVE(intc);
786
787 spapr_xive_mmio_set_enabled(xive, false);
788
789 if (spapr_xive_in_kernel(xive)) {
790 kvmppc_xive_disconnect(intc);
791 }
792}
793
/* XivePresenter hook: report whether the KVM XIVE device is in use */
static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
{
    return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
}
798
/*
 * Class init: wire the device, XiveRouter, SpaprInterruptController and
 * XivePresenter virtual methods to the implementations above.
 */
static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd = &vmstate_spapr_xive;

    /* XiveRouter: routing table accessors */
    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_pq = spapr_xive_get_pq;
    xrc->set_pq = spapr_xive_set_pq;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    /* SpaprInterruptController: machine-level IRQ backend operations */
    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    /* XivePresenter: interrupt presentation */
    xpc->match_nvt = spapr_xive_match_nvt;
    xpc->in_kernel = spapr_xive_in_kernel_xptr;
}
837
/* QOM type registration: SpaprXive derives from XiveRouter */
static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};
850
/* Register the type with QOM at module load */
static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
879{
880 return priority >= xive->hv_prio;
881}
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
/* H_INT_GET_SOURCE_INFO flag bits returned in args[0] */
#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB accessed via hcall only */
#define SPAPR_XIVE_SRC_LSI       PPC_BIT(61) /* level-sensitive source */
#define SPAPR_XIVE_SRC_TRIGGER   PPC_BIT(62) /* trigger page == ESB mgmt page */

#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* EOI via store supported */
918
/*
 * The H_INT_GET_SOURCE_INFO hcall: report the characteristics of an
 * interrupt source to the guest.  Returns in args[]: flags, the ESB
 * management page address, the trigger page address (or -1), and the
 * ESB page shift.
 */
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_get_source_info(flags, lisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* No flags are defined for this hcall */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * Build the flags word: single-page ESBs trigger through the
     * management page; advertise store EOI when the source supports it.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * LSIs are reported as requiring hcall-based ESB access, in
     * addition to the LSI flag itself.
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    /* ESB management page address, unless hcall access is mandated */
    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    /* Trigger page address, only meaningful with 2-page ESBs */
    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    /* Per-page shift: a 2-page ESB slot is split in two */
    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
/* H_INT_SET_SOURCE_CONFIG flag bits accepted in args[0] */
#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62) /* update the EISN (END data) */
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63) /* mask the source */
1030
/*
 * The H_INT_SET_SOURCE_CONFIG hcall: route a source (LISN) to a target
 * vCPU/priority pair, optionally masking it or updating its EISN.  The
 * new EAS is pushed to KVM when in use, then committed to the EAT.
 */
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong target = args[2];
    target_ulong priority = args[3];
    target_ulong eisn = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS (valid + masked) */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate the target/priority pair and compute the END this source
     * should notify.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    /* Forward the configuration to the KVM device first */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
/*
 * The H_INT_GET_SOURCE_CONFIG hcall: report the current routing of a
 * source.  Returns in args[]: target vCPU id, priority (0xff when
 * masked) and the EISN (END data).
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_get_source_config(flags, lisn);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* No flags are defined for this hcall */
    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR; recover target from the END */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    /* A masked source reports priority 0xff */
    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
/*
 * The H_INT_GET_QUEUE_INFO hcall: report the END ESB page address and
 * the queue size (log2, 0 when no queue is configured) for a given
 * target/priority pair.
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_info(flags, target, priority);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* No flags are defined for this hcall */
    if (flags) {
        return H_PARAMETER;
    }

    /* Hypervisor priorities may not be queried by the guest */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Resolve the END of the target/priority pair */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Each END owns a pair of ESB pages: hence the (esb_shift + 1) stride */
    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        /* QSIZE is encoded relative to a 4K (2^12) page */
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
/* H_INT_SET_QUEUE_CONFIG flag: notify on every EQ entry, unconditionally */
#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

/*
 * The H_INT_SET_QUEUE_CONFIG hcall: configure (or reset, when qsize is
 * 0) the event queue of a target/priority pair.  Valid queue sizes are
 * 4K/64K/2M/16M and the page must be naturally aligned.  The END is
 * pushed to KVM when in use, then committed to the ENDT.
 */
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);

    /* XIVE exploitation mode must have been negotiated at CAS */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /* Hypervisor priorities may not be configured by the guest */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /* Resolve the END of the target/priority pair */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    /* Work on a local copy; committed only on success */
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        /* Queue page must be naturally aligned to its size */
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        /* QSIZE is stored relative to a 4K (2^12) page */
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* qsize 0 resets the END to its default state */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    /* qsize is 12/16/21/24 here: sanity-check that the EQ is mappable */
    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ page by mapping it; the mapping is
         * released immediately, only the check matters.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* target was already validated above, so this cannot fail */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /* Record the owning NVT and the priority in the END */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * Reset the queue state: generation bit set, index back to 0, and
     * mark the END valid.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /* Forward the configuration to the KVM device before committing */
out:
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Commit the END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
/*
 * The H_INT_GET_QUEUE_CONFIG hcall() returns the configuration of the
 * notification event queue (END) assigned to the "target" thread at
 * the given "priority".
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: also return the event queue generation number
 *                 and offset counter
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *       Bit 62: Event Queue Generation Number (g) if "Debug" = 1
 *       Bit 63: the Unconditional Notify (n) bit of the END
 * - R5: The logical real address of the start of the EQ, or 0 if no
 *       queue is enabled
 * - R6: The power of 2 EQ size, or 0 if no queue is enabled
 * - R7: The Event Queue Offset Counter if "Debug" = 1, else 0
 */
#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_config(flags, target, priority);

    /* The hcall is only valid in XIVE exploitation mode */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* Only the Debug flag is defined */
    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * Priorities reserved by the platform ("ibm,plat-res-int-priorities")
     * cannot be queried by the guest.
     */
    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition by locating the END it maps to.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    /* Bit 63 of the returned flags reports unconditional notification */
    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        /* END_W0_QSIZE encodes the size as a power of 2 minus 12 */
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    /*
     * Presumably refreshes the cached END from the in-kernel KVM XIVE
     * device, so the debug fields read below are up to date — TODO
     * confirm against the KVM backend.
     */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* EQ Offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
1578 SpaprMachineState *spapr,
1579 target_ulong opcode,
1580 target_ulong *args)
1581{
1582 target_ulong flags = args[0];
1583
1584 trace_spapr_xive_set_os_reporting_line(flags);
1585
1586 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1587 return H_FUNCTION;
1588 }
1589
1590
1591
1592
1593
1594
1595
1596 return H_FUNCTION;
1597}
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
1618 SpaprMachineState *spapr,
1619 target_ulong opcode,
1620 target_ulong *args)
1621{
1622 target_ulong flags = args[0];
1623
1624 trace_spapr_xive_get_os_reporting_line(flags);
1625
1626 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1627 return H_FUNCTION;
1628 }
1629
1630
1631
1632
1633
1634
1635
1636 return H_FUNCTION;
1637}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page of an interrupt source instead of accessing the MMIO page
 * directly.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */
#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    target_ulong offset = args[2];
    target_ulong data = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    /* The hcall is only valid in XIVE exploitation mode */
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /* Only the Store flag is defined */
    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * NOTE(review): this bound admits offset == ESB page size; '>='
     * looks intended — confirm against the PAPR specification.
     */
    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (spapr_xive_in_kernel(xive)) {
        /* Forward the ESB access to the in-kernel KVM XIVE device */
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        /* Emulated: perform an 8-byte access on the source's ESB
         * management page through the DMA address space */
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE),
                          MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        /* Loads return the fetched data, stores return -1 */
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740static target_ulong h_int_sync(PowerPCCPU *cpu,
1741 SpaprMachineState *spapr,
1742 target_ulong opcode,
1743 target_ulong *args)
1744{
1745 SpaprXive *xive = spapr->xive;
1746 XiveEAS eas;
1747 target_ulong flags = args[0];
1748 target_ulong lisn = args[1];
1749
1750 trace_spapr_xive_sync(flags, lisn);
1751
1752 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1753 return H_FUNCTION;
1754 }
1755
1756 if (flags) {
1757 return H_PARAMETER;
1758 }
1759
1760 if (lisn >= xive->nr_irqs) {
1761 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1762 lisn);
1763 return H_P2;
1764 }
1765
1766 eas = xive->eat[lisn];
1767 if (!xive_eas_is_valid(&eas)) {
1768 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1769 lisn);
1770 return H_P2;
1771 }
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783 if (spapr_xive_in_kernel(xive)) {
1784 Error *local_err = NULL;
1785
1786 kvmppc_xive_sync_source(xive, lisn, &local_err);
1787 if (local_err) {
1788 error_report_err(local_err);
1789 return H_HARDWARE;
1790 }
1791 }
1792 return H_SUCCESS;
1793}
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809static target_ulong h_int_reset(PowerPCCPU *cpu,
1810 SpaprMachineState *spapr,
1811 target_ulong opcode,
1812 target_ulong *args)
1813{
1814 SpaprXive *xive = spapr->xive;
1815 target_ulong flags = args[0];
1816
1817 trace_spapr_xive_reset(flags);
1818
1819 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1820 return H_FUNCTION;
1821 }
1822
1823 if (flags) {
1824 return H_PARAMETER;
1825 }
1826
1827 device_cold_reset(DEVICE(xive));
1828
1829 if (spapr_xive_in_kernel(xive)) {
1830 Error *local_err = NULL;
1831
1832 kvmppc_xive_reset(xive, &local_err);
1833 if (local_err) {
1834 error_report_err(local_err);
1835 return H_HARDWARE;
1836 }
1837 }
1838 return H_SUCCESS;
1839}
1840
1841void spapr_xive_hcall_init(SpaprMachineState *spapr)
1842{
1843 spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
1844 spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
1845 spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
1846 spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
1847 spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
1848 spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
1849 spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
1850 h_int_set_os_reporting_line);
1851 spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
1852 h_int_get_os_reporting_line);
1853 spapr_register_hypercall(H_INT_ESB, h_int_esb);
1854 spapr_register_hypercall(H_INT_SYNC, h_int_sync);
1855 spapr_register_hypercall(H_INT_RESET, h_int_reset);
1856}
1857