#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

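/*
 * XIVE Thread Interrupt Management context
 *
 * The TIMA holds the per-thread interrupt state in four rings (USER,
 * OS, POOL and PHYS). The helpers below convert between priorities,
 * the Interrupt Pending Buffer (IPB) and the Pending Interrupt
 * Priority Register (PIPR) of a ring.
 */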
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

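/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification. The IPB keeps one bit per
 * priority, bit 7 being the most favored (priority 0).
 */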
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0;
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    xive_tctx_notify(tctx, ring);
}

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

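/*
 * Special TIMA operations on the HV (PHYS) ring: CPPR update,
 * interrupt acknowledgment, pool context pull and VT bit push/poll
 * on WORD2.
 */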
static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

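/*
 * Access maps for the four TIMA pages (HW, HV pool, OS and user).
 * Each entry describes one byte of the first 64 bytes of the TIMA:
 * bit 0 grants write access and bit 1 read access, so 0 means no
 * access, 2 read-only and 3 read/write (see xive_tm_mask() below).
 */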
static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0,
    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0,
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0,
    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0,
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE] = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

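/*
 * Compute the byte access mask of a TIMA access from the page on
 * which it is done and the access maps above.
 */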
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    return ret & mask;
}

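/*
 * Special TIMA operations on the OS ring: interrupt acknowledgment,
 * CPPR update and injection of a pending interrupt at a given
 * priority.
 */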
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

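/*
 * Dispatch table for the TIMA special operations which cannot be done
 * with raw byte loads and stores because they have side effects on
 * the interrupt context. Entries are matched on page, offset, size
 * and access direction.
 */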
typedef struct XiveTmOp {
    uint8_t page_offset;
    uint32_t op_offset;
    unsigned size;
    void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                          unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

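/*
 * TIMA MMIO handlers. The upper 2K of each TIMA page only supports
 * the special operations of the table above. The lower 2K first looks
 * for a special operation and otherwise falls back to raw register
 * accesses guarded by the access maps.
 */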
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /* The upper 2K of each TIMA page only supports special operations */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /* Then try a special operation in the lower 2K region */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /* Otherwise, a raw access to the TIMA registers */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /* The upper 2K of each TIMA page only supports special operations */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /* Then try a special operation in the lower 2K region */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /* Otherwise, a raw access to the TIMA registers */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR], be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

static void xive_tctx_reset(void *dev)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);

    memset(tctx->regs, 0, sizeof(tctx->regs));

    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    qemu_register_reset(xive_tctx_reset, dev);
}

static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
{
    qemu_unregister_reset(xive_tctx_reset, dev);
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->unrealize = xive_tctx_unrealize;
    dc->vmsd = &vmstate_xive_tctx;
}

static const TypeInfo xive_tctx_info = {
    .name = TYPE_XIVE_TCTX,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

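/*
 * Event State Buffer (ESB) PQ bit helpers. A trigger only raises a
 * notification from the RESET state; in the PENDING and QUEUED states
 * the event is recorded as queued, and the OFF state masks the
 * source. An EOI re-raises a notification if an event was queued
 * while the previous one was being handled.
 */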
static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

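/*
 * XIVE Interrupt Source. Each interrupt has a PQ state, kept in the
 * status array, which is exposed to software through ESB MMIO pages.
 */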
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

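/*
 * ESB MMIO loads implement the EOI, GET and SET_PQ special operations
 * on the PQ bits of a source. Loads on a trigger page (2-page ESB
 * mode) are invalid.
 */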
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

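/*
 * ESB MMIO stores. A store to offset 0 (or anywhere on the trigger
 * page in 2-page mode) triggers the source, Store EOI is only allowed
 * when the XIVE_SRC_STORE_EOI flag is set, and the SET_PQ offsets
 * update the PQ bits without notification.
 */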
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    dc->props = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
}

static const TypeInfo xive_source_info = {
    .name = TYPE_XIVE_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init = xive_source_class_init,
};

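/*
 * XIVE Event Notification Descriptor (END) helpers: event queue dump,
 * END state dump and event data enqueue.
 */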
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    if (!xive_end_is_valid(end)) {
        return;
    }

    monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
                   "% 6d/%5d ^%d", end_idx,
                   xive_end_is_valid(end) ? 'v' : '-',
                   xive_end_is_enqueue(end) ? 'q' : '-',
                   xive_end_is_notify(end) ? 'n' : '-',
                   xive_end_is_backlog(end) ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   priority, nvt, qaddr_base, qindex, qentries, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

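/*
 * XIVE Router. Routes an event from a source to an Event Notification
 * Descriptor (END) and from there to the matching thread context. The
 * accessors below dispatch to the machine specific router class which
 * provides the EAS, END and NVT tables.
 */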
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}

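/*
 * Match a thread context against an event's CAM line. The PHYS ring
 * matches a CAM line built from the PIR, the POOL and OS rings match
 * the CAM values programmed in their WORD2, and format 1 events also
 * match a logical server number in the USER ring.
 */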
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    if (format == 0) {
        if (cam_ignore == true) {
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}

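/*
 * Look up the NVT and scan the thread contexts for a match. On a
 * match, the IPB of the matched ring is updated and an exception is
 * raised. Otherwise, the pending priority is recorded in the NVT
 * word 4 so that it can be presented when the NVT is dispatched.
 */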
static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveNVT nvt;
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
        return;
    }

    ipb_update((uint8_t *) &nvt.w4, priority);
    xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
}

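/*
 * An END trigger: the event data is enqueued when the END has a
 * queue, and the thread contexts are then notified unless the END is
 * silent (format 0, priority 0xff) or unconditional notification is
 * disabled and the END notification ESB (ESn) did not fire.
 */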
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;

    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);

        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    if (format == 0 && priority == 0xff) {
        return;
    }

    if (!xive_end_is_notify(&end)) {
        uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
        bool notify = xive_esb_trigger(&pq);

        if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
            end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
            xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
        }

        if (!notify) {
            return;
        }
    }

    xive_presenter_notify(xrtr, format,
                          xive_get_field32(END_W6_NVT_BLOCK, end.w6),
                          xive_get_field32(END_W6_NVT_INDEX, end.w6),
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
    uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
    XiveEAS eas;

    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        return;
    }

    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

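/*
 * END ESB MMIO loads operate on the ESn (even page) or ESe (odd page)
 * PQ bits of the END selected by the address. Stores are invalid.
 */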
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    dc->props = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)