/*
 * QEMU PowerPC XIVE interrupt controller model
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number.
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}
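
/*
 * A worked example of the two conversions above (an illustration added
 * here, assuming XIVE_PRIORITY_MAX is 7): an event at priority 5 sets
 * bit priority_to_ipb(5) = 1 << (7 - 5) = 0x04 in the IPB.  With only
 * that bit set, ipb_to_pipr(0x04) = clz32(0x04000000) = 5, so the PIPR
 * reports 5 as the most favored pending priority.  A lower priority
 * number always wins: if the IPB is 0x44 (priorities 1 and 5 pending),
 * the PIPR is 1.
 */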

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0;
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop the exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */
static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values.
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */
static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}
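
/*
 * Example (added for illustration): a 4-byte access at offset 0x10 of
 * the OS page covers the NSR, CPPR, IPB and LSMFB bytes of the OS
 * ring.  The OS view marks all four as readable but only the CPPR as
 * writable, so xive_tm_mask() returns 0xffffffff for a load and
 * 0x00ff0000 for a store at that offset.
 */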

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 byte stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 byte loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* Filter out reserved values */
    return ret & mask;
}

/*
 * Special TIMA operations. These are triggered by loads and stores at
 * well identified offsets and are listed in the xive_tm_operations
 * table below.
 */
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
                                    unsigned size)
{
    uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2;

    qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
    return qw1w2;
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t page_offset;
    uint32_t op_offset;
    unsigned size;
    void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                          unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen when vCPUs
     * are hotplugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF so that no interrupt appears pending
     * before the CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        /* Restore the thread interrupt context registers to KVM */
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name = TYPE_XIVE_TCTX,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_ref(cpu);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unref(object_property_get_link(obj, "cpu", &error_abort));
    object_unparent(obj);
}

/*
 * XIVE ESB helpers
 */
static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
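
/*
 * Summary of the PQ transitions implemented above (added as an
 * explanatory note).  The return value tells the caller whether an
 * event notification should be delivered:
 *
 *   trigger:  RESET -> PENDING (deliver), PENDING or QUEUED -> QUEUED
 *             (no delivery), OFF unchanged (no delivery)
 *
 *   EOI:      RESET or PENDING -> RESET (no delivery), QUEUED ->
 *             PENDING (deliver the queued event), OFF unchanged
 */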

/*
 * XIVE Interrupt Source (or IVSE)
 */
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}
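
/*
 * Note (added for illustration): in the ESB MMIO handlers below, the
 * source number is recovered from the address with
 * "addr >> xsrc->esb_shift" and the low 12 bits select the command
 * (EOI load, PQ read, PQ set).  A store to the trigger page, or to the
 * first 1K of a single page setting, fires the interrupt.
 */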

/*
 * ESB MMIO loads.  On the trigger page, loads are invalid.  On the
 * management page, a load in the EOI range performs an EOI and returns
 * whether the interrupt should be re-fired, a load in the GET range
 * returns the PQ bits, and a load in one of the SET_PQ ranges returns
 * the previous PQ bits while atomically setting new ones.
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page loads are invalid */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

/*
 * ESB MMIO stores.  On the trigger page, any store triggers the
 * interrupt.  On the management page, a store in the first 1K range
 * also acts as a trigger, a Store EOI is only allowed when the source
 * has the XIVE_SRC_STORE_EOI flag, and stores in the SET_PQ ranges
 * update the PQ bits directly.
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    dc->props = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by the machine code.
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name = TYPE_XIVE_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init = xive_source_class_init,
};

/*
 * XiveEND helpers
 */
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * Print out the [ (qindex - (width - 1)) .. qindex ] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end) ? 'v' : '-',
                   xive_end_is_enqueue(end) ? 'q' : '-',
                   xive_end_is_notify(end) ? 'n' : '-',
                   xive_end_is_backlog(end) ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}
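
/*
 * Note (added for illustration): each event queue entry is a
 * big-endian word whose top bit carries the queue generation and whose
 * low 31 bits carry the event data.  When the write index wraps back
 * to entry 0, the generation bit in END word 1 is flipped, which is
 * how the consumer of the queue distinguishes freshly written entries
 * from stale ones of the previous pass.
 */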

void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

/*
 * Encode the HW CAM line of the physical thread, using the chip id and
 * the thread id taken from the PIR.
 */
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}

/*
 * The thread context register words are in big-endian format.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier), not supported here
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    /*
     * Loop on all the thread interrupt contexts looking for a CAM line
     * matching the notified NVT.
     */
    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when vCPUs
         * are hotplugged or unplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record the matching
         * thread interrupt context.
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abandoned if a matching
 * NVT is not found.
 */
static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
    }

    return found;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);

        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /* Record the pending priority in the NVT structure */
        ipb_update((uint8_t *) &nvt.w4, priority);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even
     * page manages the ESn field while the odd page manages the ESe
     * field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    dc->props = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by the machine code.
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)