/*
 * QEMU PowerPC XIVE interrupt controller model
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

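/*
 * Record a new pending priority in the IPB and recompute the PIPR
 * from it
 */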
static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

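/*
 * Acknowledge a pending interrupt: lower the CPU output line, latch
 * the PIPR into the CPPR and clear the exception bit in the NSR.
 * Returns (NSR << 8) | CPPR as presented by an ACK load.
 */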
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(tctx->output);

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop the exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}

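/*
 * Set the exception bit in the NSR and raise the CPU output line when
 * the pending priority (PIPR) is more favored than the current CPPR
 */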
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(tctx->output);
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint64_t ret;

    ret = tctx->regs[TM_QW2_HV_POOL + TM_WORD2] & TM_QW2W2_POOL_CAM;
    tctx->regs[TM_QW2_HV_POOL + TM_WORD2] &= ~TM_QW2W2_POOL_CAM;
    return ret;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */
static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0,
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0,
    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
    3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0,
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE] = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* Filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * register values.
 */
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t page_offset;
    uint32_t op_offset;
    unsigned size;
    void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                          unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA"
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR], be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
    int i;

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

static void xive_tctx_reset(void *dev)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);

    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    qemu_register_reset(xive_tctx_reset, dev);
}

static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
{
    qemu_unregister_reset(xive_tctx_reset, dev);
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->unrealize = xive_tctx_unrealize;
    dc->vmsd = &vmstate_xive_tctx;
}

static const TypeInfo xive_tctx_info = {
    .name = TYPE_XIVE_TCTX,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

/*
 * XIVE ESB helpers
 */
static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

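/*
 * Trigger an event on an ESB: returns true when the event must be
 * forwarded as a notification, false when it is coalesced (queued) or
 * the ESB is off
 */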
static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

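/*
 * Perform an EOI on an ESB: a queued event turns pending again and
 * returns true to signal that a new notification must be sent
 */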
static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

/*
 * XIVE Interrupt Source (or IVSE)
 */
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         Store EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

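/*
 * qemu_irq input handler of the source: LSIs follow the input level,
 * MSIs only trigger on a rising edge
 */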
void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not reset the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_source_esb_ops, xsrc, "xive.esb",
                          (1ull << xsrc->esb_shift) * xsrc->nr_irqs);

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    dc->props = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
}

static const TypeInfo xive_source_info = {
    .name = TYPE_XIVE_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init = xive_source_class_init,
};

/*
 * XiveEND helpers
 */
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
        | be32_to_cpu(end->w3);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * Print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
        | be32_to_cpu(end->w3);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    if (!xive_end_is_valid(end)) {
        return;
    }

    monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
                   "% 6d/%5d ^%d", end_idx,
                   xive_end_is_valid(end) ? 'v' : '-',
                   xive_end_is_enqueue(end) ? 'q' : '-',
                   xive_end_is_notify(end) ? 'n' : '-',
                   xive_end_is_backlog(end) ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   priority, nvt, qaddr_base, qindex, qentries, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]\n");
}

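/*
 * Push an event data word in the END Event Queue and flip the
 * generation bit when the queue wraps
 */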
static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32
        | be32_to_cpu(end->w3);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

/*
 * The HW CAM line of a thread is built from the chip identifier and
 * the thread number : 0b1 || 4-bit chip id || 7-bit thread id
 */
static uint32_t hw_cam_line(uint8_t chip_id, uint8_t tid)
{
    return 1 << 11 | (chip_id & 0xf) << 7 | (tid & 0x7f);
}

static bool xive_presenter_tctx_match_hw(XiveTCTX *tctx,
                                         uint8_t nvt_blk, uint32_t nvt_idx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return hw_cam_line((pir >> 8) & 0xf, pir & 0x7f) ==
        hw_cam_line(nvt_blk, nvt_idx);
}

/*
 * The thread context register words are in big-endian format.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            xive_presenter_tctx_match_hw(tctx, nvt_blk, nvt_idx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1: User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    /*
     * Loop on all the thread contexts of the machine to find one
     * matching the NVT being notified
     */
    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        /*
         * Check the thread context CAM lines and record matches
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);

        /*
         * Save the context and follow on to catch duplicates, which we
         * don't support yet
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}

/*
 * This is the Presenter Engine of the model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests to find one matching NVT (or
 * more) dispatched on the processor threads. When a match is found,
 * the request is forwarded to the matching thread context.
 */
static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveNVT nvt;
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread, record the
     * interrupt in the IPB of the NVT structure. Backlog and
     * escalation are not modeled.
     */
    ipb_update((uint8_t *) &nvt.w4, priority);
    xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. The PowerBus is not modeled, but the END trigger
 * message carries the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Commit the updated queue index and generation bit (word 1) */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
        bool notify = xive_esb_trigger(&pq);

        if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
            end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
            xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
        }

        /* ESn[Q]=1 : end of notification */
        if (!notify) {
            return;
        }
    }

    /*
     * Follows with the Presenter notification
     */
    xive_presenter_notify(xrtr, format,
                          xive_get_field32(END_W6_NVT_BLOCK, end.w6),
                          xive_get_field32(END_W6_NVT_INDEX, end.w6),
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
}

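/*
 * XiveNotifier handler of the Router: an event coming from an
 * interrupt source is looked up in the EAS table and forwarded to the
 * END it targets
 */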
void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
    uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE has a State Bit Cache in HW. It is not modeled here:
     * the PQ state bits are kept in the sources themselves.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned a pair of ESB pages: the even page for the
     * ESn (notification) bits and the odd page for the ESe
     * (escalation) bits
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    dc->props = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)