/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "trace.h"

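/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */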
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

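/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification
 */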
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0;
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;

        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
    }

    return (nsr << 8) | regs[TM_CPPR];
}

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    uint8_t *regs = &tctx->regs[ring];

    trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                             regs[TM_IPB], regs[TM_PIPR],
                             cppr, regs[TM_NSR]);

    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_IPB] |= ipb;
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
    xive_tctx_notify(tctx, ring);
}

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

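/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */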
static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                      hwaddr offset, unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

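/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */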
static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

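/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */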
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

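/*
 * Computes a register access mask for a given offset in the TIMA
 */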
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}

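/*
 * An ACK load on the OS ring returns the NSR and CPPR values in a
 * combined word and acknowledges the pending OS exception
 */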
static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

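/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */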
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
                                   hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_ipb_update(tctx, TM_QW1_OS, priority_to_ipb(value & 0xff));
}

static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
                               uint32_t *nvt_idx, bool *vo)
{
    if (nvt_blk) {
        *nvt_blk = xive_nvt_blk(cam);
    }
    if (nvt_idx) {
        *nvt_idx = xive_nvt_idx(cam);
    }
    if (vo) {
        *vo = !!(cam & TM_QW1W2_VO);
    }
}

static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
                                     uint32_t *nvt_idx, bool *vo)
{
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t cam = be32_to_cpu(qw1w2);

    xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
    return qw1w2;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
{
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                    hwaddr offset, unsigned size)
{
    uint32_t qw1w2;
    uint32_t qw1w2_new;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
                      nvt_blk, nvt_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
    xive_tctx_set_os_cam(tctx, qw1w2_new);
    return qw1w2;
}

static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
                                  uint8_t nvt_blk, uint32_t nvt_idx)
{
    XiveNVT nvt;
    uint8_t ipb;

    /*
     * Grab the associated NVT to pull the pending bits, and merge
     * them with the IPB of the thread interrupt context registers
     */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);

    if (ipb) {
        /* Reset the NVT value */
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /* Merge in current context */
        xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
    }
}

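/*
 * Updating the OS CAM line can trigger a resend of interrupt
 */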
static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool vo;

    xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);

    /* First update the registers */
    xive_tctx_set_os_cam(tctx, qw1w2);

    /* Check the interrupt pending bits */
    if (vo) {
        xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
    }
}

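/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */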
typedef struct XiveTmOp {
    uint8_t page_offset;
    uint32_t op_offset;
    unsigned size;
    void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset,
                          uint64_t value, unsigned size);
    uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                             unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page is allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}

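/*
 * TIMA MMIO handlers
 */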
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                        uint64_t value, unsigned size)
{
    const XiveTmOp *xto;

    trace_xive_tctx_tm_write(offset, size, value);

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(xptr, tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(xptr, tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
                           unsigned size)
{
    const XiveTmOp *xto;
    uint64_t ret;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Then, for special operations in the region below 2K
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        ret = xto->read_handler(xptr, tctx, offset, size);
        goto out;
    }

    /*
     * Finish with raw access to the register values
     */
    ret = xive_tm_raw_read(tctx, offset, size);
out:
    trace_xive_tctx_tm_read(offset, size, ret);
    return ret;
}

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR],
                           be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

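/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */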
#define xive_in_kernel(xptr)                                            \
    (kvm_irqchip_in_kernel() &&                                         \
     ({                                                                 \
         XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);      \
         xpc->in_kernel ? xpc->in_kernel(xptr) : false;                 \
     }))

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /*
     * Skip partially initialized vCPUs. This can happen when vCPUs
     * are hot plugged or unplugged.
     */
    if (!tctx) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (xive_in_kernel(tctx->xptr)) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;

    assert(tctx->cs);
    assert(tctx->xptr);

    cpu = POWERPC_CPU(tctx->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (xive_in_kernel(tctx->xptr)) {
        if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    XiveTCTX *tctx = XIVE_TCTX(opaque);
    Error *local_err = NULL;
    int ret;

    if (xive_in_kernel(tctx->xptr)) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static Property xive_tctx_properties[] = {
    DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
                     XivePresenter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    device_class_set_props(dc, xive_tctx_properties);
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name = TYPE_XIVE_TCTX,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
{
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
    object_unref(obj);
    object_property_set_link(obj, "cpu", cpu, &error_abort);
    object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        return NULL;
    }
    return obj;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unparent(obj);
}

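/*
 * XIVE ESB helpers
 */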
static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

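/*
 * XIVE Interrupt Source (or IVSE)
 */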
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

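/*
 * Returns whether the event notification should be forwarded.
 */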
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

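/*
 * Returns whether the event notification should be forwarded.
 */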
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

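/*
 * Returns whether the event notification should be forwarded.
 */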
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

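/*
 * Forward the source event notification to the Router
 */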
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

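/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */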
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

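/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1 (invalid)    EOI and return 0|1
 * 0x400 .. 0x7FF       -1 (invalid)    EOI and return 0|1
 * 0x800 .. 0xBFF       -1 (invalid)    return PQ
 * 0xC00 .. 0xCFF       -1 (invalid)    return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1 (invalid)    return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1 (invalid)    return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1 (invalid)    return PQ and atomically PQ=11
 */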
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page loads are invalid */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    trace_xive_source_esb_read(addr, srcno, ret);

    return ret;
}

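/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */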
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    trace_xive_source_esb_write(addr, srcno, value);

    /* In a two pages ESB MMIO setting, trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    size_t esb_len = xive_source_esb_len(xsrc);

    assert(xsrc->xive);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
    memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
                          &xive_source_esb_ops, xsrc, "xive.esb-emulated",
                          esb_len);
    memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

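/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */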
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
                     XiveNotifier *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    device_class_set_props(dc, xive_source_properties);
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name = TYPE_XIVE_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init = xive_source_class_init,
};

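/*
 * XiveEND helpers
 */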
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end) ? 'v' : '-',
                   xive_end_is_enqueue(end) ? 'q' : '-',
                   xive_end_is_notify(end) ? 'n' : '-',
                   xive_end_is_backlog(end) ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   xive_end_is_firmware(end) ? 'f' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

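/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */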
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

static int xive_router_get_block_id(XiveRouter *xrtr)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

static void xive_router_realize(DeviceState *dev, Error **errp)
{
    XiveRouter *xrtr = XIVE_ROUTER(dev);

    assert(xrtr->xfb);
}

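/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7Bit)
 */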
static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));

    return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
}

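/*
 * The thread context register words are in big-endian format.
 */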
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                              uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

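/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. When a
 * match is found, the IPB of the matched ring is updated, which may
 * raise an interrupt exception to the thread.
 */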
static bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match
     */
    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
                           priority, logic_serv, &match);
    if (count < 0) {
        return false;
    }

    /* handle CPU exception delivery */
    if (count) {
        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
        xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority));
    }

    return !!count;
}

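/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */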
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

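/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */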
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        trace_xive_router_end_notify(end_blk, end_idx, end_data);
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    trace_xive_router_end_escalate(end_blk, end_idx,
           (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
           (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
           (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

static Property xive_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    device_class_set_props(dc, xive_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive_router_realize;
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .instance_size = sizeof(XiveRouter),
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

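/*
 * END ESB MMIO loads
 */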
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    trace_xive_end_source_read(end_blk, end_idx, addr);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* TODO: forward the source event notification for routing */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

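/*
 * END ESB MMIO stores are invalid
 */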
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of ENDs needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive_end_source_properties);
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

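/*
 * XIVE Notifier
 */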
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

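/*
 * XIVE Presenter
 */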
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

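/*
 * XIVE Fabric
 */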
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)