/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "trace.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}
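
/*
 * Return the NSR bit signalling a pending exception for the given
 * TIMA ring (OS or HV physical).
 */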
static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0;
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}
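
/*
 * Acknowledge an OS or HV interrupt: lower the output line, promote
 * the pending priority (PIPR) to the CPPR, clear the acknowledged IPB
 * bit and the NSR exception bit. Returns the ACK load value, with the
 * NSR (before the clear) in the upper byte and the updated CPPR in
 * the lower byte.
 */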
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;

        trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
    }

    return (nsr << 8) | regs[TM_CPPR];
}
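
/*
 * Raise the ring output line and record the exception in the NSR when
 * the pending priority (PIPR) is more favored than the current CPPR.
 */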
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
                               regs[TM_IPB], regs[TM_PIPR],
                               regs[TM_CPPR], regs[TM_NSR]);
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}
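
/*
 * Update the CPPR of a ring and re-evaluate the pending exception
 * state.
 */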
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    uint8_t *regs = &tctx->regs[ring];

    trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
                             regs[TM_IPB], regs[TM_PIPR],
                             cppr, regs[TM_NSR]);

    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}
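
/*
 * Merge new pending bits into the IPB of a ring, recompute the PIPR
 * and check whether an exception should be raised.
 */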
void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
{
    uint8_t *regs = &tctx->regs[ring];

    regs[TM_IPB] |= ipb;
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
    xive_tctx_notify(tctx, ring);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
                                hwaddr offset, uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}
153
154static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
155 hwaddr offset, unsigned size)
156{
157 return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
158}
159
160static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
161 hwaddr offset, unsigned size)
162{
163 uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
164 uint32_t qw2w2;
165
166 qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
167 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
168 return qw2w2;
169}
170
171static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
172 uint64_t value, unsigned size)
173{
174 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
175}
176
177static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
178 hwaddr offset, unsigned size)
179{
180 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
181}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */
196static const uint8_t xive_tm_hw_view[] = {
197 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
198 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0,
199 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
200 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0,
201};
202
203static const uint8_t xive_tm_hv_view[] = {
204 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
205 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0,
206 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0,
207 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0,
208};
209
210static const uint8_t xive_tm_os_view[] = {
211 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0,
212 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
213 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
214 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
215};
216
217static const uint8_t xive_tm_user_view[] = {
218 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
219 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
220 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
221 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
222};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
228static const uint8_t *xive_tm_views[] = {
229 [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
230 [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
231 [XIVE_TM_OS_PAGE] = xive_tm_os_view,
232 [XIVE_TM_USER_PAGE] = xive_tm_user_view,
233};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
238static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
239{
240 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
241 uint8_t reg_offset = offset & 0x3F;
242 uint8_t reg_mask = write ? 0x1 : 0x2;
243 uint64_t mask = 0x0;
244 int i;
245
246 for (i = 0; i < size; i++) {
247 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
248 mask |= (uint64_t) 0xff << (8 * (size - i - 1));
249 }
250 }
251
252 return mask;
253}
254
255static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
256 unsigned size)
257{
258 uint8_t ring_offset = offset & 0x30;
259 uint8_t reg_offset = offset & 0x3F;
260 uint64_t mask = xive_tm_mask(offset, size, true);
261 int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
267 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
268 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
269 HWADDR_PRIx"\n", offset);
270 return;
271 }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
277 for (i = 0; i < size; i++) {
278 uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
279 if (byte_mask) {
280 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
281 byte_mask;
282 }
283 }
284}
285
286static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
287{
288 uint8_t ring_offset = offset & 0x30;
289 uint8_t reg_offset = offset & 0x3F;
290 uint64_t mask = xive_tm_mask(offset, size, false);
291 uint64_t ret;
292 int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
298 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
299 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
300 HWADDR_PRIx"\n", offset);
301 return -1;
302 }

    /* Use the register offset for the raw values */
305 ret = 0;
306 for (i = 0; i < size; i++) {
307 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
308 }

    /* Filter out reserved values */
311 return ret & mask;
312}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * register values.
 */
321static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
322 hwaddr offset, unsigned size)
323{
324 return xive_tctx_accept(tctx, TM_QW1_OS);
325}
326
327static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
328 hwaddr offset, uint64_t value, unsigned size)
329{
330 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
331}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
337static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
338 hwaddr offset, uint64_t value, unsigned size)
339{
340 xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
341}
342
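/*
 * Decode an OS CAM line value into its NVT block/index and the VO
 * (valid) bit.
 */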
343static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
344 uint32_t *nvt_idx, bool *vo)
345{
346 if (nvt_blk) {
347 *nvt_blk = xive_nvt_blk(cam);
348 }
349 if (nvt_idx) {
350 *nvt_idx = xive_nvt_idx(cam);
351 }
352 if (vo) {
353 *vo = !!(cam & TM_QW1W2_VO);
354 }
355}
356
357static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
358 uint32_t *nvt_idx, bool *vo)
359{
360 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
361 uint32_t cam = be32_to_cpu(qw1w2);
362
363 xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
364 return qw1w2;
365}
366
367static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
368{
369 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
370}
371
372static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
373 hwaddr offset, unsigned size)
374{
375 uint32_t qw1w2;
376 uint32_t qw1w2_new;
377 uint8_t nvt_blk;
378 uint32_t nvt_idx;
379 bool vo;
380
381 qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);
382
383 if (!vo) {
384 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
385 nvt_blk, nvt_idx);
386 }

    /* Invalidate CAM line */
389 qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
390 xive_tctx_set_os_cam(tctx, qw1w2_new);
391 return qw1w2;
392}
393
394static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
395 uint8_t nvt_blk, uint32_t nvt_idx)
396{
397 XiveNVT nvt;
398 uint8_t ipb;

    /*
     * Grab the associated NVT to pull the pending bits, and merge
     * them with the IPB of the thread interrupt context registers
     */
404 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
405 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
406 nvt_blk, nvt_idx);
407 return;
408 }
409
410 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);

    if (ipb) {
        /* Reset the NVT value */
414 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
415 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /* Merge in current context */
418 xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
419 }
420}

/*
 * Updating the OS CAM line can trigger a resend of interrupt
 */
425static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
426 hwaddr offset, uint64_t value, unsigned size)
427{
428 uint32_t cam = value;
429 uint32_t qw1w2 = cpu_to_be32(cam);
430 uint8_t nvt_blk;
431 uint32_t nvt_idx;
432 bool vo;
433
434 xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);

    /* First update the registers */
437 xive_tctx_set_os_cam(tctx, qw1w2);

    /* Check the interrupt pending bits */
440 if (vo) {
441 xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
442 }
443}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
449typedef struct XiveTmOp {
450 uint8_t page_offset;
451 uint32_t op_offset;
452 unsigned size;
453 void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
454 hwaddr offset,
455 uint64_t value, unsigned size);
456 uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
457 unsigned size);
458} XiveTmOp;
459
460static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
465 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
466 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, NULL },
467 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
468 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
469 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
472 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
473 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
474 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
475 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
476 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
477 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
478 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
479};
480
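/*
 * Look up the special operation handler matching the TIMA page,
 * offset, size and access direction. Accesses from a more privileged
 * page also match.
 */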
481static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
482{
483 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
484 uint32_t op_offset = offset & 0xFFF;
485 int i;
486
487 for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
488 const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
491 if (xto->page_offset >= page_offset &&
492 xto->op_offset == op_offset &&
493 xto->size == size &&
494 ((write && xto->write_handler) || (!write && xto->read_handler))) {
495 return xto;
496 }
497 }
498 return NULL;
499}

/*
 * TIMA MMIO handlers
 */
504void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
505 uint64_t value, unsigned size)
506{
507 const XiveTmOp *xto;
508
509 trace_xive_tctx_tm_write(offset, size, value);

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
518 if (offset & 0x800) {
519 xto = xive_tm_find_op(offset, size, true);
520 if (!xto) {
521 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
522 "@%"HWADDR_PRIx"\n", offset);
523 } else {
524 xto->write_handler(xptr, tctx, offset, value, size);
525 }
526 return;
527 }

    /*
     * Then, for special operations in the region below 2K
     */
532 xto = xive_tm_find_op(offset, size, true);
533 if (xto) {
534 xto->write_handler(xptr, tctx, offset, value, size);
535 return;
536 }

    /*
     * Finish with raw access to the register values
     */
541 xive_tm_raw_write(tctx, offset, value, size);
542}
543
544uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
545 unsigned size)
546{
547 const XiveTmOp *xto;
548 uint64_t ret;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
557 if (offset & 0x800) {
558 xto = xive_tm_find_op(offset, size, false);
559 if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA"
                          " @%"HWADDR_PRIx"\n", offset);
562 return -1;
563 }
564 ret = xto->read_handler(xptr, tctx, offset, size);
565 goto out;
566 }

    /*
     * Then, for special operations in the region below 2K
     */
571 xto = xive_tm_find_op(offset, size, false);
572 if (xto) {
573 ret = xto->read_handler(xptr, tctx, offset, size);
574 goto out;
575 }

    /*
     * Finish with raw access to the register values
     */
580 ret = xive_tm_raw_read(tctx, offset, size);
581out:
582 trace_xive_tctx_tm_read(offset, size, ret);
583 return ret;
584}
585
586static char *xive_tctx_ring_print(uint8_t *ring)
587{
588 uint32_t w2 = xive_tctx_word2(ring);
589
590 return g_strdup_printf("%02x %02x %02x %02x %02x "
591 "%02x %02x %02x %08x",
592 ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
593 ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
594 be32_to_cpu(w2));
595}
596
597static const char * const xive_tctx_ring_names[] = {
598 "USER", "OS", "POOL", "PHYS",
599};

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
605#define xive_in_kernel(xptr) \
606 (kvm_irqchip_in_kernel() && \
607 ({ \
608 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \
609 xpc->in_kernel ? xpc->in_kernel(xptr) : false; \
610 }))
611
612void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
613{
614 int cpu_index;
615 int i;

    /*
     * Skip partially initialized vCPUs. This can happen on sPAPR
     * when vCPUs are hotplugged or unplugged.
     */
620 if (!tctx) {
621 return;
622 }
623
624 cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
625
626 if (xive_in_kernel(tctx->xptr)) {
627 Error *local_err = NULL;
628
629 kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
630 if (local_err) {
631 error_report_err(local_err);
632 return;
633 }
634 }
635
636 monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
637 " W2\n", cpu_index);
638
639 for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
640 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
641 monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
642 xive_tctx_ring_names[i], s);
643 g_free(s);
644 }
645}
646
647void xive_tctx_reset(XiveTCTX *tctx)
648{
649 memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
652 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
653 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
654 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
660 tctx->regs[TM_QW1_OS + TM_PIPR] =
661 ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
662 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
663 ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
664}
665
666static void xive_tctx_realize(DeviceState *dev, Error **errp)
667{
668 XiveTCTX *tctx = XIVE_TCTX(dev);
669 PowerPCCPU *cpu;
670 CPUPPCState *env;
671
672 assert(tctx->cs);
673 assert(tctx->xptr);
674
675 cpu = POWERPC_CPU(tctx->cs);
676 env = &cpu->env;
677 switch (PPC_INPUT(env)) {
678 case PPC_FLAGS_INPUT_POWER9:
679 tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
680 tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
681 break;
682
683 default:
684 error_setg(errp, "XIVE interrupt controller does not support "
685 "this CPU bus model");
686 return;
687 }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
690 if (xive_in_kernel(tctx->xptr)) {
691 if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
692 return;
693 }
694 }
695}
696
697static int vmstate_xive_tctx_pre_save(void *opaque)
698{
699 XiveTCTX *tctx = XIVE_TCTX(opaque);
700 Error *local_err = NULL;
701 int ret;
702
703 if (xive_in_kernel(tctx->xptr)) {
704 ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
705 if (ret < 0) {
706 error_report_err(local_err);
707 return ret;
708 }
709 }
710
711 return 0;
712}
713
714static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
715{
716 XiveTCTX *tctx = XIVE_TCTX(opaque);
717 Error *local_err = NULL;
718 int ret;
719
720 if (xive_in_kernel(tctx->xptr)) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
725 ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
726 if (ret < 0) {
727 error_report_err(local_err);
728 return ret;
729 }
730 }
731
732 return 0;
733}
734
735static const VMStateDescription vmstate_xive_tctx = {
736 .name = TYPE_XIVE_TCTX,
737 .version_id = 1,
738 .minimum_version_id = 1,
739 .pre_save = vmstate_xive_tctx_pre_save,
740 .post_load = vmstate_xive_tctx_post_load,
741 .fields = (VMStateField[]) {
742 VMSTATE_BUFFER(regs, XiveTCTX),
743 VMSTATE_END_OF_LIST()
744 },
745};
746
747static Property xive_tctx_properties[] = {
748 DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
749 DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
750 XivePresenter *),
751 DEFINE_PROP_END_OF_LIST(),
752};
753
754static void xive_tctx_class_init(ObjectClass *klass, void *data)
755{
756 DeviceClass *dc = DEVICE_CLASS(klass);
757
758 dc->desc = "XIVE Interrupt Thread Context";
759 dc->realize = xive_tctx_realize;
760 dc->vmsd = &vmstate_xive_tctx;
761 device_class_set_props(dc, xive_tctx_properties);

    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
766 dc->user_creatable = false;
767}
768
769static const TypeInfo xive_tctx_info = {
770 .name = TYPE_XIVE_TCTX,
771 .parent = TYPE_DEVICE,
772 .instance_size = sizeof(XiveTCTX),
773 .class_init = xive_tctx_class_init,
774};
775
776Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
777{
778 Object *obj;
779
780 obj = object_new(TYPE_XIVE_TCTX);
781 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
782 object_unref(obj);
783 object_property_set_link(obj, "cpu", cpu, &error_abort);
784 object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
785 if (!qdev_realize(DEVICE(obj), NULL, errp)) {
786 object_unparent(obj);
787 return NULL;
788 }
789 return obj;
790}
791
792void xive_tctx_destroy(XiveTCTX *tctx)
793{
794 Object *obj = OBJECT(tctx);
795
796 object_unparent(obj);
797}

/*
 * XIVE ESB helpers
 */
803uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
804{
805 uint8_t old_pq = *pq & 0x3;
806
807 *pq &= ~0x3;
808 *pq |= value & 0x3;
809
810 return old_pq;
811}
812
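/*
 * Trigger helper for the PQ state bits. Returns true when the event
 * must be forwarded to the router, i.e. when PQ goes from 00 (reset)
 * to 10 (pending).
 */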
813bool xive_esb_trigger(uint8_t *pq)
814{
815 uint8_t old_pq = *pq & 0x3;
816
817 switch (old_pq) {
818 case XIVE_ESB_RESET:
819 xive_esb_set(pq, XIVE_ESB_PENDING);
820 return true;
821 case XIVE_ESB_PENDING:
822 case XIVE_ESB_QUEUED:
823 xive_esb_set(pq, XIVE_ESB_QUEUED);
824 return false;
825 case XIVE_ESB_OFF:
826 xive_esb_set(pq, XIVE_ESB_OFF);
827 return false;
828 default:
829 g_assert_not_reached();
830 }
831}
832
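/*
 * EOI helper for the PQ state bits. Returns true when a queued event
 * (PQ=11) must be re-forwarded to the router after the EOI.
 */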
833bool xive_esb_eoi(uint8_t *pq)
834{
835 uint8_t old_pq = *pq & 0x3;
836
837 switch (old_pq) {
838 case XIVE_ESB_RESET:
839 case XIVE_ESB_PENDING:
840 xive_esb_set(pq, XIVE_ESB_RESET);
841 return false;
842 case XIVE_ESB_QUEUED:
843 xive_esb_set(pq, XIVE_ESB_PENDING);
844 return true;
845 case XIVE_ESB_OFF:
846 xive_esb_set(pq, XIVE_ESB_OFF);
847 return false;
848 default:
849 g_assert_not_reached();
850 }
851}

/*
 * XIVE Interrupt Source (or IVSE)
 */
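
/*
 * PQ state of a source: P set means an event was forwarded to the
 * router and is pending an EOI, Q set while P is set means further
 * events were coalesced. The OFF state (PQ=01) discards triggers
 * (see xive_esb_trigger).
 */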
857uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
858{
859 assert(srcno < xsrc->nr_irqs);
860
861 return xsrc->status[srcno] & 0x3;
862}
863
864uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
865{
866 assert(srcno < xsrc->nr_irqs);
867
868 return xive_esb_set(&xsrc->status[srcno], pq);
869}

/*
 * Returns whether the event notification should be forwarded.
 */
874static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
875{
876 uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
877
878 xive_source_set_asserted(xsrc, srcno, true);
879
880 switch (old_pq) {
881 case XIVE_ESB_RESET:
882 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
883 return true;
884 default:
885 return false;
886 }
887}

/*
 * Returns whether the event notification should be forwarded.
 */
892static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
893{
894 bool ret;
895
896 assert(srcno < xsrc->nr_irqs);
897
898 ret = xive_esb_trigger(&xsrc->status[srcno]);
899
900 if (xive_source_irq_is_lsi(xsrc, srcno) &&
901 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
902 qemu_log_mask(LOG_GUEST_ERROR,
903 "XIVE: queued an event on LSI IRQ %d\n", srcno);
904 }
905
906 return ret;
907}

/*
 * Returns whether the event notification should be forwarded.
 */
912static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
913{
914 bool ret;
915
916 assert(srcno < xsrc->nr_irqs);
917
918 ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
925 if (xive_source_irq_is_lsi(xsrc, srcno) &&
926 xive_source_is_asserted(xsrc, srcno)) {
927 ret = xive_source_lsi_trigger(xsrc, srcno);
928 }
929
930 return ret;
931}

/*
 * Forward the source event notification to the Router
 */
936static void xive_source_notify(XiveSource *xsrc, int srcno)
937{
938 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
939
940 if (xnc->notify) {
941 xnc->notify(xsrc->xive, srcno);
942 }
943}

/*
 * In a two pages ESB MMIO setting, even page is the trigger page, odd
 * page is for management
 */
949static inline bool addr_is_even(hwaddr addr, uint32_t shift)
950{
951 return !((addr >> shift) & 1);
952}
953
954static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
955{
956 return xive_source_esb_has_2page(xsrc) &&
957 addr_is_even(addr, xsrc->esb_shift - 1);
958}

/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
974static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
975{
976 XiveSource *xsrc = XIVE_SOURCE(opaque);
977 uint32_t offset = addr & 0xFFF;
978 uint32_t srcno = addr >> xsrc->esb_shift;
979 uint64_t ret = -1;

    /* In a two pages ESB MMIO setting, trigger page should not be read */
982 if (xive_source_is_trigger_page(xsrc, addr)) {
983 qemu_log_mask(LOG_GUEST_ERROR,
984 "XIVE: invalid load on IRQ %d trigger page at "
985 "0x%"HWADDR_PRIx"\n", srcno, addr);
986 return -1;
987 }
988
989 switch (offset) {
990 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
991 ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
994 if (ret) {
995 xive_source_notify(xsrc, srcno);
996 }
997 break;
998
999 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1000 ret = xive_source_esb_get(xsrc, srcno);
1001 break;
1002
1003 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1004 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1005 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1006 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1007 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
1008 break;
1009 default:
1010 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
1011 offset);
1012 }
1013
1014 trace_xive_source_esb_read(addr, srcno, ret);
1015
1016 return ret;
1017}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
1033static void xive_source_esb_write(void *opaque, hwaddr addr,
1034 uint64_t value, unsigned size)
1035{
1036 XiveSource *xsrc = XIVE_SOURCE(opaque);
1037 uint32_t offset = addr & 0xFFF;
1038 uint32_t srcno = addr >> xsrc->esb_shift;
1039 bool notify = false;
1040
1041 trace_xive_source_esb_write(addr, srcno, value);

    /* In a two pages ESB MMIO setting, trigger page only triggers */
1044 if (xive_source_is_trigger_page(xsrc, addr)) {
1045 notify = xive_source_esb_trigger(xsrc, srcno);
1046 goto out;
1047 }
1048
1049 switch (offset) {
1050 case 0 ... 0x3FF:
1051 notify = xive_source_esb_trigger(xsrc, srcno);
1052 break;
1053
1054 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
1055 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
1056 qemu_log_mask(LOG_GUEST_ERROR,
1057 "XIVE: invalid Store EOI for IRQ %d\n", srcno);
1058 return;
1059 }
1060
1061 notify = xive_source_esb_eoi(xsrc, srcno);
1062 break;
1063
1064 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1065 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1066 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1067 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1068 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
1069 break;
1070
1071 default:
1072 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
1073 offset);
1074 return;
1075 }
1076
1077out:
    /* Forward the source event notification for routing */
1079 if (notify) {
1080 xive_source_notify(xsrc, srcno);
1081 }
1082}
1083
1084static const MemoryRegionOps xive_source_esb_ops = {
1085 .read = xive_source_esb_read,
1086 .write = xive_source_esb_write,
1087 .endianness = DEVICE_BIG_ENDIAN,
1088 .valid = {
1089 .min_access_size = 8,
1090 .max_access_size = 8,
1091 },
1092 .impl = {
1093 .min_access_size = 8,
1094 .max_access_size = 8,
1095 },
1096};
1097
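/*
 * qemu_irq handler for the source input lines: LSIs follow the line
 * level (assert/deassert), MSIs only trigger on a rising edge.
 */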
1098void xive_source_set_irq(void *opaque, int srcno, int val)
1099{
1100 XiveSource *xsrc = XIVE_SOURCE(opaque);
1101 bool notify = false;
1102
1103 if (xive_source_irq_is_lsi(xsrc, srcno)) {
1104 if (val) {
1105 notify = xive_source_lsi_trigger(xsrc, srcno);
1106 } else {
1107 xive_source_set_asserted(xsrc, srcno, false);
1108 }
1109 } else {
1110 if (val) {
1111 notify = xive_source_esb_trigger(xsrc, srcno);
1112 }
1113 }

    /* Forward the source event notification for routing */
1116 if (notify) {
1117 xive_source_notify(xsrc, srcno);
1118 }
1119}
1120
1121void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
1122{
1123 int i;
1124
1125 for (i = 0; i < xsrc->nr_irqs; i++) {
1126 uint8_t pq = xive_source_esb_get(xsrc, i);
1127
1128 if (pq == XIVE_ESB_OFF) {
1129 continue;
1130 }
1131
1132 monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
1133 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
1134 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1135 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1136 xive_source_is_asserted(xsrc, i) ? 'A' : ' ');
1137 }
1138}
1139
1140static void xive_source_reset(void *dev)
1141{
1142 XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

1147 memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
1148}
1149
1150static void xive_source_realize(DeviceState *dev, Error **errp)
1151{
1152 XiveSource *xsrc = XIVE_SOURCE(dev);
1153 size_t esb_len = xive_source_esb_len(xsrc);
1154
1155 assert(xsrc->xive);
1156
1157 if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
1159 return;
1160 }
1161
1162 if (xsrc->esb_shift != XIVE_ESB_4K &&
1163 xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
1164 xsrc->esb_shift != XIVE_ESB_64K &&
1165 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
1166 error_setg(errp, "Invalid ESB shift setting");
1167 return;
1168 }
1169
1170 xsrc->status = g_malloc0(xsrc->nr_irqs);
1171 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
1172
1173 memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
1174 memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
1175 &xive_source_esb_ops, xsrc, "xive.esb-emulated",
1176 esb_len);
1177 memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);
1178
1179 qemu_register_reset(xive_source_reset, dev);
1180}
1181
1182static const VMStateDescription vmstate_xive_source = {
1183 .name = TYPE_XIVE_SOURCE,
1184 .version_id = 1,
1185 .minimum_version_id = 1,
1186 .fields = (VMStateField[]) {
1187 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
1188 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
1189 VMSTATE_END_OF_LIST()
1190 },
1191};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
1197static Property xive_source_properties[] = {
1198 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
1199 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
1200 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
1201 DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
1202 XiveNotifier *),
1203 DEFINE_PROP_END_OF_LIST(),
1204};
1205
1206static void xive_source_class_init(ObjectClass *klass, void *data)
1207{
1208 DeviceClass *dc = DEVICE_CLASS(klass);
1209
1210 dc->desc = "XIVE Interrupt Source";
1211 device_class_set_props(dc, xive_source_properties);
1212 dc->realize = xive_source_realize;
1213 dc->vmsd = &vmstate_xive_source;

    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
1218 dc->user_creatable = false;
1219}
1220
1221static const TypeInfo xive_source_info = {
1222 .name = TYPE_XIVE_SOURCE,
1223 .parent = TYPE_DEVICE,
1224 .instance_size = sizeof(XiveSource),
1225 .class_init = xive_source_class_init,
1226};

/*
 * XiveEND helpers
 */
1232void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
1233{
1234 uint64_t qaddr_base = xive_end_qaddr(end);
1235 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1236 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1237 uint32_t qentries = 1 << (qsize + 10);
1238 int i;

    /*
     * Print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
1243 monitor_printf(mon, " [ ");
1244 qindex = (qindex - (width - 1)) & (qentries - 1);
1245 for (i = 0; i < width; i++) {
1246 uint64_t qaddr = qaddr_base + (qindex << 2);
1247 uint32_t qdata = -1;
1248
1249 if (dma_memory_read(&address_space_memory, qaddr, &qdata,
1250 sizeof(qdata))) {
1251 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
1252 HWADDR_PRIx "\n", qaddr);
1253 return;
1254 }
1255 monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
1256 be32_to_cpu(qdata));
1257 qindex = (qindex + 1) & (qentries - 1);
1258 }
1259 monitor_printf(mon, "]");
1260}
1261
1262void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
1263{
1264 uint64_t qaddr_base = xive_end_qaddr(end);
1265 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1266 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1267 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1268 uint32_t qentries = 1 << (qsize + 10);
1269
1270 uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
1271 uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
1272 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
1273 uint8_t pq;
1274
1275 if (!xive_end_is_valid(end)) {
1276 return;
1277 }
1278
1279 pq = xive_get_field32(END_W1_ESn, end->w1);
1280
1281 monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
1282 end_idx,
1283 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1284 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1285 xive_end_is_valid(end) ? 'v' : '-',
1286 xive_end_is_enqueue(end) ? 'q' : '-',
1287 xive_end_is_notify(end) ? 'n' : '-',
1288 xive_end_is_backlog(end) ? 'b' : '-',
1289 xive_end_is_escalate(end) ? 'e' : '-',
1290 xive_end_is_uncond_escalation(end) ? 'u' : '-',
1291 xive_end_is_silent_escalation(end) ? 's' : '-',
1292 xive_end_is_firmware(end) ? 'f' : '-',
1293 priority, nvt_blk, nvt_idx);
1294
1295 if (qaddr_base) {
1296 monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
1297 qaddr_base, qindex, qentries, qgen);
1298 xive_end_queue_pic_print_info(end, 6, mon);
1299 }
1300 monitor_printf(mon, "\n");
1301}
1302
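/*
 * Enqueue an event data word in the Event Queue of the END and flip
 * the generation bit when the queue index wraps.
 */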
1303static void xive_end_enqueue(XiveEND *end, uint32_t data)
1304{
1305 uint64_t qaddr_base = xive_end_qaddr(end);
1306 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1307 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1308 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1309
1310 uint64_t qaddr = qaddr_base + (qindex << 2);
1311 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
1312 uint32_t qentries = 1 << (qsize + 10);
1313
1314 if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
1315 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
1316 HWADDR_PRIx "\n", qaddr);
1317 return;
1318 }
1319
1320 qindex = (qindex + 1) & (qentries - 1);
1321 if (qindex == 0) {
1322 qgen ^= 1;
1323 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
1324 }
1325 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
1326}
1327
1328void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
1329 Monitor *mon)
1330{
1331 XiveEAS *eas = (XiveEAS *) &end->w4;
1332 uint8_t pq;
1333
1334 if (!xive_end_is_escalate(end)) {
1335 return;
1336 }
1337
1338 pq = xive_get_field32(END_W1_ESe, end->w1);
1339
1340 monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
1341 end_idx,
1342 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1343 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1344 xive_eas_is_valid(eas) ? 'V' : ' ',
1345 xive_eas_is_masked(eas) ? 'M' : ' ',
1346 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
1347 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
1348 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
1349}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */
1355int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1356 XiveEAS *eas)
1357{
1358 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1359
1360 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
1361}
1362
1363int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1364 XiveEND *end)
1365{
1366 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1367
1368 return xrc->get_end(xrtr, end_blk, end_idx, end);
1369}
1370
1371int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1372 XiveEND *end, uint8_t word_number)
1373{
1374 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1375
1376 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
1377}
1378
1379int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1380 XiveNVT *nvt)
1381{
1382 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1383
1384 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
1385}
1386
1387int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1388 XiveNVT *nvt, uint8_t word_number)
1389{
1390 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1391
1392 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
1393}
1394
1395static int xive_router_get_block_id(XiveRouter *xrtr)
1396{
1397 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1398
1399 return xrc->get_block_id(xrtr);
1400}
1401
1402static void xive_router_realize(DeviceState *dev, Error **errp)
1403{
1404 XiveRouter *xrtr = XIVE_ROUTER(dev);
1405
1406 assert(xrtr->xfb);
1407}

/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7Bit)
 */
1414static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
1415{
1416 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
1417 uint32_t pir = env->spr_cb[SPR_PIR].default_value;
1418 uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));
1419
1420 return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
1421}

/*
 * The thread context register words are in big-endian format.
 */
1426int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
1427 uint8_t format,
1428 uint8_t nvt_blk, uint32_t nvt_idx,
1429 bool cam_ignore, uint32_t logic_serv)
1430{
1431 uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
1432 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
1433 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
1434 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
1435 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

1442 if (format == 0) {
1443 if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
1448 qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
1449 nvt_blk, nvt_idx);
1450 return -1;
1451 }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
1456 if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
1457 cam == xive_tctx_hw_cam_line(xptr, tctx)) {
1458 return TM_QW3_HV_PHYS;
1459 }

        /* HV POOL ring */
1462 if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
1463 cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
1464 return TM_QW2_HV_POOL;
1465 }

        /* OS ring */
1468 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1469 cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
1470 return TM_QW1_OS;
1471 }
1472 } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
1476 if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
1477 (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
1478 (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
1479 (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
1480 return TM_QW0_USER;
1481 }
1482 }
1483 return -1;
1484}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abandoned if a
 * matching NVT is not found.
 *
 * The parameters represent what is sent on the PowerBus
 */
1501bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
1502 uint8_t nvt_blk, uint32_t nvt_idx,
1503 bool cam_ignore, uint8_t priority,
1504 uint32_t logic_serv)
1505{
1506 XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
1507 XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
1508 int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match
     */
1513 count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
1514 priority, logic_serv, &match);
1515 if (count < 0) {
1516 return false;
1517 }

    /* handle CPU exception delivery */
1520 if (count) {
1521 trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
1522 xive_tctx_ipb_update(match.tctx, match.ring,
1523 xive_priority_to_ipb(priority));
1524 }
1525
1526 return !!count;
1527}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
1534static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
1535 uint32_t end_idx, XiveEND *end,
1536 uint32_t end_esmask)
1537{
1538 uint8_t pq = xive_get_field32(end_esmask, end->w1);
1539 bool notify = xive_esb_trigger(&pq);
1540
1541 if (pq != xive_get_field32(end_esmask, end->w1)) {
1542 end->w1 = xive_set_field32(end_esmask, end->w1, pq);
1543 xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
1544 }
1545
1546
1547 return notify;
1548}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as in the function below.
 */
1555static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
1556 uint32_t end_idx, uint32_t end_data)
1557{
1558 XiveEND end;
1559 uint8_t priority;
1560 uint8_t format;
1561 uint8_t nvt_blk;
1562 uint32_t nvt_idx;
1563 XiveNVT nvt;
1564 bool found;

    /* END cache lookup */
1567 if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
1568 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1569 end_idx);
1570 return;
1571 }
1572
1573 if (!xive_end_is_valid(&end)) {
1574 trace_xive_router_end_notify(end_blk, end_idx, end_data);
1575 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1576 end_blk, end_idx);
1577 return;
1578 }
1579
1580 if (xive_end_is_enqueue(&end)) {
1581 xive_end_enqueue(&end, end_data);
1582
1583 xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
1584 }

    /*
     * When the END is silent, we skip the notification part.
     */
1589 if (xive_end_is_silent_escalation(&end)) {
1590 goto do_escalation;
1591 }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
1601 format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
1602 priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
1605 if (format == 0 && priority == 0xff) {
1606 return;
1607 }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
1613 if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
1615 if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
1616 &end, END_W1_ESn)) {
1617 return;
1618 }
1619 }

    /*
     * Follows IVPE notification
     */
1624 nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
1625 nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
1628 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
1629 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
1630 nvt_blk, nvt_idx);
1631 return;
1632 }
1633
1634 if (!xive_nvt_is_valid(&nvt)) {
1635 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
1636 nvt_blk, nvt_idx);
1637 return;
1638 }
1639
1640 found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
1641 xive_get_field32(END_W7_F0_IGNORE, end.w7),
1642 priority,
1643 xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

1647 if (found) {
1648 return;
1649 }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
1656 if (xive_end_is_backlog(&end)) {
1657 uint8_t ipb;
1658
1659 if (format == 1) {
1660 qemu_log_mask(LOG_GUEST_ERROR,
1661 "XIVE: END %x/%x invalid config: F1 & backlog\n",
1662 end_blk, end_idx);
1663 return;
1664 }

        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
1670 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
1671 xive_priority_to_ipb(priority);
1672 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
1673 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
1678 }
1679
1680do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
1685 if (!xive_end_is_escalate(&end)) {
1686 return;
1687 }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
1693 if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
1695 if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
1696 &end, END_W1_ESe)) {
1697 return;
1698 }
1699 }
1700
1701 trace_xive_router_end_escalate(end_blk, end_idx,
1702 (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
1703 (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
1704 (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));

    /*
     * The END trigger becomes an Escalation trigger
     */
1708 xive_router_end_notify(xrtr,
1709 xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
1710 xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
1711 xive_get_field32(END_W5_ESC_END_DATA, end.w5));
1712}
1713
1714void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
1715{
1716 XiveRouter *xrtr = XIVE_ROUTER(xn);
1717 uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
1718 uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
1719 XiveEAS eas;

    /* EAS cache lookup */
1722 if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
1723 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
1724 return;
1725 }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */
1733 if (!xive_eas_is_valid(&eas)) {
1734 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
1735 return;
1736 }
1737
1738 if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
1740 return;
1741 }

    /*
     * The event trigger becomes an END trigger
     */
1746 xive_router_end_notify(xrtr,
1747 xive_get_field64(EAS_END_BLOCK, eas.w),
1748 xive_get_field64(EAS_END_INDEX, eas.w),
1749 xive_get_field64(EAS_END_DATA, eas.w));
1750}
1751
1752static Property xive_router_properties[] = {
1753 DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
1754 TYPE_XIVE_FABRIC, XiveFabric *),
1755 DEFINE_PROP_END_OF_LIST(),
1756};
1757
1758static void xive_router_class_init(ObjectClass *klass, void *data)
1759{
1760 DeviceClass *dc = DEVICE_CLASS(klass);
1761 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1762
1763 dc->desc = "XIVE Router Engine";
1764 device_class_set_props(dc, xive_router_properties);
1765
1766 dc->realize = xive_router_realize;
1767 xnc->notify = xive_router_notify;
1768}
1769
1770static const TypeInfo xive_router_info = {
1771 .name = TYPE_XIVE_ROUTER,
1772 .parent = TYPE_SYS_BUS_DEVICE,
1773 .abstract = true,
1774 .instance_size = sizeof(XiveRouter),
1775 .class_size = sizeof(XiveRouterClass),
1776 .class_init = xive_router_class_init,
1777 .interfaces = (InterfaceInfo[]) {
1778 { TYPE_XIVE_NOTIFIER },
1779 { TYPE_XIVE_PRESENTER },
1780 { }
1781 }
1782};
1783
1784void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
1785{
1786 if (!xive_eas_is_valid(eas)) {
1787 return;
1788 }
1789
1790 monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
1791 lisn, xive_eas_is_masked(eas) ? "M" : " ",
1792 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
1793 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
1794 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
1795}

/*
 * END ESB MMIO loads
 */
1800static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
1801{
1802 XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
1803 uint32_t offset = addr & 0xFFF;
1804 uint8_t end_blk;
1805 uint32_t end_idx;
1806 XiveEND end;
1807 uint32_t end_esmask;
1808 uint8_t pq;
1809 uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO, but our model only supports a single block per XIVE
     * chip.
     */
1815 end_blk = xive_router_get_block_id(xsrc->xrtr);
1816 end_idx = addr >> (xsrc->esb_shift + 1);
1817
1818 trace_xive_end_source_read(end_blk, end_idx, addr);
1819
1820 if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
1821 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1822 end_idx);
1823 return -1;
1824 }
1825
1826 if (!xive_end_is_valid(&end)) {
1827 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1828 end_blk, end_idx);
1829 return -1;
1830 }
1831
1832 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
1833 pq = xive_get_field32(end_esmask, end.w1);
1834
1835 switch (offset) {
1836 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
1837 ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
1840 break;
1841
1842 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1843 ret = pq;
1844 break;
1845
1846 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1847 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1848 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1849 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1850 ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
1851 break;
1852 default:
1853 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
1854 offset);
1855 return -1;
1856 }
1857
1858 if (pq != xive_get_field32(end_esmask, end.w1)) {
1859 end.w1 = xive_set_field32(end_esmask, end.w1, pq);
1860 xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
1861 }
1862
1863 return ret;
1864}

/*
 * END ESB MMIO stores are invalid
 */
1869static void xive_end_source_write(void *opaque, hwaddr addr,
1870 uint64_t value, unsigned size)
1871{
1872 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
1873 HWADDR_PRIx"\n", addr);
1874}
1875
1876static const MemoryRegionOps xive_end_source_ops = {
1877 .read = xive_end_source_read,
1878 .write = xive_end_source_write,
1879 .endianness = DEVICE_BIG_ENDIAN,
1880 .valid = {
1881 .min_access_size = 8,
1882 .max_access_size = 8,
1883 },
1884 .impl = {
1885 .min_access_size = 8,
1886 .max_access_size = 8,
1887 },
1888};
1889
1890static void xive_end_source_realize(DeviceState *dev, Error **errp)
1891{
1892 XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
1893
1894 assert(xsrc->xrtr);
1895
1896 if (!xsrc->nr_ends) {
        error_setg(errp, "Number of ENDs needs to be greater than 0");
1898 return;
1899 }
1900
1901 if (xsrc->esb_shift != XIVE_ESB_4K &&
1902 xsrc->esb_shift != XIVE_ESB_64K) {
1903 error_setg(errp, "Invalid ESB shift setting");
1904 return;
1905 }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even
     * page manages the ESn field while the odd page manages the ESe
     * field.
     */
1911 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
1912 &xive_end_source_ops, xsrc, "xive.end",
1913 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
1914}
1915
1916static Property xive_end_source_properties[] = {
1917 DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
1918 DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
1919 DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
1920 XiveRouter *),
1921 DEFINE_PROP_END_OF_LIST(),
1922};
1923
1924static void xive_end_source_class_init(ObjectClass *klass, void *data)
1925{
1926 DeviceClass *dc = DEVICE_CLASS(klass);
1927
1928 dc->desc = "XIVE END Source";
1929 device_class_set_props(dc, xive_end_source_properties);
1930 dc->realize = xive_end_source_realize;

    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
1935 dc->user_creatable = false;
1936}
1937
1938static const TypeInfo xive_end_source_info = {
1939 .name = TYPE_XIVE_END_SOURCE,
1940 .parent = TYPE_DEVICE,
1941 .instance_size = sizeof(XiveENDSource),
1942 .class_init = xive_end_source_class_init,
1943};

/*
 * XIVE Notifier
 */
1948static const TypeInfo xive_notifier_info = {
1949 .name = TYPE_XIVE_NOTIFIER,
1950 .parent = TYPE_INTERFACE,
1951 .class_size = sizeof(XiveNotifierClass),
1952};

/*
 * XIVE Presenter
 */
1957static const TypeInfo xive_presenter_info = {
1958 .name = TYPE_XIVE_PRESENTER,
1959 .parent = TYPE_INTERFACE,
1960 .class_size = sizeof(XivePresenterClass),
1961};

/*
 * XIVE Fabric
 */
1966static const TypeInfo xive_fabric_info = {
1967 .name = TYPE_XIVE_FABRIC,
1968 .parent = TYPE_INTERFACE,
1969 .class_size = sizeof(XiveFabricClass),
1970};
1971
1972static void xive_register_types(void)
1973{
1974 type_register_static(&xive_fabric_info);
1975 type_register_static(&xive_source_info);
1976 type_register_static(&xive_notifier_info);
1977 type_register_static(&xive_presenter_info);
1978 type_register_static(&xive_router_info);
1979 type_register_static(&xive_end_source_info);
1980 type_register_static(&xive_tctx_info);
1981}
1982
1983type_init(xive_register_types)
1984