/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

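/*
 * Virtual structures table (VST)
 */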
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },
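
    /*
     * Interrupt fifo backing store table (not modeled):
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue
     */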
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

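/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */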
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

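/*
 * Remote access to the other chip XIVE controllers. HW uses MMIOs.
 * For now, a simple scan of all the chips is good enough.
 */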
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}

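/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */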
#define XIVE_VSD_SIZE 8

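/* Indirect page size can be 4K, 64K, 2M, 16M. */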
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

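    /* Get the page size of the indirect table. */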
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

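    /* Load the VSD we are looking for, if not already done */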
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

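        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */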
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

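    /* Remote VST access */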
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

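/*
 * Flush the END cache watch data (DAT0-3) back to the in-RAM ENDT
 * entry selected by the VC_EQC_CWATCH_SPEC register.
 */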
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

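/*
 * Load an END entry from the ENDT into the cache watch data registers
 * (DAT0-3) so that firmware can inspect it.
 */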
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

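/*
 * Flush the NVT cache watch data (DAT0-7) back to the in-RAM VPDT
 * entry selected by the PC_VPC_CWATCH_SPEC register.
 */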
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

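/*
 * Load an NVT entry from the VPDT into the cache watch data registers
 * (DAT0-7).
 */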
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

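    /*
     * EAT lookups should be local to the IC
     */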
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

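/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * the first cores of the chip and the second register the next ones.
 */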
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

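            /*
             * Check the thread context CAM lines and record matches.
             */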
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);

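            /*
             * Save the context and follow on to catch duplicates, that we
             * don't support yet.
             */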
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

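/*
 * Returns the controller that presents interrupts to a CPU, using the
 * presenter link of its thread interrupt context.
 */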
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

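/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */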
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

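/*
 * XIVE helpers
 */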
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

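/*
 * Compute the number of entries per indirect subpage.
 */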
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

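    /* For direct tables, fake a valid value */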
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

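    /* Get the page size of the indirect table. */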
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

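/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide which pages to use for which purpose.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */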
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

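/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */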
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

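/*
 * XIVE Table configuration. Only the EDT is supported.
 */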
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

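    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */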
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

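/*
 * Virtual Structure Tables (VST) configuration
 */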
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

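    /* Record the table configuration (in SRAM on HW) */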
    xive->vsds[type][blk] = vsd;

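    /* Now tune the models with the configuration provided by the FW */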
    switch (type) {
    case VST_TSEL_IVT:
        break;

    case VST_TSEL_EQDT:
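        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */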
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
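        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */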
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT:
    case VST_TSEL_IRQ:
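        /* Not modeled */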
        break;

    default:
        g_assert_not_reached();
    }
}

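/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables (VST)
 */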
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

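    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */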
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
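
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */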
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

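    /*
     * XIVE CQ (PowerBus bridge) settings
     */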
    case CQ_MSGSND:
    case CQ_FIRMASK_OR:
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN:
        break;

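    /*
     * XIVE Virtualization Controller settings
     */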
    case VC_GLOBAL_CONFIG:
        break;

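    /*
     * XIVE Presenter Controller settings
     */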
    case PC_GLOBAL_CONFIG:
        break;
    case PC_TCTXT_CFG:
        break;
    case PC_TCTXT_TRACK:
        break;

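    /*
     * Misc settings
     */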
    case VC_SBC_CONFIG:
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:
        break;

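    /*
     * XIVE BAR settings (XSCOM only)
     */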
    case CQ_RST_CTL:
        break;

    case CQ_IC_BAR:
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR:
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR:
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

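    /*
     * XIVE Table settings.
     */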
    case CQ_TAR:
        break;
    case CQ_TDR:
        pnv_xive_table_set_data(xive, val);
        break;

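    /*
     * XIVE VC & PC Virtual Structure Table settings
     */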
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

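    /*
     * Interrupt fifo overflow in memory backing store (not modeled)
     */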
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

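    /*
     * XIVE hardware thread enablement
     */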
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

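    /*
     * Indirect TIMA access set up. Defined after the TIMA pages are
     * mapped.
     */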
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

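    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */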
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT;
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
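        /* writing to DATA0 triggers the cache write */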
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT;
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
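        /* writing to DATA0 triggers the cache write */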
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        break;

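    /*
     * XIVE PC & VC cache invalidation
     */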
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

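    /*
     * XIVE hardware thread enablement
     */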
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND:
        val = 0xffffff0000000000;
        break;

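    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */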
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
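        /*
         * Load DATA registers from the cache with the data requested
         * by the SPEC register
         */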
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
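        /*
         * Load DATA registers from the cache with the data requested
         * by the SPEC register
         */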
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

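    /*
     * XIVE PC & VC cache invalidation
     */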
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

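    /*
     * XIVE synchronisation
     */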
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

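/*
 * IC - Notify MMIO port page (write only)
 */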
#define PNV_XIVE_FORWARD_IPI        0x800
#define PNV_XIVE_FORWARD_HW         0x880
#define PNV_XIVE_FORWARD_OS_ESC     0x900
#define PNV_XIVE_FORWARD_HW_ESC     0x980
#define PNV_XIVE_FORWARD_REDIS      0xa00
#define PNV_XIVE_RESERVED5          0xa80
#define PNV_XIVE_RESERVED6          0xb00
#define PNV_XIVE_RESERVED7          0xb80

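/* VC synchronisation */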
#define PNV_XIVE_SYNC_IPI           0xc00
#define PNV_XIVE_SYNC_HW            0xc80
#define PNV_XIVE_SYNC_OS_ESC        0xd00
#define PNV_XIVE_SYNC_HW_ESC        0xd80
#define PNV_XIVE_SYNC_REDIS         0xe00

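/* PC synchronisation */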
#define PNV_XIVE_SYNC_PULL          0xe80
#define PNV_XIVE_SYNC_PUSH          0xf00
#define PNV_XIVE_SYNC_VPC           0xf80

static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

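    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */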
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

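/*
 * IC - LSI MMIO handlers (not modeled)
 */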
static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

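/*
 * IC - Indirect TIMA MMIO handlers
 *
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * the MMIO access. The thread id must be an enabled thread of the
 * chip.
 */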
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

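    /* Check that HW thread is XIVE enabled */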
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
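
/*
 * Interrupt controller XSCOM region.
 */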
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
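        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */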
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
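
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */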
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

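/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */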
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}

void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

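    /* Default page size (should be changed at runtime to 64k) */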
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

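    /* Clear subregions */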
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}
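
/*
 * Maximum number of IRQs and ENDs supported by HW
 */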
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);
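
    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */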
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

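    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */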
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

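    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */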
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),

    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}

static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)