1
2
3
4
5
6
7
8
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/module.h"
13#include "qapi/error.h"
14#include "target/ppc/cpu.h"
15#include "sysemu/cpus.h"
16#include "sysemu/dma.h"
17#include "monitor/monitor.h"
18#include "hw/ppc/fdt.h"
19#include "hw/ppc/pnv.h"
20#include "hw/ppc/pnv_core.h"
21#include "hw/ppc/pnv_xscom.h"
22#include "hw/ppc/pnv_xive.h"
23#include "hw/ppc/xive_regs.h"
24#include "hw/ppc/ppc.h"
25
26#include <libfdt.h>
27
28#include "pnv_xive_regs.h"
29
30#define XIVE_DEBUG
31
32
33
34
35#define SBE_PER_BYTE 4
36
/*
 * Virtual structures table (VST) descriptor: name used in error
 * reporting, size of one entry in bytes, and the maximum number of
 * blocks the table supports.
 */
typedef struct XiveVstInfo {
    const char *name;      /* table name for error messages */
    uint32_t size;         /* size of one table entry in bytes */
    uint32_t max_blocks;   /* maximum number of blocks */
} XiveVstInfo;
42
static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE] = { "SBE", 1, 16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * The SBE "entry size" of 1 byte is a placeholder: one SBE byte
     * holds the state bits of SBE_PER_BYTE interrupts (see
     * pnv_xive_nr_ipis()).
     *
     * NOTE(review): the IRQ entry size of 1 looks like a placeholder
     * as well — confirm the interrupt fifo entry layout against the
     * hardware documentation.
     */
    [VST_TSEL_IRQ] = { "IRQ", 1, 6 },
};
61
/*
 * Report a guest error tagged with the chip id of the interrupt
 * controller. No trailing semicolon in the expansion: callers write
 * xive_error(...); themselves, and a semicolon here would create an
 * empty statement that breaks un-braced if/else constructs.
 */
#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)
65
66
67
68
69
70
71
72
73
74
75static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
76{
77 return (word & mask) >> ctz64(mask);
78}
79
80static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
81 uint64_t value)
82{
83 return (word & ~mask) | ((value << ctz64(mask)) & mask);
84}
85
86
87
88
89
90
91
/*
 * Retrieve the interrupt controller of the chip owning block 'blk'.
 * The block id is matched against the chip id (one block per chip in
 * this model). Returns NULL when no chip matches.
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        /* block id == chip id */
        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}
107
108
109
110
111
112
113
114
115#define XIVE_VSD_SIZE 8
116
117
/*
 * VST tables only support 4K (12), 64K (16), 2M (21) and 16M (24)
 * page shifts. Returns non-zero when 'page_shift' is one of them.
 */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    switch (page_shift) {
    case 12:
    case 16:
    case 21:
    case 24:
        return 1;
    default:
        return 0;
    }
}
123
/*
 * Compute the size in bytes of the VST described by 'vsd'. For an
 * indirect table, the size is the number of page descriptors held in
 * the direct table multiplied by the subpage size read from the first
 * descriptor. Returns 0 when the subpage size is not supported.
 */
static uint64_t pnv_xive_vst_size(uint64_t vsd)
{
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    /*
     * Read the first descriptor to get the subpage size; all subpages
     * are assumed to share it (this is checked per-access in
     * pnv_xive_vst_addr_indirect()).
     */
    if (VSD_INDIRECT & vsd) {
        uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE;
        uint32_t page_shift;

        vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK);
        page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            return 0;
        }

        return nr_pages * (1ull << page_shift);
    }

    return vst_tsize;
}
148
149static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
150 uint64_t vsd, uint32_t idx)
151{
152 const XiveVstInfo *info = &vst_infos[type];
153 uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
154
155 return vst_addr + idx * info->size;
156}
157
/*
 * Compute the guest physical address of entry 'idx' in an indirect
 * VST. The direct table is an array of 8-byte VSD page descriptors,
 * each pointing to a subpage of entries; all subpages must share the
 * page size advertised by the first descriptor. Returns 0 on error.
 */
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Read the first descriptor to learn the subpage size */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the descriptor of the subpage holding 'idx', unless it is
     * the first one, already read above. */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the subpages have a consistent size across the
         * indirect table.
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    /* Resolve the remainder within the subpage as a direct lookup */
    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
210
211static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
212 uint32_t idx)
213{
214 const XiveVstInfo *info = &vst_infos[type];
215 uint64_t vsd;
216 uint32_t idx_max;
217
218 if (blk >= info->max_blocks) {
219 xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
220 blk, info->name, idx);
221 return 0;
222 }
223
224 vsd = xive->vsds[type][blk];
225
226
227 if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
228 xive = pnv_xive_get_ic(blk);
229
230 return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
231 }
232
233 idx_max = pnv_xive_vst_size(vsd) / info->size - 1;
234 if (idx > idx_max) {
235#ifdef XIVE_DEBUG
236 xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?",
237 info->name, blk, idx, idx_max);
238#endif
239 return 0;
240 }
241
242 if (VSD_INDIRECT & vsd) {
243 return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
244 }
245
246 return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
247}
248
249static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
250 uint32_t idx, void *data)
251{
252 const XiveVstInfo *info = &vst_infos[type];
253 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
254
255 if (!addr) {
256 return -1;
257 }
258
259 cpu_physical_memory_read(addr, data, info->size);
260 return 0;
261}
262
263#define XIVE_VST_WORD_ALL -1
264
265static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
266 uint32_t idx, void *data, uint32_t word_number)
267{
268 const XiveVstInfo *info = &vst_infos[type];
269 uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
270
271 if (!addr) {
272 return -1;
273 }
274
275 if (word_number == XIVE_VST_WORD_ALL) {
276 cpu_physical_memory_write(addr, data, info->size);
277 } else {
278 cpu_physical_memory_write(addr + word_number * 4,
279 data + word_number * 4, 4);
280 }
281 return 0;
282}
283
284static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
285 XiveEND *end)
286{
287 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
288}
289
290static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
291 XiveEND *end, uint8_t word_number)
292{
293 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
294 word_number);
295}
296
/*
 * Write back the END cache watch data registers (VC_EQC_CWATCH_DAT0-3)
 * to the END entry selected by VC_EQC_CWATCH_SPEC.
 */
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    /* Registers are host-endian; the END is big-endian in guest RAM */
    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}
313
/*
 * Load the END entry selected by VC_EQC_CWATCH_SPEC into the cache
 * watch data registers (VC_EQC_CWATCH_DAT0-3). On lookup failure the
 * registers are loaded with zeroes.
 */
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    /* Convert from the big-endian in-memory layout to host order */
    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
331
332static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
333 XiveNVT *nvt)
334{
335 return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
336}
337
338static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
339 XiveNVT *nvt, uint8_t word_number)
340{
341 return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
342 word_number);
343}
344
/*
 * Write back the NVT cache watch data registers (PC_VPC_CWATCH_DAT0-7)
 * to the NVT entry selected by PC_VPC_CWATCH_SPEC.
 */
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    /* Registers are host-endian; the NVT is big-endian in guest RAM */
    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}
361
/*
 * Load the NVT entry selected by PC_VPC_CWATCH_SPEC into the cache
 * watch data registers (PC_VPC_CWATCH_DAT0-7). On lookup failure the
 * registers are loaded with zeroes.
 */
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    /* Convert from the big-endian in-memory layout to host order */
    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
379
380static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
381 XiveEAS *eas)
382{
383 PnvXive *xive = PNV_XIVE(xrtr);
384
385 if (pnv_xive_get_ic(blk) != xive) {
386 xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx));
387 return -1;
388 }
389
390 return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
391}
392
/*
 * XiveRouter hook: return the thread interrupt context of a vCPU.
 *
 * The owning chip is derived from the PIR. NOTE(review): the decode
 * below uses (pir >> 8) & 0xf as the chip/block id and pir & 0x3f as
 * the thread enable bit — assumed P9 PIR layout, confirm against the
 * hardware documentation.
 */
static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /* Locate the interrupt controller of the chip owning the thread */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    /* Thread not enabled in PC_THREAD_EN_REG0: warn but do not fail */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}
419
420
421
422
423
424
425
426
427static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
428{
429 PnvXive *xive = PNV_XIVE(xn);
430 uint8_t blk = xive->chip->chip_id;
431
432 xive_router_notify(xn, XIVE_SRCNO(blk, srcno));
433}
434
435
436
437
438
439static uint64_t pnv_xive_vc_size(PnvXive *xive)
440{
441 return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
442}
443
444static uint64_t pnv_xive_edt_shift(PnvXive *xive)
445{
446 return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
447}
448
449static uint64_t pnv_xive_pc_size(PnvXive *xive)
450{
451 return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
452}
453
454static uint32_t pnv_xive_nr_ipis(PnvXive *xive)
455{
456 uint8_t blk = xive->chip->chip_id;
457
458 return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE;
459}
460
461static uint32_t pnv_xive_nr_ends(PnvXive *xive)
462{
463 uint8_t blk = xive->chip->chip_id;
464
465 return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk])
466 / vst_infos[VST_TSEL_EQDT].size;
467}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
484{
485 uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
486 uint64_t size = 0;
487 int i;
488
489 for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
490 uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);
491
492 if (edt_type == type) {
493 size += edt_size;
494 }
495 }
496
497 return size;
498}
499
500
501
502
503
/*
 * Translate 'vc_offset' in the VC BAR space into an offset in the
 * address space of EDT type 'type', by subtracting the size of every
 * preceding segment of a different type.
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    /* Only segments strictly below 'vc_offset' are considered */
    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
521
/*
 * Resize the IPI and END EDT windows to the sizes configured in the
 * EDT table and map them at the start of their parent MMIO regions.
 */
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
533
534
535
536
/*
 * Store 'val' in the IC table (BLK, MIG, EDT or VDT) and at the index
 * selected by the CQ_TAR register, with optional index
 * auto-increment. Returns -1 on an invalid table or index.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    /* Auto-increment the index for the next CQ_TDR write */
    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * The EDT configuration is complete when the index has
     * auto-incremented past the last entry: resize the IPI and END
     * MMIO windows accordingly.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
588
589
590
591
/*
 * Configure a VST in exclusive mode: validate the VSD, record it, and
 * tune the device model (ESB MMIO window sizes) to match the table
 * sizes chosen by the firmware.
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /*
     * Indirect tables must be globally enabled and use a supported
     * subpage size.
     */
    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table descriptor */
    xive->vsds[type][blk] = vsd;

    /* Adjust the model to the firmware-provided configuration */
    switch (type) {
    case VST_TSEL_IVT:
        /* nothing to adjust for the EAT */
        break;

    case VST_TSEL_EQDT:
        /*
         * Resize the END ESB window to its actual size: one ESB page
         * pair (1ull << (esb_shift + 1)) per END entry.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Resize the IPI ESB window: the SBE table size gives the
         * number of IPIs (SBE_PER_BYTE state bits per byte), one ESB
         * page (1ull << esb_shift) each.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT:
    case VST_TSEL_IRQ:
        /* no model adjustment required for these tables */
        break;

    default:
        g_assert_not_reached();
    }
}
667
668
669
670
671
/*
 * Configure the VST selected by VC_VSD_TABLE_ADDR with the descriptor
 * 'vsd'. Writes coming from the PC engine ('pc_engine') are validated
 * but otherwise ignored: only the VC engine configuration is used.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /* PC engine writes mirror the VC configuration: nothing more to do */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        /* remote table: record the forwarding descriptor as-is */
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        /* local table: validate and adjust the model */
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
/*
 * Interrupt Controller register space write handler. Unless the write
 * is rejected (default case returns early), the value is recorded in
 * the xive->regs[] shadow array after the switch; some registers also
 * trigger model updates (BAR mapping, MMIO resizing, cache watch
 * operations).
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    /* The TM BAR is mapped in the system space only once, from chip 0 */
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:
    case CQ_FIRMASK_OR:
        /* value only recorded, no model change */
        break;
    case CQ_PBI_CTL:
        /* select 64K pages for the PC and VC BAR spaces */
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN:
        /* PowerBus general configuration: value only recorded */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        /* read back by pnv_xive_vst_set_exclusive() (VC_GCONF_INDIRECT) */
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /* value only recorded */
        break;
    case PC_TCTXT_CFG:
        /*
         * Chip id override: provides the chip id used in the thread
         * contexts — NOTE(review): block group support is not
         * modeled, confirm which other bits matter.
         */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /* thread context tracking: value only recorded */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG:
        /* Store EOI configuration of the IPI sources */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:
        /* value only recorded */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* resets are not modeled */
        break;

    case CQ_IC_BAR:
        /* map/unmap the IC MMIO regions in the system address space */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                /* sub-pages: registers, notify, LSI, indirect TIMA */
                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
        /* map/unmap the TIMA, from chip 0 only */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        /*
         * Stored early because pnv_xive_pc_size() reads the shadow
         * register (also stored again after the switch).
         */
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR:
        /* map/unmap the PC MMIO region */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        /* stored early for pnv_xive_vc_size(), same as CQ_PC_BARM */
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR:
        /* map/unmap the VC MMIO region */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings
     */
    case CQ_TAR:
        /* table address: value only recorded */
        break;
    case CQ_TDR:
        /* table data: store in the table selected by CQ_TAR */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
        /* VST selector: value only recorded */
        break;
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
        /* VST descriptor: configure the selected table */
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        /* value only recorded */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        /* read back by pnv_xive_get_indirect_tctx() */
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        /* the conflict bit is always cleared on write */
        val &= ~VC_EQC_CWATCH_CONFLICT;
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing DAT0 triggers the END cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /* scrubbing is not modeled */
        break;

    case PC_VPC_CWATCH_SPEC:
        /* the conflict bit is always cleared on write */
        val &= ~PC_VPC_CWATCH_CONFLICT;
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing DAT0 triggers the NVT cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /* scrubbing is not modeled */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
1049
/*
 * Interrupt Controller register space read handler. Most registers
 * return the shadowed value; a few emulate hardware side effects on
 * read (cache watch loads, valid-bit clearing).
 */
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /* plain shadowed registers */
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * The thread enable SET/CLR aliases read back the underlying
     * enable registers.
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND:
        /* all doorbells are reported available */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates
     */
    case VC_EQC_CWATCH_SPEC:
        /* report the watch as neither full nor conflicting */
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /* reading DAT0 loads the selected END into the watch registers */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        /* report the watch as neither full nor conflicting */
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /* reading DAT0 loads the selected NVT into the watch registers */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        /* scrub completes instantly: clear the valid bit */
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        /* invalidation completes instantly: clear the valid bit */
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        /* all syncs are reported complete */
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
1185
/* IC register page: 8-byte accesses only */
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1199
1200
1201
1202
1203#define PNV_XIVE_FORWARD_IPI 0x800
1204#define PNV_XIVE_FORWARD_HW 0x880
1205#define PNV_XIVE_FORWARD_OS_ESC 0x900
1206#define PNV_XIVE_FORWARD_HW_ESC 0x980
1207#define PNV_XIVE_FORWARD_REDIS 0xa00
1208#define PNV_XIVE_RESERVED5 0xa80
1209#define PNV_XIVE_RESERVED6 0xb00
1210#define PNV_XIVE_RESERVED7 0xb80
1211
1212
1213#define PNV_XIVE_SYNC_IPI 0xc00
1214#define PNV_XIVE_SYNC_HW 0xc80
1215#define PNV_XIVE_SYNC_OS_ESC 0xd00
1216#define PNV_XIVE_SYNC_HW_ESC 0xd80
1217#define PNV_XIVE_SYNC_REDIS 0xe00
1218
1219
1220#define PNV_XIVE_SYNC_PULL 0xe80
1221#define PNV_XIVE_SYNC_PUSH 0xf00
1222#define PNV_XIVE_SYNC_VPC 0xf80
1223
/*
 * IC notify page hardware trigger: 'val' is forwarded as-is to the
 * router — presumably a global IRQ number (block + index) as built by
 * pnv_xive_notify(). NOTE(review): 'addr' is currently unused,
 * confirm this matches the hardware trigger page semantics.
 */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    xive_router_notify(XIVE_NOTIFIER(xive), val);
}
1233
/*
 * IC notify page write handler: the low 2K carries HW triggers, the
 * upper offsets are forwarding commands (reported as errors, not
 * modeled) and sync commands (modeled as no-ops).
 */
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    switch (addr) {
    /* VC: HW triggers */
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: forwarded IRQs — not modeled */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs: nothing to do, syncs complete instantly */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs: nothing to do, syncs complete instantly */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}
1274
1275static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
1276 unsigned size)
1277{
1278 PnvXive *xive = PNV_XIVE(opaque);
1279
1280
1281 xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
1282 return -1;
1283}
1284
/* IC notify page: 8-byte accesses only */
static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1298
1299
1300
1301
1302
1303static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
1304 uint64_t val, unsigned size)
1305{
1306 PnvXive *xive = PNV_XIVE(opaque);
1307
1308 xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
1309}
1310
1311static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
1312{
1313 PnvXive *xive = PNV_XIVE(opaque);
1314
1315 xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
1316 return -1;
1317}
1318
/* IC LSI page: 8-byte accesses only (all accesses are errors) */
static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
/*
 * Return the thread interrupt context targeted by an indirect TIMA
 * access, as selected by the PC_TCTXT_INDIR0 register. Returns NULL
 * when no indirect access is in progress or the PIR is invalid.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Thread not enabled in PC_THREAD_EN_REG0: warn but do not fail */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
1367
1368static void xive_tm_indirect_write(void *opaque, hwaddr offset,
1369 uint64_t value, unsigned size)
1370{
1371 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1372
1373 xive_tctx_tm_write(tctx, offset, value, size);
1374}
1375
1376static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
1377 unsigned size)
1378{
1379 XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));
1380
1381 return xive_tctx_tm_read(tctx, offset, size);
1382}
1383
/* Indirect TIMA page: 1 to 8 byte accesses */
static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
1397
1398
1399
1400
1401static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
1402{
1403 switch (addr >> 3) {
1404 case X_VC_EQC_CONFIG:
1405
1406 return VC_EQC_SYNC_MASK;
1407 default:
1408 return pnv_xive_ic_reg_read(opaque, addr, size);
1409 }
1410}
1411
1412static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
1413 uint64_t val, unsigned size)
1414{
1415 pnv_xive_ic_reg_write(opaque, addr, val, size);
1416}
1417
/* XSCOM region: 8-byte accesses only */
static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
1431
1432
1433
1434
/*
 * VC BAR read handler: the EDT segment at 'offset' selects whether
 * the access is redirected to the IPI or the END ESB address space.
 * Returns all-ones on an invalid segment type or a failed access.
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset into the target ESB address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}
1477
1478static void pnv_xive_vc_write(void *opaque, hwaddr offset,
1479 uint64_t val, unsigned size)
1480{
1481 PnvXive *xive = PNV_XIVE(opaque);
1482 uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
1483 uint64_t edt_type = 0;
1484 uint64_t edt_offset;
1485 MemTxResult result;
1486 AddressSpace *edt_as = NULL;
1487
1488 if (edt_index < XIVE_TABLE_EDT_MAX) {
1489 edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
1490 }
1491
1492 switch (edt_type) {
1493 case CQ_TDR_EDT_IPI:
1494 edt_as = &xive->ipi_as;
1495 break;
1496 case CQ_TDR_EDT_EQ:
1497 edt_as = &xive->end_as;
1498 break;
1499 default:
1500 xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
1501 offset);
1502 return;
1503 }
1504
1505
1506 edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);
1507
1508 address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
1509 if (result != MEMTX_OK) {
1510 xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
1511 }
1512}
1513
/* MemoryRegionOps for the VC BAR: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1527
1528
1529
1530
1531
1532static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
1533 unsigned size)
1534{
1535 PnvXive *xive = PNV_XIVE(opaque);
1536
1537 xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
1538 return -1;
1539}
1540
1541static void pnv_xive_pc_write(void *opaque, hwaddr addr,
1542 uint64_t value, unsigned size)
1543{
1544 PnvXive *xive = PNV_XIVE(opaque);
1545
1546 xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
1547}
1548
/* MemoryRegionOps for the PC BAR: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
1562
1563void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
1564{
1565 XiveRouter *xrtr = XIVE_ROUTER(xive);
1566 uint8_t blk = xive->chip->chip_id;
1567 uint32_t srcno0 = XIVE_SRCNO(blk, 0);
1568 uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
1569 uint32_t nr_ends = pnv_xive_nr_ends(xive);
1570 XiveEAS eas;
1571 XiveEND end;
1572 int i;
1573
1574 monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
1575 srcno0 + nr_ipis - 1);
1576 xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);
1577
1578 monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
1579 srcno0 + nr_ipis - 1);
1580 for (i = 0; i < nr_ipis; i++) {
1581 if (xive_router_get_eas(xrtr, blk, i, &eas)) {
1582 break;
1583 }
1584 if (!xive_eas_is_masked(&eas)) {
1585 xive_eas_pic_print_info(&eas, i, mon);
1586 }
1587 }
1588
1589 monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
1590 for (i = 0; i < nr_ends; i++) {
1591 if (xive_router_get_end(xrtr, blk, i, &end)) {
1592 break;
1593 }
1594 xive_end_pic_print_info(&end, i, mon);
1595 }
1596}
1597
1598static void pnv_xive_reset(void *dev)
1599{
1600 PnvXive *xive = PNV_XIVE(dev);
1601 XiveSource *xsrc = &xive->ipi_source;
1602 XiveENDSource *end_xsrc = &xive->end_source;
1603
1604
1605
1606
1607
1608 xive->tctx_chipid = xive->chip->chip_id;
1609
1610
1611 xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;
1612
1613
1614 if (memory_region_is_mapped(&xsrc->esb_mmio)) {
1615 memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
1616 }
1617
1618 if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
1619 memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
1620 }
1621
1622 if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
1623 memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
1624 }
1625
1626 if (memory_region_is_mapped(&xive->end_edt_mmio)) {
1627 memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
1628 }
1629}
1630
/*
 * Instance init: create the child IPI and END source objects. They are
 * configured and realized later in pnv_xive_realize().
 */
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
1642
1643
1644
1645
/*
 * Maximum number of IPIs and ENDs: the VC BAR size divided by the ESB
 * page span (XIVE_ESB_64K_2PAGE).
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
1648
/*
 * Realize: resolve the "chip" link, realize the IPI and END sources,
 * then create all the MMIO regions and address spaces of the controller
 * (XSCOM, IC, VC, PC and TIMA). Region creation order matters: the EDT
 * containers must exist before reset/EDT programming can (un)map their
 * children.
 */
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    /* The "chip" link is mandatory: it identifies the owning chip */
    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    xive->chip = PNV_CHIP(obj);

    /* Configure and realize the IPI source with the maximum IRQ count */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Configure and realize the END source with the maximum END count */
    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initial BAR page shifts: 4K pages */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region for the controller registers */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* IC container region and its sub-regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* LSI pages: two pages worth of the IC region */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Indirect TIMA window */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /* VC region, plus the IPI/END address spaces it dispatches into */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /* EDT containers, created empty (size 0) until the EDT is programmed */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* PC region: all direct accesses are rejected */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Direct TIMA window */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
1751
1752static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
1753 int xscom_offset)
1754{
1755 const char compat[] = "ibm,power9-xive-x";
1756 char *name;
1757 int offset;
1758 uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
1759 uint32_t reg[] = {
1760 cpu_to_be32(lpc_pcba),
1761 cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
1762 };
1763
1764 name = g_strdup_printf("xive@%x", lpc_pcba);
1765 offset = fdt_add_subnode(fdt, xscom_offset, name);
1766 _FDT(offset);
1767 g_free(name);
1768
1769 _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
1770 _FDT((fdt_setprop(fdt, offset, "compatible", compat,
1771 sizeof(compat))));
1772 return 0;
1773}
1774
/* Base addresses of the four MMIO BARs, set by the chip model */
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};
1782
1783static void pnv_xive_class_init(ObjectClass *klass, void *data)
1784{
1785 DeviceClass *dc = DEVICE_CLASS(klass);
1786 PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
1787 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
1788 XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
1789
1790 xdc->dt_xscom = pnv_xive_dt_xscom;
1791
1792 dc->desc = "PowerNV XIVE Interrupt Controller";
1793 dc->realize = pnv_xive_realize;
1794 dc->props = pnv_xive_properties;
1795
1796 xrc->get_eas = pnv_xive_get_eas;
1797 xrc->get_end = pnv_xive_get_end;
1798 xrc->write_end = pnv_xive_write_end;
1799 xrc->get_nvt = pnv_xive_get_nvt;
1800 xrc->write_nvt = pnv_xive_write_nvt;
1801 xrc->get_tctx = pnv_xive_get_tctx;
1802
1803 xnc->notify = pnv_xive_notify;
1804};
1805
/* QOM type: a XIVE router that also implements the XSCOM interface */
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
1817
/* Register the PnvXive type with QOM at module init time */
static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)
1824