1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include "qemu/osdep.h"
31#include "hw/sysbus.h"
32#include "hw/pci/pci.h"
33#include "hw/pci/pci_host.h"
34#include "hw/pci/pci_bridge.h"
35#include "hw/pci/pci_bus.h"
36#include "hw/pci-host/apb.h"
37#include "sysemu/sysemu.h"
38#include "exec/address-spaces.h"
39#include "qemu/log.h"
40
41
42
43
/* Debug printf helper, compiled out unless DEBUG_APB is defined at build time */
#ifdef DEBUG_APB
#define APB_DPRINTF(fmt, ...) \
do { printf("APB: " fmt , ## __VA_ARGS__); } while (0)
#else
#define APB_DPRINTF(fmt, ...)
#endif




/* Same, for the IOMMU code paths (enable with DEBUG_IOMMU) */
#ifdef DEBUG_IOMMU
#define IOMMU_DPRINTF(fmt, ...) \
do { printf("IOMMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define IOMMU_DPRINTF(fmt, ...)
#endif
60
61
62
63
64
65
66
67
68
69
/* Interrupt mapping registers: bit 31 enables delivery, low bits are the map */
#define PBM_PCI_IMR_MASK 0x7fffffff
#define PBM_PCI_IMR_ENABLED 0x80000000

/* Reset control register bits (see apb_config_writel, 0xf020 range) */
#define POR (1U << 31)
#define SOFT_POR (1U << 30)
#define SOFT_XIR (1U << 29)
#define BTN_POR (1U << 28)
#define BTN_XIR (1U << 27)
#define RESET_MASK 0xf8000000
/* write-one-to-clear bits vs. directly writable bits -- TODO confirm vs. manual */
#define RESET_WCMASK 0x98000000
#define RESET_WMASK 0x60000000

/* 0x40 interrupt vectors; NO_IRQ_REQUEST is an out-of-range "none" sentinel */
#define MAX_IVEC 0x40
#define NO_IRQ_REQUEST (MAX_IVEC + 1)

/* IOMMU page geometry: 8K or 64K pages, selected per-TTE */
#define IOMMU_PAGE_SIZE_8K (1ULL << 13)
#define IOMMU_PAGE_MASK_8K (~(IOMMU_PAGE_SIZE_8K - 1))
#define IOMMU_PAGE_SIZE_64K (1ULL << 16)
#define IOMMU_PAGE_MASK_64K (~(IOMMU_PAGE_SIZE_64K - 1))

#define IOMMU_NREGS 3

/* IOMMU register offsets (byte offsets; regs[] is indexed by offset >> 3) */
#define IOMMU_CTRL 0x0
#define IOMMU_CTRL_TBW_SIZE (1ULL << 2)  /* set: 64K TTEs, clear: 8K TTEs */
#define IOMMU_CTRL_MMU_EN (1ULL)         /* translation enable */

#define IOMMU_CTRL_TSB_SHIFT 16

#define IOMMU_BASE 0x8
#define IOMMU_FLUSH 0x10

/* TTE (translation table entry) bits */
#define IOMMU_TTE_DATA_V (1ULL << 63)    /* valid */
#define IOMMU_TTE_DATA_SIZE (1ULL << 61) /* set: 64K page, clear: 8K page */
#define IOMMU_TTE_DATA_W (1ULL << 1)     /* writable */

#define IOMMU_TTE_PHYS_MASK_8K 0x1ffffffe000ULL
#define IOMMU_TTE_PHYS_MASK_64K 0x1ffffff8000ULL

/* TSB index masks for 8K TTEs, one per TSB size setting (8M .. 1G mapped) */
#define IOMMU_TSB_8K_OFFSET_MASK_8M 0x00000000007fe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_16M 0x0000000000ffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_32M 0x0000000001ffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_64M 0x0000000003ffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_128M 0x0000000007ffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_256M 0x000000000fffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_512M 0x000000001fffe000ULL
#define IOMMU_TSB_8K_OFFSET_MASK_1G 0x000000003fffe000ULL

/* TSB index masks for 64K TTEs (64M .. 2G mapped) */
#define IOMMU_TSB_64K_OFFSET_MASK_64M 0x0000000003ff0000ULL
#define IOMMU_TSB_64K_OFFSET_MASK_128M 0x0000000007ff0000ULL
#define IOMMU_TSB_64K_OFFSET_MASK_256M 0x000000000fff0000ULL
#define IOMMU_TSB_64K_OFFSET_MASK_512M 0x000000001fff0000ULL
#define IOMMU_TSB_64K_OFFSET_MASK_1G 0x000000003fff0000ULL
#define IOMMU_TSB_64K_OFFSET_MASK_2G 0x000000007fff0000ULL

/* State of the PBM's IOMMU: its DMA address space and raw registers */
typedef struct IOMMUState {
    AddressSpace iommu_as;   /* address space handed to PCI devices for DMA */
    IOMMUMemoryRegion iommu; /* translation hook (pbm_translate_iommu) */

    uint64_t regs[IOMMU_NREGS];
} IOMMUState;
130
#define TYPE_APB "pbm"

#define APB_DEVICE(obj) \
    OBJECT_CHECK(APBState, (obj), TYPE_APB)

#define TYPE_APB_IOMMU_MEMORY_REGION "pbm-iommu-memory-region"

/* State of the APB (Advanced PCI Bridge / PCI Bus Module) host bridge */
typedef struct APBState {
    PCIHostState parent_obj;

    MemoryRegion apb_config;     /* PBM configuration registers (mmio 0) */
    MemoryRegion pci_config;     /* PCI configuration space window (mmio 1) */
    MemoryRegion pci_mmio;       /* PCI memory space container */
    MemoryRegion pci_ioport;     /* alias of system I/O space (mmio 2) */
    uint64_t pci_irq_in;         /* one bit per incoming irq line (0..63) */
    IOMMUState iommu;
    uint32_t pci_control[16];
    uint32_t pci_irq_map[8];     /* PCI interrupt mapping regs (ivec 0..31) */
    uint32_t pci_err_irq_map[4];
    uint32_t obio_irq_map[32];   /* onboard-IO interrupt mapping (ivec 32..63) */
    qemu_irq *pbm_irqs;          /* incoming lines, fed to pci_apb_set_irq */
    qemu_irq *ivec_irqs;         /* outgoing interrupt vector lines to the CPU */
    unsigned int irq_request;    /* currently latched ivec, or NO_IRQ_REQUEST */
    uint32_t reset_control;
    unsigned int nr_resets;      /* counts resets so POR is latched only once */
} APBState;
157
158static inline void pbm_set_request(APBState *s, unsigned int irq_num)
159{
160 APB_DPRINTF("%s: request irq %d\n", __func__, irq_num);
161
162 s->irq_request = irq_num;
163 qemu_set_irq(s->ivec_irqs[irq_num], 1);
164}
165
166static inline void pbm_check_irqs(APBState *s)
167{
168
169 unsigned int i;
170
171
172 if (s->irq_request != NO_IRQ_REQUEST) {
173 pbm_set_request(s, s->irq_request);
174 return;
175 }
176
177 if (s->pci_irq_in == 0ULL) {
178 return;
179 }
180 for (i = 0; i < 32; i++) {
181 if (s->pci_irq_in & (1ULL << i)) {
182 if (s->pci_irq_map[i >> 2] & PBM_PCI_IMR_ENABLED) {
183 pbm_set_request(s, i);
184 return;
185 }
186 }
187 }
188 for (i = 32; i < 64; i++) {
189 if (s->pci_irq_in & (1ULL << i)) {
190 if (s->obio_irq_map[i - 32] & PBM_PCI_IMR_ENABLED) {
191 pbm_set_request(s, i);
192 break;
193 }
194 }
195 }
196}
197
198static inline void pbm_clear_request(APBState *s, unsigned int irq_num)
199{
200 APB_DPRINTF("%s: clear request irq %d\n", __func__, irq_num);
201 qemu_set_irq(s->ivec_irqs[irq_num], 0);
202 s->irq_request = NO_IRQ_REQUEST;
203}
204
205static AddressSpace *pbm_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
206{
207 IOMMUState *is = opaque;
208
209 return &is->iommu_as;
210}
211
212
213static IOMMUTLBEntry pbm_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr,
214 IOMMUAccessFlags flag)
215{
216 IOMMUState *is = container_of(iommu, IOMMUState, iommu);
217 hwaddr baseaddr, offset;
218 uint64_t tte;
219 uint32_t tsbsize;
220 IOMMUTLBEntry ret = {
221 .target_as = &address_space_memory,
222 .iova = 0,
223 .translated_addr = 0,
224 .addr_mask = ~(hwaddr)0,
225 .perm = IOMMU_NONE,
226 };
227
228 if (!(is->regs[IOMMU_CTRL >> 3] & IOMMU_CTRL_MMU_EN)) {
229
230 ret.iova = addr & IOMMU_PAGE_MASK_8K;
231 ret.translated_addr = addr;
232 ret.addr_mask = IOMMU_PAGE_MASK_8K;
233 ret.perm = IOMMU_RW;
234
235 return ret;
236 }
237
238 baseaddr = is->regs[IOMMU_BASE >> 3];
239 tsbsize = (is->regs[IOMMU_CTRL >> 3] >> IOMMU_CTRL_TSB_SHIFT) & 0x7;
240
241 if (is->regs[IOMMU_CTRL >> 3] & IOMMU_CTRL_TBW_SIZE) {
242
243 switch (tsbsize) {
244 case 0:
245 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_64M) >> 13;
246 break;
247 case 1:
248 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_128M) >> 13;
249 break;
250 case 2:
251 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_256M) >> 13;
252 break;
253 case 3:
254 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_512M) >> 13;
255 break;
256 case 4:
257 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_1G) >> 13;
258 break;
259 case 5:
260 offset = (addr & IOMMU_TSB_64K_OFFSET_MASK_2G) >> 13;
261 break;
262 default:
263
264 return ret;
265 }
266 } else {
267
268 switch (tsbsize) {
269 case 0:
270 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_8M) >> 10;
271 break;
272 case 1:
273 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_16M) >> 10;
274 break;
275 case 2:
276 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_32M) >> 10;
277 break;
278 case 3:
279 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_64M) >> 10;
280 break;
281 case 4:
282 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_128M) >> 10;
283 break;
284 case 5:
285 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_256M) >> 10;
286 break;
287 case 6:
288 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_512M) >> 10;
289 break;
290 case 7:
291 offset = (addr & IOMMU_TSB_8K_OFFSET_MASK_1G) >> 10;
292 break;
293 }
294 }
295
296 tte = address_space_ldq_be(&address_space_memory, baseaddr + offset,
297 MEMTXATTRS_UNSPECIFIED, NULL);
298
299 if (!(tte & IOMMU_TTE_DATA_V)) {
300
301 return ret;
302 }
303
304 if (tte & IOMMU_TTE_DATA_W) {
305
306 ret.perm = IOMMU_RW;
307 } else {
308 ret.perm = IOMMU_RO;
309 }
310
311
312 if (tte & IOMMU_TTE_DATA_SIZE) {
313
314 ret.iova = addr & IOMMU_PAGE_MASK_64K;
315 ret.translated_addr = tte & IOMMU_TTE_PHYS_MASK_64K;
316 ret.addr_mask = (IOMMU_PAGE_SIZE_64K - 1);
317 } else {
318
319 ret.iova = addr & IOMMU_PAGE_MASK_8K;
320 ret.translated_addr = tte & IOMMU_TTE_PHYS_MASK_8K;
321 ret.addr_mask = (IOMMU_PAGE_SIZE_8K - 1);
322 }
323
324 return ret;
325}
326
327static void iommu_config_write(void *opaque, hwaddr addr,
328 uint64_t val, unsigned size)
329{
330 IOMMUState *is = opaque;
331
332 IOMMU_DPRINTF("IOMMU config write: 0x%" HWADDR_PRIx " val: %" PRIx64
333 " size: %d\n", addr, val, size);
334
335 switch (addr) {
336 case IOMMU_CTRL:
337 if (size == 4) {
338 is->regs[IOMMU_CTRL >> 3] &= 0xffffffffULL;
339 is->regs[IOMMU_CTRL >> 3] |= val << 32;
340 } else {
341 is->regs[IOMMU_CTRL >> 3] = val;
342 }
343 break;
344 case IOMMU_CTRL + 0x4:
345 is->regs[IOMMU_CTRL >> 3] &= 0xffffffff00000000ULL;
346 is->regs[IOMMU_CTRL >> 3] |= val & 0xffffffffULL;
347 break;
348 case IOMMU_BASE:
349 if (size == 4) {
350 is->regs[IOMMU_BASE >> 3] &= 0xffffffffULL;
351 is->regs[IOMMU_BASE >> 3] |= val << 32;
352 } else {
353 is->regs[IOMMU_BASE >> 3] = val;
354 }
355 break;
356 case IOMMU_BASE + 0x4:
357 is->regs[IOMMU_BASE >> 3] &= 0xffffffff00000000ULL;
358 is->regs[IOMMU_BASE >> 3] |= val & 0xffffffffULL;
359 break;
360 case IOMMU_FLUSH:
361 case IOMMU_FLUSH + 0x4:
362 break;
363 default:
364 qemu_log_mask(LOG_UNIMP,
365 "apb iommu: Unimplemented register write "
366 "reg 0x%" HWADDR_PRIx " size 0x%x value 0x%" PRIx64 "\n",
367 addr, size, val);
368 break;
369 }
370}
371
372static uint64_t iommu_config_read(void *opaque, hwaddr addr, unsigned size)
373{
374 IOMMUState *is = opaque;
375 uint64_t val;
376
377 switch (addr) {
378 case IOMMU_CTRL:
379 if (size == 4) {
380 val = is->regs[IOMMU_CTRL >> 3] >> 32;
381 } else {
382 val = is->regs[IOMMU_CTRL >> 3];
383 }
384 break;
385 case IOMMU_CTRL + 0x4:
386 val = is->regs[IOMMU_CTRL >> 3] & 0xffffffffULL;
387 break;
388 case IOMMU_BASE:
389 if (size == 4) {
390 val = is->regs[IOMMU_BASE >> 3] >> 32;
391 } else {
392 val = is->regs[IOMMU_BASE >> 3];
393 }
394 break;
395 case IOMMU_BASE + 0x4:
396 val = is->regs[IOMMU_BASE >> 3] & 0xffffffffULL;
397 break;
398 case IOMMU_FLUSH:
399 case IOMMU_FLUSH + 0x4:
400 val = 0;
401 break;
402 default:
403 qemu_log_mask(LOG_UNIMP,
404 "apb iommu: Unimplemented register read "
405 "reg 0x%" HWADDR_PRIx " size 0x%x\n",
406 addr, size);
407 val = 0;
408 break;
409 }
410
411 IOMMU_DPRINTF("IOMMU config read: 0x%" HWADDR_PRIx " val: %" PRIx64
412 " size: %d\n", addr, val, size);
413
414 return val;
415}
416
/*
 * Write handler for the 64K APB configuration space.  Decodes the
 * register ranges of the PBM: IOMMU registers, interrupt mapping and
 * clear registers, PCI control and reset control.
 */
static void apb_config_writel (void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    APBState *s = opaque;
    IOMMUState *is = &s->iommu;

    APB_DPRINTF("%s: addr " TARGET_FMT_plx " val %" PRIx64 "\n", __func__, addr, val);

    switch (addr & 0xffff) {
    case 0x30 ... 0x4f: /* DMA error registers */
        /* XXX: not implemented yet */
        break;
    case 0x200 ... 0x217: /* IOMMU */
        iommu_config_write(is, (addr & 0x1f), val, size);
        break;
    case 0xc00 ... 0xc3f: /* PCI interrupt control */
        /* Mapping registers live in the odd 32-bit word of each pair */
        if (addr & 4) {
            unsigned int ino = (addr & 0x3f) >> 3;
            /* Only the enable bit is guest-writable; the map is fixed */
            s->pci_irq_map[ino] &= PBM_PCI_IMR_MASK;
            s->pci_irq_map[ino] |= val & ~PBM_PCI_IMR_MASK;
            /* Disabling the currently latched request retires it */
            if ((s->irq_request == ino) && !(val & ~PBM_PCI_IMR_MASK)) {
                pbm_clear_request(s, ino);
            }
            pbm_check_irqs(s);
        }
        break;
    case 0x1000 ... 0x107f: /* OBIO interrupt control */
        if (addr & 4) {
            unsigned int ino = ((addr & 0xff) >> 3);
            s->obio_irq_map[ino] &= PBM_PCI_IMR_MASK;
            s->obio_irq_map[ino] |= val & ~PBM_PCI_IMR_MASK;
            /* OBIO ivecs are offset by 0x20 (32) in the latched request */
            if ((s->irq_request == (ino | 0x20))
                && !(val & ~PBM_PCI_IMR_MASK)) {
                pbm_clear_request(s, ino | 0x20);
            }
            pbm_check_irqs(s);
        }
        break;
    case 0x1400 ... 0x14ff: /* PCI interrupt clear */
        if (addr & 4) {
            /* One clear register per group of 4 PCI ivecs -- TODO confirm */
            unsigned int ino = (addr & 0xff) >> 5;
            if ((s->irq_request / 4) == ino) {
                pbm_clear_request(s, s->irq_request);
                pbm_check_irqs(s);
            }
        }
        break;
    case 0x1800 ... 0x1860: /* OBIO interrupt clear */
        if (addr & 4) {
            unsigned int ino = ((addr & 0xff) >> 3) | 0x20;
            if (s->irq_request == ino) {
                pbm_clear_request(s, ino);
                pbm_check_irqs(s);
            }
        }
        break;
    case 0x2000 ... 0x202f: /* PCI control */
        s->pci_control[(addr & 0x3f) >> 2] = val;
        break;
    case 0xf020 ... 0xf027: /* Reset control */
        if (addr & 4) {
            val &= RESET_MASK;
            /* Write-one-to-clear bits, then directly writable bits */
            s->reset_control &= ~(val & RESET_WCMASK);
            s->reset_control |= val & RESET_WMASK;
            if (val & SOFT_POR) {
                /* Power-on reset: restart the reset counter too */
                s->nr_resets = 0;
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            } else if (val & SOFT_XIR) {
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
        }
        break;
    case 0x5000 ... 0x51cf: /* PIO/DMA diagnostics */
    case 0xa400 ... 0xa67f: /* IOMMU diagnostics */
    case 0xa800 ... 0xa80f: /* Interrupt diagnostics */
    case 0xf000 ... 0xf01f: /* FFB config, memory control */
        /* we don't care */
    default:
        break;
    }
}
498
/*
 * Read handler for the 64K APB configuration space.  Unimplemented or
 * write-only locations read as zero.
 */
static uint64_t apb_config_readl (void *opaque,
                                  hwaddr addr, unsigned size)
{
    APBState *s = opaque;
    IOMMUState *is = &s->iommu;
    uint32_t val;

    switch (addr & 0xffff) {
    case 0x30 ... 0x4f: /* DMA error registers */
        val = 0;
        /* XXX: not implemented yet */
        break;
    case 0x200 ... 0x217: /* IOMMU */
        val = iommu_config_read(is, (addr & 0x1f), size);
        break;
    case 0xc00 ... 0xc3f: /* PCI interrupt control */
        /* Mapping registers live in the odd 32-bit word of each pair */
        if (addr & 4) {
            val = s->pci_irq_map[(addr & 0x3f) >> 3];
        } else {
            val = 0;
        }
        break;
    case 0x1000 ... 0x107f: /* OBIO interrupt control */
        if (addr & 4) {
            val = s->obio_irq_map[(addr & 0xff) >> 3];
        } else {
            val = 0;
        }
        break;
    case 0x1080 ... 0x108f: /* PCI bus error interrupt mapping */
        if (addr & 4) {
            val = s->pci_err_irq_map[(addr & 0xf) >> 3];
        } else {
            val = 0;
        }
        break;
    case 0x2000 ... 0x202f: /* PCI control */
        val = s->pci_control[(addr & 0x3f) >> 2];
        break;
    case 0xf020 ... 0xf027: /* Reset control */
        if (addr & 4) {
            val = s->reset_control;
        } else {
            val = 0;
        }
        break;
    case 0x5000 ... 0x51cf: /* PIO/DMA diagnostics */
    case 0xa400 ... 0xa67f: /* IOMMU diagnostics */
    case 0xa800 ... 0xa80f: /* Interrupt diagnostics */
    case 0xf000 ... 0xf01f: /* FFB config, memory control */
        /* we don't care */
    default:
        val = 0;
        break;
    }
    APB_DPRINTF("%s: addr " TARGET_FMT_plx " -> %x\n", __func__, addr, val);

    return val;
}
558
/* Ops for the 64K APB configuration register region (sysbus mmio index 0) */
static const MemoryRegionOps apb_config_ops = {
    .read = apb_config_readl,
    .write = apb_config_writel,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
564
565static void apb_pci_config_write(void *opaque, hwaddr addr,
566 uint64_t val, unsigned size)
567{
568 APBState *s = opaque;
569 PCIHostState *phb = PCI_HOST_BRIDGE(s);
570
571 val = qemu_bswap_len(val, size);
572 APB_DPRINTF("%s: addr " TARGET_FMT_plx " val %" PRIx64 "\n", __func__, addr, val);
573 pci_data_write(phb->bus, addr, val, size);
574}
575
576static uint64_t apb_pci_config_read(void *opaque, hwaddr addr,
577 unsigned size)
578{
579 uint32_t ret;
580 APBState *s = opaque;
581 PCIHostState *phb = PCI_HOST_BRIDGE(s);
582
583 ret = pci_data_read(phb->bus, addr, size);
584 ret = qemu_bswap_len(ret, size);
585 APB_DPRINTF("%s: addr " TARGET_FMT_plx " -> %x\n", __func__, addr, ret);
586 return ret;
587}
588
589
590static int pci_apb_map_irq(PCIDevice *pci_dev, int irq_num)
591{
592 return ((pci_dev->devfn & 0x18) >> 1) + irq_num;
593}
594
595static int pci_pbm_map_irq(PCIDevice *pci_dev, int irq_num)
596{
597 int bus_offset;
598 if (pci_dev->devfn & 1)
599 bus_offset = 16;
600 else
601 bus_offset = 0;
602 return (bus_offset + (PCI_SLOT(pci_dev->devfn) << 2) + irq_num) & 0x1f;
603}
604
605static void pci_apb_set_irq(void *opaque, int irq_num, int level)
606{
607 APBState *s = opaque;
608
609 APB_DPRINTF("%s: set irq_in %d level %d\n", __func__, irq_num, level);
610
611 if (irq_num < 32) {
612 if (level) {
613 s->pci_irq_in |= 1ULL << irq_num;
614 if (s->pci_irq_map[irq_num >> 2] & PBM_PCI_IMR_ENABLED) {
615 pbm_set_request(s, irq_num);
616 }
617 } else {
618 s->pci_irq_in &= ~(1ULL << irq_num);
619 }
620 } else {
621
622 if (level) {
623 APB_DPRINTF("%s: set irq %d level %d\n", __func__, irq_num, level);
624 s->pci_irq_in |= 1ULL << irq_num;
625 if ((s->irq_request == NO_IRQ_REQUEST)
626 && (s->obio_irq_map[irq_num - 32] & PBM_PCI_IMR_ENABLED)) {
627 pbm_set_request(s, irq_num);
628 }
629 } else {
630 s->pci_irq_in &= ~(1ULL << irq_num);
631 }
632 }
633}
634
/* Realize a Simba PCI-PCI bridge and set its config-header reset values. */
static void apb_pci_bridge_realize(PCIDevice *dev, Error **errp)
{
    pci_bridge_initfn(dev, TYPE_PCI_BUS);

    /*
     * Command register: per the PCI bridge spec, bus mastering and memory
     * space are off after reset, but the Simba comes up with memory space
     * enabled (reset value depends on a boot pin which is tied high on
     * this board) -- NOTE(review): rationale inferred from the values
     * below; confirm against the Simba (805-1251) manual.
     */
    pci_set_word(dev->config + PCI_COMMAND,
                 PCI_COMMAND_MEMORY);
    pci_set_word(dev->config + PCI_STATUS,
                 PCI_STATUS_FAST_BACK | PCI_STATUS_66MHZ |
                 PCI_STATUS_DEVSEL_MEDIUM);
}
654
/*
 * Create and wire up the APB host bridge: the root PCI bus, the IOMMU
 * DMA address space, the host-bridge PCI device, and the two Simba
 * secondary bridges.  Returns the root bus; *bus2 and *bus3 receive the
 * secondary buses and *pbm_irqs the PBM's incoming interrupt lines.
 */
PCIBus *pci_apb_init(hwaddr special_base,
                     hwaddr mem_base,
                     qemu_irq *ivec_irqs, PCIBus **bus2, PCIBus **bus3,
                     qemu_irq **pbm_irqs)
{
    DeviceState *dev;
    SysBusDevice *s;
    PCIHostState *phb;
    APBState *d;
    IOMMUState *is;
    PCIDevice *pci_dev;
    PCIBridge *br;

    /* Ultra II PBM main bus */
    dev = qdev_create(NULL, TYPE_APB);
    d = APB_DEVICE(dev);
    phb = PCI_HOST_BRIDGE(dev);
    phb->bus = pci_register_bus(DEVICE(phb), "pci",
                                pci_apb_set_irq, pci_pbm_map_irq, d,
                                &d->pci_mmio,
                                get_system_io(),
                                0, 32, TYPE_PCI_BUS);
    qdev_init_nofail(dev);
    s = SYS_BUS_DEVICE(dev);
    /* mmio region 0: apb_config (see pci_pbm_init_device) */
    sysbus_mmio_map(s, 0, special_base);
    /* mmio region 1: PCI configuration space */
    sysbus_mmio_map(s, 1, special_base + 0x1000000ULL);
    /* mmio region 2: PCI I/O port space */
    sysbus_mmio_map(s, 2, special_base + 0x2000000ULL);

    /* 4GB PCI memory window mapped at mem_base */
    memory_region_init(&d->pci_mmio, OBJECT(s), "pci-mmio", 0x100000000ULL);
    memory_region_add_subregion(get_system_memory(), mem_base, &d->pci_mmio);

    *pbm_irqs = d->pbm_irqs;
    d->ivec_irqs = ivec_irqs;

    /* Host bridge as seen from the PCI side */
    pci_create_simple(phb->bus, 0, "pbm-pci");

    /* APB IOMMU: every device's DMA goes through its address space */
    is = &d->iommu;
    memset(is, 0, sizeof(IOMMUState));

    memory_region_init_iommu(&is->iommu, sizeof(is->iommu),
                             TYPE_APB_IOMMU_MEMORY_REGION, OBJECT(dev),
                             "iommu-apb", UINT64_MAX);
    address_space_init(&is->iommu_as, MEMORY_REGION(&is->iommu), "pbm-as");
    pci_setup_iommu(phb->bus, pbm_pci_dma_iommu, is);

    /* Simba secondary bus A (devfn 1.0) */
    pci_dev = pci_create_multifunction(phb->bus, PCI_DEVFN(1, 0), true,
                                       "pbm-bridge");
    br = PCI_BRIDGE(pci_dev);
    pci_bridge_map_irq(br, "Advanced PCI Bus secondary bridge 1",
                       pci_apb_map_irq);
    qdev_init_nofail(&pci_dev->qdev);
    *bus2 = pci_bridge_get_sec_bus(br);

    /* Simba secondary bus B (devfn 1.1) */
    pci_dev = pci_create_multifunction(phb->bus, PCI_DEVFN(1, 1), true,
                                       "pbm-bridge");
    br = PCI_BRIDGE(pci_dev);
    pci_bridge_map_irq(br, "Advanced PCI Bus secondary bridge 2",
                       pci_apb_map_irq);
    qdev_init_nofail(&pci_dev->qdev);
    *bus3 = pci_bridge_get_sec_bus(br);

    return phb->bus;
}
723
724static void pci_pbm_reset(DeviceState *d)
725{
726 unsigned int i;
727 APBState *s = APB_DEVICE(d);
728
729 for (i = 0; i < 8; i++) {
730 s->pci_irq_map[i] &= PBM_PCI_IMR_MASK;
731 }
732 for (i = 0; i < 32; i++) {
733 s->obio_irq_map[i] &= PBM_PCI_IMR_MASK;
734 }
735
736 s->irq_request = NO_IRQ_REQUEST;
737 s->pci_irq_in = 0ULL;
738
739 if (s->nr_resets++ == 0) {
740
741 s->reset_control = POR;
742 }
743}
744
/* Ops for the PCI configuration space window (sysbus mmio index 1) */
static const MemoryRegionOps pci_config_ops = {
    .read = apb_pci_config_read,
    .write = apb_pci_config_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
750
/*
 * SysBus init: set power-on defaults for the interrupt mapping registers,
 * allocate the incoming interrupt lines, and publish the three mmio
 * regions.  The region indices must match the sysbus_mmio_map() calls in
 * pci_apb_init(): 0 = config regs, 1 = PCI config space, 2 = I/O ports.
 */
static int pci_pbm_init_device(SysBusDevice *dev)
{
    APBState *s;
    unsigned int i;

    s = APB_DEVICE(dev);
    /* Mapping defaults: enable bit clear, fixed ivec encoded in low bits */
    for (i = 0; i < 8; i++) {
        s->pci_irq_map[i] = (0x1f << 6) | (i << 2);
    }
    for (i = 0; i < 2; i++) {
        s->pci_err_irq_map[i] = (0x1f << 6) | 0x30;
    }
    for (i = 0; i < 32; i++) {
        s->obio_irq_map[i] = ((0x1f << 6) | 0x20) + i;
    }
    s->pbm_irqs = qemu_allocate_irqs(pci_apb_set_irq, s, MAX_IVEC);
    s->irq_request = NO_IRQ_REQUEST;
    s->pci_irq_in = 0ULL;

    /* apb_config: 64K of PBM configuration registers (region 0) */
    memory_region_init_io(&s->apb_config, OBJECT(s), &apb_config_ops, s,
                          "apb-config", 0x10000);
    /* at region 0 */
    sysbus_init_mmio(dev, &s->apb_config);
    /* PCI configuration space window (region 1) */
    memory_region_init_io(&s->pci_config, OBJECT(s), &pci_config_ops, s,
                          "apb-pci-config", 0x1000000);
    /* at region 1 */
    sysbus_init_mmio(dev, &s->pci_config);

    /* pci_ioport: alias of the system I/O space (region 2) */
    memory_region_init_alias(&s->pci_ioport, OBJECT(s), "apb-pci-ioport",
                             get_system_io(), 0, 0x10000);
    /* at region 2 */
    sysbus_init_mmio(dev, &s->pci_ioport);

    return 0;
}
789
790static void pbm_pci_host_realize(PCIDevice *d, Error **errp)
791{
792 pci_set_word(d->config + PCI_COMMAND,
793 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
794 pci_set_word(d->config + PCI_STATUS,
795 PCI_STATUS_FAST_BACK | PCI_STATUS_66MHZ |
796 PCI_STATUS_DEVSEL_MEDIUM);
797}
798
799static void pbm_pci_host_class_init(ObjectClass *klass, void *data)
800{
801 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
802 DeviceClass *dc = DEVICE_CLASS(klass);
803
804 k->realize = pbm_pci_host_realize;
805 k->vendor_id = PCI_VENDOR_ID_SUN;
806 k->device_id = PCI_DEVICE_ID_SUN_SABRE;
807 k->class_id = PCI_CLASS_BRIDGE_HOST;
808
809
810
811
812 dc->user_creatable = false;
813}
814
/* PCI-facing half of the host bridge (device 0 on the root bus) */
static const TypeInfo pbm_pci_host_info = {
    .name = "pbm-pci",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .class_init = pbm_pci_host_class_init,
};
821
822static void pbm_host_class_init(ObjectClass *klass, void *data)
823{
824 DeviceClass *dc = DEVICE_CLASS(klass);
825 SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
826
827 k->init = pci_pbm_init_device;
828 set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
829 dc->reset = pci_pbm_reset;
830}
831
/* SysBus half of the host bridge ("pbm") */
static const TypeInfo pbm_host_info = {
    .name = TYPE_APB,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(APBState),
    .class_init = pbm_host_class_init,
};
838
839static void pbm_pci_bridge_class_init(ObjectClass *klass, void *data)
840{
841 DeviceClass *dc = DEVICE_CLASS(klass);
842 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
843
844 k->realize = apb_pci_bridge_realize;
845 k->exit = pci_bridge_exitfn;
846 k->vendor_id = PCI_VENDOR_ID_SUN;
847 k->device_id = PCI_DEVICE_ID_SUN_SIMBA;
848 k->revision = 0x11;
849 k->config_write = pci_bridge_write_config;
850 k->is_bridge = 1;
851 set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
852 dc->reset = pci_bridge_reset;
853 dc->vmsd = &vmstate_pci_device;
854}
855
/* Simba PCI-PCI bridge providing the two secondary buses */
static const TypeInfo pbm_pci_bridge_info = {
    .name = "pbm-bridge",
    .parent = TYPE_PCI_BRIDGE,
    .class_init = pbm_pci_bridge_class_init,
};
861
/* Hook the APB translation routine into the IOMMU memory region class. */
static void pbm_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = pbm_translate_iommu;
}
868
/* IOMMU memory region type used for the PBM DMA address space */
static const TypeInfo pbm_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_APB_IOMMU_MEMORY_REGION,
    .class_init = pbm_iommu_memory_region_class_init,
};
874
/* Register all PBM device models with the QOM type system. */
static void pbm_register_types(void)
{
    type_register_static(&pbm_host_info);
    type_register_static(&pbm_pci_host_info);
    type_register_static(&pbm_pci_bridge_info);
    type_register_static(&pbm_iommu_memory_region_info);
}

type_init(pbm_register_types)
884