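/*
 * PCIe host bridge support for the PowerPC "WSP" (wire-speed processor)
 * platform: PHB setup, configuration space access, IOMMU (TCE) table
 * management, PIO workarounds and error interrupt reporting.
 */
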
/* Enable pr_debug() output for this file */
#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>
#include <asm/debug.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"

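/* Maximum number of 512K TCE table chunks (one per TVT entry) that a
 * single DMA table may use
 */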
#define MAX_TABLE_TVT_COUNT	8

struct wsp_dma_table {
	struct list_head	link;
	struct iommu_table	table;
	struct wsp_phb		*phb;
	struct page		*tces[MAX_TABLE_TVT_COUNT];
};

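/* The 2G of 32-bit DMA space is carved into NUM_DMA32_REGIONS equal
 * regions. With a single region, all devices on a PHB share one iommu
 * table; with several, each device gets its own region whose TVT
 * entries are filtered on that device's bus/dev/func numbers.
 */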
#define NUM_DMA32_REGIONS	1

struct wsp_phb {
	struct pci_controller	*hose;

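	/* Lock serializing the DMA32 region map, TVT programming and
	 * the PIO workaround sequencing below
	 */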
	spinlock_t		lock;
	struct list_head	dma_tables;
	unsigned long		dma32_map;
	unsigned long		dma32_base;
	unsigned int		dma32_num_regions;
	unsigned long		dma32_region_size;

	/* debugfs directory for this PHB */
	struct dentry		*ddir;

	struct list_head	all;
};
static LIST_HEAD(wsp_phbs);

/* Define to pr_debug(fmt) to trace config space accesses */
#define cfg_debug(fmt...)

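/* Configuration space is reached through an indirect address/data
 * register pair: the address word encodes the bus, devfn and the
 * dword-aligned register offset, plus byte enables for sub-dword
 * accesses; the data register is then accessed with the value
 * shifted into the right byte lane.
 */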
static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xff;
		cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xffff;
		cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
		cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};

#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2
#define TCE_PCI_READ		0x1
#define TCE_RPN_MASK		0x3fffffffffful
#define TCE_RPN_SHIFT		12

/* Define to pr_debug(fmt) to trace TCE updates */
#define dma_debug(fmt...)

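/* A TCE is a 64-bit entry combining the real page number (shifted by
 * TCE_RPN_SHIFT and masked by TCE_RPN_MASK) with read and/or write
 * permission bits. On DD1 hardware the
 * CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS option forces both permission
 * bits regardless of the requested DMA direction.
 */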
static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	proto_tce |= TCE_PCI_WRITE;
#else
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;
#endif

	while (npages--) {
		/* We don't use tbl->it_base as the TCE tables can be
		 * scattered; each 512K chunk holds 64K entries
		 */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		/* Can't move this out of the loop, we may cross a
		 * memory region boundary
		 */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		index++;
	}
	return 0;
}

static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	while (npages--) {
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);
		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Invalidate the hardware's cached copy of the TCE */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}

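/* Create the DMA table for one 32-bit region: allocate a 512K TCE
 * chunk per 256M of DMA space, program the matching TVT entries
 * (optionally filtered on "validate"'s bus/dev/func) and register the
 * resulting iommu table. Returns an ERR_PTR on allocation failure.
 */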
static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Each TVT entry covers 256M at most */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;

	/* Consecutive regions use consecutive groups of TVT entries */
	tvt = region * tvts_per_table;

	pr_debug(" Region : %d\n", region);
	pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs : %d\n", tvts_per_table);
	pr_debug(" Base TVT : %d\n", tvt);
	pr_debug(" Node : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate the TCE chunk: with 4K I/O pages, 256M takes
		 * 64K entries of 8 bytes, ie. 512K (0x80000)
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* TCE table size */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* 4K I/O page size */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
		/* TCE table address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* When creating a table dedicated to one device, add the
		 * bus/device/function filters so only that device can
		 * use this window
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* Program the TVT entry under the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Initialize the iommu table */
	tbl->table.it_blocksize = 16;
	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

 fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}

static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to bridges */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

	spin_lock_irqsave(&phb->lock, flags);

	/* If only one region, check if it already exists */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT,
			(table->table.it_offset << IOMMU_PAGE_SHIFT)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}

static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

	/* Force the class code of the built-in P2P bridge to
	 * PCI-to-PCI bridge
	 */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* DD1 workaround: set the top two bits of debug control 5 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure the IO window */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	/* Configure the 32-bit memory window (M32A) */
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

	/* Clear all TVT entries */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}

	/* Turn on the "64B TCE" mode bit in the PHB config */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and the M32A window; bypass
	 * DMA translation altogether when the iommu is disabled
	 */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
	      PCIE_REG_PHBC_IO_EN |
	      PCIE_REG_PHBC_64BIT_MSI_EN |
	      PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Set an additional control bit in PHB register 0xe00 */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask the top bit of the DMA error status registers */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Configure the UTL error severities and interrupt enables */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}

static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
	u64 val;
	int i;

	/* Poll a PHB status register until it indicates idle */
	for (i = 0; i < 10000; i++) {
		val = in_be64(phb->hose->cfg_data + 0xe08);
		if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
			return;
		udelay(1);
	}
	pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
		   phb->hose->global_number, port);
}

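/* PIO accessors. IO accesses are routed through the io-workarounds
 * framework so that each access takes the PHB lock and waits for the
 * bridge to go idle before being issued.
 */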
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}

/* Memory space accessors need no workaround */
#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)

#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};

static int __init wsp_setup_one_phb(struct device_node *np)
{
	struct pci_controller *hose;
	struct wsp_phb *phb;

	pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);

	phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
	if (!phb)
		return -ENOMEM;
	hose = pcibios_alloc_controller(np);
	if (!hose) {
		/* Can't really free the phb, it may have come from bootmem */
		return -ENOMEM;
	}
	hose->private_data = phb;
	phb->hose = hose;

	INIT_LIST_HEAD(&phb->dma_tables);
	spin_lock_init(&phb->lock);

	/* Cover the whole bus range for now */
	hose->first_busno = 0;
	hose->last_busno = 0xff;

	/* Map the bridge registers, used by the config space
	 * accessors and the HW setup below
	 */
	hose->cfg_data = of_iomap(hose->dn, 0);

	pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

	/* Parse the "ranges" property to set up the IO and MMIO windows */
	pci_process_bridge_OF_ranges(hose, np, 0);

	/* Have the PCI core reassign everything and use per-domain
	 * /proc entries
	 */
	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
		      PCI_ENABLE_PROC_DOMAINS);

	/* Set up the DMA32 region parameters */
	phb->dma32_base = 0;
	phb->dma32_num_regions = NUM_DMA32_REGIONS;
	if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
		pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
			   MAX_TABLE_TVT_COUNT);
		phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
	}
	phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;

	BUG_ON(!is_power_of_2(phb->dma32_region_size));

	/* Set up the config space accessors */
	hose->ops = &wsp_pcie_pci_ops;

	/* Configure the HW */
	wsp_pcie_configure_hw(hose);

	/* Register the PIO workarounds and set up MSIs */
	iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
	wsp_setup_phb_msi(hose);
#endif

	/* Add to the global list */
	list_add(&phb->all, &wsp_phbs);

	return 0;
}

void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find the host bridge nodes and set them up */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish the pci_dn device node linkage for all PHBs */
	pci_devs_phb_init();

	/* Pick the DMA ops: direct if TCEs are disabled on the command
	 * line, iommu based otherwise
	 */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}

#define err_debug(fmt...)	pr_debug(fmt)

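/* Fallback used when the PHB node carries no error interrupt: dig it
 * out of the child P2P bridge's "interrupt-map" instead.
 */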
static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
{
	const u32 *prop;
	int hw_irq;

	/* Ok, no interrupts property, let's try to find our child P2P */
	np = of_get_next_child(np, NULL);
	if (np == NULL)
		return 0;

	/* Grab its interrupt map */
	prop = of_get_property(np, "interrupt-map", NULL);
	if (prop == NULL)
		return 0;

	/* HW interrupt number of the first INTx entry */
	hw_irq = prop[5] & 0xf;

	/* Pick the last interrupt of the PHB's group of 5
	 * (0..4 for one PHB, 5..9 for the other)
	 */
	if (hw_irq < 5)
		hw_irq = 4;
	else
		hw_irq = 9;
	hw_irq |= prop[5] & ~0xf;

	err_debug("PCI: Using 0x%x as error IRQ for %s\n",
		  hw_irq, np->parent->full_name);
	return irq_create_mapping(NULL, hw_irq);
}

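/* Table of registers exposed through debugfs, used both for the
 * "all_regs" dump file and the individual per-register files
 */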
static const struct {
	u32 offset;
	const char *name;
} wsp_pci_regs[] = {
#define DREG(x) { PCIE_REG_##x, #x }
#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }

	/* General PHB registers */
	DREG(DMA_CHAN_STATUS),
	DREG(CPU_LOADSTORE_STATUS),
	DREG(LOCK0),
	DREG(LOCK1),
	DREG(PHB_CONFIG),
	DREG(IO_BASE_ADDR),
	DREG(IO_BASE_MASK),
	DREG(IO_START_ADDR),
	DREG(M32A_BASE_ADDR),
	DREG(M32A_BASE_MASK),
	DREG(M32A_START_ADDR),
	DREG(M32B_BASE_ADDR),
	DREG(M32B_BASE_MASK),
	DREG(M32B_START_ADDR),
	DREG(M64_BASE_ADDR),
	DREG(M64_BASE_MASK),
	DREG(M64_START_ADDR),
	DREG(TCE_KILL),
	DREG(LOCK2),
	DREG(PHB_GEN_CAP),
	DREG(PHB_TCE_CAP),
	DREG(PHB_IRQ_CAP),
	DREG(PHB_EEH_CAP),
	DREG(PAPR_ERR_INJ_CONTROL),
	DREG(PAPR_ERR_INJ_ADDR),
	DREG(PAPR_ERR_INJ_MASK),

	/* UTL core registers */
	DUTL(SYS_BUS_CONTROL),
	DUTL(STATUS),
	DUTL(SYS_BUS_AGENT_STATUS),
	DUTL(SYS_BUS_AGENT_ERR_SEV),
	DUTL(SYS_BUS_AGENT_IRQ_EN),
	DUTL(SYS_BUS_BURST_SZ_CONF),
	DUTL(REVISION_ID),
	DUTL(OUT_POST_HDR_BUF_ALLOC),
	DUTL(OUT_POST_DAT_BUF_ALLOC),
	DUTL(IN_POST_HDR_BUF_ALLOC),
	DUTL(IN_POST_DAT_BUF_ALLOC),
	DUTL(OUT_NP_BUF_ALLOC),
	DUTL(IN_NP_BUF_ALLOC),
	DUTL(PCIE_TAGS_ALLOC),
	DUTL(GBIF_READ_TAGS_ALLOC),

	/* UTL port registers */
	DUTL(PCIE_PORT_CONTROL),
	DUTL(PCIE_PORT_STATUS),
	DUTL(PCIE_PORT_ERROR_SEV),
	DUTL(PCIE_PORT_IRQ_EN),
	DUTL(RC_STATUS),
	DUTL(RC_ERR_SEVERITY),
	DUTL(RC_IRQ_EN),
	DUTL(EP_STATUS),
	DUTL(EP_ERR_SEVERITY),
	DUTL(EP_ERR_IRQ_EN),
	DUTL(PCI_PM_CTRL1),
	DUTL(PCI_PM_CTRL2),

	/* PCI-E core registers */
	DREG(SYSTEM_CONFIG1),
	DREG(SYSTEM_CONFIG2),
	DREG(EP_SYSTEM_CONFIG),
	DREG(EP_FLR),
	DREG(EP_BAR_CONFIG),
	DREG(LINK_CONFIG),
	DREG(PM_CONFIG),
	DREG(DLP_CONTROL),
	DREG(DLP_STATUS),
	DREG(ERR_REPORT_CONTROL),
	DREG(SLOT_CONTROL1),
	DREG(SLOT_CONTROL2),
	DREG(UTL_CONFIG),
	DREG(BUFFERS_CONFIG),
	DREG(ERROR_INJECT),
	DREG(SRIOV_CONFIG),
	DREG(PF0_SRIOV_STATUS),
	DREG(PF1_SRIOV_STATUS),
	DREG(PORT_NUMBER),
	DREG(POR_SYSTEM_CONFIG),

	/* PHB internal registers */
	DREG(PHB_VERSION),
	DREG(RESET),
	DREG(PHB_CONTROL),
	DREG(PHB_TIMEOUT_CONTROL1),
	DREG(PHB_QUIESCE_DMA),
	DREG(PHB_DMA_READ_TAG_ACTV),
	DREG(PHB_TCE_READ_TAG_ACTV),

	/* FIR registers */
	DREG(LEM_FIR_ACCUM),
	DREG(LEM_FIR_AND_MASK),
	DREG(LEM_FIR_OR_MASK),
	DREG(LEM_ACTION0),
	DREG(LEM_ACTION1),
	DREG(LEM_ERROR_MASK),
	DREG(LEM_ERROR_AND_MASK),
	DREG(LEM_ERROR_OR_MASK),

	/* Error trap registers */
	DREG(PHB_ERR_STATUS),
	DREG(PHB_ERR1_STATUS),
	DREG(PHB_ERR_INJECT),
	DREG(PHB_ERR_LEM_ENABLE),
	DREG(PHB_ERR_IRQ_ENABLE),
	DREG(PHB_ERR_FREEZE_ENABLE),
	DREG(PHB_ERR_SIDE_ENABLE),
	DREG(PHB_ERR_LOG_0),
	DREG(PHB_ERR_LOG_1),
	DREG(PHB_ERR_STATUS_MASK),
	DREG(PHB_ERR1_STATUS_MASK),
	DREG(MMIO_ERR_STATUS),
	DREG(MMIO_ERR1_STATUS),
	DREG(MMIO_ERR_INJECT),
	DREG(MMIO_ERR_LEM_ENABLE),
	DREG(MMIO_ERR_IRQ_ENABLE),
	DREG(MMIO_ERR_FREEZE_ENABLE),
	DREG(MMIO_ERR_SIDE_ENABLE),
	DREG(MMIO_ERR_LOG_0),
	DREG(MMIO_ERR_LOG_1),
	DREG(MMIO_ERR_STATUS_MASK),
	DREG(MMIO_ERR1_STATUS_MASK),
	DREG(DMA_ERR_STATUS),
	DREG(DMA_ERR1_STATUS),
	DREG(DMA_ERR_INJECT),
	DREG(DMA_ERR_LEM_ENABLE),
	DREG(DMA_ERR_IRQ_ENABLE),
	DREG(DMA_ERR_FREEZE_ENABLE),
	DREG(DMA_ERR_SIDE_ENABLE),
	DREG(DMA_ERR_LOG_0),
	DREG(DMA_ERR_LOG_1),
	DREG(DMA_ERR_STATUS_MASK),
	DREG(DMA_ERR1_STATUS_MASK),

	/* Debug and trace registers */
	DREG(PHB_DEBUG_CONTROL0),
	DREG(PHB_DEBUG_STATUS0),
	DREG(PHB_DEBUG_CONTROL1),
	DREG(PHB_DEBUG_STATUS1),
	DREG(PHB_DEBUG_CONTROL2),
	DREG(PHB_DEBUG_STATUS2),
	DREG(PHB_DEBUG_CONTROL3),
	DREG(PHB_DEBUG_STATUS3),
	DREG(PHB_DEBUG_CONTROL4),
	DREG(PHB_DEBUG_STATUS4),
	DREG(PHB_DEBUG_CONTROL5),
	DREG(PHB_DEBUG_STATUS5),
};

static int wsp_pci_regs_show(struct seq_file *m, void *private)
{
	struct wsp_phb *phb = m->private;
	struct pci_controller *hose = phb->hose;
	int i;

	for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
		/* Skip write-only registers */
		if (wsp_pci_regs[i].offset == 0xc08 ||
		    wsp_pci_regs[i].offset == 0xc10 ||
		    wsp_pci_regs[i].offset == 0xc38 ||
		    wsp_pci_regs[i].offset == 0xc40)
			continue;
		seq_printf(m, "0x%03x: 0x%016llx %s\n",
			   wsp_pci_regs[i].offset,
			   in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
			   wsp_pci_regs[i].name);
	}
	return 0;
}

static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wsp_pci_regs_show, inode->i_private);
}

static const struct file_operations wsp_pci_regs_fops = {
	.open = wsp_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int wsp_pci_reg_set(void *data, u64 val)
{
	out_be64((void __iomem *)data, val);
	return 0;
}

static int wsp_pci_reg_get(void *data, u64 *val)
{
	*val = in_be64((void __iomem *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");

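/* Error interrupt handler: latch, clear and dump the UTL, PHB, MMIO
 * and DMA error status and log registers, looping until every status
 * register reads back clean.
 */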
static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
	struct wsp_phb *phb = dev_id;
	struct pci_controller *hose = phb->hose;
	irqreturn_t handled = IRQ_NONE;
	struct wsp_pcie_err_log_data ed;

	pr_err("PCI: Error interrupt on %s (PHB %d)\n",
	       hose->dn->full_name, hose->global_number);
 again:
	memset(&ed, 0, sizeof(ed));

	/* Read and clear the UTL error statuses */
	ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
	if (ed.utl_sys_err)
		out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
	ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
	if (ed.utl_port_err)
		out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
	ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
	if (ed.utl_rc_err)
		out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

	/* Read and clear the PHB, MMIO and DMA error traps, capturing
	 * the log registers while the error is still latched
	 */
	ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
	if (ed.phb_err) {
		ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
		ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
		ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
	}
	ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
	if (ed.mmio_err) {
		ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
		ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
		ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
	}
	ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
	if (ed.dma_err) {
		ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
		ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
		ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
	}

	/* Dump everything we latched */
	if (ed.phb_err) {
		pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
		pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
		pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
		pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
	}
	if (ed.mmio_err) {
		pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
		pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
		pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
		pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
	}
	if (ed.dma_err) {
		pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
		pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
		pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
		pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
	}
	if (ed.utl_sys_err)
		pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
	if (ed.utl_port_err)
		pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
	if (ed.utl_rc_err)
		pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);

	/* If we latched anything, check again: more errors may have
	 * arrived while we were clearing the previous batch
	 */
	if (ed.dma_err || ed.mmio_err || ed.phb_err) {
		handled = IRQ_HANDLED;
		goto again;
	}
	return handled;
}

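/* Create the debugfs register entries and wire up the error interrupt
 * for one PHB
 */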
static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	int err_irq, i, rc;
	char fname[16];

	/* Create a debugfs directory for the PHB */
	sprintf(fname, "phb%d", phb->hose->global_number);
	phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);

	/* Expose the full register dump and the individual registers */
	if (phb->ddir) {
		struct dentry *d = debugfs_create_dir("regs", phb->ddir);
		char tmp[64];

		for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
			sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
				wsp_pci_regs[i].name);
			debugfs_create_file(tmp, 0600, d,
					    hose->cfg_data + wsp_pci_regs[i].offset,
					    &wsp_pci_reg_fops);
		}
		debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
	}

	/* Find the error interrupt, falling back to poking at the child
	 * P2P bridge's interrupt-map when the device-tree lacks it
	 */
	err_irq = irq_of_parse_and_map(hose->dn, 0);
	if (err_irq == 0)
		err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
	if (err_irq == 0) {
		pr_err("PCI: Failed to fetch error interrupt for %s\n",
		       hose->dn->full_name);
		return;
	}

	rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
	if (rc) {
		pr_err("PCI: Failed to request interrupt for %s\n",
		       hose->dn->full_name);
		/* Don't unmask the error interrupts without a handler */
		return;
	}

	/* Enable interrupt generation for all error sources */
	out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
}

/* Error reporting setup needs debugfs and interrupt facilities, so it
 * runs from an arch_initcall after all PHBs have been set up
 */
static int __init wsp_setup_pci_late(void)
{
	struct wsp_phb *phb;

	list_for_each_entry(phb, &wsp_phbs, all)
		wsp_setup_pci_err_reporting(phb);

	return 0;
}
arch_initcall(wsp_setup_pci_late);