1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/delay.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/bootmem.h>
20#include <linux/irq.h>
21#include <linux/io.h>
22#include <linux/msi.h>
23
24#include <asm/sections.h>
25#include <asm/io.h>
26#include <asm/prom.h>
27#include <asm/pci-bridge.h>
28#include <asm/machdep.h>
29#include <asm/msi_bitmap.h>
30#include <asm/ppc-pci.h>
31#include <asm/opal.h>
32#include <asm/iommu.h>
33#include <asm/tce.h>
34#include <asm/firmware.h>
35
36#include "powernv.h"
37#include "pci.h"
38
39
40#define PCI_RESET_DELAY_US 3000000
41
42#define cfg_dbg(fmt...) do { } while(0)
43
44
45#ifdef CONFIG_PCI_MSI
46static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
47{
48 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
49 struct pnv_phb *phb = hose->private_data;
50 struct pci_dn *pdn = pci_get_pdn(pdev);
51
52 if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
53 return -ENODEV;
54
55 return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
56}
57
/*
 * ppc_md.setup_msi_irqs hook: allocate and wire up an MSI for every
 * descriptor on @pdev's msi_list.
 *
 * Per entry: reserve a hardware MSI number from the PHB bitmap, map
 * it into the Linux virq space, have the PHB-specific msi_setup hook
 * compose the MSI message, then bind the descriptor and write the
 * message.  On failure the resources taken for the current entry are
 * released and the whole operation aborts (earlier entries are left
 * for the teardown path to clean up).
 *
 * Returns 0 on success or a negative errno.
 */
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb))
		return -ENODEV;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/* 32-bit MSIs require a 32-bit MSI window on the PHB */
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		/* Reserve one hardware MSI number from the PHB bitmap */
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		/* Map the hardware irq into the Linux virq space */
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		/* PHB-specific hook fills in the MSI address/data in msg */
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		write_msi_msg(virq, &msg);
	}
	return 0;
}
103
/*
 * ppc_md.teardown_msi_irqs hook: undo pnv_setup_msi_irqs().
 *
 * For every mapped MSI on @pdev: detach the descriptor, return the
 * hardware MSI number to the PHB bitmap and dispose of the Linux
 * irq mapping.
 */
static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		/* Skip descriptors that never got an irq assigned */
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		/* virq_to_hw() recovers the hwirq; rebase by msi_base */
		msi_bitmap_free_hwirqs(&phb->msi_bmp,
			virq_to_hw(entry->irq) - phb->msi_base, 1);
		irq_dispose_mapping(entry->irq);
	}
}
122#endif
123
/*
 * Pretty-print the P7IOC diagnostic blob captured in phb->diag.p7ioc.
 *
 * Dumps the bridge/port/root-complex status registers, the AER-style
 * error status and TLP header log, the P7IOC-level FIR/error logs,
 * and finally the per-PE PESTA/PESTB pairs (only entries with the
 * top bit set in either register are shown).
 *
 * Caller is expected to have filled phb->diag (see
 * pnv_pci_handle_eeh_config, which holds phb->lock around it).
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
{
	struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
	int i;

	pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);

	pr_info("  brdgCtl              = 0x%08x\n", data->brdgCtl);

	pr_info("  portStatusReg        = 0x%08x\n", data->portStatusReg);
	pr_info("  rootCmplxStatus      = 0x%08x\n", data->rootCmplxStatus);
	pr_info("  busAgentStatus       = 0x%08x\n", data->busAgentStatus);

	pr_info("  deviceStatus         = 0x%08x\n", data->deviceStatus);
	pr_info("  slotStatus           = 0x%08x\n", data->slotStatus);
	pr_info("  linkStatus           = 0x%08x\n", data->linkStatus);
	pr_info("  devCmdStatus         = 0x%08x\n", data->devCmdStatus);
	pr_info("  devSecStatus         = 0x%08x\n", data->devSecStatus);

	pr_info("  rootErrorStatus      = 0x%08x\n", data->rootErrorStatus);
	pr_info("  uncorrErrorStatus    = 0x%08x\n", data->uncorrErrorStatus);
	pr_info("  corrErrorStatus      = 0x%08x\n", data->corrErrorStatus);
	pr_info("  tlpHdr1              = 0x%08x\n", data->tlpHdr1);
	pr_info("  tlpHdr2              = 0x%08x\n", data->tlpHdr2);
	pr_info("  tlpHdr3              = 0x%08x\n", data->tlpHdr3);
	pr_info("  tlpHdr4              = 0x%08x\n", data->tlpHdr4);
	pr_info("  sourceId             = 0x%08x\n", data->sourceId);

	pr_info("  errorClass           = 0x%016llx\n", data->errorClass);
	pr_info("  correlator           = 0x%016llx\n", data->correlator);

	pr_info("  p7iocPlssr           = 0x%016llx\n", data->p7iocPlssr);
	pr_info("  p7iocCsr             = 0x%016llx\n", data->p7iocCsr);
	pr_info("  lemFir               = 0x%016llx\n", data->lemFir);
	pr_info("  lemErrorMask         = 0x%016llx\n", data->lemErrorMask);
	pr_info("  lemWOF               = 0x%016llx\n", data->lemWOF);
	pr_info("  phbErrorStatus       = 0x%016llx\n", data->phbErrorStatus);
	pr_info("  phbFirstErrorStatus  = 0x%016llx\n", data->phbFirstErrorStatus);
	pr_info("  phbErrorLog0         = 0x%016llx\n", data->phbErrorLog0);
	pr_info("  phbErrorLog1         = 0x%016llx\n", data->phbErrorLog1);
	pr_info("  mmioErrorStatus      = 0x%016llx\n", data->mmioErrorStatus);
	pr_info("  mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
	pr_info("  mmioErrorLog0        = 0x%016llx\n", data->mmioErrorLog0);
	pr_info("  mmioErrorLog1        = 0x%016llx\n", data->mmioErrorLog1);
	pr_info("  dma0ErrorStatus      = 0x%016llx\n", data->dma0ErrorStatus);
	pr_info("  dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
	pr_info("  dma0ErrorLog0        = 0x%016llx\n", data->dma0ErrorLog0);
	pr_info("  dma0ErrorLog1        = 0x%016llx\n", data->dma0ErrorLog1);
	pr_info("  dma1ErrorStatus      = 0x%016llx\n", data->dma1ErrorStatus);
	pr_info("  dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
	pr_info("  dma1ErrorLog0        = 0x%016llx\n", data->dma1ErrorLog0);
	pr_info("  dma1ErrorLog1        = 0x%016llx\n", data->dma1ErrorLog1);

	/* Only dump PEST entries whose A or B register has bit 63 set */
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((data->pestA[i] >> 63) == 0 &&
		    (data->pestB[i] >> 63) == 0)
			continue;
		pr_info("  PE[%3d] PESTA        = 0x%016llx\n", i, data->pestA[i]);
		pr_info("          PESTB        = 0x%016llx\n", data->pestB[i]);
	}
}
185
186static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
187{
188 switch(phb->model) {
189 case PNV_PHB_MODEL_P7IOC:
190 pnv_pci_dump_p7ioc_diag_data(phb);
191 break;
192 default:
193 pr_warning("PCI %d: Can't decode this PHB diag data\n",
194 phb->hose->global_number);
195 }
196}
197
/*
 * React to an EEH freeze detected on @pe_no of @phb.
 *
 * Snapshot the PHB diagnostic data first (while the error state is
 * still intact), then ask OPAL to clear the freeze.  If the clear
 * fails, dump whatever diagnostics were captured to aid debugging.
 * Serialized under phb->lock since phb->diag is a shared buffer.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	/*
	 * NOTE(review): rc is unsigned long but holds OPAL return codes
	 * printed with %ld below — works on ppc64, worth confirming.
	 */
	unsigned long flags, rc;
	int has_diag;

	spin_lock_irqsave(&phb->lock, flags);

	/* Capture the diag blob before clearing the freeze */
	rc = opal_pci_get_phb_diag_data(phb->opal_id, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	if (rc) {
		pr_warning("PCI %d: Failed to clear EEH freeze state"
			   " for PE#%d, err %ld\n",
			   phb->hose->global_number, pe_no, rc);

		/*
		 * The freeze could not be cleared: dump the diag data
		 * captured above, if any, since the PE is stuck frozen.
		 */
		if (has_diag)
			pnv_pci_dump_phb_diag_data(phb);
		else
			pr_warning("PCI %d: No diag data available\n",
				   phb->hose->global_number);
	}

	spin_unlock_irqrestore(&phb->lock, flags);
}
229
230static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
231 u32 bdfn)
232{
233 s64 rc;
234 u8 fstate;
235 u16 pcierr;
236 u32 pe_no;
237
238
239 pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;
240
241
242 rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
243 NULL);
244 if (rc) {
245 pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
246 " err %lld\n", phb->hose->global_number, pe_no, rc);
247 return;
248 }
249 cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
250 bdfn, pe_no, fstate);
251 if (fstate != 0)
252 pnv_pci_handle_eeh_config(phb, pe_no);
253}
254
255static int pnv_pci_read_config(struct pci_bus *bus,
256 unsigned int devfn,
257 int where, int size, u32 *val)
258{
259 struct pci_controller *hose = pci_bus_to_host(bus);
260 struct pnv_phb *phb = hose->private_data;
261 u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
262 s64 rc;
263
264 if (hose == NULL)
265 return PCIBIOS_DEVICE_NOT_FOUND;
266
267 switch (size) {
268 case 1: {
269 u8 v8;
270 rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
271 *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
272 break;
273 }
274 case 2: {
275 u16 v16;
276 rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
277 &v16);
278 *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
279 break;
280 }
281 case 4: {
282 u32 v32;
283 rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
284 *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
285 break;
286 }
287 default:
288 return PCIBIOS_FUNC_NOT_SUPPORTED;
289 }
290 cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
291 bus->number, devfn, where, size, *val);
292
293
294 pnv_pci_config_check_eeh(phb, bus, bdfn);
295
296 return PCIBIOS_SUCCESSFUL;
297}
298
299static int pnv_pci_write_config(struct pci_bus *bus,
300 unsigned int devfn,
301 int where, int size, u32 val)
302{
303 struct pci_controller *hose = pci_bus_to_host(bus);
304 struct pnv_phb *phb = hose->private_data;
305 u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
306
307 if (hose == NULL)
308 return PCIBIOS_DEVICE_NOT_FOUND;
309
310 cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
311 bus->number, devfn, where, size, val);
312 switch (size) {
313 case 1:
314 opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
315 break;
316 case 2:
317 opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
318 break;
319 case 4:
320 opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
321 break;
322 default:
323 return PCIBIOS_FUNC_NOT_SUPPORTED;
324 }
325
326 pnv_pci_config_check_eeh(phb, bus, bdfn);
327
328 return PCIBIOS_SUCCESSFUL;
329}
330
/* Config space accessors shared by all PowerNV PHBs */
struct pci_ops pnv_pci_ops = {
	.read = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
335
/*
 * ppc_md.tce_build hook: populate @npages consecutive TCE entries
 * starting at table index @index so they translate to the physical
 * pages backing @uaddr.
 *
 * Entries always grant TCE_PCI_READ; TCE_PCI_WRITE is added for any
 * direction other than DMA_TO_DEVICE.  Always returns 0.
 */
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep, *tces;
	u64 rpn;

	proto_tce = TCE_PCI_READ;

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* First entry of the range; index is rebased by it_offset */
	tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
	rpn = __pa(uaddr) >> TCE_SHIFT;

	while (npages--)
		*(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);

	/*
	 * Some PHBs cache TCEs and require an explicit software
	 * invalidation of the updated range after a create.
	 */
	if (tbl->it_type & TCE_PCI_SWINV_CREATE)
		pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);

	return 0;
}
364
365static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
366{
367 u64 *tcep, *tces;
368
369 tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
370
371 while (npages--)
372 *(tcep++) = 0;
373
374 if (tbl->it_type & TCE_PCI_SWINV_FREE)
375 pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
376}
377
378static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
379{
380 return ((u64 *)tbl->it_base)[index - tbl->it_offset];
381}
382
383void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
384 void *tce_mem, u64 tce_size,
385 u64 dma_offset)
386{
387 tbl->it_blocksize = 16;
388 tbl->it_base = (unsigned long)tce_mem;
389 tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
390 tbl->it_index = 0;
391 tbl->it_size = tce_size >> 3;
392 tbl->it_busno = 0;
393 tbl->it_type = TCE_PCI;
394}
395
/*
 * Build an iommu_table for @hose from the "linux,tce-*" properties
 * left in the device tree by earlier firmware/boot stages.
 *
 * Returns the newly allocated table, or NULL if the properties are
 * missing or the allocation fails.
 */
static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
	struct iommu_table *tbl;
	const __be64 *basep, *swinvp;
	const __be32 *sizep;

	basep = of_get_property(hose->dn, "linux,tce-base", NULL);
	sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		pr_err("PCI: %s has missing tce entries !\n",
		       hose->dn->full_name);
		return NULL;
	}
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
	if (WARN_ON(!tbl))
		return NULL;
	pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
				  be32_to_cpup(sizep), 0);
	iommu_init_table(tbl, hose->node);

	/* Optional: this PHB needs software TCE cache invalidation */
	swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
				 NULL);
	if (swinvp) {
		/*
		 * NOTE(review): swinvp entries are __be64 but are used
		 * without be64_to_cpu(), unlike basep/sizep above.  A
		 * no-op on big-endian ppc, but worth confirming.
		 */
		tbl->it_busno = swinvp[1];
		tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	return tbl;
}
426
427static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
428 struct pci_dev *pdev)
429{
430 struct device_node *np = pci_bus_to_OF_node(hose->bus);
431 struct pci_dn *pdn;
432
433 if (np == NULL)
434 return;
435 pdn = PCI_DN(np);
436 if (!pdn->iommu_table)
437 pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
438 if (!pdn->iommu_table)
439 return;
440 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
441}
442
443static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
444{
445 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
446 struct pnv_phb *phb = hose->private_data;
447
448
449
450
451 if (phb && phb->dma_dev_setup)
452 phb->dma_dev_setup(phb, pdev);
453 else
454 pnv_pci_dma_fallback_setup(hose, pdev);
455}
456
457void pnv_pci_shutdown(void)
458{
459 struct pci_controller *hose;
460
461 list_for_each_entry(hose, &hose_list, list_node) {
462 struct pnv_phb *phb = hose->private_data;
463
464 if (phb && phb->shutdown)
465 phb->shutdown(phb);
466 }
467}
468
469
/*
 * Early fixup for the IBM P7IOC root complex (device id 0x3b9):
 * force its class code to PCI-PCI bridge so the PCI core treats it
 * as a bridge.
 */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
475
476static int pnv_pci_probe_mode(struct pci_bus *bus)
477{
478 struct pci_controller *hose = pci_bus_to_host(bus);
479 const __be64 *tstamp;
480 u64 now, target;
481
482
483
484
485
486 if (bus != hose->bus)
487 return PCI_PROBE_NORMAL;
488 tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
489 if (!tstamp || !*tstamp)
490 return PCI_PROBE_NORMAL;
491
492 now = mftb() / tb_ticks_per_usec;
493 target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
494 + PCI_RESET_DELAY_US;
495
496 pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
497 hose->global_number, target, now);
498
499 if (now < target)
500 msleep((target - now + 999) / 1000);
501
502 return PCI_PROBE_NORMAL;
503}
504
/*
 * Top-level PowerNV PCI initialization, run once at boot.
 *
 * Discovers all PHBs — via RTAS-style probing when no OPAL firmware
 * is present (and CONFIG_PPC_POWERNV_RTAS is built), otherwise from
 * the OPAL device tree (IODA hubs, p5ioc2 as a fallback, plus IODA2
 * PHBs) — then installs the PowerNV DMA/TCE, probe-mode and MSI
 * callbacks into ppc_md.
 */
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* No OPAL: fall back to RTAS-based PHB discovery if available */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
#ifdef CONFIG_PPC_POWERNV_RTAS
		init_pci_config_tokens();
		find_and_init_phbs();
#endif
	}

	else {
		int found_ioda = 0;

		/* Look for IODA hubs first */
		for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
			pnv_pci_init_ioda_hub(np);
			found_ioda = 1;
		}

		/* Probe p5ioc2 IO-Hubs only when no IODA hub was found */
		if (!found_ioda)
			for_each_compatible_node(np, NULL, "ibm,p5ioc2")
				pnv_pci_init_p5ioc2_hub(np);

		/* IODA2 PHBs appear as their own nodes, not under a hub */
		for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
			pnv_pci_init_ioda2_phb(np);
	}

	/* Link OF device nodes with the PHBs just created */
	pci_devs_phb_init();

	/* Install DMA/TCE and probe-mode hooks */
	ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
	ppc_md.tce_build = pnv_tce_build;
	ppc_md.tce_free = pnv_tce_free;
	ppc_md.tce_get = pnv_tce_get;
	ppc_md.pci_probe_mode = pnv_pci_probe_mode;
	set_pci_dma_ops(&dma_iommu_ops);

	/* Install MSI hooks when MSI support is built in */
#ifdef CONFIG_PCI_MSI
	ppc_md.msi_check_device = pnv_msi_check_device;
	ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}
559