/*
 * PCI / PCI-X / PCI-Express support for 4xx parts
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <mm/mmu_decl.h>

#include "ppc4xx_pci.h"
static int dma_offset_set;

#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))

#define RES_TO_U32_LOW(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))

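/*
 * Identify a 440SPe revision A chip from the PVR; the A and B revisions
 * get slightly different PCIe setup further down in this file.
 */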
static inline int ppc440spe_revA(void)
{
	if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
		return 1;
	else
		return 0;
}

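/*
 * PCI fixup for the 4xx host bridges: hide the host bridge BARs from
 * the resource allocator and flag the 440EPx/GRx bridges whose Memory
 * Read Multiple implementation is broken.
 */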
static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
{
	struct pci_controller *hose;
	int i;

	/* Check we are the host bridge */
	if (dev->devfn != 0 || dev->bus->self != NULL)
		return;

	hose = pci_bus_to_host(dev->bus);
	if (hose == NULL)
		return;

	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
		return;

	if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
	    of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
	}

	/* Hide the PCI host BARs from the kernel as their content doesn't
	 * fit well in the resource management
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dev->resource[i].start = dev->resource[i].end = 0;
		dev->resource[i].flags = 0;
	}

	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
	       pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);

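/*
 * Parse the "dma-ranges" property of a host bridge node and derive the
 * single inbound DMA window (base, size, prefetchability) that the 4xx
 * bridges support, checking it against the amount of system memory.
 */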
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
					  void __iomem *reg,
					  struct resource *res)
{
	u64 size;
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(hose->dn);
	int np = pna + 5;

	/* Default window: the first 2GB */
	res->start = 0;
	size = 0x80000000;
	res->end = size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Get dma-ranges property */
	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
	if (ranges == NULL)
		goto out;

	/* Walk it */
	while ((rlen -= np * 4) >= 0) {
		u32 pci_space = ranges[0];
		u64 pci_addr = of_read_number(ranges + 1, 2);
		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* We only care about memory ranges */
		if ((pci_space & 0x03000000) != 0x02000000)
			continue;

		/* We currently only support memory at 0, and pci_addr
		 * within 32 bits space
		 */
		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
			printk(KERN_WARNING "%s: Ignored unsupported dma range"
			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
			       hose->dn->full_name,
			       pci_addr, pci_addr + size - 1, cpu_addr);
			continue;
		}

		/* Check if not prefetchable */
		if (!(pci_space & 0x40000000))
			res->flags &= ~IORESOURCE_PREFETCH;

		/* Use that range */
		res->start = pci_addr;
		/* Beware of 32 bits resources */
		if (sizeof(resource_size_t) == sizeof(u32) &&
		    (pci_addr + size) > 0x100000000ull)
			res->end = 0xffffffff;
		else
			res->end = res->start + size - 1;
		break;
	}

	/* We only support one global DMA offset */
	if (dma_offset_set && pci_dram_offset != res->start) {
		printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that all of memory fits in the window as we don't support
	 * DMA bounce buffers
	 */
	if (size < total_memory) {
		printk(KERN_ERR "%s: dma-ranges too small "
		       "(size=%llx total_memory=%llx)\n",
		       hose->dn->full_name, size, (u64)total_memory);
		return -ENXIO;
	}

	/* Check we are a power of 2 size and that base is a multiple of size */
	if ((size & (size - 1)) != 0 ||
	    (res->start & (size - 1)) != 0) {
		printk(KERN_ERR "%s: dma-ranges unaligned\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that we are fully contained within 32 bits space if we are
	 * not running on a 460sx or 476fpe which have 64 bit BAR support
	 */
	if (res->end > 0xffffffff &&
	    !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
	      || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
		printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
		       hose->dn->full_name);
		return -ENXIO;
	}
 out:
	dma_offset_set = 1;
	pci_dram_offset = res->start;
	hose->dma_window_base_cur = res->start;
	hose->dma_window_size = resource_size(res);

	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
	       pci_dram_offset);
	printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
	       (unsigned long long)hose->dma_window_base_cur);
	printk(KERN_INFO "DMA window size 0x%016llx\n",
	       (unsigned long long)hose->dma_window_size);
	return 0;
}

/*
 * 4xx PCI 2.x part
 */

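/*
 * Program one PLB-to-PCI outbound window (PMM) of the PCI 2.x bridge:
 * local PLB address, PCI address and a power-of-two size of at least 4K.
 */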
static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
					   void __iomem *reg,
					   u64 plb_addr,
					   u64 pci_addr,
					   u64 size,
					   unsigned int flags,
					   int index)
{
	u32 ma, pcila, pciha;

	/* The PCI 2.x cell only lets us program the low 32 bits of the
	 * incoming PLB address; crop the top bits here and rely on the
	 * device-tree being correct for the part in use.
	 */
	plb_addr &= 0xffffffffull;

	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%s: Resource out of range\n",
		       hose->dn->full_name);
		return -1;
	}
	ma = (0xffffffffu << ilog2(size)) | 1;
	if (flags & IORESOURCE_PREFETCH)
		ma |= 2;

	pciha = RES_TO_U32_HIGH(pci_addr);
	pcila = RES_TO_U32_LOW(pci_addr);

	writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
	writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
	writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
	writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));

	return 0;
}

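/*
 * Walk the host bridge memory resources and program them into the
 * outbound PMM windows, adding a window for the legacy ISA memory hole
 * if none of the resources already covers it.
 */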
static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
					     void __iomem *reg)
{
	int i, j, found_isa_hole = 0;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 2) {
			printk(KERN_WARNING "%s: Too many ranges\n",
			       hose->dn->full_name);
			break;
		}

		/* Configure the resource */
		if (ppc4xx_setup_one_pci_PMM(hose, reg,
					     res->start,
					     res->start - offset,
					     resource_size(res),
					     res->flags,
					     j) == 0) {
			j++;

			/* If the resource PCI address is 0 then we have our
			 * ISA memory hole
			 */
			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	/* Handle ISA memory hole if not already covered */
	if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
					     hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
			       hose->dn->full_name);
}

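/*
 * Program the inbound PTM window so the whole DMA window (system RAM)
 * is visible from PCI, point BAR1 of the host bridge at it and enable
 * memory decoding and bus mastering.
 */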
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
					     void __iomem *reg,
					     const struct resource *res)
{
	resource_size_t size = resource_size(res);
	u32 sa;

	/* Calculate window size */
	sa = (0xffffffffu << ilog2(size)) | 1;
	sa |= 0x1;

	/* RAM is always at 0 local for now */
	writel(0, reg + PCIL0_PTM1LA);
	writel(sa, reg + PCIL0_PTM1MS);

	/* Map on PCI side */
	early_write_config_dword(hose, hose->first_busno, 0,
				 PCI_BASE_ADDRESS_1, res->start);
	early_write_config_dword(hose, hose->first_busno, 0,
				 PCI_BASE_ADDRESS_2, 0x00000000);
	early_write_config_word(hose, hose->first_busno, 0,
				PCI_COMMAND, 0x0006);
}

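/*
 * Probe one "ibm,plb-pci" host bridge: map its registers, set up
 * indirect config space access, clear all windows and program the
 * outbound (PMM) and inbound (PTM) mappings from the device tree.
 */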
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int primary = 0;

	/* Check if device is enabled */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "%s: Port disabled via device-tree\n",
		       np->full_name);
		return;
	}

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s: Can't get PCI config register base !",
		       np->full_name);
		return;
	}

	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%s: Can't get PCI internal register base !",
		       np->full_name);
		return;
	}

	/* Check if primary bridge */
	if (of_get_property(np, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows */
	writel(0, reg + PCIL0_PMM0MA);
	writel(0, reg + PCIL0_PMM1MA);
	writel(0, reg + PCIL0_PMM2MA);
	writel(0, reg + PCIL0_PTM1MS);
	writel(0, reg + PCIL0_PTM2MS);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pci_PMMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}

/*
 * 4xx PCI-X part
 */

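/*
 * Program one PLB-to-PCI-X outbound window (POM): 64-bit local and PCI
 * addresses, power-of-two size of at least 4K; two windows are available.
 */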
static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
					    void __iomem *reg,
					    u64 plb_addr,
					    u64 pci_addr,
					    u64 size,
					    unsigned int flags,
					    int index)
{
	u32 lah, lal, pciah, pcial, sa;

	if (!is_power_of_2(size) || size < 0x1000 ||
	    (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%s: Resource out of range\n",
		       hose->dn->full_name);
		return -1;
	}

	lah = RES_TO_U32_HIGH(plb_addr);
	lal = RES_TO_U32_LOW(plb_addr);
	pciah = RES_TO_U32_HIGH(pci_addr);
	pcial = RES_TO_U32_LOW(pci_addr);
	sa = (0xffffffffu << ilog2(size)) | 0x1;

	if (index == 0) {
		writel(lah, reg + PCIX0_POM0LAH);
		writel(lal, reg + PCIX0_POM0LAL);
		writel(pciah, reg + PCIX0_POM0PCIAH);
		writel(pcial, reg + PCIX0_POM0PCIAL);
		writel(sa, reg + PCIX0_POM0SA);
	} else {
		writel(lah, reg + PCIX0_POM1LAH);
		writel(lal, reg + PCIX0_POM1LAL);
		writel(pciah, reg + PCIX0_POM1PCIAH);
		writel(pcial, reg + PCIX0_POM1PCIAL);
		writel(sa, reg + PCIX0_POM1SA);
	}

	return 0;
}

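/*
 * Walk the host bridge memory resources and program them into the two
 * PCI-X outbound windows, adding the legacy ISA memory hole if needed.
 */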
static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
					      void __iomem *reg)
{
	int i, j, found_isa_hole = 0;

	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%s: Too many ranges\n",
			       hose->dn->full_name);
			break;
		}

		if (ppc4xx_setup_one_pcix_POM(hose, reg,
					      res->start,
					      res->start - offset,
					      resource_size(res),
					      res->flags,
					      j) == 0) {
			j++;

			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
					      hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
			       hose->dn->full_name);
}

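/*
 * Program inbound PIM0 to expose system RAM at the PCI address of the
 * DMA window, optionally using the large (64-bit) PIM variant and the
 * MSI hole, then enable memory decoding and bus mastering.
 */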
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
					      void __iomem *reg,
					      const struct resource *res,
					      int big_pim,
					      int enable_msi_hole)
{
	resource_size_t size = resource_size(res);
	u32 sa;

	writel(0x00000000, reg + PCIX0_PIM0LAH);
	writel(0x00000000, reg + PCIX0_PIM0LAL);

	sa = (0xffffffffu << ilog2(size)) | 1;
	sa |= 0x1;
	if (res->flags & IORESOURCE_PREFETCH)
		sa |= 0x2;
	if (enable_msi_hole)
		sa |= 0x4;
	writel(sa, reg + PCIX0_PIM0SA);
	if (big_pim)
		writel(0xffffffff, reg + PCIX0_PIM0SAH);

	writel(0x00000000, reg + PCIX0_BAR0H);
	writel(res->start, reg + PCIX0_BAR0L);
	writew(0x0006, reg + PCIX0_COMMAND);
}

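/*
 * Probe one "ibm,plb-pcix" host bridge; mirrors the PCI 2.x probe but
 * also honours the "large-inbound-windows" and "enable-msi-hole"
 * device-tree properties.
 */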
static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int big_pim = 0, msi = 0, primary = 0;

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s: Can't get PCI-X config register base !",
		       np->full_name);
		return;
	}

	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
		       np->full_name);
		return;
	}

	/* Check if it supports large PIMs */
	if (of_get_property(np, "large-inbound-windows", NULL))
		big_pim = 1;

	/* Check if we should enable the MSI inbound hole */
	if (of_get_property(np, "enable-msi-hole", NULL))
		msi = 1;

	/* Check if primary bridge */
	if (of_get_property(np, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
			   PPC_INDIRECT_TYPE_SET_CFG_TYPE);

	/* Disable all windows */
	writel(0, reg + PCIX0_POM0SA);
	writel(0, reg + PCIX0_POM1SA);
	writel(0, reg + PCIX0_POM2SA);
	writel(0, reg + PCIX0_PIM0SA);
	writel(0, reg + PCIX0_PIM1SA);
	writel(0, reg + PCIX0_PIM2SA);
	if (big_pim) {
		writel(0, reg + PCIX0_PIM0SAH);
		writel(0, reg + PCIX0_PIM2SAH);
	}

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pcix_POMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}

#ifdef CONFIG_PPC4xx_PCI_EXPRESS

/*
 * 4xx PCI-Express part
 *
 * The supported cores are selected via the compatible property
 * (440SPe, 460EX, 460SX, APM821xx, 405EX, 476FPE/476GTR); anything
 * else is rejected as the various implementations differ subtly.
 */

#define MAX_PCIE_BUS_MAPPED	0x40

struct ppc4xx_pciex_port
{
	struct pci_controller	*hose;
	struct device_node	*node;
	unsigned int		index;
	int			endpoint;
	int			link;
	int			has_ibpre;
	unsigned int		sdr_base;
	dcr_host_t		dcrs;
	struct resource		cfg_space;
	struct resource		utl_regs;
	void __iomem		*utl_base;
};

static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;

struct ppc4xx_pciex_hwops
{
	bool want_sdr;
	int (*core_init)(struct device_node *np);
	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
	int (*setup_utl)(struct ppc4xx_pciex_port *port);
	void (*check_link)(struct ppc4xx_pciex_port *port);
};

static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;

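/*
 * Poll an SDR register of the port until the masked value matches or
 * the timeout (in milliseconds) expires; returns 0 on success, -1 on
 * timeout.
 */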
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
					   unsigned int sdr_offset,
					   unsigned int mask,
					   unsigned int value,
					   int timeout_ms)
{
	u32 val;

	while (timeout_ms--) {
		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
		if ((val & mask) == value) {
			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
				 port->index, sdr_offset, timeout_ms, val);
			return 0;
		}
		msleep(1);
	}
	return -1;
}

static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
{
	/* Wait for the port reset to complete */
	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
		       port->index);
		return -1;
	}
	return 0;
}

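/*
 * Check whether a device is present and the link trained by polling the
 * port's SDR status; a missing link is not fatal, it only causes config
 * space accesses to be filtered out later.
 */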
static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
{
	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);

	/* Check presence detect if supported, otherwise just wait for
	 * the link to come up unconditionally.
	 */
	if (!port->has_ibpre ||
	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
				      1 << 28, 1 << 28, 100)) {
		printk(KERN_INFO
		       "PCIE%d: Device detected, waiting for link...\n",
		       port->index);
		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
					     0x1000, 0x1000, 2000))
			printk(KERN_WARNING
			       "PCIE%d: Link up failed\n", port->index);
		else {
			printk(KERN_INFO
			       "PCIE%d: link is up !\n", port->index);
			port->link = 1;
		}
	} else
		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
}

732#ifdef CONFIG_44x
733
734
735static int __init ppc440spe_pciex_check_reset(struct device_node *np)
736{
737 u32 valPE0, valPE1, valPE2;
738 int err = 0;
739
740
741 if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
742
743
744
745
746
747
748 pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
749 mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
750 mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
751 mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
752 }
753
754 valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
755 valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
756 valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
757
758
759 if (!(valPE0 & 0x01000000) ||
760 !(valPE1 & 0x01000000) ||
761 !(valPE2 & 0x01000000)) {
762 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
763 err = -1;
764 }
765
766
767 if (!(valPE0 & 0x00010000) ||
768 !(valPE1 & 0x00010000) ||
769 !(valPE2 & 0x00010000)) {
770 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
771 err = -1;
772 }
773
774
775 if ((valPE0 & 0x00001000) ||
776 (valPE1 & 0x00001000) ||
777 (valPE2 & 0x00001000)) {
778 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
779 err = -1;
780 }
781
782
783 if ((valPE0 & 0x10000000) ||
784 (valPE1 & 0x10000000) ||
785 (valPE2 & 0x10000000)) {
786 printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
787 err = -1;
788 }
789
790
791 if ((valPE0 & 0x00100000) ||
792 (valPE1 & 0x00100000) ||
793 (valPE2 & 0x00100000)) {
794 printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
795 err = -1;
796 }
797
798
799 if ((valPE0 & 0x00000100) ||
800 (valPE1 & 0x00000100) ||
801 (valPE2 & 0x00000100)) {
802 printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
803 err = -1;
804 }
805
806 return err;
807}
808
809
810static int __init ppc440spe_pciex_core_init(struct device_node *np)
811{
812 int time_out = 20;
813
814
815 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
816
817
818 if (ppc440spe_pciex_check_reset(np))
819 return -ENXIO;
820
821 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
822 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
823 "failed (0x%08x)\n",
824 mfdcri(SDR0, PESDR0_PLLLCT2));
825 return -1;
826 }
827
828
829 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
830 udelay(3);
831
832 while (time_out) {
833 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
834 time_out--;
835 udelay(1);
836 } else
837 break;
838 }
839 if (!time_out) {
840 printk(KERN_INFO "PCIE: VCO output not locked\n");
841 return -1;
842 }
843
844 pr_debug("PCIE initialization OK\n");
845
846 return 3;
847}
848
849static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
850{
851 u32 val = 1 << 24;
852
853 if (port->endpoint)
854 val = PTYPE_LEGACY_ENDPOINT << 20;
855 else
856 val = PTYPE_ROOT_PORT << 20;
857
858 if (port->index == 0)
859 val |= LNKW_X8 << 12;
860 else
861 val |= LNKW_X4 << 12;
862
863 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
864 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
865 if (ppc440spe_revA())
866 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
867 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
868 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
869 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
870 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
871 if (port->index == 0) {
872 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
873 0x35000000);
874 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
875 0x35000000);
876 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
877 0x35000000);
878 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
879 0x35000000);
880 }
881 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
882 (1 << 24) | (1 << 16), 1 << 12);
883
884 return ppc4xx_pciex_port_reset_sdr(port);
885}
886
887static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
888{
889 return ppc440spe_pciex_init_port_hw(port);
890}
891
892static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
893{
894 int rc = ppc440spe_pciex_init_port_hw(port);
895
896 port->has_ibpre = 1;
897
898 return rc;
899}
900
901static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
902{
903
904 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
905
906
907
908
909 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
910 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
911 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
912 out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
913 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
914 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
915 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
916 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
917
918 return 0;
919}
920
921static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
922{
923
924 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
925
926 return 0;
927}
928
929static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
930{
931 .want_sdr = true,
932 .core_init = ppc440spe_pciex_core_init,
933 .port_init_hw = ppc440speA_pciex_init_port_hw,
934 .setup_utl = ppc440speA_pciex_init_utl,
935 .check_link = ppc4xx_pciex_check_link_sdr,
936};
937
938static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
939{
940 .want_sdr = true,
941 .core_init = ppc440spe_pciex_core_init,
942 .port_init_hw = ppc440speB_pciex_init_port_hw,
943 .setup_utl = ppc440speB_pciex_init_utl,
944 .check_link = ppc4xx_pciex_check_link_sdr,
945};
946
947static int __init ppc460ex_pciex_core_init(struct device_node *np)
948{
949
950 return 2;
951}
952
953static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
954{
955 u32 val;
956 u32 utlset1;
957
958 if (port->endpoint)
959 val = PTYPE_LEGACY_ENDPOINT << 20;
960 else
961 val = PTYPE_ROOT_PORT << 20;
962
963 if (port->index == 0) {
964 val |= LNKW_X1 << 12;
965 utlset1 = 0x20000000;
966 } else {
967 val |= LNKW_X4 << 12;
968 utlset1 = 0x20101101;
969 }
970
971 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
972 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
973 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
974
975 switch (port->index) {
976 case 0:
977 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
978 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
979 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
980
981 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
982 break;
983
984 case 1:
985 mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
986 mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
987 mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
988 mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
989 mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
990 mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
991 mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
992 mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
993 mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
994 mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
995 mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
996 mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
997
998 mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
999 break;
1000 }
1001
1002 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1003 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1004 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1005
1006
1007
1008 switch (port->index) {
1009 case 0:
1010 while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
1011 udelay(10);
1012 break;
1013 case 1:
1014 while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
1015 udelay(10);
1016 break;
1017 }
1018
1019 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1020 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1021 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1022 PESDRx_RCSSET_RSTPYN);
1023
1024 port->has_ibpre = 1;
1025
1026 return ppc4xx_pciex_port_reset_sdr(port);
1027}
1028
1029static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1030{
1031 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1032
1033
1034
1035
1036 out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
1037 out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
1038 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1039 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1040 out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
1041 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1042 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1043 out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
1044 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1045
1046 return 0;
1047}
1048
1049static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1050{
1051 .want_sdr = true,
1052 .core_init = ppc460ex_pciex_core_init,
1053 .port_init_hw = ppc460ex_pciex_init_port_hw,
1054 .setup_utl = ppc460ex_pciex_init_utl,
1055 .check_link = ppc4xx_pciex_check_link_sdr,
1056};
1057
1058static int __init apm821xx_pciex_core_init(struct device_node *np)
1059{
1060
1061 return 1;
1062}
1063
1064static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1065{
1066 u32 val;
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
1077 mdelay(10);
1078
1079 if (port->endpoint)
1080 val = PTYPE_LEGACY_ENDPOINT << 20;
1081 else
1082 val = PTYPE_ROOT_PORT << 20;
1083
1084 val |= LNKW_X1 << 12;
1085
1086 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
1087 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1088 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1089
1090 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
1091 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
1092 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
1093
1094 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
1095 mdelay(50);
1096 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
1097
1098 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1099 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1100 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1101
1102
1103 val = PESDR0_460EX_RSTSTA - port->sdr_base;
1104 if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
1105 printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
1106 return -EBUSY;
1107 } else {
1108 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1109 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1110 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1111 PESDRx_RCSSET_RSTPYN);
1112
1113 port->has_ibpre = 1;
1114 return 0;
1115 }
1116}
1117
1118static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
1119 .want_sdr = true,
1120 .core_init = apm821xx_pciex_core_init,
1121 .port_init_hw = apm821xx_pciex_init_port_hw,
1122 .setup_utl = ppc460ex_pciex_init_utl,
1123 .check_link = ppc4xx_pciex_check_link_sdr,
1124};
1125
1126static int __init ppc460sx_pciex_core_init(struct device_node *np)
1127{
1128
1129 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1130 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1131 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1132 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1133 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1134 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1135 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1136 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1137
1138 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1139 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1140 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1141 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1142
1143 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1144 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1145 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1146 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1147
1148
1149 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1150 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1151 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1152 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1153 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1154 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1155 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1156 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1157
1158 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1159 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1160 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1161 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1162
1163 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1164 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1165 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1166 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1167
1168
1169 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1170 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1171 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1172
1173
1174 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1175 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1176 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1177
1178
1179 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1180 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1181
1182 udelay(100);
1183
1184
1185 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1186
1187
1188 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1189 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1190 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1191 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1192 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1193 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1194
1195 udelay(100);
1196
1197
1198
1199
1200
1201 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1202 0x00000001)) {
1203 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1204 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1205 return 3;
1206 }
1207
1208 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1209 return 2;
1210}
1211
1212static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1213{
1214
1215 if (port->endpoint)
1216 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1217 0x01000000, 0);
1218 else
1219 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1220 0, 0x01000000);
1221
1222 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1223 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1224 PESDRx_RCSSET_RSTPYN);
1225
1226 port->has_ibpre = 1;
1227
1228 return ppc4xx_pciex_port_reset_sdr(port);
1229}
1230
1231static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1232{
1233
1234 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1235
1236 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1237 return 0;
1238}
1239
1240static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1241{
1242 void __iomem *mbase;
1243 int attempt = 50;
1244
1245 port->link = 0;
1246
1247 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1248 if (mbase == NULL) {
1249 printk(KERN_ERR "%s: Can't map internal config space !",
1250 port->node->full_name);
1251 goto done;
1252 }
1253
1254 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1255 & PECFG_460SX_DLLSTA_LINKUP))) {
1256 attempt--;
1257 mdelay(10);
1258 }
1259 if (attempt)
1260 port->link = 1;
1261done:
1262 iounmap(mbase);
1263
1264}
1265
1266static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
1267 .want_sdr = true,
1268 .core_init = ppc460sx_pciex_core_init,
1269 .port_init_hw = ppc460sx_pciex_init_port_hw,
1270 .setup_utl = ppc460sx_pciex_init_utl,
1271 .check_link = ppc460sx_pciex_check_link,
1272};
1273
1274#endif
1275
1276#ifdef CONFIG_40x
1277
1278static int __init ppc405ex_pciex_core_init(struct device_node *np)
1279{
1280
1281 return 2;
1282}
1283
1284static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
1285{
1286
1287 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
1288 msleep(1);
1289
1290
1291 if (port->endpoint)
1292 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
1293 else
1294 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
1295
1296
1297
1298 while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
1299 ;
1300
1301
1302 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
1303}
1304
1305static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1306{
1307 u32 val;
1308
1309 if (port->endpoint)
1310 val = PTYPE_LEGACY_ENDPOINT;
1311 else
1312 val = PTYPE_ROOT_PORT;
1313
1314 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1315 1 << 24 | val << 20 | LNKW_X1 << 12);
1316
1317 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1318 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1319 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
1320 mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
1321
1322
1323
1324
1325
1326
1327
1328
1329 val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
1330 if (!(val & 0x00001000))
1331 ppc405ex_pcie_phy_reset(port);
1332
1333 dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);
1334
1335 port->has_ibpre = 1;
1336
1337 return ppc4xx_pciex_port_reset_sdr(port);
1338}
1339
1340static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
1341{
1342 dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
1343
1344
1345
1346
1347 out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
1348 out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
1349 out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
1350 out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
1351 out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
1352 out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
1353 out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
1354 out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
1355
1356 out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
1357
1358 return 0;
1359}
1360
1361static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
1362{
1363 .want_sdr = true,
1364 .core_init = ppc405ex_pciex_core_init,
1365 .port_init_hw = ppc405ex_pciex_init_port_hw,
1366 .setup_utl = ppc405ex_pciex_init_utl,
1367 .check_link = ppc4xx_pciex_check_link_sdr,
1368};
1369
1370#endif
1371
1372#ifdef CONFIG_476FPE
1373static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
1374{
1375 return 4;
1376}
1377
1378static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1379{
1380 u32 timeout_ms = 20;
1381 u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1382 void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1383 0x1000);
1384
1385 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1386
1387 if (mbase == NULL) {
1388 printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1389 port->index);
1390 return;
1391 }
1392
1393 while (timeout_ms--) {
1394 val = in_le32(mbase + PECFG_TLDLP);
1395
1396 if ((val & mask) == mask)
1397 break;
1398 msleep(10);
1399 }
1400
1401 if (val & PECFG_TLDLP_PRESENT) {
1402 printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1403 port->link = 1;
1404 } else
1405 printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1406
1407 iounmap(mbase);
1408 return;
1409}
1410
1411static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
1412{
1413 .core_init = ppc_476fpe_pciex_core_init,
1414 .check_link = ppc_476fpe_pciex_check_link,
1415};
1416#endif

/* Check that the PCIe core has been initialized and, the first time
 * through, pick the SoC-specific hwops and allocate the ports array.
 */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
	static int core_init;
	int count = -ENODEV;

	if (core_init++)
		return 0;

#ifdef CONFIG_44x
	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
		if (ppc440spe_revA())
			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
		else
			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
	}
	if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
		ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
		ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
	if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
		ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
#endif
#ifdef CONFIG_40x
	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
#endif
#ifdef CONFIG_476FPE
	if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
		|| of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
		ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
#endif
	if (ppc4xx_pciex_hwops == NULL) {
		printk(KERN_WARNING "PCIE: unknown host type %s\n",
		       np->full_name);
		return -ENODEV;
	}

	count = ppc4xx_pciex_hwops->core_init(np);
	if (count > 0) {
		ppc4xx_pciex_ports =
		       kzalloc(count * sizeof(struct ppc4xx_pciex_port),
			       GFP_KERNEL);
		if (ppc4xx_pciex_ports) {
			ppc4xx_pciex_port_count = count;
			return 0;
		}
		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
		return -ENOMEM;
	}
	return -ENODEV;
}

static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
	/* Map the external config space based on the "reg" property */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
		  RES_TO_U32_HIGH(port->cfg_space.start));
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
		  RES_TO_U32_LOW(port->cfg_space.start));

	/* 512M of configuration space */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);

	/* Map the UTL registers based on the "reg" property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
		  RES_TO_U32_HIGH(port->utl_regs.start));
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
		  RES_TO_U32_LOW(port->utl_regs.start));

	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);

	/* Disable all other outbound windows */
	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
	int rc = 0;

	/* Init HW */
	if (ppc4xx_pciex_hwops->port_init_hw)
		rc = ppc4xx_pciex_hwops->port_init_hw(port);
	if (rc != 0)
		return rc;

	/* Initialize the DCR based address mappings for this port */
	ppc4xx_pciex_port_init_mapping(port);

	if (ppc4xx_pciex_hwops->check_link)
		ppc4xx_pciex_hwops->check_link(port);

	/* Map UTL registers */
	port->utl_base = ioremap(port->utl_regs.start, 0x100);
	BUG_ON(port->utl_base == NULL);

	/* Setup UTL registers */
	if (ppc4xx_pciex_hwops->setup_utl)
		ppc4xx_pciex_hwops->setup_utl(port);

	/* Check for VC0 active or PLL locked and assert RDY */
	if (port->sdr_base) {
		if (of_device_is_compatible(port->node,
					    "ibm,plb-pciex-460sx")) {
			if (port->link && ppc4xx_pciex_wait_on_sdr(port,
					PESDRn_RCSSTS,
					1 << 12, 1 << 12, 5000)) {
				printk(KERN_INFO "PCIE%d: PLL not locked\n",
				       port->index);
				port->link = 0;
			}
		} else if (port->link &&
			   ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
						    1 << 16, 1 << 16, 5000)) {
			printk(KERN_INFO "PCIE%d: VC0 not active\n",
			       port->index);
			port->link = 0;
		}

		dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
	}

	msleep(100);

	return 0;
}

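/*
 * Validate that a config cycle for bus/devfn makes sense for this port:
 * reject out-of-range buses, anything past device/function 0 on the root
 * and first downstream bus, and everything downstream when there is no
 * link.
 */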
static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
				     struct pci_bus *bus,
				     unsigned int devfn)
{
	static int message;

	/* Endpoint can not generate upstream(remote) config cycles */
	if (port->endpoint && bus->number != port->hose->first_busno)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check we are within the mapped range */
	if (bus->number > port->hose->last_busno) {
		if (!message) {
			printk(KERN_WARNING "Warning! Probing bus %u"
			       " out of range !\n", bus->number);
			message++;
		}
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* The root complex has only one device / function */
	if (bus->number == port->hose->first_busno && devfn != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* The other side of the RC has only one device as well */
	if (bus->number == (port->hose->first_busno + 1) &&
	    PCI_SLOT(devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check if we have a link */
	if ((bus->number != port->hose->first_busno) && !port->link)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
						  struct pci_bus *bus,
						  unsigned int devfn)
{
	int relbus;

	/* The root complex config space is mapped separately from the
	 * downstream buses, which get 1MB of config space each.
	 */
	if (bus->number == port->hose->first_busno)
		return (void __iomem *)port->hose->cfg_addr;

	relbus = bus->number - (port->hose->first_busno + 1);
	return (void __iomem *)port->hose->cfg_data +
		((relbus << 20) | (devfn << 12));
}

static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	BUG_ON(hose != port->hose);

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Reading the configuration space of a non-existing device can
	 * generate transaction errors; suppress the resulting machine
	 * checks for the duration of the access.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	/* Make sure no CRS is recorded */
	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);

	switch (len) {
	case 1:
		*val = in_8((u8 *)(addr + offset));
		break;
	case 2:
		*val = in_le16((u16 *)(addr + offset));
		break;
	default:
		*val = in_le32((u32 *)(addr + offset));
		break;
	}

	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, *val);

	/* Check for CRS (Configuration Request Retry Status) */
	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
		pr_debug("Got CRS !\n");
		if (len != 4 || offset != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
		*val = 0xffff0001;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}

static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Writing the configuration space of a non-existing device can
	 * generate transaction errors; suppress the resulting machine
	 * checks for the duration of the access.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, val);

	switch (len) {
	case 1:
		out_8((u8 *)(addr + offset), val);
		break;
	case 2:
		out_le16((u16 *)(addr + offset), val);
		break;
	default:
		out_le32((u32 *)(addr + offset), val);
		break;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops ppc4xx_pciex_pci_ops =
{
	.read  = ppc4xx_pciex_read_config,
	.write = ppc4xx_pciex_write_config,
};

1718static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1719 struct pci_controller *hose,
1720 void __iomem *mbase,
1721 u64 plb_addr,
1722 u64 pci_addr,
1723 u64 size,
1724 unsigned int flags,
1725 int index)
1726{
1727 u32 lah, lal, pciah, pcial, sa;
1728
1729 if (!is_power_of_2(size) ||
1730 (index < 2 && size < 0x100000) ||
1731 (index == 2 && size < 0x100) ||
1732 (plb_addr & (size - 1)) != 0) {
1733 printk(KERN_WARNING "%s: Resource out of range\n",
1734 hose->dn->full_name);
1735 return -1;
1736 }
1737
1738
1739 lah = RES_TO_U32_HIGH(plb_addr);
1740 lal = RES_TO_U32_LOW(plb_addr);
1741 pciah = RES_TO_U32_HIGH(pci_addr);
1742 pcial = RES_TO_U32_LOW(pci_addr);
1743 sa = (0xffffffffu << ilog2(size)) | 0x1;
1744
1745
1746 switch (index) {
1747 case 0:
1748 out_le32(mbase + PECFG_POM0LAH, pciah);
1749 out_le32(mbase + PECFG_POM0LAL, pcial);
1750 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1751 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1752 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1753
1754 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1755 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1756 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1757 | DCRO_PEGPL_OMRxMSKL_VAL);
1758 else if (of_device_is_compatible(
1759 port->node, "ibm,plb-pciex-476fpe") ||
1760 of_device_is_compatible(
1761 port->node, "ibm,plb-pciex-476gtr"))
1762 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1763 sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
1764 | DCRO_PEGPL_OMRxMSKL_VAL);
1765 else
1766 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1767 sa | DCRO_PEGPL_OMR1MSKL_UOT
1768 | DCRO_PEGPL_OMRxMSKL_VAL);
1769 break;
1770 case 1:
1771 out_le32(mbase + PECFG_POM1LAH, pciah);
1772 out_le32(mbase + PECFG_POM1LAL, pcial);
1773 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1774 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1775 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1776 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1777 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1778 break;
1779 case 2:
1780 out_le32(mbase + PECFG_POM2LAH, pciah);
1781 out_le32(mbase + PECFG_POM2LAL, pcial);
1782 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1783 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1784 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1785
1786 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1787 sa | DCRO_PEGPL_OMR3MSKL_IO
1788 | DCRO_PEGPL_OMRxMSKL_VAL);
1789 break;
1790 }
1791
1792 return 0;
1793}
1794
1795static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1796 struct pci_controller *hose,
1797 void __iomem *mbase)
1798{
1799 int i, j, found_isa_hole = 0;
1800
1801
1802 for (i = j = 0; i < 3; i++) {
1803 struct resource *res = &hose->mem_resources[i];
1804 resource_size_t offset = hose->mem_offset[i];
1805
1806
1807 if (!(res->flags & IORESOURCE_MEM))
1808 continue;
1809 if (j > 1) {
1810 printk(KERN_WARNING "%s: Too many ranges\n",
1811 port->node->full_name);
1812 break;
1813 }
1814
1815
1816 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1817 res->start,
1818 res->start - offset,
1819 resource_size(res),
1820 res->flags,
1821 j) == 0) {
1822 j++;
1823
1824
1825
1826
1827 if (res->start == offset)
1828 found_isa_hole = 1;
1829 }
1830 }
1831
1832
1833 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1834 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1835 hose->isa_mem_phys, 0,
1836 hose->isa_mem_size, 0, j) == 0)
1837 printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
1838 hose->dn->full_name);
1839
1840
1841
1842
1843 if (hose->io_resource.flags & IORESOURCE_IO)
1844 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1845 hose->io_base_phys, 0,
1846 0x10000, IORESOURCE_IO, 2);
1847}
1848
1849static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1850 struct pci_controller *hose,
1851 void __iomem *mbase,
1852 struct resource *res)
1853{
1854 resource_size_t size = resource_size(res);
1855 u64 sa;
1856
1857 if (port->endpoint) {
1858 resource_size_t ep_addr = 0;
1859 resource_size_t ep_size = 32 << 20;
1860
1861
1862
1863
1864
1865
1866
1867 sa = (0xffffffffffffffffull << ilog2(ep_size));
1868
1869
1870 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1871 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1872 PCI_BASE_ADDRESS_MEM_TYPE_64);
1873
1874
1875 out_le32(mbase + PECFG_BAR1MPA, 0);
1876 out_le32(mbase + PECFG_BAR2HMPA, 0);
1877 out_le32(mbase + PECFG_BAR2LMPA, 0);
1878
1879 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1880 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1881
1882 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1883 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1884 } else {
1885
1886 sa = (0xffffffffffffffffull << ilog2(size));
1887 if (res->flags & IORESOURCE_PREFETCH)
1888 sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1889
1890 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
1891 of_device_is_compatible(
1892 port->node, "ibm,plb-pciex-476fpe") ||
1893 of_device_is_compatible(
1894 port->node, "ibm,plb-pciex-476gtr"))
1895 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1896
1897 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1898 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1899
1900
1901
1902
1903 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1904 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1905 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1906 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1907 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1908 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1909
1910 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1911 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1912 }
1913
1914
1915 out_le32(mbase + PECFG_PIMEN, 0x1);
1916
1917
1918 out_le16(mbase + PCI_COMMAND,
1919 in_le16(mbase + PCI_COMMAND) |
1920 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1921}
1922
1923static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1924{
1925 struct resource dma_window;
1926 struct pci_controller *hose = NULL;
1927 const int *bus_range;
1928 int primary = 0, busses;
1929 void __iomem *mbase = NULL, *cfg_data = NULL;
1930 const u32 *pval;
1931 u32 val;
1932
1933
1934 if (of_get_property(port->node, "primary", NULL))
1935 primary = 1;
1936
1937
1938 bus_range = of_get_property(port->node, "bus-range", NULL);
1939
1940
1941 hose = pcibios_alloc_controller(port->node);
1942 if (!hose)
1943 goto fail;
1944
1945
1946
1947
1948 hose->indirect_type = port->index;
1949
1950
1951 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1952 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1953
1954
1955
1956
1957
1958
1959 busses = hose->last_busno - hose->first_busno;
1960 if (busses > MAX_PCIE_BUS_MAPPED) {
1961 busses = MAX_PCIE_BUS_MAPPED;
1962 hose->last_busno = hose->first_busno + busses;
1963 }
1964
1965 if (!port->endpoint) {
1966
1967
1968
1969 cfg_data = ioremap(port->cfg_space.start +
1970 (hose->first_busno + 1) * 0x100000,
1971 busses * 0x100000);
1972 if (cfg_data == NULL) {
1973 printk(KERN_ERR "%s: Can't map external config space !",
1974 port->node->full_name);
1975 goto fail;
1976 }
1977 hose->cfg_data = cfg_data;
1978 }
1979
1980
1981
1982
1983 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1984 if (mbase == NULL) {
1985 printk(KERN_ERR "%s: Can't map internal config space !",
1986 port->node->full_name);
1987 goto fail;
1988 }
1989 hose->cfg_addr = mbase;
1990
1991 pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
1992 hose->first_busno, hose->last_busno);
1993 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1994 hose->cfg_addr, hose->cfg_data);
1995
1996
1997 hose->ops = &ppc4xx_pciex_pci_ops;
1998 port->hose = hose;
1999 mbase = (void __iomem *)hose->cfg_addr;
2000
2001 if (!port->endpoint) {
2002
2003
2004
2005 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
2006 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
2007 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
2008 }
2009
2010
2011
2012
2013 out_le32(mbase + PECFG_PIMEN, 0);
2014
2015
2016 pci_process_bridge_OF_ranges(hose, port->node, primary);
2017
2018
2019 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
2020 goto fail;
2021
2022
2023 ppc4xx_configure_pciex_POMs(port, hose, mbase);
2024
2025
2026 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036 pval = of_get_property(port->node, "vendor-id", NULL);
2037 if (pval) {
2038 val = *pval;
2039 } else {
2040 if (!port->endpoint)
2041 val = 0xaaa0 + port->index;
2042 else
2043 val = 0xeee0 + port->index;
2044 }
2045 out_le16(mbase + 0x200, val);
2046
2047 pval = of_get_property(port->node, "device-id", NULL);
2048 if (pval) {
2049 val = *pval;
2050 } else {
2051 if (!port->endpoint)
2052 val = 0xbed0 + port->index;
2053 else
2054 val = 0xfed0 + port->index;
2055 }
2056 out_le16(mbase + 0x202, val);
2057
2058
2059 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
2060 out_le16(mbase + 0x204, 0x7);
2061
2062 if (!port->endpoint) {
2063
2064 out_le32(mbase + 0x208, 0x06040001);
2065
2066 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
2067 port->index);
2068 } else {
2069
2070 out_le32(mbase + 0x208, 0x0b200001);
2071
2072 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
2073 port->index);
2074 }
2075
2076 return;
2077 fail:
2078 if (hose)
2079 pcibios_free_controller(hose);
2080 if (cfg_data)
2081 iounmap(cfg_data);
2082 if (mbase)
2083 iounmap(mbase);
2084}
2085
2086static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
2087{
2088 struct ppc4xx_pciex_port *port;
2089 const u32 *pval;
2090 int portno;
2091 unsigned int dcrs;
2092 const char *val;
2093
2094
2095
2096
2097 if (ppc4xx_pciex_check_core_init(np))
2098 return;
2099
2100
2101 pval = of_get_property(np, "port", NULL);
2102 if (pval == NULL) {
2103 printk(KERN_ERR "PCIE: Can't find port number for %s\n",
2104 np->full_name);
2105 return;
2106 }
2107 portno = *pval;
2108 if (portno >= ppc4xx_pciex_port_count) {
2109 printk(KERN_ERR "PCIE: port number out of range for %s\n",
2110 np->full_name);
2111 return;
2112 }
2113 port = &ppc4xx_pciex_ports[portno];
2114 port->index = portno;
2115
2116
2117
2118
2119 if (!of_device_is_available(np)) {
2120 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2121 return;
2122 }
2123
2124 port->node = of_node_get(np);
2125 if (ppc4xx_pciex_hwops->want_sdr) {
2126 pval = of_get_property(np, "sdr-base", NULL);
2127 if (pval == NULL) {
2128 printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
2129 np->full_name);
2130 return;
2131 }
2132 port->sdr_base = *pval;
2133 }
2134
2135
2136
2137
2138
2139 val = of_get_property(port->node, "device_type", NULL);
2140 if (!strcmp(val, "pci-endpoint")) {
2141 port->endpoint = 1;
2142 } else if (!strcmp(val, "pci")) {
2143 port->endpoint = 0;
2144 } else {
2145 printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
2146 np->full_name);
2147 return;
2148 }
2149
2150
2151 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2152 printk(KERN_ERR "%s: Can't get PCI-E config space !",
2153 np->full_name);
2154 return;
2155 }
2156
2157 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2158 printk(KERN_ERR "%s: Can't get UTL register base !",
2159 np->full_name);
2160 return;
2161 }
2162
2163
2164 dcrs = dcr_resource_start(np, 0);
2165 if (dcrs == 0) {
2166 printk(KERN_ERR "%s: Can't get DCR register base !",
2167 np->full_name);
2168 return;
2169 }
2170 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2171
2172
2173 if (ppc4xx_pciex_port_init(port)) {
2174 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2175 return;
2176 }
2177
2178
2179 ppc4xx_pciex_port_setup_hose(port);
2180}
2181
2182#endif

/*
 * Probe all 4xx PCI, PCI-X and PCI-Express host bridges described in
 * the device tree.
 */
static int __init ppc4xx_pci_find_bridges(void)
{
	struct device_node *np;

	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);

#ifdef CONFIG_PPC4xx_PCI_EXPRESS
	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
		ppc4xx_probe_pciex_bridge(np);
#endif
	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
		ppc4xx_probe_pcix_bridge(np);
	for_each_compatible_node(np, NULL, "ibm,plb-pci")
		ppc4xx_probe_pci_bridge(np);

	return 0;
}
arch_initcall(ppc4xx_pci_find_bridges);