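/*
 * Common PCI support code shared by all powerpc platforms: host bridge
 * (PHB) allocation and numbering, resource setup from the device tree,
 * legacy I/O access helpers and user-space mmap support.
 */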
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000

/* For dynamic PHB numbering: used/free PHBs tracking bitmap. */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

/* Default DMA ops; platforms may override them with set_pci_dma_ops(). */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
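/*
 * Pick a PHB (domain) number for the host bridge described by @dn.
 * A fixed number derived from the device tree is preferred; otherwise
 * a free number is allocated dynamically from phb_bitmap.  Called with
 * hose_spinlock held.
 */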
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u32 prop_32;
	u64 prop;

	/*
	 * Try fixed PHB numbering first, by reading the "ibm,opal-phbid"
	 * property (OPAL/powernv), then falling back to the second cell
	 * of the "reg" property.
	 */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret) {
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/*
	 * If no usable fixed number was found, or it is already taken,
	 * fall back to dynamic PHB numbering.
	 */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
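/*
 * pcibios_free_controller_deferred() - release a pci_controller whose
 * lifetime is tied to its pci_host_bridge.  Intended to be registered
 * as the host bridge release callback (pci_set_host_bridge_release()),
 * so the controller is only freed once the last reference to the host
 * bridge has been dropped.
 */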
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
					bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
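/*
 * The default bridge-window alignment.  Platforms override this through
 * controller_ops.window_alignment when their bridge windows need
 * stricter alignment (for instance to match PE segment sizes).
 */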
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
					 unsigned long type)
{
	struct pci_controller *phb = pci_bus_to_host(bus);

	if (phb->controller_ops.window_alignment)
		return phb->controller_ops.window_alignment(bus, type);

	/*
	 * PCI core will figure out the default alignment: 4KiB for I/O
	 * and 1MiB for memory windows.
	 */
	return 1;
}

void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->controller_ops.setup_bridge)
		hose->controller_ops.setup_bridge(bus, type);
}

void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.reset_secondary_bus) {
		phb->controller_ops.reset_secondary_bus(dev);
		return;
	}

	pci_reset_secondary_bus(dev);
}

#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}
#endif /* CONFIG_PCI_IOV */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/*
 * Walk up the OF tree from @node until a node belonging to a registered
 * host bridge is found, and return that pci_controller.  Useful early
 * during boot, before PCI bus numbers have been assigned.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
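/*
 * Map the interrupt for a PCI device: first try the device tree via
 * of_irq_parse_pci(); if no map exists, fall back to the
 * PCI_INTERRUPT_PIN/PCI_INTERRUPT_LINE values left by firmware.
 */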
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_phandle_args oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get an interrupt mapping from the device tree */
	if (of_irq_parse_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/*
		 * No device-tree mapping: use whatever firmware left in
		 * PCI_INTERRUPT_LINE, skipping devices that have no
		 * interrupt pin or an invalid line value.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.args_count, oirq.args[0], oirq.args[1],
			 of_node_full_name(oirq.np));

		virq = irq_create_of_mapping(&oirq);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
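/*
 * Validate an mmap request against the resources of @dev and convert
 * the user-supplied offset into a physical address in the selected
 * space.  Returns the matching resource, or NULL if the request does
 * not fall within any I/O or memory BAR of the device.
 */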
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
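/*
 * Decide the page protection used when a physical address is mapped
 * through an interface such as /dev/mem: RAM keeps the protection
 * passed in, PCI memory is mapped non-cached, and prefetchable BARs
 * get write-combining.
 */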
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
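/*
 * Perform the mmap of a BAR into user space, as used by the sysfs
 * "resource" files and /proc/bus/pci.  The vma offset is interpreted
 * within the space selected by @mmap_state, and the mapping is always
 * non-cached (optionally write-combined).
 */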
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	if (write_combine)
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * Check whether the port can be supported by this bus.  We only
	 * check the ranges of the PHB, not the bus itself, since the
	 * rules for forwarding legacy cycles down bridges are not our
	 * problem here.  If the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check whether the port is within the PHB's I/O range, as in
	 * pci_legacy_read() above.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/*
	 * The generic legacy-I/O code always hands us the value as a
	 * u32, so for 1- and 2-byte writes the data sits in the
	 * high-order bytes and has to be shifted down before it is
	 * written out.
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/*
		 * If the requested range is beyond the ISA memory this
		 * host bridge actually provides, hand back anonymous
		 * zero pages instead of failing: some X servers refuse
		 * to start when the legacy_mem mmap returns an error.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *)rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/*
	 * For MMIO we return a CPU physical address rather than a BAR
	 * value, because userspace (X in particular) expects to be able
	 * to pass it straight to /dev/mem.  That means the value may be
	 * a full 64-bit address even though some applications only
	 * expect 32 bits.
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}
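/*
 * Parse the "ranges" property of a PCI host bridge device node and set
 * up the controller's I/O and memory resources and offsets from it.
 * For the primary bridge, the ranges also establish the global ISA I/O
 * and ISA memory bases.
 */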
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/*
		 * Skip ranges that failed translation or have zero size
		 * (some firmware exposes bogus zero-sized regions).
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* The primary bridge provides the ISA I/O base */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/*
			 * pci_io_size and io_base_phys always represent
			 * I/O starting at 0, so factor in pci_addr.
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.pci_space & 0x40000000) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* A range at PCI address 0 is the ISA memory hole */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
						  range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
		return 0;
	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
		return hose->global_number != 0;
	return 1;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (ppc_md.pcibios_root_bridge_prepare)
		return ppc_md.pcibios_root_bridge_prepare(bridge);

	return 0;
}

/*
 * This header fixup will do the resource fixup for all devices as they
 * are probed.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/*
		 * If we are going to reassign everything, mark all
		 * resources as unset and zero-base them.  In addition,
		 * mark BARs that firmware left at 0 as unassigned,
		 * unless we are probe-only.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
/*
 * This function tries to figure out whether a bridge window has been
 * initialized by the firmware or not.  It does not have to be bullet
 * proof, but things work reasonably well when it is not.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* The job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the window is non-0 it has probably been initialized */
		if (region.start != 0)
			return 0;

		/*
		 * The window is 0: check whether memory decoding is
		 * enabled on the bridge.  If not, consider it unassigned.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/*
		 * Memory decoding is enabled and the window starts at 0.
		 * If one of the host bridge windows itself starts at bus
		 * address 0, that is a valid assignment.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/*
		 * No host window maps bus address 0, so a window at 0
		 * would collide: consider it unassigned.
		 */
		return 1;
	} else {
		/* If the window is non-0, consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/*
		 * Unlike memory, I/O space starting at low addresses
		 * (ISA) is typically valid, so consider the window
		 * assigned if I/O decoding is enabled.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/*
		 * I/O decoding is disabled and the window starts at 0:
		 * treat it as unassigned.
		 */
		return 1;
	}
}
/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		/*
		 * If we are going to reassign everything, we can shrink
		 * the bridge windows to nothing and mark them unset.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/*
		 * Try to detect windows that the firmware left
		 * uninitialized and clear them.
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}
void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/*
	 * Platform specific bus fixups.  We don't get invoked when
	 * things change dynamically, so this is a good place for them.
	 */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}

static void pcibios_setup_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	/*
	 * Fixup the NUMA node: it may not be set up yet by the generic
	 * code and it is needed by the DMA init below.
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);
}
int pcibios_add_device(struct pci_dev *dev)
{
	/*
	 * If the parent bus has already been added, this device is being
	 * hot-added, so set it up now.  Devices discovered during the
	 * initial scan are handled in pcibios_setup_bus_devices().
	 */
	if (dev->bus->is_added)
		pcibios_setup_device(dev);

#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */

	return 0;
}

void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/*
		 * Cardbus can call us to add new devices to a bus, so
		 * ignore devices that are already fully set up.
		 */
		if (dev->is_added)
			continue;

		pcibios_setup_device(dev);
	}
}
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/*
	 * When called from the generic PCI probe, read the PCI<->PCI
	 * bridge bases.  This is -not- called when generating the PCI
	 * tree from the OF device tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

void pci_fixup_cardbus(struct pci_bus *bus)
{
	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
		return 1;
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports and other
 * strange ISA hardware, so we always want the addresses to be allocated
 * in the 0x000-0x0ff region modulo 0x400: some poorly designed I/O
 * cards only decode the low 10 bits of the I/O address.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);
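/*
 * Reparent the existing children of @parent that fall inside @res so
 * they become children of @res, then insert @res itself under @parent.
 * Returns -1 if no child lies within @res or if a child only partially
 * overlaps it.
 */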
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}
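/*
 * Claim into the resource tree the windows of every bridge below @bus,
 * walking child buses recursively.  Windows that cannot be claimed, or
 * that were left unset, are cleared so they get reassigned later by the
 * generic code.
 */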
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/*
				 * The generic code considers this bridge
				 * transparent; nothing to claim here.
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the new
			 * resource if possible.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						      i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warning("PCI: Cannot allocate resource region "
			   "%d of PCI bridge %d, will remap\n", i, bus->number);
	clear_resource:
		/*
		 * The resource could not be claimed: clear it completely
		 * so the PCI core will recompute and reassign it later.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %pR\n", pr, pr);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */

			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/*
			 * Turn the expansion ROM off: keep the resource
			 * but disable ROM address decoding.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
		pcibios_allocate_resources(0);
		pcibios_allocate_resources(1);
	}

	/*
	 * Before we start assigning unassigned resources, try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus resources.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/* Now proceed to assigning things that were left unassigned */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();
}
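/*
 * Claim the resources of every device on @bus and, recursively, on all
 * of its child buses.  Used e.g. when a bus is hot-added (see
 * pcibios_finish_adding_to_bus()).
 */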
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
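/*
 * Finish adding a (hot-plugged) bus: allocate and claim its resources,
 * assign anything still unassigned, hook up EEH, then add the devices
 * and create their sysfs entries.
 */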
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* sysfs files should only be added after devices are added */
	eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.enable_device_hook)
		if (!phb->controller_ops.enable_device_hook(dev))
			return -EINVAL;

	return pci_enable_resources(dev, mask);
}

void pcibios_disable_device(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.disable_device)
		phb->controller_ops.disable_device(dev);
}

resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		pr_debug("PCI: I/O resource not set for host"
			 " bridge %s (domain %d)\n",
			 hose->dn->full_name, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
			 res, (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i == 0)
				printk(KERN_ERR "PCI: Memory resource 0 not set for "
				       "host bridge %s (domain %d)\n",
				       hose->dn->full_name, hose->global_number);
			continue;
		}
		offset = hose->mem_offset[i];

		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
			 res, (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}
/*
 * Null PCI config access functions, for the case when we can't find a
 * hose.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done and all
 * of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == NULL) {
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
					    devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}
/*
 * Scan a PHB: set up its I/O space and resources, create the root bus
 * and probe the devices below it, either from the device tree or by
 * normal config-space scanning.
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB IO and memory resources */
	pcibios_setup_phb_resources(hose, &resources);

	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && hose->controller_ops.probe_mode)
		mode = hose->controller_ops.probe_mode(bus);
	pr_debug(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/*
	 * Platform gets a chance to do some global fixups before we
	 * proceed to resource allocation.
	 */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as an agent, the programming interface is 1 */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);

static void fixup_vga(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
		vga_set_default_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);