1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/export.h>
21#include <linux/of_address.h>
22#include <linux/of_pci.h>
23#include <linux/mm.h>
24#include <linux/shmem_fs.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30#include <linux/vgaarb.h>
31#include <linux/numa.h>
32#include <linux/msi.h>
33
34#include <asm/processor.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#include <asm/byteorder.h>
39#include <asm/machdep.h>
40#include <asm/ppc-pci.h>
41#include <asm/eeh.h>
42
43#include "../../../drivers/pci/pci.h"
44
45
/* Protects hose_list and phb_bitmap below */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* Maximum number of PCI domains (PHBs); domain numbers are masked to this */
#define MAX_PHBS 0x10000

/*
 * Used/free PHB-number tracking bitmap for dynamic domain numbering;
 * manipulated by get_phb_number()/pcibios_free_controller() under
 * hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA memory base address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

/* DMA ops installed on every PCI device in pcibios_bus_add_device() */
static const struct dma_map_ops *pci_dma_ops;
64
/* Record the DMA ops that pcibios_bus_add_device() installs on each device. */
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
69
70
71
72
73
/*
 * Pick a unique domain (PHB) number for the bridge described by @dn and
 * mark it used in phb_bitmap.  Called from pcibios_alloc_controller()
 * with hose_spinlock held, which serializes the bitmap accesses.
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u32 prop_32;
	u64 prop;

	/*
	 * Prefer a firmware-provided ID: "ibm,opal-phbid" if present,
	 * otherwise the second cell of the "reg" property.
	 */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret) {
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* Use the firmware ID only if it is not already taken */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/*
	 * No usable firmware ID (or a collision): fall back to the
	 * lowest free bit in the bitmap.
	 */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);	/* all 64K domain numbers exhausted */
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}
108
/*
 * Allocate and minimally initialize a pci_controller (PHB) for device
 * node @dev, give it a unique domain number and link it on hose_list.
 * Returns NULL if the allocation fails.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* Works both before and after the slab allocator is available */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only hoses from the slab allocator may be kfree()d later */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to no node if the node is absent or offline */
		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
134EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
135
/*
 * Unlink @phb from hose_list, release its domain number and free the
 * structure (only if it was dynamically allocated).
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
150EXPORT_SYMBOL_GPL(pcibios_free_controller);
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
178{
179 struct pci_controller *phb = (struct pci_controller *)
180 bridge->release_data;
181
182 pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
183
184 pcibios_free_controller(phb);
185}
186EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
187
188
189
190
191
192
193
194resource_size_t pcibios_window_alignment(struct pci_bus *bus,
195 unsigned long type)
196{
197 struct pci_controller *phb = pci_bus_to_host(bus);
198
199 if (phb->controller_ops.window_alignment)
200 return phb->controller_ops.window_alignment(bus, type);
201
202
203
204
205
206
207 return 1;
208}
209
210void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
211{
212 struct pci_controller *hose = pci_bus_to_host(bus);
213
214 if (hose->controller_ops.setup_bridge)
215 hose->controller_ops.setup_bridge(bus, type);
216}
217
218void pcibios_reset_secondary_bus(struct pci_dev *dev)
219{
220 struct pci_controller *phb = pci_bus_to_host(dev->bus);
221
222 if (phb->controller_ops.reset_secondary_bus) {
223 phb->controller_ops.reset_secondary_bus(dev);
224 return;
225 }
226
227 pci_reset_secondary_bus(dev);
228}
229
230resource_size_t pcibios_default_alignment(void)
231{
232 if (ppc_md.pcibios_default_alignment)
233 return ppc_md.pcibios_default_alignment();
234
235 return 0;
236}
237
238#ifdef CONFIG_PCI_IOV
239resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
240{
241 if (ppc_md.pcibios_iov_resource_alignment)
242 return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
243
244 return pci_iov_resource_size(pdev, resno);
245}
246
247int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
248{
249 if (ppc_md.pcibios_sriov_enable)
250 return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
251
252 return 0;
253}
254
255int pcibios_sriov_disable(struct pci_dev *pdev)
256{
257 if (ppc_md.pcibios_sriov_disable)
258 return ppc_md.pcibios_sriov_disable(pdev);
259
260 return 0;
261}
262
263#endif
264
/* Size of the hose's IO window; ppc64 tracks it separately from the resource. */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
273
274int pcibios_vaddr_is_ioport(void __iomem *address)
275{
276 int ret = 0;
277 struct pci_controller *hose;
278 resource_size_t size;
279
280 spin_lock(&hose_spinlock);
281 list_for_each_entry(hose, &hose_list, list_node) {
282 size = pcibios_io_size(hose);
283 if (address >= hose->io_base_virt &&
284 address < (hose->io_base_virt + size)) {
285 ret = 1;
286 break;
287 }
288 }
289 spin_unlock(&hose_spinlock);
290 return ret;
291}
292
/*
 * Translate a CPU physical address into a port-IO cookie relative to
 * _IO_BASE by locating the hose whose IO window contains it.
 * Returns ~0 when no hose matches.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;	/* "not found" sentinel */

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			/* port number of the start of this hose's window */
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
314EXPORT_SYMBOL_GPL(pci_address_to_pio);
315
316
317
318
/* The PCI domain number is simply the hose's global number. */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
325EXPORT_SYMBOL(pci_domain_nr);
326
327
328
329
330
331
332
333
/*
 * Find the host bridge whose device node is @node or one of its
 * ancestors, walking up the device tree one parent at a time.
 * NOTE(review): hose_list is walked here without hose_spinlock,
 * unlike the other list walks in this file — verify callers.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
345
346struct pci_controller *pci_find_controller_for_domain(int domain_nr)
347{
348 struct pci_controller *hose;
349
350 list_for_each_entry(hose, &hose_list, list_node)
351 if (hose->global_number == domain_nr)
352 return hose;
353
354 return NULL;
355}
356
/*
 * Refcounted record of an INTx virq that may be shared by several PCI
 * devices; the irq mapping is disposed when the last user disappears.
 */
struct pci_intx_virq {
	int virq;			/* Linux irq number */
	struct kref kref;		/* one reference per using device */
	struct list_head list_node;	/* on intx_list, under intx_mutex */
};

static LIST_HEAD(intx_list);
static DEFINE_MUTEX(intx_mutex);
365
/*
 * kref release callback: drop the entry from intx_list and dispose of
 * the irq mapping.  Runs with intx_mutex held by the kref_put() caller.
 */
static void ppc_pci_intx_release(struct kref *kref)
{
	struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);

	list_del(&vi->list_node);
	irq_dispose_mapping(vi->virq);
	kfree(vi);
}
374
/*
 * Bus notifier: when a PCI device is removed, drop its reference on the
 * intx_list entry matching its irq so shared mappings are released once
 * the last user is gone.
 */
static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);

	if (action == BUS_NOTIFY_DEL_DEVICE) {
		struct pci_intx_virq *vi;

		mutex_lock(&intx_mutex);
		list_for_each_entry(vi, &intx_list, list_node) {
			if (vi->virq == pdev->irq) {
				kref_put(&vi->kref, ppc_pci_intx_release);
				break;
			}
		}
		mutex_unlock(&intx_mutex);
	}

	return NOTIFY_DONE;
}
395
static struct notifier_block ppc_pci_unmap_irq_notifier = {
	.notifier_call = ppc_pci_unmap_irq_line,
};

/* Hook the device-removal notifier onto the PCI bus at arch init time. */
static int ppc_pci_register_irq_notifier(void)
{
	return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
}
arch_initcall(ppc_pci_register_irq_notifier);
405
406
407
408
409
410
/*
 * Resolve the INTx interrupt for @pci_dev: map it via the device tree,
 * or fall back to the PCI_INTERRUPT_PIN/LINE config registers, then
 * record the resulting virq in a refcounted intx_list entry so the
 * mapping can be disposed when the last device using it is removed.
 * Returns 0 on success, -1 on failure.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;
	struct pci_intx_virq *vi, *vitmp;

	/* Preallocate the tracking entry before taking intx_mutex */
	vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
	if (!vi)
		return -1;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree first */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/*
		 * No device-tree mapping: fall back to what is in config
		 * space and map that through the default controller,
		 * forcing the type to level-low, which is what PCI INTx
		 * interrupts are.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			goto error_exit;
		if (pin == 0)	/* device does not use INTx */
			goto error_exit;
		/* 0 and 0xff both mean "no line assigned" */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			goto error_exit;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		goto error_exit;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	/* Share an existing intx_list entry for this virq, or add ours */
	mutex_lock(&intx_mutex);
	list_for_each_entry(vitmp, &intx_list, list_node) {
		if (vitmp->virq == virq) {
			kref_get(&vitmp->kref);
			kfree(vi);
			vi = NULL;	/* preallocated entry not needed */
			break;
		}
	}
	if (vi) {
		vi->virq = virq;
		kref_init(&vi->kref);
		list_add_tail(&vi->list_node, &intx_list);
	}
	mutex_unlock(&intx_mutex);

	return 0;
error_exit:
	kfree(vi);
	return -1;
}
481
482
483
484
485
/*
 * Adjust @vma->vm_pgoff so that mmap of an IO BAR maps the physical
 * address of the port range rather than the port-IO cookie.
 */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;

	/* Convert the port-IO cookie back to an offset within the window */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}
500
501
502
503
504
505
/*
 * Decide the page protection to use when user space maps physical page
 * @pfn: cacheable for RAM, non-cached otherwise, upgraded to
 * write-combining when the page belongs to a prefetchable PCI BAR.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search all devices for a memory BAR containing this address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Only care about memory resources */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			/* Compare against the page-aligned resource start */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* drop the reference for_each_pci_dev left on early break */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
549
550
/*
 * Read @size bytes from legacy IO @port on the bus's hose.  Returns the
 * number of bytes read or a negative errno.  16/32-bit accesses must
 * be naturally aligned and are performed little-endian.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * Check if port can be supported by that bus: translate the
	 * port to the hose's IO-resource space and range-check it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
589
590
/*
 * Write @size bytes to legacy IO @port on the bus's hose.  Returns the
 * number of bytes written or a negative errno.  The value arrives
 * big-endian-justified in @val (see the shifts below).
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * Check if port can be supported by that bus: translate the
	 * port to the hose's IO-resource space and range-check it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/*
	 * WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity, stored as a 32-bit
	 * value justified in the high bytes, hence the shifts below.
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
634
635
/*
 * mmap legacy (ISA-style) memory or IO space of @bus into user space.
 * Non-existing legacy memory is backed by anonymous zeroed pages so
 * probing applications (e.g. X) do not fault.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/*
		 * Hack alert! Because X is lame and can fail starting if
		 * it gets an error trying to mmap legacy_mem, we map
		 * instead anonymous shared memory for the request that
		 * falls outside the real legacy memory window.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Range-check the request against the hose's IO window */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
687
/*
 * Report a device resource to user space (sysfs/proc): IO resources are
 * translated back to bus numbers, memory resources are reported as the
 * CPU physical addresses already stored in the resource.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/*
	 * Memory resources are left as CPU physical addresses, which is
	 * what the rest of the user-space mmap path here expects.
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
/*
 * Parse the "ranges" property of the host-bridge node @dev and fill in
 * @hose's IO and memory resources and offsets accordingly.  At most one
 * IO range and three memory ranges are kept; extra ones are skipped
 * with a message.  @primary marks the bridge providing the ISA ranges.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* Skip untranslatable or empty ranges */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif
			/*
			 * pci_io_size and io_base_phys always represent
			 * IO space starting at 0, so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.flags & IORESOURCE_PREFETCH) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
							range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
838
839
840int pci_proc_domain(struct pci_bus *bus)
841{
842 struct pci_controller *hose = pci_bus_to_host(bus);
843
844 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
845 return 0;
846 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
847 return hose->global_number != 0;
848 return 1;
849}
850
851int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
852{
853 if (ppc_md.pcibios_root_bridge_prepare)
854 return ppc_md.pcibios_root_bridge_prepare(bridge);
855
856 return 0;
857}
858
859
860
861
/*
 * Header fixup run on every device: mark unassigned BARs with
 * IORESOURCE_UNSET (zero-based size) so they get allocated later, and
 * give the platform a final say via ppc_md.pcibios_fixup_resources.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	/* VFs get their resources from the PF; nothing to fix up here */
	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/*
		 * If we're going to re-assign everything, we mark all
		 * resources as unset (and 0-base them). In addition, we
		 * mark BARs starting at 0 as unset as well, except if
		 * PCI_PROBE_ONLY is also set since in that case, we want
		 * to use them as-is.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
907DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
908
909
910
911
912
913
/*
 * Heuristically decide whether a P2P bridge window was left
 * uninitialized by firmware (and should therefore be re-allocated).
 * Returns 1 for "treat as uninitialized", 0 for "leave alone".
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/*
		 * The BAR is 0, let's check if memory decoding is enabled
		 * on the bridge. If not, we consider it unassigned.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/*
		 * Memory decoding is enabled and the BAR is 0. If any of
		 * the hose's windows is bus address 0, it's very likely a
		 * valid BAR pointing there, so leave it alone.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it looks like we really need to re-assign it */
		return 1;
	} else {
		/* If the IO BAR is not page-offset-0, assume initialized */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/*
		 * First check if firmware wants us to use it at all: with
		 * IO decoding disabled, the window is plain unused.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* IO decoding is off and the BAR is 0: re-assign it */
		return 1;
	}
}
978
979
/* Fixup the windows of a P2P bridge after bus scan. */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		/* Skip extra windows of transparent bridges */
		if (i >= 3 && bus->self->transparent)
			continue;

		/*
		 * If we're going to reassign everything, we can shrink
		 * the bridge windows to 0-based empty ranges and let the
		 * resource allocator redo them from scratch.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/*
		 * Drop windows that the firmware apparently never set up
		 * so they are re-allocated later.
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}
1015
/* Per-bus setup: bridge fixup, platform fixup, then DMA bus setup. */
void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/*
	 * Platform-specific bus fixups - this used to also be called for
	 * hot-plugged devices via the generic fixup_bus path.
	 */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}
1035
/*
 * Per-device setup when a device is added to a bus: NUMA node, DMA ops
 * and offset, per-PHB DMA setup, interrupt resolution and platform
 * hooks.
 */
void pcibios_bus_add_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	/* Hook the device up to the right NUMA node */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);

	if (ppc_md.pcibios_bus_add_device)
		ppc_md.pcibios_bus_add_device(dev);
}
1061
/*
 * Arch hook when a device is discovered: apply SR-IOV fixups and
 * propagate the bus's MSI irq domain to the device.
 */
int pcibios_add_device(struct pci_dev *dev)
{
	struct irq_domain *d;

#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */

	/* Inherit the MSI domain from the bridge the device sits on */
	d = dev_get_msi_domain(&dev->bus->dev);
	if (d)
		dev_set_msi_domain(&dev->dev, d);
	return 0;
}
1076
/* No special bus-mastering setup needed on powerpc. */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
1081
/* Generic per-bus fixup entry point called by the PCI core after scan. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/*
	 * When called from scanning the bus on a hotplug add, the struct
	 * pci_dev for the root bridge might not exist yet, so read the
	 * bridge bases now that it does.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);
}
1093EXPORT_SYMBOL(pcibios_fixup_bus);
1094
1095static int skip_isa_ioresource_align(struct pci_dev *dev)
1096{
1097 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1098 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1099 return 1;
1100 return 0;
1101}
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
/*
 * Allocation hook: avoid placing IO resources in the x100-x3ff portion
 * of each 1KB block (the ISA alias region) unless ISA alignment can be
 * skipped for this device.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		/* Round up past the ISA alias window to the next 1KB */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
1131EXPORT_SYMBOL(pcibios_align_resource);
1132
1133
1134
1135
1136
/*
 * Insert @res into the resource tree as a child of @parent, reparenting
 * any existing children of @parent that fall entirely inside @res.
 * Returns 0 on success, -1 if @res partially overlaps an existing child.
 */
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Find the span of children covered by res; reject overlaps */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* first child inside res */
	}
	if (firstpp == NULL)
		return -1;	/* no children contained in res */
	/* Splice res in: covered children become res's children */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
/*
 * Recursively claim the windows of @bus (and its children) in the
 * resource tree.  Windows that cannot be claimed are cleared so the
 * generic allocator can re-assign them later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset, get it re-allocated */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			/* Root bus windows go under the global trees */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/*
				 * This happens when the bridge is
				 * transparent: the window just mirrors
				 * the parent's, nothing to claim.
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			/* Last resort: let the PCI core claim/clip it */
			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
			i, bus->number);
	clear_resource:
		/*
		 * The resource could not be claimed: clear it completely
		 * (0-based empty range, no flags) so the generic code
		 * re-assigns it from scratch.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1271
/*
 * Claim device BAR @idx of @dev in the resource tree; on failure mark
 * it unset (0-based size) so it will be remapped later.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1292
/*
 * Claim device BARs in two passes: pass 0 claims resources whose
 * decoding is enabled in PCI_COMMAND, pass 1 the disabled ones.
 * Pass 0 also switches off any enabled expansion ROM.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/*
			 * NOTE(review): the ROM special-case below is a
			 * dead store — `disabled` is unconditionally
			 * overwritten by the if/else that follows, so
			 * ROM BARs are classified by PCI_COMMAND_MEMORY
			 * like any other memory BAR.  Confirm intent.
			 */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/*
			 * Turn the ROM off: it isn't mentioned in the
			 * list of resources this way.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1339
/*
 * Reserve the legacy IO range (first 4KB of ports) and the VGA memory
 * range (0xa0000-0xbffff) of each root bus so the allocator never hands
 * them out to devices.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory: find a window covering the VGA range */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;		/* no window covers the VGA area */
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1395
/*
 * Boot-time resource survey: claim firmware-assigned windows and BARs,
 * reserve legacy regions, then assign whatever is left unassigned.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
		pcibios_allocate_resources(0);	/* enabled devices first */
		pcibios_allocate_resources(1);	/* then disabled ones */
	}

	/*
	 * Before we start assigning unassigned resources, reserve the
	 * low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of
	 * them.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/*
	 * Now proceed to assigning things that were left unassigned.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}
}
1425
1426
1427
1428
1429
1430
/*
 * Claim the already-assigned resources of every device on @bus and its
 * child buses (used for hotplug, where firmware did the assignment).
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip claimed, zero-start and flag-less BARs */
			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			/* Bridge windows may still be clip-claimable */
			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
1458EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1459
1460
1461
1462
1463
1464
1465
1466
/*
 * Finish hotplug addition of @bus: allocate and claim its resources,
 * assign whatever is still unassigned, then add the devices.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);
}
1485EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1486
1487int pcibios_enable_device(struct pci_dev *dev, int mask)
1488{
1489 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1490
1491 if (phb->controller_ops.enable_device_hook)
1492 if (!phb->controller_ops.enable_device_hook(dev))
1493 return -EINVAL;
1494
1495 return pci_enable_resources(dev, mask);
1496}
1497
1498void pcibios_disable_device(struct pci_dev *dev)
1499{
1500 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1501
1502 if (phb->controller_ops.disable_device)
1503 phb->controller_ops.disable_device(dev);
1504}
1505
/* Offset between the hose's IO resource space and port-IO cookies. */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}
1510
/*
 * Export the hose's IO window and up to three memory windows (with
 * their CPU->bus offsets) onto @resources for host-bridge registration.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		pr_debug("PCI: I/O resource not set for host"
			 " bridge %pOF (domain %d)\n",
			 hose->dn, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
			 res, (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags)
			continue;

		offset = hose->mem_offset[i];
		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
			 res, (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}
1546
1547
1548
1549
1550
/*
 * Generate a device-level config accessor stub that always reports
 * "no device".  NOTE(review): no expansion of this macro is visible in
 * this file - confirm it is still needed before removing.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1557
1558static int
1559null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1560 int len, u32 *val)
1561{
1562 return PCIBIOS_DEVICE_NOT_FOUND;
1563}
1564
1565static int
1566null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1567 int len, u32 val)
1568{
1569 return PCIBIOS_DEVICE_NOT_FOUND;
1570}
1571
/* Fallback pci_ops for buses with no host bridge: all accesses fail. */
static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};
1577
1578
1579
1580
1581
1582static struct pci_bus *
1583fake_pci_bus(struct pci_controller *hose, int busnr)
1584{
1585 static struct pci_bus bus;
1586
1587 if (hose == NULL) {
1588 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1589 }
1590 bus.number = busnr;
1591 bus.sysdata = hose;
1592 bus.ops = hose? hose->ops: &null_pci_ops;
1593 return &bus;
1594}
1595
/*
 * EARLY_PCI_OP - generate early-boot config space accessors.
 *
 * Each expansion wraps the corresponding generic
 * pci_bus_{read,write}_config_{byte,word,dword} helper with a fake
 * pci_bus, so config space can be accessed before the real bus
 * structures have been allocated.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
 int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
 devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1610
/*
 * early_find_capability - capability-list lookup usable at early boot.
 *
 * Uses a fake pci_bus (see fake_pci_bus()) so the generic
 * pci_bus_find_capability() helper works before bus enumeration.
 */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	struct pci_bus *pbus = fake_pci_bus(hose, bus);

	return pci_bus_find_capability(pbus, devfn, cap);
}
1616
1617struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1618{
1619 struct pci_controller *hose = bus->sysdata;
1620
1621 return of_node_get(hose->dn);
1622}
1623
1624
1625
1626
1627
1628void pcibios_scan_phb(struct pci_controller *hose)
1629{
1630 LIST_HEAD(resources);
1631 struct pci_bus *bus;
1632 struct device_node *node = hose->dn;
1633 int mode;
1634
1635 pr_debug("PCI: Scanning PHB %pOF\n", node);
1636
1637
1638 pcibios_setup_phb_io_space(hose);
1639
1640
1641 pcibios_setup_phb_resources(hose, &resources);
1642
1643 hose->busn.start = hose->first_busno;
1644 hose->busn.end = hose->last_busno;
1645 hose->busn.flags = IORESOURCE_BUS;
1646 pci_add_resource(&resources, &hose->busn);
1647
1648
1649 bus = pci_create_root_bus(hose->parent, hose->first_busno,
1650 hose->ops, hose, &resources);
1651 if (bus == NULL) {
1652 pr_err("Failed to create bus for PCI domain %04x\n",
1653 hose->global_number);
1654 pci_free_resource_list(&resources);
1655 return;
1656 }
1657 hose->bus = bus;
1658
1659
1660 mode = PCI_PROBE_NORMAL;
1661 if (node && hose->controller_ops.probe_mode)
1662 mode = hose->controller_ops.probe_mode(bus);
1663 pr_debug(" probe mode: %d\n", mode);
1664 if (mode == PCI_PROBE_DEVTREE)
1665 of_scan_bus(node, bus);
1666
1667 if (mode == PCI_PROBE_NORMAL) {
1668 pci_bus_update_busn_res_end(bus, 255);
1669 hose->last_busno = pci_scan_child_bus(bus);
1670 pci_bus_update_busn_res_end(bus, hose->last_busno);
1671 }
1672
1673
1674
1675
1676 if (ppc_md.pcibios_fixup_phb)
1677 ppc_md.pcibios_fixup_phb(hose);
1678
1679
1680 if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1681 struct pci_bus *child;
1682 list_for_each_entry(child, &bus->children, node)
1683 pcie_bus_configure_settings(child);
1684 }
1685}
1686EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1687
/*
 * Hide the BARs of Freescale/Motorola host-bridge devices from resource
 * allocation.  NOTE(review): presumably this targets SoCs configured as
 * PCI agents, where the bridge appears as an ordinary device on the
 * root bus and claiming its resources would be wrong - confirm against
 * the relevant FSL platform docs.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* Low nibble of the class word is the programming interface. */
	int prog_if = dev->class & 0xf;

	/* Only touch normal-header devices sitting directly on the root bus. */
	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1708
1709
1710static int __init discover_phbs(void)
1711{
1712 if (ppc_md.discover_phbs)
1713 ppc_md.discover_phbs();
1714
1715 return 0;
1716}
1717core_initcall(discover_phbs);
1718