1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/export.h>
25#include <linux/of_address.h>
26#include <linux/of_pci.h>
27#include <linux/mm.h>
28#include <linux/shmem_fs.h>
29#include <linux/list.h>
30#include <linux/syscalls.h>
31#include <linux/irq.h>
32#include <linux/vmalloc.h>
33#include <linux/slab.h>
34#include <linux/vgaarb.h>
35#include <linux/numa.h>
36
37#include <asm/processor.h>
38#include <asm/io.h>
39#include <asm/prom.h>
40#include <asm/pci-bridge.h>
41#include <asm/byteorder.h>
42#include <asm/machdep.h>
43#include <asm/ppc-pci.h>
44#include <asm/eeh.h>
45
46#include "../../../drivers/pci/pci.h"
47
48
/* Protects hose_list and the PHB number bitmap below */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);		/* every pci_controller (PHB) in the system */

/* Upper bound on PCI domain (PHB) numbers we hand out */
#define MAX_PHBS 0x10000

/*
 * One bit per possible PHB number; set bits are in use.  Used by
 * get_phb_number()/pcibios_free_controller() under hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA memory hole base; set from the first matching bridge range */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);

/* Default DMA ops installed on devices in pcibios_bus_add_device() */
static const struct dma_map_ops *pci_dma_ops;
67
/* Record the DMA ops that pcibios_bus_add_device() installs on devices. */
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
72
73
74
75
76
/*
 * Assign a unique PHB (domain) number for the controller at @dn.
 *
 * Prefer a firmware-derived, stable number: the OPAL PHB id when
 * present, otherwise the second cell of the node's "reg" property,
 * truncated to the MAX_PHBS range.  If that number is already taken,
 * fall back to the lowest free bit in phb_bitmap.
 *
 * Caller must hold hose_spinlock (see pcibios_alloc_controller()),
 * which protects phb_bitmap.
 */
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u32 prop_32;
	u64 prop;

	/* OPAL id first, then "reg" cell 1 as the fallback source */
	ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
	if (ret) {
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	/* Claim the firmware-derived number if it is still free */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		return phb_id;

	/*
	 * Otherwise hand out the lowest free number.  If the whole
	 * space is exhausted find_first_zero_bit() returns MAX_PHBS
	 * and we BUG.
	 */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

	return phb_id;
}
111
/*
 * Allocate and minimally initialize a pci_controller (PHB) for the
 * device-tree node @dev: assign a domain number, link it onto
 * hose_list, and (on ppc64) record its NUMA node.
 * Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	/* hose_spinlock protects both the list and the PHB bitmap */
	spin_lock(&hose_spinlock);
	phb->global_number = get_phb_number(dev);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only kfree()-able once the slab allocator is up */
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
138
/*
 * Tear down a controller allocated by pcibios_alloc_controller():
 * release its domain number, unlink it from hose_list, and free the
 * memory if it was slab-allocated.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
/*
 * Deferred variant of pcibios_free_controller(), for use as a
 * pci_host_bridge release callback: the controller pointer travels in
 * bridge->release_data and is freed once the host bridge goes away.
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
				 bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
190
191
192
193
194
195
196
197resource_size_t pcibios_window_alignment(struct pci_bus *bus,
198 unsigned long type)
199{
200 struct pci_controller *phb = pci_bus_to_host(bus);
201
202 if (phb->controller_ops.window_alignment)
203 return phb->controller_ops.window_alignment(bus, type);
204
205
206
207
208
209
210 return 1;
211}
212
213void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
214{
215 struct pci_controller *hose = pci_bus_to_host(bus);
216
217 if (hose->controller_ops.setup_bridge)
218 hose->controller_ops.setup_bridge(bus, type);
219}
220
221void pcibios_reset_secondary_bus(struct pci_dev *dev)
222{
223 struct pci_controller *phb = pci_bus_to_host(dev->bus);
224
225 if (phb->controller_ops.reset_secondary_bus) {
226 phb->controller_ops.reset_secondary_bus(dev);
227 return;
228 }
229
230 pci_reset_secondary_bus(dev);
231}
232
233resource_size_t pcibios_default_alignment(void)
234{
235 if (ppc_md.pcibios_default_alignment)
236 return ppc_md.pcibios_default_alignment();
237
238 return 0;
239}
240
#ifdef CONFIG_PCI_IOV
/*
 * Per-VF BAR alignment: defer to the platform hook when set,
 * otherwise use the plain per-VF resource size.
 */
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}

/* Platform hook run before enabling @num_vfs VFs; 0 means proceed. */
int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	if (ppc_md.pcibios_sriov_enable)
		return ppc_md.pcibios_sriov_enable(pdev, num_vfs);

	return 0;
}

/* Platform hook run when SR-IOV is disabled on @pdev. */
int pcibios_sriov_disable(struct pci_dev *pdev)
{
	if (ppc_md.pcibios_sriov_disable)
		return ppc_md.pcibios_sriov_disable(pdev);

	return 0;
}

#endif
267
/*
 * Size of a hose's IO window: ppc64 tracks it explicitly in
 * pci_io_size, ppc32 derives it from io_resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
276
277int pcibios_vaddr_is_ioport(void __iomem *address)
278{
279 int ret = 0;
280 struct pci_controller *hose;
281 resource_size_t size;
282
283 spin_lock(&hose_spinlock);
284 list_for_each_entry(hose, &hose_list, list_node) {
285 size = pcibios_io_size(hose);
286 if (address >= hose->io_base_virt &&
287 address < (hose->io_base_virt + size)) {
288 ret = 1;
289 break;
290 }
291 }
292 spin_unlock(&hose_spinlock);
293 return ret;
294}
295
/*
 * Translate a CPU physical address into an IO port token usable with
 * inb()/outb() style accessors, by finding the hose whose IO window
 * contains it.  Returns ~0 when no hose matches.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			/* Port token = virtual offset from _IO_BASE */
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
318
319
320
321
322int pci_domain_nr(struct pci_bus *bus)
323{
324 struct pci_controller *hose = pci_bus_to_host(bus);
325
326 return hose->global_number;
327}
328EXPORT_SYMBOL(pci_domain_nr);
329
330
331
332
333
334
335
336
337struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
338{
339 while(node) {
340 struct pci_controller *hose, *tmp;
341 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
342 if (hose->dn == node)
343 return hose;
344 node = node->parent;
345 }
346 return NULL;
347}
348
349struct pci_controller *pci_find_controller_for_domain(int domain_nr)
350{
351 struct pci_controller *hose;
352
353 list_for_each_entry(hose, &hose_list, list_node)
354 if (hose->global_number == domain_nr)
355 return hose;
356
357 return NULL;
358}
359
/*
 * Refcounted record of a legacy INTx virq shared by one or more PCI
 * devices; tracked on intx_list so the irq mapping can be disposed
 * when the last user device is removed.
 */
struct pci_intx_virq {
	int virq;
	struct kref kref;
	struct list_head list_node;
};

static LIST_HEAD(intx_list);
static DEFINE_MUTEX(intx_mutex);	/* protects intx_list */
368
/*
 * kref release for pci_intx_virq: unlink the record, dispose the irq
 * mapping, free the memory.  Called with intx_mutex held (kref_put in
 * ppc_pci_unmap_irq_line() runs under the mutex).
 */
static void ppc_pci_intx_release(struct kref *kref)
{
	struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);

	list_del(&vi->list_node);
	irq_dispose_mapping(vi->virq);
	kfree(vi);
}
377
/*
 * PCI bus notifier: when a device is removed, drop its reference on
 * the shared INTx virq record, disposing of the mapping if it was the
 * last user.
 */
static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);

	if (action == BUS_NOTIFY_DEL_DEVICE) {
		struct pci_intx_virq *vi;

		mutex_lock(&intx_mutex);
		list_for_each_entry(vi, &intx_list, list_node) {
			if (vi->virq == pdev->irq) {
				kref_put(&vi->kref, ppc_pci_intx_release);
				break;
			}
		}
		mutex_unlock(&intx_mutex);
	}

	return NOTIFY_DONE;
}

static struct notifier_block ppc_pci_unmap_irq_notifier = {
	.notifier_call = ppc_pci_unmap_irq_line,
};
402
/* Register the device-removal notifier above early in boot. */
static int ppc_pci_register_irq_notifier(void)
{
	return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
}
arch_initcall(ppc_pci_register_irq_notifier);
408
409
410
411
412
413
/*
 * Work out the interrupt for @pci_dev and store it in pci_dev->irq.
 *
 * Try the device-tree interrupt map first; when that yields nothing,
 * fall back to the PCI_INTERRUPT_PIN/LINE config registers and map
 * the line through the default IRQ domain as active-low level.  The
 * resulting virq is refcounted on intx_list so shared INTx mappings
 * are only disposed when the last user device is removed.
 *
 * Returns 0 on success, -1 on failure.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;
	struct pci_intx_virq *vi, *vitmp;

	/* Preallocate the tracking record outside of intx_mutex */
	vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
	if (!vi)
		return -1;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Preferred path: interrupt specifier from the device tree */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/*
		 * No DT mapping; fall back to the config-space
		 * PIN/LINE registers.  PIN 0 means "no interrupt",
		 * LINE 0 or 0xff mean unassigned/unknown.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			goto error_exit;
		if (pin == 0)
			goto error_exit;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			goto error_exit;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* Map into the default irq domain, active-low level */
		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		goto error_exit;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	/* Share an existing record for this virq, or install ours */
	mutex_lock(&intx_mutex);
	list_for_each_entry(vitmp, &intx_list, list_node) {
		if (vitmp->virq == virq) {
			kref_get(&vitmp->kref);
			kfree(vi);
			vi = NULL;
			break;
		}
	}
	if (vi) {
		vi->virq = virq;
		kref_init(&vi->kref);
		list_add_tail(&vi->list_node, &intx_list);
	}
	mutex_unlock(&intx_mutex);

	return 0;
error_exit:
	kfree(vi);
	return -1;
}
484
485
486
487
488
/*
 * Convert an IO BAR's resource address into the vma page offset used
 * by the generic PCI mmap path: translate the port token back into a
 * CPU physical address within this hose's IO window.
 */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;

	/* Convert to an offset within this PCI controller */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}
503
504
505
506
507
508
/*
 * Choose the page protection for a /dev/mem style mapping of @pfn:
 * RAM keeps the given @prot; everything else is made non-cached,
 * upgraded to write-combine when the pfn falls inside a prefetchable
 * PCI memory BAR.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Only interested in memory BARs */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			/* Page-align the resource start for the test */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Drop the ref for_each_pci_dev() held on early break */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
552
553
/*
 * Read legacy ISA-space IO port @port behind @bus (sysfs legacy_io
 * support).  Returns the number of bytes read, or a negative errno if
 * the port is outside the hose's IO window or misaligned.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * The io_resource is in "IO port token" space (offset of the
	 * virtual base from _IO_BASE), so translate the port before
	 * the bounds check.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
592
593
/*
 * Write legacy ISA-space IO port @port behind @bus (sysfs legacy_io
 * support).  Returns the number of bytes written or a negative errno.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * The io_resource is in "IO port token" space, so translate
	 * the port before the bounds check (see pci_legacy_read()).
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/*
	 * @val arrives left-justified in the 32-bit word (byte in the
	 * top byte, word in the top half), hence the shifts below.
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
637
638
/* Support for mmap'ing a bus's legacy memory/IO space (procfs/sysfs). */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/*
		 * Hoses without legacy ISA memory cannot back this
		 * mapping with real pages.  Rather than failing, hand
		 * out anonymous zero pages for shared mappings and
		 * otherwise leave the vma unpopulated.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Validate the port range against the hose IO window */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
690
/*
 * Report a resource range to userspace (sysfs "resource" files).
 * IO ranges are converted back to bus (port) numbers; memory ranges
 * are reported in CPU physical terms so that mmap of the sysfs
 * resource files works with the host bridge aperture.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* Memory resources stay in CPU physical address terms */
	*start = rsrc->start;
	*end = rsrc->end;
}
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, no OF probing)
 *
 * Parse the "ranges" property of a PCI host bridge device node and
 * set up the controller's IO and memory windows, offsets, and (when
 * applicable) the ISA memory/IO bases from its content.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/*
		 * Skip ranges that failed translation or are
		 * zero-sized.
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range per hose */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit the IO space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs the IO space mapped here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Primary bus provides the legacy ISA IO base */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif
			/*
			 * pci_io_size and io_base_phys represent an IO
			 * space starting at PCI address 0, so factor
			 * pci_addr into both.
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build the resource in PCI address terms */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.flags & IORESOURCE_PREFETCH) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges per hose */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* A range at PCI address 0 is the ISA mem hole */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Record the CPU->PCI offset of this window */
			hose->mem_offset[memno] = range.cpu_addr -
						  range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}
841
842
843int pci_proc_domain(struct pci_bus *bus)
844{
845 struct pci_controller *hose = pci_bus_to_host(bus);
846
847 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
848 return 0;
849 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
850 return hose->global_number != 0;
851 return 1;
852}
853
854int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
855{
856 if (ppc_md.pcibios_root_bridge_prepare)
857 return ppc_md.pcibios_root_bridge_prepare(bridge);
858
859 return 0;
860}
861
862
863
864
/*
 * Header fixup run on every PCI device: resources that firmware left
 * unassigned (bus address 0, unless PROBE_ONLY) or that we intend to
 * reassign wholesale are marked unset and zero-based so the
 * allocation code below gives them fresh addresses.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	/* VF BARs are managed through the PF; nothing to do here */
	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/*
		 * Reassigning everything, or BAR decodes to bus
		 * address 0 and we're not in PROBE_ONLY mode: flag it
		 * unset and zero-base it for later assignment.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {

			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Platform-specific per-device fixup hook */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
911
912
913
914
915
916
/*
 * Heuristic: decide whether a bridge window read back from the
 * hardware was really set up by firmware or is just power-on
 * garbage.  Returns 1 if the resource should be treated as
 * uninitialized and discarded, 0 if it looks valid.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* In PROBE_ONLY mode we trust whatever firmware left behind */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* The test differs between memory and IO windows */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* A non-zero bus address is assumed to be valid */
		if (region.start != 0)
			return 0;

		/*
		 * Bus address is 0: if memory decoding is disabled on
		 * the bridge, it clearly was not configured.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/*
		 * Decoding is enabled with a window at bus address 0.
		 * That is only plausible if one of the hose memory
		 * windows itself starts at bus address 0.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/*
		 * It starts at 0 and will collide with host space, so
		 * treat it as unassigned.
		 */
		return 1;
	} else {
		/* Non-zero port (mod 4G in token space) looks valid */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/*
		 * Port 0: trust it only if the bridge actually has IO
		 * decoding enabled.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* IO decoding is off; the window is power-on garbage */
		return 1;
	}
}
981
982
/* Fixup the windows of a PCI<->PCI bridge after probing. */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		/* Windows 3+ of a transparent bridge pass through */
		if (i >= 3 && bus->self->transparent)
			continue;

		/*
		 * If we're reassigning everything, mark the window
		 * unset and open it to the maximum range so the
		 * allocator can size it from scratch.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/*
		 * Drop windows that look like uninitialized firmware
		 * leftovers (see pcibios_uninitialized_bridge_resource()).
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}
1018
1019void pcibios_setup_bus_self(struct pci_bus *bus)
1020{
1021 struct pci_controller *phb;
1022
1023
1024 if (bus->self != NULL)
1025 pcibios_fixup_bridge(bus);
1026
1027
1028
1029
1030 if (ppc_md.pcibios_fixup_bus)
1031 ppc_md.pcibios_fixup_bus(bus);
1032
1033
1034 phb = pci_bus_to_host(bus);
1035 if (phb->controller_ops.dma_bus_setup)
1036 phb->controller_ops.dma_bus_setup(bus);
1037}
1038
/*
 * Per-device setup when a device is added to its bus: NUMA node,
 * default DMA ops and offset, controller DMA hook, IRQ line lookup,
 * and the platform hooks.
 */
void pcibios_bus_add_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	/* Propagate the bus's NUMA node to the device */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);

	if (ppc_md.pcibios_bus_add_device)
		ppc_md.pcibios_bus_add_device(dev);
}
1064
/* Per-device add hook; applies the platform SR-IOV fixup if any. */
int pcibios_add_device(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif

	return 0;
}
1074
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus-mastering setup needed on powerpc */
}
1079
/*
 * Generic bus fixup called by the PCI core after scanning each bus:
 * refresh the bridge windows from hardware, then run our own self
 * fixups.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/*
	 * Read PCI<->PCI bridge bases from hardware; only meaningful
	 * when called from the generic probe path.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1092
1093static int skip_isa_ioresource_align(struct pci_dev *dev)
1094{
1095 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1096 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1097 return 1;
1098 return 0;
1099}
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1115 resource_size_t size, resource_size_t align)
1116{
1117 struct pci_dev *dev = data;
1118 resource_size_t start = res->start;
1119
1120 if (res->flags & IORESOURCE_IO) {
1121 if (skip_isa_ioresource_align(dev))
1122 return start;
1123 if (start & 0x300)
1124 start = (start + 0x3ff) & ~0x3ff;
1125 }
1126
1127 return start;
1128}
1129EXPORT_SYMBOL(pcibios_align_resource);
1130
1131
1132
1133
1134
/*
 * Move those children of @parent that fall entirely inside @res so
 * they become children of @res instead.  Fails (-1) if any child only
 * partially overlaps @res, or if nothing overlaps at all.
 */
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Scan the child list for the run of entries inside @res */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* only partial overlap */
		if (firstpp == NULL)
			firstpp = pp;	/* first overlapping entry */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries */
	/* Splice @res into the sibling chain in place of that run */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
/*
 * Walk @bus and its children, requesting each valid PHB/bridge window
 * from its parent resource.  When a plain request fails we try to
 * reparent conflicting entries under the window, then to claim
 * through the bridge; windows that still can't be allocated are
 * cleared so they can be reassigned later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* An unset resource cannot be allocated as-is */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			/* Root bus: parent is the global IO/mem space */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/*
				 * The PCI core flagged this window of
				 * a transparent bridge as
				 * pass-through; nothing to request.
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
			 i, res, pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/*
			 * Conflict with an existing entry: try moving
			 * the conflicting entries under this window.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			/* Last resort: claim through the bridge */
			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
			i, bus->number);
	clear_resource:
		/*
		 * The resource could not be allocated: clear it so it
		 * won't be requested again and a fresh range can be
		 * assigned later.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1269
/*
 * Try to claim resource @idx of @dev from its parent resource; on
 * failure mark it unset and zero-base it so it can be reassigned.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
		 pci_name(dev), idx, r);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %pR\n", pr, pr);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1290
/*
 * Two-pass allocation of device BARs: pass 0 claims resources whose
 * decode is currently enabled, pass 1 claims the rest.  Pass 0 also
 * switches off any firmware-enabled expansion ROMs.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/*
			 * NOTE(review): this store is immediately
			 * overwritten below, so ROM BARs end up being
			 * classified by the MEMORY decode bit like any
			 * other BAR.  Looks like a dead assignment —
			 * confirm whether ROMs were meant to always go
			 * in pass 1.
			 */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/*
			 * Firmware left the expansion ROM enabled:
			 * turn off its decode and drop the enable flag.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1337
/*
 * Reserve the legacy IO ports (0-0xfff) and VGA memory window
 * (0xa0000-0xbffff) of each PHB so the allocator can't hand them to
 * devices.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Find a memory window covering the legacy VGA range */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1393
1394void __init pcibios_resource_survey(void)
1395{
1396 struct pci_bus *b;
1397
1398
1399 list_for_each_entry(b, &pci_root_buses, node)
1400 pcibios_allocate_bus_resources(b);
1401 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1402 pcibios_allocate_resources(0);
1403 pcibios_allocate_resources(1);
1404 }
1405
1406
1407
1408
1409
1410 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1411 list_for_each_entry(b, &pci_root_buses, node)
1412 pcibios_reserve_legacy_regions(b);
1413 }
1414
1415
1416
1417
1418 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1419 pr_debug("PCI: Assigning unassigned resources...\n");
1420 pci_assign_unassigned_resources();
1421 }
1422}
1423
1424
1425
1426
1427
1428
/*
 * Claim the existing (firmware-assigned) resources of every device on
 * @bus, recursing into child buses.  Used on platforms and hotplug
 * paths where resources are already fully assigned.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip claimed, zero-start, or flagless BARs */
			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
				 pci_name(dev), i, r);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			/* Fall back to claiming through the bridge */
			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1457
1458
1459
1460
1461
1462
1463
1464
/*
 * Called after a hotplug operation has added devices under @bus:
 * allocate/claim their resources, assign anything missing (unless
 * PROBE_ONLY), then add the devices to the device model.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		if (bus->self)
			pci_assign_unassigned_bridge_resources(bus->self);
		else
			pci_assign_unassigned_bus_resources(bus);
	}

	/* Add new devices to global lists; register in proc, sysfs */
	pci_bus_add_devices(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1484
1485int pcibios_enable_device(struct pci_dev *dev, int mask)
1486{
1487 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1488
1489 if (phb->controller_ops.enable_device_hook)
1490 if (!phb->controller_ops.enable_device_hook(dev))
1491 return -EINVAL;
1492
1493 return pci_enable_resources(dev, mask);
1494}
1495
1496void pcibios_disable_device(struct pci_dev *dev)
1497{
1498 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1499
1500 if (phb->controller_ops.disable_device)
1501 phb->controller_ops.disable_device(dev);
1502}
1503
1504resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1505{
1506 return (unsigned long) hose->io_base_virt - _IO_BASE;
1507}
1508
/*
 * Populate @resources with the IO and memory windows of @hose,
 * including their CPU->bus offsets, for host bridge registration.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		pr_debug("PCI: I/O resource not set for host"
			 " bridge %pOF (domain %d)\n",
			 hose->dn, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
			 res, (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags)
			continue;

		offset = hose->mem_offset[i];
		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
			 res, (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}
1544
1545
1546
1547
1548
/*
 * Generator for per-size config-space stubs that always report
 * PCIBIOS_DEVICE_NOT_FOUND.
 * NOTE(review): no instantiations of this macro are visible in this
 * part of the file — possibly historical/unused; confirm before removal.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1555
/* Config read op for the fake bus: always fails with "device not found". */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1562
/* Config write op for the fake bus: always fails with "device not found". */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1569
/* Ops used by fake_pci_bus() when no hose could be found for a bus. */
static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};
1575
1576
1577
1578
1579
1580static struct pci_bus *
1581fake_pci_bus(struct pci_controller *hose, int busnr)
1582{
1583 static struct pci_bus bus;
1584
1585 if (hose == NULL) {
1586 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1587 }
1588 bus.number = busnr;
1589 bus.sysdata = hose;
1590 bus.ops = hose? hose->ops: &null_pci_ops;
1591 return &bus;
1592}
1593
/*
 * Generate the early config-space accessors
 * early_{read,write}_config_{byte,word,dword}(), which take a
 * pci_controller plus raw bus number instead of a struct pci_bus and
 * route through fake_pci_bus() to the generic pci_bus_* helpers.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
					    devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1608
/* Early (pre-enumeration) capability lookup, routed via the fake bus. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1614
1615struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1616{
1617 struct pci_controller *hose = bus->sysdata;
1618
1619 return of_node_get(hose->dn);
1620}
1621
1622
1623
1624
1625
/*
 * pcibios_scan_phb - scan one PCI host bridge and build its bus tree
 * @hose: the host bridge to scan
 *
 * Sets up the PHB's I/O space and resource windows, creates the root
 * bus, probes the children (from the device tree or by normal config
 * cycles, per the controller's probe_mode hook), then runs the
 * platform's PHB fixup and configures PCIe bus settings.
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %pOF\n", node);

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB bus resources */
	pcibios_setup_phb_resources(hose, &resources);

	/* Publish the PHB's bus-number range as a resource too */
	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
			hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
			hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform the scan */
	mode = PCI_PROBE_NORMAL;
	if (node && hose->controller_ops.probe_mode)
		mode = hose->controller_ops.probe_mode(bus);
	pr_debug(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		/*
		 * Widen the bus range to the maximum before scanning,
		 * then clamp it to what the scan actually found.
		 */
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/*
	 * Platform gets a chance at some global fixups before resource
	 * allocation proceeds.
	 */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings on each child bus */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1685
/*
 * Quirk for Motorola/Freescale parts: zero out all resources of devices
 * that look like the host controller itself (on the root bus, normal
 * config header, class "PowerPC processor" or "other bridge", prog-if
 * 0) — presumably so the PCI core never tries to claim or reassign the
 * host bridge's own BARs.  NOTE(review): confirm the prog-if semantics
 * against the specific Freescale controller manuals.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* Low nibble of the programming interface; 0 selects host mode here */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1706