1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/of_address.h>
32
33#include <asm/processor.h>
34#include <asm/io.h>
35#include <asm/pci-bridge.h>
36#include <asm/byteorder.h>
37
/* Protects hose_list and global_phb_number. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);	/* All registered PCI host bridges ("hoses"). */

/* Next PCI domain number handed out by pcibios_alloc_controller(). */
static int global_phb_number;

/* CPU physical base of the ISA memory window, if any. */
resource_size_t isa_mem_base;

/* Global PCI_* probe/behaviour flags (PCI_PROBE_ONLY, ...). */
unsigned int pci_flags;

/* DMA ops installed on every PCI device; defaults to direct mapping. */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

/* Override the DMA ops used for subsequently-probed PCI devices. */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

/* Return the DMA ops currently used for PCI devices. */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
62
/*
 * Allocate a new PCI host bridge (controller), assign it the next
 * global domain number and link it onto hose_list.
 *
 * @dev: device-tree node of the host bridge (may be NULL).
 * Returns the zeroed controller, or NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* May run before the slab allocator is up, hence maybe-bootmem. */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only kfree()-able if allocated after mem_init (not bootmem). */
	phb->is_dynamic = mem_init_done;
	return phb;
}
78
/*
 * Unlink @phb from hose_list and free it if it was dynamically
 * allocated (bootmem-allocated controllers are never freed).
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
88
89static resource_size_t pcibios_io_size(const struct pci_controller *hose)
90{
91 return hose->io_resource.end - hose->io_resource.start + 1;
92}
93
94int pcibios_vaddr_is_ioport(void __iomem *address)
95{
96 int ret = 0;
97 struct pci_controller *hose;
98 resource_size_t size;
99
100 spin_lock(&hose_spinlock);
101 list_for_each_entry(hose, &hose_list, list_node) {
102 size = pcibios_io_size(hose);
103 if (address >= hose->io_base_virt &&
104 address < (hose->io_base_virt + size)) {
105 ret = 1;
106 break;
107 }
108 }
109 spin_unlock(&hose_spinlock);
110 return ret;
111}
112
113unsigned long pci_address_to_pio(phys_addr_t address)
114{
115 struct pci_controller *hose;
116 resource_size_t size;
117 unsigned long ret = ~0;
118
119 spin_lock(&hose_spinlock);
120 list_for_each_entry(hose, &hose_list, list_node) {
121 size = pcibios_io_size(hose);
122 if (address >= hose->io_base_phys &&
123 address < (hose->io_base_phys + size)) {
124 unsigned long base =
125 (unsigned long)hose->io_base_virt - _IO_BASE;
126 ret = base + (address - hose->io_base_phys);
127 break;
128 }
129 }
130 spin_unlock(&hose_spinlock);
131
132 return ret;
133}
134EXPORT_SYMBOL_GPL(pci_address_to_pio);
135
136
137
138
139int pci_domain_nr(struct pci_bus *bus)
140{
141 struct pci_controller *hose = pci_bus_to_host(bus);
142
143 return hose->global_number;
144}
145EXPORT_SYMBOL(pci_domain_nr);
146
147
148
149
150
151
152
153
/*
 * Walk up the device tree from @node looking for a node that belongs
 * to a registered PCI host bridge; return that controller, or NULL.
 *
 * NOTE(review): hose_list is walked here without hose_spinlock —
 * presumably callers only run once the list is stable; confirm.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
165
/* sysfs "devspec" attribute: the device's open-firmware path. */
static ssize_t pci_show_devspec(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

/* Add arch-specific sysfs attributes to a newly-added PCI device. */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}

/* "pci=" command-line hook: no options handled here, pass through. */
char __devinit *pcibios_setup(char *str)
{
	return str;
}
190
191
192
193
194
195
/*
 * pci_read_irq_line - hook up a PCI device's interrupt
 *
 * Try the device-tree interrupt map first; if the device has no map
 * entry, fall back to the PCI_INTERRUPT_PIN/LINE config registers.
 * On success the resulting Linux virq is stored in pci_dev->irq.
 * Returns 0 on success, -1 if no usable interrupt was found.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif

	/* Non-zero return: the device tree had no interrupt mapping. */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* Fall back to what firmware left in config space. */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* Map the raw line through the default irq host;
		 * assume level-low like legacy PCI interrupts. */
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
280 resource_size_t *offset,
281 enum pci_mmap_state mmap_state)
282{
283 struct pci_controller *hose = pci_bus_to_host(dev->bus);
284 unsigned long io_offset = 0;
285 int i, res_bit;
286
287 if (hose == 0)
288 return NULL;
289
290
291 if (mmap_state == pci_mmap_mem) {
292#if 0
293 *offset += hose->pci_mem_offset;
294#endif
295 res_bit = IORESOURCE_MEM;
296 } else {
297 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
298 *offset += io_offset;
299 res_bit = IORESOURCE_IO;
300 }
301
302
303
304
305
306 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
307 struct resource *rp = &dev->resource[i];
308 int flags = rp->flags;
309
310
311 if (i == PCI_ROM_RESOURCE)
312 flags |= IORESOURCE_MEM;
313
314
315 if ((flags & res_bit) == 0)
316 continue;
317
318
319 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
320 continue;
321
322
323 if (mmap_state == pci_mmap_io)
324 *offset += hose->io_base_phys - io_offset;
325 return rp;
326 }
327
328 return NULL;
329}
330
331
332
333
334
/*
 * Compute the page protection to use when mmap'ing a PCI resource.
 *
 * NOTE(review): the write_combine computation below is dead — the
 * function unconditionally returns pgprot_noncached(); presumably
 * write-combining is simply not implemented on this platform.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	pgprot_t prot = protection;

	/* Write combine is only meaningful for real memory mappings;
	 * prefetchable resources would be the natural candidates. */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	return pgprot_noncached(prot);
}
357
358
359
360
361
362
/*
 * pci_phys_mem_access_prot - page protection for /dev/mem-style access
 *
 * RAM keeps the caller's protection; anything else is mapped
 * non-cached, or write-combined when it falls inside a prefetchable
 * PCI memory resource.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active memory resource? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			/* Inside this resource's range? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Breaking out of for_each_pci_dev() left us holding a
		 * reference on pdev — drop it. */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
406
407
408
409
410
411
412
413
414
415
416
/*
 * pci_mmap_page_range - mmap a PCI BAR or IO range into user space
 *
 * Validates the vma offset against the device's resources, converts
 * it to a CPU physical offset and remaps it non-cached.
 * Returns 0 on success or a negative errno.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
439
440
/*
 * pci_legacy_read - read from the legacy (ISA-compatible) I/O space
 * behind @bus.  @size must be 1, 2 or 4 and @port suitably aligned.
 * Returns the number of bytes read or a negative errno.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check that the port is inside the PHB's I/O window.  We only
	 * check the PHB ranges, not the bus itself: forwarding rules
	 * for legacy cycles down bridges are not our problem here.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
479
480
/*
 * pci_legacy_write - write to the legacy (ISA-compatible) I/O space
 * behind @bus.  @size must be 1, 2 or 4 and @port suitably aligned.
 * Returns the number of bytes written or a negative errno.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check that the port is inside the PHB's I/O window.  We only
	 * check the PHB ranges, not the bus itself: forwarding rules
	 * for legacy cycles down bridges are not our problem here.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* The generic caller always passes the value as a u32, so for
	 * sizes 1 and 2 the payload sits in the high bits and has to be
	 * shifted down before being written out.
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
524
525
/*
 * pci_mmap_legacy_page_range - map legacy memory or IO space to user
 *
 * For legacy memory outside the ISA memory window we map anonymous
 * zero pages instead, so that X keeps working even when the window
 * does not exist.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* If the request is outside the ISA memory window, hand
		 * back anonymous zeroed memory (like /dev/zero) rather
		 * than failing — X refuses to start on an mmap error.
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			printk(KERN_DEBUG
				"Process %s (pid:%d) mapped non-existing PCI"
				"legacy memory for 0%04x:%02x\n",
				current->comm, current->pid, pci_domain_nr(bus),
				bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* IO: bounds-check against the PHB's IO resource, then
		 * rebase the offset onto the physical IO window. */
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
582
/*
 * Convert a resource to the start/end values exposed to user space
 * (e.g. via sysfs).  IO resources are reported bus-relative, hence
 * the io_base_virt offset; MMIO is deliberately left CPU-physical
 * (the pci_mem_offset subtraction stays disabled below) so the value
 * can be fed straight to /dev/mem by X.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* Intentionally disabled — see the header comment; keep in sync
	 * with the matching #if 0 in __pci_mmap_make_offset(). */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
/*
 * pci_process_bridge_OF_ranges - parse a host bridge's "ranges"
 * property and fill in the controller's IO/memory resources.
 *
 * @hose: controller to fill in
 * @dev: device-tree node of the bridge
 * @primary: this bridge provides the legacy ISA IO/memory windows
 *
 * One IO window (capped at 16MB and ioremapped here) and up to three
 * memory windows are supported; extra ranges are skipped.  A memory
 * range at PCI address 0 is remembered as the ISA memory hole and is
 * dropped again at the end if keeping it would conflict with the
 * chosen pci_mem_offset.
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					struct device_node *dev,
					int primary)
{
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);
	int np = pna + 5;	/* cells per "ranges" entry */
	int memno = 0, isa_hole = -1;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	unsigned long long isa_mb = 0;
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	pr_debug("Parsing ranges property...\n");
	while ((rlen -= np * 4) >= 0) {
		/* Read the next ranges element */
		pci_space = ranges[0];
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);

		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
			 "cpu_addr:0x%016llx size:0x%016llx\n",
			 pci_space, pci_addr, cpu_addr, size);

		ranges += np;

		/* Skip failed translations and zero-sized regions (some
		 * firmware exposes nonsensical zero-sized ranges). */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Coalesce contiguous follow-up entries of the same type.
		 * NOTE(review): rlen is signed, np * sizeof(u32) is
		 * unsigned, so rlen is promoted here — relies on rlen
		 * being non-negative at this point; confirm.
		 */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (ranges[0] != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Limit IO space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Primary bridge provides the legacy ISA IO base */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * starting at PCI address 0 so the rest of the code
			 * can do its arithmetic uniformly. */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI memory space */
		case 3:		/* PCI 64-bit memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* A range at PCI address 0 is the ISA memory hole */
			if (pci_addr == 0) {
				isa_mb = cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* Take the mem offset from the first range, or from
			 * the current one if the first came from an ISA
			 * hole.  Mismatching offsets cannot be represented
			 * and are skipped. */
			if (memno == 0 ||
			    (isa_hole >= 0 && pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = cpu_addr - pci_addr;
			else if (pci_addr != 0 &&
				 hose->pci_mem_offset != cpu_addr - pci_addr) {
				printk(KERN_INFO
				       " \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}

	/* If there is an ISA hole and pci_mem_offset does not match the
	 * hole's offset, the hole cannot be kept — remove it from the
	 * bridge's memory resource list. */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
818
819
820int pci_proc_domain(struct pci_bus *bus)
821{
822 struct pci_controller *hose = pci_bus_to_host(bus);
823
824 if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS))
825 return 0;
826 if (pci_flags & PCI_COMPAT_DOMAIN_0)
827 return hose->global_number != 0;
828 return 1;
829}
830
831void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
832 struct resource *res)
833{
834 resource_size_t offset = 0, mask = (resource_size_t)-1;
835 struct pci_controller *hose = pci_bus_to_host(dev->bus);
836
837 if (!hose)
838 return;
839 if (res->flags & IORESOURCE_IO) {
840 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
841 mask = 0xffffffffu;
842 } else if (res->flags & IORESOURCE_MEM)
843 offset = hose->pci_mem_offset;
844
845 region->start = (res->start - offset) & mask;
846 region->end = (res->end - offset) & mask;
847}
848EXPORT_SYMBOL(pcibios_resource_to_bus);
849
850void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
851 struct pci_bus_region *region)
852{
853 resource_size_t offset = 0, mask = (resource_size_t)-1;
854 struct pci_controller *hose = pci_bus_to_host(dev->bus);
855
856 if (!hose)
857 return;
858 if (res->flags & IORESOURCE_IO) {
859 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
860 mask = 0xffffffffu;
861 } else if (res->flags & IORESOURCE_MEM)
862 offset = hose->pci_mem_offset;
863 res->start = (region->start + offset) & mask;
864 res->end = (region->end + offset) & mask;
865}
866EXPORT_SYMBOL(pcibios_bus_to_resource);
867
868
869static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
870{
871 struct pci_controller *hose = pci_bus_to_host(dev->bus);
872 resource_size_t offset = 0, mask = (resource_size_t)-1;
873
874 if (res->flags & IORESOURCE_IO) {
875 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
876 mask = 0xffffffffu;
877 } else if (res->flags & IORESOURCE_MEM)
878 offset = hose->pci_mem_offset;
879
880 res->start = (res->start + offset) & mask;
881 res->end = (res->end + offset) & mask;
882}
883
884
885
886
/*
 * Header fixup for every PCI device: rebase its resources from
 * bus-relative to CPU-relative, and mark BARs left at 0 by firmware
 * as unassigned so they get (re)allocated later.
 */
static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		/* With PCI_PROBE_ONLY, 0 is a legitimate BAR value and
		 * must not be treated as "unassigned". */
		if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
				 "is unassigned\n",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		fixup_resource(res, dev);

		pr_debug("PCI:%s            %016llx-%016llx\n",
			 pci_name(dev),
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
935
936
937
938
939
940
/*
 * Heuristic: decide whether a P2P bridge window read back from the
 * bridge is garbage left by firmware (returns 1, so it will be
 * cleared and re-assigned) or a genuinely configured window
 * (returns 0).
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
							   struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* We don't touch anything when PCI_PROBE_ONLY is set */
	if (pci_flags & PCI_PROBE_ONLY)
		return 0;

	/* The test differs between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* A non-0 BAR (start != pci_mem_offset) was probably
		 * set up by somebody — treat it as assigned. */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* BAR is 0: if memory decoding is off on the bridge,
		 * consider the window unassigned. */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is on and the BAR is 0.  If one of the
		 * hose windows legitimately starts at pci_mem_offset,
		 * a 0 start can be valid. */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Starts at 0 and would collide with the memory offset —
		 * consider it unassigned. */
		return 1;
	} else {
		/* IO: a non-0 (bus-relative) base means assigned. */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Unlike memory, IO space starting at low addresses is
		 * typically valid.  Instead, treat the window as
		 * unassigned only when IO decoding is disabled in the
		 * bridge's command register. */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* Everything else is considered unassigned */
		return 1;
	}
}
1005
1006
/* Fix up the windows of a P2P bridge bus after probing. */
static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		/* NOTE(review): res is immediately re-read from
		 * bus->resource[i], making the iterator's own value
		 * redundant — presumably a leftover from converting to
		 * pci_bus_for_each_resource(); confirm before touching. */
		res = bus->resource[i];
		if (!res)
			continue;
		if (!res->flags)
			continue;
		/* Windows past the first three don't exist on a
		 * transparent bridge. */
		if (i >= 3 && bus->self->transparent)
			continue;

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		/* Rebase from bus-relative to CPU-relative */
		fixup_resource(res, dev);

		/* Detect uninitialized bridge windows and clear them so
		 * they get re-assigned later. */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s            (unassigned)\n",
				 pci_name(dev));
		} else {
			pr_debug("PCI:%s            %016llx-%016llx\n",
				 pci_name(dev),
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
		}
	}
}
1047
/* Fix up @bus itself: bridge windows, if it hangs off a P2P bridge. */
void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
{
	/* Root buses (bus->self == NULL) have nothing to fix here. */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);
}
1054
/*
 * Per-device setup when a bus is scanned: hook up the OF node, NUMA
 * node, DMA ops and interrupt line of every device on @bus.
 */
void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Attach the matching device-tree node, if any */
		dev->dev.of_node = pci_device_to_OF_node(dev);

		/* Fix the NUMA node early — the generic code may not
		 * have set it yet and DMA init needs it. */
		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

		/* Install the platform's default DMA ops */
		set_dma_ops(&dev->dev, pci_dma_ops);
		dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;

		/* Wire up the device's interrupt line */
		pci_read_irq_line(dev);
	}
}
1079
/* Generic PCI core callback: fix up a freshly (re)scanned bus. */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	/* For buses behind a bridge, read the bridge window bases
	 * before fixing anything up. */
	if (bus->self != NULL)
		pci_read_bridge_bases(bus);

	/* Fix up the bus's own (bridge) resources */
	pcibios_setup_bus_self(bus);

	/* Then fix up the devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1096
1097static int skip_isa_ioresource_align(struct pci_dev *dev)
1098{
1099 if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) &&
1100 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1101 return 1;
1102 return 0;
1103}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1119 resource_size_t size, resource_size_t align)
1120{
1121 struct pci_dev *dev = data;
1122 resource_size_t start = res->start;
1123
1124 if (res->flags & IORESOURCE_IO) {
1125 if (skip_isa_ioresource_align(dev))
1126 return start;
1127 if (start & 0x300)
1128 start = (start + 0x3ff) & ~0x3ff;
1129 }
1130
1131 return start;
1132}
1133EXPORT_SYMBOL(pcibios_align_resource);
1134
1135
1136
1137
1138
/*
 * reparent_resources - move children of @parent that lie inside @res
 * under @res, and insert @res in their place.
 *
 * Returns 0 on success; -1 if some child only partially overlaps @res
 * (cannot be cleanly nested) or no child conflicts at all.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* first child inside res */
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries */
	/* Splice res into the sibling chain in place of the run of
	 * contained children [*firstpp .. p) and adopt that run. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
/*
 * Recursively claim the bridge/PHB windows of @bus and its children
 * from their parent resources.  Windows that cannot be claimed (or
 * that we intend to re-assign wholesale) are cleared so they get
 * re-allocated later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		res = bus->resource[i];
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			/* Root bus windows come from the global pools */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* When re-assigning everything, don't bother
			 * claiming non-root windows — clear them as if
			 * they were colliding so they get re-allocated. */
			if (pci_flags & PCI_REASSIGN_ALL_RSRC)
				goto clear_resource;
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* Happens when the generic PCI code
				 * (wrongly) decides the bridge is
				 * transparent. */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			 "[0x%x], parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/* Conflict with existing entries: move them under
			 * this window and retry. */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		printk(KERN_WARNING "PCI: Cannot allocate resource region "
		       "%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1271
/*
 * Try to claim resource @idx of @dev from its parent resource; on
 * failure mark it IORESOURCE_UNSET and rebase it to 0 so a new
 * address gets assigned later.
 */
static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1299
/*
 * Claim device BARs in two passes: pass 0 handles resources whose
 * decode is enabled in PCI_COMMAND, pass 1 the disabled ones.  On
 * pass 0 any enabled expansion ROM is also switched off.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)	/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* NOTE(review): the ROM "disabled = 1" below is a
			 * dead store — it is unconditionally overwritten
			 * by the following if/else; presumably an
			 * else-chain was intended.  Confirm before
			 * changing behavior. */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off: keep the resource region but
			 * clear the enable bit in config space. */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
1346
/*
 * Reserve the legacy ISA IO range (0x000-0xfff) and the VGA memory
 * range (0xa0000-0xbffff) of a root bus so that nothing else gets
 * assigned there.  Failures are logged but non-fatal.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory: find the window that covers the VGA range */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1404
1405void __init pcibios_resource_survey(void)
1406{
1407 struct pci_bus *b;
1408
1409
1410
1411
1412 list_for_each_entry(b, &pci_root_buses, node)
1413 pcibios_allocate_bus_resources(b);
1414
1415 if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) {
1416 pcibios_allocate_resources(0);
1417 pcibios_allocate_resources(1);
1418 }
1419
1420
1421
1422
1423
1424 if (!(pci_flags & PCI_PROBE_ONLY)) {
1425 list_for_each_entry(b, &pci_root_buses, node)
1426 pcibios_reserve_legacy_regions(b);
1427 }
1428
1429
1430
1431
1432 if (!(pci_flags & PCI_PROBE_ONLY)) {
1433 pr_debug("PCI: Assigning unassigned resources...\n");
1434 pci_assign_unassigned_resources();
1435 }
1436}
1437
1438#ifdef CONFIG_HOTPLUG
1439
1440
1441
1442
1443
1444
/*
 * Recursively claim (insert into the resource tree) every already-
 * assigned device resource below @bus.  Used on hotplug add, where
 * firmware has done the assignment for us.
 */
void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip already-claimed or unassigned resources */
			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: "
				 "Resource %d: %016llx..%016llx [%x]\n",
				 pci_name(dev), i,
				 (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1474
1475
1476
1477
1478
1479
1480
1481
/*
 * pcibios_finish_adding_to_bus - final steps for a hotplug-added bus
 *
 * Allocates/claims resources for everything below @bus, then
 * registers the new devices with the rest of the kernel.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and device resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to the global lists; register in proc/sysfs */
	pci_bus_add_devices(bus);

	/* No further platform fixup needed here */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1498
1499#endif
1500
/* Enable decoding of the resources selected by @mask on @dev. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
1505
/*
 * Hook the host bridge's IO and memory windows onto its root bus,
 * synthesizing fallback windows (with a warning) when firmware did
 * not provide them.
 */
void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
{
	struct pci_bus *bus = hose->bus;
	struct resource *res;
	int i;

	/* Hook up the PHB IO resource as bus resource 0 */
	bus->resource[0] = res = &hose->io_resource;

	if (!res->flags) {
		/* Synthesize a full-size IO window as a fallback */
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);

		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hook up the PHB memory resources as bus resources 1..3 */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			printk(KERN_ERR "PCI: Memory resource 0 not set for "
			       "host bridge %s (domain %d)\n",
			       hose->dn->full_name, hose->global_number);

			/* Synthesize an all-of-memory window */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		bus->resource[i+1] = res;

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
1559
1560
1561
1562
1563
/*
 * Config-space accessors that always fail; used when no hose can be
 * found for an early config access.
 * NOTE(review): the NULL_PCI_OP() generator macro appears unused in
 * this file — the null accessors below are written out by hand.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
1589
1590
1591
1592
1593
/*
 * Build a throwaway pci_bus for the early_* config accessors below,
 * before the real bus structures exist.  Falls back to null_pci_ops
 * when no hose is supplied.
 *
 * NOTE(review): returns a pointer to a single static pci_bus, so this
 * is only safe single-threaded at early boot.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
1607
/* Early (pre-bus-scan) config space accessors, keyed by hose + bus. */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
		int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
			devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

/* Find a PCI capability before the bus structures are set up. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1628