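/*
 * Common PCI support code: host bridge (PHB) bookkeeping, legacy I/O and
 * memory access helpers, device tree "ranges" parsing, and PCI resource
 * allocation for flattened-device-tree based platforms.
 */
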
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <linux/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>

static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* Global PHB counter, used to assign hose->global_number */
static int global_phb_number;

/* Physical base of the ISA memory window, if any */
resource_size_t isa_mem_base;

unsigned long isa_io_base;
EXPORT_SYMBOL(isa_io_base);

static int pci_bus_count;

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
	return resource_size(&hose->io_resource);
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
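
/*
 * Find the host bridge (hose) that owns the given OF device node by
 * walking up the device tree until a node matching a registered
 * controller is found.  Useful early in boot, before PCI bus numbers
 * have been assigned.
 */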
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

void __weak pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
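
/*
 * Translate an I/O BAR for mmap: convert the BAR's PIO token back into a
 * physical address and adjust vma->vm_pgoff accordingly.
 */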
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;

	/* Convert to an offset within this PCI controller */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}
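
/*
 * Compute the page protection to use for mapping PCI physical memory
 * through interfaces such as /dev/mem: RAM keeps the caller's protection,
 * anything else is mapped non-cached, and prefetchable PCI memory is
 * mapped write-combined.
 */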
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* We are only interested in memory resources */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
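
/* Legacy I/O port read on a bus, used by the sysfs legacy_io interface */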
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if the port is within the I/O range of the host bridge.
	 * We only validate against the PHB resource here, not the bus
	 * itself; forwarding of legacy cycles down bridges is not our
	 * problem at this level.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
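
/* Legacy I/O port write on a bus, used by the sysfs legacy_io interface */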
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if the port is within the I/O range of the host bridge,
	 * as in pci_legacy_read() above.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* The caller hands the value to us as a u32; for 1- and 2-byte
	 * accesses the payload ends up in the upper bits of that word,
	 * hence the shifts below.
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
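
/* Provide mmap of the legacy memory and I/O space below a host bridge */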
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* If the request falls outside the ISA memory window, do
		 * not fail outright: for shared mappings hand back anonymous
		 * zero pages instead, so that userspace which insists on
		 * mapping legacy_mem can still start.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug("legacy memory for %04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_bus_region region;

	if (rsrc->flags & IORESOURCE_IO) {
		pcibios_resource_to_bus(dev->bus, &region,
					(struct resource *) rsrc);
		*start = region.start;
		*end = region.end;
		return;
	}

	/* For MMIO we return the fully fixed-up resource address rather
	 * than a BAR value, since userspace expects something it can hand
	 * to /dev/mem.
	 */
	*start = rsrc->start;
	*end = rsrc->end;
}
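
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be set up
 * @dev: device node of the host bridge
 * @primary: set if this is the primary bridge (its I/O space then provides
 *           the ISA I/O base)
 *
 * Parses the "ranges" property of a PCI host bridge device node and sets up
 * the resource mapping of the controller based on its content.  Only one
 * I/O range and up to three memory ranges are supported; all memory ranges
 * must share the same CPU-to-PCI address offset, except for a window at PCI
 * address 0 which is treated as the ISA memory hole.
 */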
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %pOF %s ranges:\n",
		dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Skip ranges that failed translation or have zero size */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* The primary bridge provides the ISA I/O base */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;

			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0, so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.flags & IORESOURCE_PREFETCH) ?
				"Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handle the ISA memory hole here */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* The CPU/PCI offset comes from the first range, or
			 * from the current one if the previous offset came
			 * from an ISA hole.  If the offsets don't match,
			 * skip the range.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
							range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
							range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}

	/* If there is an ISA hole and pci_mem_offset does not match the
	 * ISA hole offset, remove the ISA hole from the resource list of
	 * this bridge.
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
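
/* Decide whether to display the domain number in /proc */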
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
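
/* Per-device resource fixup hook, run as each device header is probed;
 * nothing needs fixing up here at present.
 */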
static void pcibios_fixup_resources(struct pci_dev *dev)
{
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

int pcibios_add_device(struct pci_dev *dev)
{
	dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);

	return 0;
}
EXPORT_SYMBOL(pcibios_add_device);
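
/*
 * Reparent resource children of parent that conflict with res
 * under res, and make res replace those children.
 */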
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entry */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
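
/*
 * Handle resources of PCI devices.  We try to keep as much of the existing
 * configuration as possible and fix it up only where it is found to be
 * wrong:
 *
 *  (1) Allocate the resources of all buses behind PCI-to-PCI bridges,
 *      which gives fixed boundaries for further allocation.
 *  (2) Allocate resources for all enabled devices; on conflict, mark the
 *      resource unallocated and disable expansion ROMs.
 *  (3) Try to allocate resources for devices that are currently disabled.
 *  (4) Assign new addresses to whatever was left unassigned or
 *      misconfigured.
 */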
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}
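
/* Reserve the legacy I/O ports and the VGA memory window of each bridge so
 * that nothing else gets allocated on top of them.
 */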
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources: first claim the bus resources,
	 * then the resources of enabled devices, then those of devices
	 * that are currently disabled.
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before assigning unassigned resources, reserve the low IO area
	 * and the VGA memory area if they intersect the bus available
	 * resources, to avoid allocating things on top of them.
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Now proceed to assigning things that were left unassigned */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}
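
/* Populate the resource list of a PHB with its I/O and memory windows,
 * applying the PIO and memory offsets expected by the generic PCI code.
 */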
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	/* Fixup IO space offset */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %pOF (domain %d)\n",
			hose->dn, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %pOF (domain %d)\n",
				hose->dn, hose->global_number);

			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;
		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO offset = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}

static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %pOF\n", node);

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (hose->bus)
			pci_bus_add_devices(hose->bus);
	}

	return 0;
}

subsys_initcall(pcibios_init);

static struct pci_controller *pci_bus_to_hose(int bus)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}
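
/* Provide information on locations of various I/O regions in physical
 * memory.  Do this on a per-card basis so that we choose the right
 * root bridge.  Note that the returned IO or memory base is a physical
 * address.
 */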
long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
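
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */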
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
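
/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */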
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}