1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/shmem_fs.h>
26#include <linux/list.h>
27#include <linux/syscalls.h>
28#include <linux/irq.h>
29#include <linux/vmalloc.h>
30#include <linux/slab.h>
31#include <linux/of.h>
32#include <linux/of_address.h>
33#include <linux/of_irq.h>
34#include <linux/of_pci.h>
35#include <linux/export.h>
36
37#include <asm/processor.h>
38#include <linux/io.h>
39#include <asm/pci-bridge.h>
40#include <asm/byteorder.h>
41
/* Protects hose_list and global_phb_number below. */
static DEFINE_SPINLOCK(hose_spinlock);
/* All registered PCI host bridges (struct pci_controller). */
LIST_HEAD(hose_list);


/* Next unique domain number handed out to a new host bridge. */
static int global_phb_number;


/* CPU physical base of the ISA memory hole (set while parsing ranges). */
resource_size_t isa_mem_base;

/* Virtual base of the primary bridge's I/O window (ISA I/O). */
unsigned long isa_io_base;
EXPORT_SYMBOL(isa_io_base);

/* Total number of PCI buses found after scanning all host bridges. */
static int pci_bus_count;
55
/*
 * pcibios_alloc_controller - allocate and register a PCI host bridge
 * @dev: device-tree node describing the bridge (stored in phb->dn)
 *
 * Allocates a zeroed pci_controller (from bootmem before mem_init is
 * done, from the slab afterwards), assigns it a unique global number
 * and links it onto hose_list under hose_spinlock.
 *
 * Returns the new controller, or NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only slab allocations (after mem_init) may later be kfree()d. */
	phb->is_dynamic = mem_init_done;
	return phb;
}
71
72void pcibios_free_controller(struct pci_controller *phb)
73{
74 spin_lock(&hose_spinlock);
75 list_del(&phb->list_node);
76 spin_unlock(&hose_spinlock);
77
78 if (phb->is_dynamic)
79 kfree(phb);
80}
81
82static resource_size_t pcibios_io_size(const struct pci_controller *hose)
83{
84 return resource_size(&hose->io_resource);
85}
86
87int pcibios_vaddr_is_ioport(void __iomem *address)
88{
89 int ret = 0;
90 struct pci_controller *hose;
91 resource_size_t size;
92
93 spin_lock(&hose_spinlock);
94 list_for_each_entry(hose, &hose_list, list_node) {
95 size = pcibios_io_size(hose);
96 if (address >= hose->io_base_virt &&
97 address < (hose->io_base_virt + size)) {
98 ret = 1;
99 break;
100 }
101 }
102 spin_unlock(&hose_spinlock);
103 return ret;
104}
105
106unsigned long pci_address_to_pio(phys_addr_t address)
107{
108 struct pci_controller *hose;
109 resource_size_t size;
110 unsigned long ret = ~0;
111
112 spin_lock(&hose_spinlock);
113 list_for_each_entry(hose, &hose_list, list_node) {
114 size = pcibios_io_size(hose);
115 if (address >= hose->io_base_phys &&
116 address < (hose->io_base_phys + size)) {
117 unsigned long base =
118 (unsigned long)hose->io_base_virt - _IO_BASE;
119 ret = base + (address - hose->io_base_phys);
120 break;
121 }
122 }
123 spin_unlock(&hose_spinlock);
124
125 return ret;
126}
127EXPORT_SYMBOL_GPL(pci_address_to_pio);
128
129
130
131
132
133
134
135
/*
 * pci_find_hose_for_OF_device - map a device-tree node to its host bridge
 * @node: OF node of the device (or any ancestor of it)
 *
 * Walks up the device tree from @node until a node matching a registered
 * controller's root is found.  Returns the controller, or NULL.
 *
 * NOTE(review): hose_list is traversed without hose_spinlock here --
 * presumably safe because bridges are not removed while this runs;
 * confirm before relying on it in hotplug paths.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}
147
/* No arch-specific action needed: the generic PCI code handles mastering. */
void pcibios_set_master(struct pci_dev *dev)
{

}
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/*
 * __pci_mmap_make_offset - validate and translate a user mmap offset
 * @dev: PCI device whose BAR is being mapped
 * @offset: in: offset within the mmap space;
 *          out: CPU physical address (adjusted for I/O mappings)
 * @mmap_state: whether memory or I/O space is being mapped
 *
 * Returns the device resource containing @offset, or NULL when the
 * offset does not fall inside any resource of the requested type.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (!hose)
		return NULL;	/* should never happen */

	if (mmap_state == pci_mmap_mem) {
#if 0
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		/* I/O offsets are relative to the global I/O space base. */
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/* Find the BAR (or ROM) whose range contains *offset. */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		/* NOTE(review): flags narrowed to int -- confirm the bits
		 * tested below always fit in 32 bits on this arch.
		 */
		int flags = rp->flags;

		/* Treat the ROM resource as memory space. */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Skip resources of the wrong type. */
		if ((flags & res_bit) == 0)
			continue;

		/* Range check (start is page-aligned for the comparison). */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* For I/O, convert back to a CPU physical address. */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
221
222
223
224
225
226
/*
 * pci_phys_mem_access_prot - page protection for /dev/mem-style mappings
 * @file: file being mapped (unused)
 * @pfn: first page frame of the mapping
 * @size: mapping size in bytes (unused)
 * @prot: protection proposed by the caller
 *
 * RAM keeps the caller's protection.  Everything else becomes
 * non-cached; addresses inside a prefetchable PCI memory BAR are
 * made write-combining instead.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search every device for a memory BAR containing the address. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Only memory resources are of interest here. */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;

			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Breaking out of for_each_pci_dev() keeps a reference. */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
270
271
272
273
274
275
276
277
278
279
280
/*
 * pci_mmap_page_range - mmap one of a device's BARs into userspace
 * @dev: device whose resource is being mapped
 * @bar: BAR index (unused; the offset selects the resource)
 * @vma: user VMA describing the requested mapping
 * @mmap_state: memory or I/O space
 * @write_combine: requested WC mapping
 *
 * NOTE(review): @write_combine is currently ignored -- the mapping is
 * always non-cached; confirm whether WC should be honoured here.
 *
 * Returns 0 on success, -EINVAL when the offset hits no resource.
 */
int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
301
302
303int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
304{
305 unsigned long offset;
306 struct pci_controller *hose = pci_bus_to_host(bus);
307 struct resource *rp = &hose->io_resource;
308 void __iomem *addr;
309
310
311
312
313
314
315 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
316 offset += port;
317
318 if (!(rp->flags & IORESOURCE_IO))
319 return -ENXIO;
320 if (offset < rp->start || (offset + size) > rp->end)
321 return -ENXIO;
322 addr = hose->io_base_virt + port;
323
324 switch (size) {
325 case 1:
326 *((u8 *)val) = in_8(addr);
327 return 1;
328 case 2:
329 if (port & 1)
330 return -EINVAL;
331 *((u16 *)val) = in_le16(addr);
332 return 2;
333 case 4:
334 if (port & 3)
335 return -EINVAL;
336 *((u32 *)val) = in_le32(addr);
337 return 4;
338 }
339 return -EINVAL;
340}
341
342
343int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
344{
345 unsigned long offset;
346 struct pci_controller *hose = pci_bus_to_host(bus);
347 struct resource *rp = &hose->io_resource;
348 void __iomem *addr;
349
350
351
352
353
354
355 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
356 offset += port;
357
358 if (!(rp->flags & IORESOURCE_IO))
359 return -ENXIO;
360 if (offset < rp->start || (offset + size) > rp->end)
361 return -ENXIO;
362 addr = hose->io_base_virt + port;
363
364
365
366
367
368
369 switch (size) {
370 case 1:
371 out_8(addr, val >> 24);
372 return 1;
373 case 2:
374 if (port & 1)
375 return -EINVAL;
376 out_le16(addr, val >> 16);
377 return 2;
378 case 4:
379 if (port & 3)
380 return -EINVAL;
381 out_le32(addr, val);
382 return 4;
383 }
384 return -EINVAL;
385}
386
387
/*
 * pci_mmap_legacy_page_range - mmap legacy ISA memory or I/O space
 * @bus: bus whose host bridge provides the legacy windows
 * @vma: user VMA (vm_pgoff is the offset into the legacy space)
 * @mmap_state: pci_mmap_mem for ISA memory, otherwise I/O space
 *
 * For memory requests beyond the bridge's ISA window the mapping is
 * backed by anonymous/zero pages instead of failing, so legacy VGA
 * probing in userspace keeps working.  Returns 0 or a negative errno.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Requests outside the ISA memory window get zero pages
		 * rather than an error (matches x86 /dev/mem behaviour).
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug("legacy memory for 0%04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
			_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		/* NOTE(review): rp->end is inclusive, so this rejects a
		 * mapping that exactly reaches the window's last byte --
		 * confirm whether that off-by-one is intentional.
		 */
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
443
444void pci_resource_to_user(const struct pci_dev *dev, int bar,
445 const struct resource *rsrc,
446 resource_size_t *start, resource_size_t *end)
447{
448 struct pci_bus_region region;
449
450 if (rsrc->flags & IORESOURCE_IO) {
451 pcibios_resource_to_bus(dev->bus, ®ion,
452 (struct resource *) rsrc);
453 *start = region.start;
454 *end = region.end;
455 return;
456 }
457
458
459
460
461
462
463
464
465 *start = rsrc->start;
466 *end = rsrc->end;
467}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
/*
 * pci_process_bridge_OF_ranges - parse a bridge's "ranges" property
 * @hose: controller to fill in
 * @dev: device-tree node of the host bridge
 * @primary: non-zero for the primary bridge (sets the ISA bases)
 *
 * Walks the OF "ranges" entries and populates the hose's I/O and
 * memory resources.  Only one I/O window (capped at 16MB) and up to
 * three memory windows are supported.  A memory range at PCI address 0
 * is treated as the ISA memory hole; if it conflicts with the chosen
 * pci_mem_offset it is removed again at the end.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %s %s ranges:\n",
		dev->full_name, primary ? "(primary)" : "");

	/* Bail out quietly if there is no (valid) ranges property. */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
			 range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
			 range.cpu_addr, range.size);

		/* Skip untranslatable or empty ranges. */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* Only one I/O window per bridge is supported. */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}

			/* Cap the window at 16MB of I/O space. */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* Map it into the kernel's virtual space. */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* The primary bridge provides the ISA I/O base. */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;

			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* The I/O resource is in PCI (port) numbers. */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* At most three memory windows are kept. */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}

			/* PCI address 0 marks the ISA memory hole. */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Establish (or verify) the single cpu->pci memory
			 * offset this code supports; mismatching windows
			 * are dropped.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
					range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
				 range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}

	/* Drop the ISA hole again if it disagrees with the final
	 * pci_mem_offset (it would alias other windows).
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
635
636
/* Domain number used for /proc/bus/pci directory names. */
int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
641
642
643
644
/*
 * pcibios_fixup_resources - header fixup run for every PCI device
 * @dev: device whose BARs are inspected
 *
 * Marks resources with a zero start address as unassigned (converted
 * to a pure size, IORESOURCE_UNSET) so the allocation pass will assign
 * them later.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			/* Keep only the size; flag for later assignment. */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
680
681
682
683
684
685
/*
 * pcibios_uninitialized_bridge_resource - detect bogus bridge windows
 * @bus: secondary bus behind the bridge
 * @res: one of the bridge's forwarded resource windows
 *
 * Heuristically decides whether firmware left the bridge window
 * uninitialized (so it should be cleared and reassigned).  Returns 1
 * when the window looks uninitialized, 0 when it seems valid.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	if (res->flags & IORESOURCE_MEM) {
		/* A window starting at the bus's address 0 is suspect. */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* Memory decode disabled: certainly uninitialized. */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* If the hose itself has a window at address 0, a bridge
		 * window there could still be legitimate.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* No hose window at 0: treat the bridge window as bogus. */
		return 1;
	} else {
		/* I/O: only windows at bus port 0 (mod 4G) are suspect. */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* With I/O decode enabled we assume firmware set it up
		 * (ISA-style devices may genuinely live at port 0).
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* I/O decode off and window at 0: uninitialized. */
		return 1;
	}
}
746
747
/*
 * pcibios_fixup_bridge - sanitize the forwarding windows of a bridge
 * @bus: secondary bus behind the PCI-PCI bridge
 *
 * Clears windows that look uninitialized (see
 * pcibios_uninitialized_bridge_resource) so they get reassigned later.
 */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;
		if (!res->flags)
			continue;
		/* Transparent bridges only forward the first 3 windows. */
		if (i >= 3 && bus->self->transparent)
			continue;

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			/* Clear so the window gets reassigned. */
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n",
				 pci_name(dev));
		} else {
			pr_debug("PCI:%s %016llx-%016llx\n",
				 pci_name(dev),
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
		}
	}
}
784
785void pcibios_setup_bus_self(struct pci_bus *bus)
786{
787
788 if (bus->self != NULL)
789 pcibios_fixup_bridge(bus);
790}
791
/*
 * pcibios_setup_bus_devices - arch setup for all devices on one bus
 * @bus: bus whose freshly scanned devices need fixups
 *
 * Attaches each device to its OF node and NUMA node and resolves its
 * interrupt from the device tree.
 */
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Link the struct device to its device-tree node. */
		dev->dev.of_node = pci_device_to_OF_node(dev);

		/* Inherit the NUMA node from the bus. */
		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

		/* Resolve the IRQ from the device tree. */
		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
	}
}
812
/* Nothing to fix at bus-scan time on this architecture. */
void pcibios_fixup_bus(struct pci_bus *bus)
{

}
EXPORT_SYMBOL(pcibios_fixup_bus);
818
819
820
821
822
823
824
825
826
827
828
829
830
831
/*
 * pcibios_align_resource - arch hook for resource alignment
 *
 * No extra alignment constraints here: keep the proposed start.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size, resource_size_t align)
{
	return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);
838
/*
 * pcibios_add_device - hook run when a device is added to the bus
 * @dev: newly added PCI device
 *
 * Resolves the device's IRQ from the device tree.  Always returns 0.
 */
int pcibios_add_device(struct pci_dev *dev)
{
	dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);

	return 0;
}
EXPORT_SYMBOL(pcibios_add_device);
846
847
848
849
850
/*
 * reparent_resources - insert @res between @parent and conflicting kids
 * @parent: resource whose children overlap @res
 * @res: new resource that should own those children
 *
 * Moves every child of @parent fully contained in @res underneath
 * @res, then links @res in their place.  Returns 0 on success, -1 when
 * a child only partially overlaps @res or no child overlaps at all.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Find the span of children that fall inside @res. */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		/* Partial overlap cannot be represented in the tree. */
		if (p->start < res->start || p->end > res->end)
			return -1;
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* no overlapping children found */
	/* Splice @res in: it adopts the children and takes their slot. */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/*
 * pcibios_allocate_bus_resources - claim bridge windows recursively
 * @bus: bus whose (and whose children's) windows should be claimed
 *
 * Requests every valid bus resource from its parent resource; falls
 * back to reparenting overlapping siblings or to the bridge-claim
 * helper.  Windows that cannot be placed at all are cleared so they
 * will be reassigned later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			/* Root bus windows go under the global roots. */
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* Transparent bridge: the resource is the
				 * parent's own, nothing to claim.
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/* The request may fail because of existing children
			 * that @res should contain; try adopting them.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			/* Last resort: let the core clip/claim it. */
			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						      i + PCI_BRIDGE_RESOURCES) == 0)
				continue;

		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		/* Clear so the window gets reassigned from scratch. */
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
987
/*
 * alloc_resource - claim one device BAR from its parent resource
 * @dev: device owning the BAR
 * @idx: resource index
 *
 * On failure the BAR is converted back to an unassigned size-only
 * resource (IORESOURCE_UNSET) so it gets remapped later.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);

		/* Keep only the size; flag for later assignment. */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1015
1016static void __init pcibios_allocate_resources(int pass)
1017{
1018 struct pci_dev *dev = NULL;
1019 int idx, disabled;
1020 u16 command;
1021 struct resource *r;
1022
1023 for_each_pci_dev(dev) {
1024 pci_read_config_word(dev, PCI_COMMAND, &command);
1025 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1026 r = &dev->resource[idx];
1027 if (r->parent)
1028 continue;
1029 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1030 continue;
1031
1032
1033
1034 if (idx == PCI_ROM_RESOURCE)
1035 disabled = 1;
1036 if (r->flags & IORESOURCE_IO)
1037 disabled = !(command & PCI_COMMAND_IO);
1038 else
1039 disabled = !(command & PCI_COMMAND_MEMORY);
1040 if (pass == disabled)
1041 alloc_resource(dev, idx);
1042 }
1043 if (pass)
1044 continue;
1045 r = &dev->resource[PCI_ROM_RESOURCE];
1046 if (r->flags) {
1047
1048
1049
1050 u32 reg;
1051 pci_read_config_dword(dev, dev->rom_base_reg, ®);
1052 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1053 pr_debug("PCI: Switching off ROM of %s\n",
1054 pci_name(dev));
1055 r->flags &= ~IORESOURCE_ROM_ENABLE;
1056 pci_write_config_dword(dev, dev->rom_base_reg,
1057 reg & ~PCI_ROM_ADDRESS_ENABLE);
1058 }
1059 }
1060 }
1061}
1062
/*
 * pcibios_reserve_legacy_regions - reserve legacy ISA ranges on a root bus
 * @bus: root bus of the domain
 *
 * Reserves the first 4k of I/O space and the VGA memory range
 * (0xa0000-0xbffff) so they are not handed out to devices.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Reserve legacy I/O ports 0x000-0xfff, if there is I/O space. */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Find the memory window that covers the VGA range. */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;	/* no window covers the VGA range */
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1118
/*
 * pcibios_resource_survey - allocate and assign all PCI resources
 *
 * Claims bridge windows and device BARs already programmed by
 * firmware (two passes: enabled devices first, then disabled ones),
 * reserves the legacy ISA ranges, and finally lets the core assign
 * whatever is still unassigned.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Claim firmware-programmed bridge windows first. */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Keep legacy ISA regions out of the allocator's hands. */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Let the core place everything still unassigned. */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}
1143
1144
1145
1146
1147
1148
/*
 * pcibios_claim_one_bus - claim firmware-assigned resources recursively
 * @bus: bus (and children) whose device BARs should be claimed
 *
 * Used on hotplug paths: inserts already-programmed BARs into the
 * resource tree, falling back to the bridge-claim helper on conflict.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip claimed, unassigned or empty resources. */
			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: ", pci_name(dev));
			pr_debug("Resource %d: %016llx..%016llx [%x]\n",
				 i, (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1180
1181
1182
1183
1184
1185
1186
1187
/*
 * pcibios_finish_adding_to_bus - finalize a hotplugged bus
 * @bus: freshly scanned bus
 *
 * Allocates bridge windows, claims device resources and registers the
 * devices with the driver core.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Place and claim all resources of the new bus. */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Make the devices visible to drivers. */
	pci_bus_add_devices(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1204
/*
 * pcibios_setup_phb_resources - build the root-bus resource list
 * @hose: host bridge being scanned
 * @resources: out: list consumed by pci_scan_root_bus()
 *
 * Converts the hose's I/O and memory windows into bus resources with
 * the proper CPU->bus offsets, inventing permissive defaults when
 * firmware left a window unset.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	res = &hose->io_resource;

	/* Rebase the I/O window to logical port numbers. */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %s (domain %d)\n",
			hose->dn->full_name, hose->global_number);

		/* Fall back to the whole I/O space of this bridge. */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Memory windows. */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %s (domain %d)\n",
				hose->dn->full_name, hose->global_number);

			/* Fall back to all of memory space. */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
1266
/*
 * pcibios_scan_phb - scan the bus hierarchy behind one host bridge
 * @hose: bridge to scan
 *
 * Builds the bridge's resource list and scans its root bus, recording
 * the resulting bus range back into the hose.
 */
static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}
1290
/*
 * pcibios_init - probe all host bridges and finish PCI setup
 *
 * Scans every registered hose, numbering buses consecutively, runs
 * the resource survey, then registers all devices with the core.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan each bridge; track the highest bus number seen. */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Allocate/assign all resources, then add the devices. */
	pcibios_resource_survey();
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (hose->bus)
			pci_bus_add_devices(hose->bus);
	}

	return 0;
}

subsys_initcall(pcibios_init);
1318
1319static struct pci_controller *pci_bus_to_hose(int bus)
1320{
1321 struct pci_controller *hose, *tmp;
1322
1323 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1324 if (bus >= hose->first_busno && bus <= hose->last_busno)
1325 return hose;
1326 return NULL;
1327}
1328
1329
1330
1331
1332
1333
1334
/*
 * sys_pciconfig_iobase - report PCI base addresses to userspace
 * @which: IOBASE_* selector of the value to return
 * @bus: bus number used to locate the host bridge
 * @devfn: unused
 *
 * Returns the requested base, -ENODEV when @bus matches no bridge,
 * or -EOPNOTSUPP for an unknown selector.
 */
long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
1359
1360
1361
1362
1363
/*
 * NULL_PCI_OP - generate a config accessor that always fails.
 * NOTE(review): no expansion of this macro is visible in this file;
 * presumably kept for out-of-file users -- confirm before removing.
 */
#define NULL_PCI_OP(rw, size, type) \
static int \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
{ \
	return PCIBIOS_DEVICE_NOT_FOUND; \
}
1370
/* Config read stub used when a bus has no backing hose. */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1377
/* Config write stub used when a bus has no backing hose. */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}
1384
/* Ops vector that fails every access; used when no hose is found. */
static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
1389
1390
1391
1392
1393
/*
 * fake_pci_bus - build a throwaway bus for early config access
 * @hose: controller providing the config ops (may be NULL)
 * @busnr: bus number to fake
 *
 * NOTE(review): returns a pointer to a single static buffer, so this
 * is only safe in strictly serialized early-boot code.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	/* Fall back to always-failing ops when there is no hose. */
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
1407
/*
 * EARLY_PCI_OP - generate an early config accessor that routes through
 * a fake bus, for use before the real bus structures exist.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value) \
{ \
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
					    devfn, offset, value); \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1422
/* Early-boot capability lookup, routed through a fake bus. */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1428
1429