1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
29#include <linux/slab.h>
30#include <linux/of.h>
31#include <linux/of_address.h>
32#include <linux/of_irq.h>
33#include <linux/of_pci.h>
34#include <linux/export.h>
35
36#include <asm/processor.h>
37#include <linux/io.h>
38#include <asm/pci-bridge.h>
39#include <asm/byteorder.h>
40
/* Protects hose_list and global_phb_number below. */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of every registered PCI host bridge (pci_controller). */
LIST_HEAD(hose_list);

/* Next PHB/domain number to hand out; incremented under hose_spinlock. */
static int global_phb_number;

/* CPU physical base of the ISA memory hole, if any (set while parsing
 * the primary bridge's ranges). */
resource_size_t isa_mem_base;

/* DMA ops installed on every PCI device; boards may override via
 * set_pci_dma_ops() before devices are probed. */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

unsigned long isa_io_base;	/* virtual base of primary bridge I/O space */
unsigned long pci_dram_offset;
static int pci_bus_count;	/* total bus numbers used, set in pcibios_init() */
55
56
/*
 * Override the DMA ops that pcibios_setup_bus_devices() installs on
 * subsequently-probed PCI devices.
 */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
61
/* Return the DMA ops currently used for PCI devices. */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
67
/*
 * pcibios_alloc_controller - allocate and register a new host bridge
 * @dev: device-tree node of the bridge
 *
 * Allocates a zeroed pci_controller (falling back to bootmem when the
 * slab allocator is not up yet), assigns it the next global PHB/domain
 * number and links it onto hose_list under hose_spinlock.
 *
 * Returns the new controller, or NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only kfree()-able if allocated after mem_init (not bootmem) */
	phb->is_dynamic = mem_init_done;
	return phb;
}
83
84void pcibios_free_controller(struct pci_controller *phb)
85{
86 spin_lock(&hose_spinlock);
87 list_del(&phb->list_node);
88 spin_unlock(&hose_spinlock);
89
90 if (phb->is_dynamic)
91 kfree(phb);
92}
93
/* Size in bytes of @hose's PCI I/O window. */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
	return resource_size(&hose->io_resource);
}
98
99int pcibios_vaddr_is_ioport(void __iomem *address)
100{
101 int ret = 0;
102 struct pci_controller *hose;
103 resource_size_t size;
104
105 spin_lock(&hose_spinlock);
106 list_for_each_entry(hose, &hose_list, list_node) {
107 size = pcibios_io_size(hose);
108 if (address >= hose->io_base_virt &&
109 address < (hose->io_base_virt + size)) {
110 ret = 1;
111 break;
112 }
113 }
114 spin_unlock(&hose_spinlock);
115 return ret;
116}
117
118unsigned long pci_address_to_pio(phys_addr_t address)
119{
120 struct pci_controller *hose;
121 resource_size_t size;
122 unsigned long ret = ~0;
123
124 spin_lock(&hose_spinlock);
125 list_for_each_entry(hose, &hose_list, list_node) {
126 size = pcibios_io_size(hose);
127 if (address >= hose->io_base_phys &&
128 address < (hose->io_base_phys + size)) {
129 unsigned long base =
130 (unsigned long)hose->io_base_virt - _IO_BASE;
131 ret = base + (address - hose->io_base_phys);
132 break;
133 }
134 }
135 spin_unlock(&hose_spinlock);
136
137 return ret;
138}
139EXPORT_SYMBOL_GPL(pci_address_to_pio);
140
141
142
143
/* The PCI domain number is simply the owning PHB's global number. */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
151
152
153
154
155
156
157
158
159struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
160{
161 while (node) {
162 struct pci_controller *hose, *tmp;
163 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
164 if (hose->dn == node)
165 return hose;
166 node = node->parent;
167 }
168 return NULL;
169}
170
/*
 * sysfs "devspec" attribute: the full device-tree path of the PCI
 * device, or an empty read when no OF node is attached.
 */
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
184
185
/* Add arch-specific sysfs attributes (the "devspec" OF path) to @pdev. */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}
190
/* No special bus-master setup is required on this platform. */
void pcibios_set_master(struct pci_dev *dev)
{

}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/*
 * __pci_mmap_make_offset - validate and translate a user mmap offset
 * @dev: PCI device being mapped
 * @offset: in: offset within the requested space;
 *          out: CPU physical address to map
 * @mmap_state: pci_mmap_mem or pci_mmap_io
 *
 * Finds the BAR of @dev that contains *offset and rewrites *offset
 * into the CPU physical address to hand to remap_pfn_range().
 * Returns the matching resource, or NULL when the offset hits no
 * resource of the requested type.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (!hose)
		return NULL;	/* no host bridge: nothing to map */

	if (mmap_state == pci_mmap_mem) {
		/* The memory-offset adjustment is deliberately compiled
		 * out, matching the disabled block in
		 * pci_resource_to_user(): user offsets are raw resource
		 * addresses here. */
#if 0
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		/* I/O resources are stored with the virtual base folded
		 * in, so fold it into the requested offset too */
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* Found it! Rebase I/O offsets to the physical window */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
264
265
266
267
268
/*
 * Decide the page protection to use for mmap of a PCI resource.
 *
 * NOTE(review): write_combine is normalized below but the function
 * then unconditionally returns a non-cached mapping, so the computed
 * value is currently unused - presumably a placeholder for a future
 * write-combine mapping type on this platform.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	pgprot_t prot = protection;

	/* Only prefetchable memory could ever be write-combined; I/O
	 * space never is */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	return pgprot_noncached(prot);
}
291
292
293
294
295
296
/*
 * pci_phys_mem_access_prot - page protection for phys-mem style access
 * @file: file being mapped (unused)
 * @pfn: page frame being accessed
 * @size: size of the access (unused)
 * @prot: base protection
 *
 * RAM pages keep @prot unchanged.  Everything else becomes non-cached,
 * upgraded to write-combine when the pfn falls inside a prefetchable
 * PCI memory BAR.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Only memory BARs matter here */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* Does the pfn fall inside this BAR? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Drop the reference the aborted for_each_pci_dev() holds */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
340
341
342
343
344
345
346
347
348
349
350
/*
 * pci_mmap_page_range - perform the actual remap for a PCI mmap
 * @dev: device whose resource is being mapped
 * @vma: user vma; vm_pgoff holds the requested offset on entry
 * @mmap_state: pci_mmap_mem or pci_mmap_io
 * @write_combine: write-combine hint passed through to pgprot setup
 *
 * Translates vm_pgoff into a CPU physical page via
 * __pci_mmap_make_offset(), picks the page protection, and remaps.
 * Returns 0 on success, -EINVAL if the offset matches no resource, or
 * the error from remap_pfn_range().
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
373
374
/*
 * pci_legacy_read - read from the legacy (ISA) I/O space of @bus
 * @bus: bus to access
 * @port: port number within the bridge's I/O window
 * @val: output; little-endian access stored in the low bytes
 * @size: access width: 1, 2 or 4 bytes
 *
 * Returns the number of bytes read, -ENXIO when the port lies outside
 * the host bridge's I/O window, or -EINVAL on misalignment/bad size.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.  We only check
	 * the ranges of the PHB, not the bus itself, as the rules for
	 * forwarding legacy cycles down bridges are not our problem
	 * here.  So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
413
414
/*
 * pci_legacy_write - write to the legacy (ISA) I/O space of @bus
 * @bus: bus to access
 * @port: port number within the bridge's I/O window
 * @val: value to write (see note below on alignment within the word)
 * @size: access width: 1, 2 or 4 bytes
 *
 * Returns the number of bytes written, -ENXIO when the port lies
 * outside the host bridge's I/O window, or -EINVAL on misalignment.
 *
 * NOTE(review): for 1- and 2-byte writes the data is taken from the
 * *high* bits of @val (val >> 24 resp. val >> 16), i.e. callers pass
 * the value left-aligned - confirm against the legacy-I/O callers
 * before changing these shifts.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Only the PHB's window is checked, not per-bridge forwarding
	 * rules; see pci_legacy_read(). */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
458
459
/*
 * pci_mmap_legacy_page_range - mmap legacy memory or I/O space
 * @bus: bus whose legacy space to map
 * @vma: user vma to fill in
 * @mmap_state: pci_mmap_mem or pci_mmap_io
 *
 * Memory requests beyond the bridge's ISA memory window do not fail:
 * shared mappings are backed with anonymous zero pages instead.  I/O
 * requests must fit entirely inside the bridge's I/O resource.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Outside the ISA memory window: hand back zero pages
		 * for shared mappings rather than failing. */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug("legacy memory for 0%04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Rebase into the resource's coordinate space (which has
		 * the virtual I/O base folded in) before range-checking */
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
			_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
515
/*
 * pci_resource_to_user - convert a resource to user-visible start/end
 *
 * I/O resources are stored with the virtual I/O base folded in, so
 * that offset is subtracted before the range is handed to userland.
 * The corresponding pci_mem_offset adjustment for memory resources is
 * deliberately compiled out below, matching the disabled block in
 * __pci_mmap_make_offset() so that mmap offsets and these values stay
 * consistent with each other.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
/*
 * pci_process_bridge_OF_ranges - parse a host bridge's "ranges"
 * @hose: controller to fill in
 * @dev: device-tree node of the bridge
 * @primary: non-zero for the primary bridge; its windows seed the
 *           global isa_io_base / isa_mem_base
 *
 * Walks the node's "ranges" property and fills in the hose's single
 * I/O window (capped at 16MB and ioremap()ed) plus up to three memory
 * windows.  A memory range at PCI address 0 is recorded as the ISA
 * memory hole; if its CPU offset ends up disagreeing with the final
 * pci_mem_offset, that hole is removed again at the end.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %s %s ranges:\n",
		dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
			 range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
			 range.cpu_addr, range.size);

		/* Skip ranges that failed translation or are zero-sized */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* Only one I/O window is supported */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Cap the I/O space at 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* Map the window so port accesses work */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* The primary bridge's window becomes the global
			 * ISA I/O base */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;

			/* pci_io_size and io_base_phys represent I/O
			 * space starting at PCI address 0, hence the
			 * pci_addr terms */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build the resource from the PCI-side address */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* Only three memory windows are supported */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* A range at PCI address 0 is the ISA memory hole */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Take the CPU<->PCI offset from the first range,
			 * or from the current one if the previous offset
			 * came from the ISA hole; any later mismatching
			 * offset means the range must be skipped.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
					range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
				 range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL)
			of_pci_range_to_resource(&range, dev, res);
	}

	/* If there's an ISA hole and the pci_mem_offset does not match
	 * the ISA hole's offset, remove the hole from the bridge's
	 * resource list again.
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
716
717
/* No domain prefix is used in /proc/bus/pci names on this platform. */
int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}
722
723
724
725
/*
 * pcibios_fixup_resources - header fixup applied to every PCI device
 *
 * BARs that firmware left at address 0 are marked IORESOURCE_UNSET and
 * rebased to a zero start so the resource survey assigns them later;
 * all other BARs are just logged.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			/* Keep the size, drop the base, flag for re-assign */
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
761
762
763
764
765
766
/*
 * pcibios_uninitialized_bridge_resource - detect bogus bridge windows
 * @bus: bus behind the bridge
 * @res: bridge window resource to check
 *
 * Heuristically decides whether firmware ever programmed this bridge
 * window.  Returns 1 when the window looks uninitialized (caller will
 * clear it so it gets re-assigned), 0 when it appears valid.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* Memory windows */
	if (res->flags & IORESOURCE_MEM) {
		/* A non-zero base means the window was programmed */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* Base is 0: if memory decoding is off on the bridge,
		 * the window was clearly never set up */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is on and the base is 0.  If one of
		 * the hose's own windows also starts at the memory
		 * offset, a base of 0 is plausible: treat as programmed */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Otherwise a 0 base would collide with nothing valid,
		 * so consider it unassigned */
		return 1;
	} else {
		/* I/O window: a non-zero (offset-adjusted) base means it
		 * was programmed */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Base is 0: if I/O decoding is enabled, firmware may
		 * genuinely have assigned port 0, treat as programmed */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* I/O decoding off and base 0: unassigned */
		return 1;
	}
}
827
828
/*
 * pcibios_fixup_bridge - sanity-check the windows of one P2P bridge
 *
 * Clears any window that pcibios_uninitialized_bridge_resource()
 * reports as never programmed, so the survey re-assigns it later.
 */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;
		if (!res->flags)
			continue;
		/* Windows 3+ of a transparent bridge belong to its parent */
		if (i >= 3 && bus->self->transparent)
			continue;

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		/* Detect uninitialized bridge windows and clear them so
		 * they get re-assigned later */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n",
				 pci_name(dev));
		} else {
			pr_debug("PCI:%s %016llx-%016llx\n",
				 pci_name(dev),
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
		}
	}
}
865
866void pcibios_setup_bus_self(struct pci_bus *bus)
867{
868
869 if (bus->self != NULL)
870 pcibios_fixup_bridge(bus);
871}
872
/*
 * pcibios_setup_bus_devices - per-device fixups after scanning @bus
 *
 * Hooks up the OF node, NUMA node, DMA ops and interrupt for every
 * device on the bus.
 */
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Attach the matching device-tree node, if any */
		dev->dev.of_node = pci_device_to_OF_node(dev);

		/* Propagate the bus's NUMA node to the device */
		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

		/* Hook up the platform DMA ops and offset */
		set_dma_ops(&dev->dev, pci_dma_ops);
		dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;

		/* Resolve the interrupt from the device tree */
		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
	}
}
897
/*
 * pcibios_fixup_bus - per-bus fixup hook called by the PCI core
 *
 * Re-reads bridge bases when the bus sits behind a bridge, then runs
 * the self and device fixups.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* Only buses behind a P2P bridge have bridge bases to read;
	 * root buses (bus->self == NULL) do not */
	if (bus->self != NULL)
		pci_read_bridge_bases(bus);

	/* Fix up the bus's own windows */
	pcibios_setup_bus_self(bus);

	/* Then fix up the devices on it */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
914
/* ISA-alignment skipping is never needed on this platform. */
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	return 0;
}
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933resource_size_t pcibios_align_resource(void *data, const struct resource *res,
934 resource_size_t size, resource_size_t align)
935{
936 struct pci_dev *dev = data;
937 resource_size_t start = res->start;
938
939 if (res->flags & IORESOURCE_IO) {
940 if (skip_isa_ioresource_align(dev))
941 return start;
942 if (start & 0x300)
943 start = (start + 0x3ff) & ~0x3ff;
944 }
945
946 return start;
947}
948EXPORT_SYMBOL(pcibios_align_resource);
949
950
951
952
953
/*
 * reparent_resources - insert @res into the tree, adopting conflicts
 * @parent: resource to insert under
 * @res: resource to insert
 *
 * Makes @res a child of @parent and moves every existing child of
 * @parent that is fully contained in @res underneath @res.  Returns 0
 * on success, -1 when some child only partially overlaps @res (or no
 * overlapping children were found at all).
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Find the contiguous run of children that @res must adopt */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;	/* first child to reparent */
	}
	if (firstpp == NULL)
		return -1;	/* no conflicting children found */
	/* Splice @res in: it takes over [*firstpp .. *pp) as children */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	/* Re-point the adopted children at their new parent */
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
/*
 * pcibios_allocate_bus_resources - claim bus windows in the resource tree
 * @bus: bus whose windows to claim; recurses through all children
 *
 * Root-bus windows go under ioport_resource/iomem_resource; child-bus
 * windows go under whatever pci_find_parent_resource() reports.
 * Windows that can be neither requested nor reparented over their
 * conflicts are cleared so they get re-assigned later.
 */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* The window *is* the parent's resource:
				 * nothing of our own to claim here */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under this window
			 * and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1082
/*
 * alloc_resource - try to claim one device BAR in the resource tree
 * @dev: device owning the BAR
 * @idx: BAR index in dev->resource[]
 *
 * On failure the BAR is marked IORESOURCE_UNSET and rebased to start 0
 * (keeping its size) so that a later pass assigns a fresh address.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* A new address will be assigned later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
1110
1111static void __init pcibios_allocate_resources(int pass)
1112{
1113 struct pci_dev *dev = NULL;
1114 int idx, disabled;
1115 u16 command;
1116 struct resource *r;
1117
1118 for_each_pci_dev(dev) {
1119 pci_read_config_word(dev, PCI_COMMAND, &command);
1120 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1121 r = &dev->resource[idx];
1122 if (r->parent)
1123 continue;
1124 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1125 continue;
1126
1127
1128
1129 if (idx == PCI_ROM_RESOURCE)
1130 disabled = 1;
1131 if (r->flags & IORESOURCE_IO)
1132 disabled = !(command & PCI_COMMAND_IO);
1133 else
1134 disabled = !(command & PCI_COMMAND_MEMORY);
1135 if (pass == disabled)
1136 alloc_resource(dev, idx);
1137 }
1138 if (pass)
1139 continue;
1140 r = &dev->resource[PCI_ROM_RESOURCE];
1141 if (r->flags) {
1142
1143
1144
1145 u32 reg;
1146 pci_read_config_dword(dev, dev->rom_base_reg, ®);
1147 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1148 pr_debug("PCI: Switching off ROM of %s\n",
1149 pci_name(dev));
1150 r->flags &= ~IORESOURCE_ROM_ENABLE;
1151 pci_write_config_dword(dev, dev->rom_base_reg,
1152 reg & ~PCI_ROM_ADDRESS_ENABLE);
1153 }
1154 }
1155 }
1156}
1157
/*
 * pcibios_reserve_legacy_regions - block out legacy ISA ranges
 * @bus: root bus of the domain
 *
 * Reserves the first 4KB of the I/O window and the VGA memory range
 * (0xa0000-0xbffff plus the hose memory offset) so the resource
 * allocator never hands them to devices.  Failures are only logged:
 * the regions may legitimately be claimed already.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Reserve legacy I/O, if the hose has an I/O window at all */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Find a memory window that covers the VGA range, then reserve it */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;		/* no window covers the VGA range */
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1213
/*
 * pcibios_resource_survey - claim firmware-assigned resources, then
 * let the generic code assign whatever is still missing.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Claim bridge windows first, then device BARs in two passes
	 * (enabled devices before disabled ones) */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Reserve the legacy ISA regions before assigning anything new,
	 * so they can never be handed out */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Finally let the generic code place everything still unassigned */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}
1238
1239
1240
1241
1242
1243
/*
 * pcibios_claim_one_bus - claim assigned BARs of @bus and all children
 *
 * For each device, every resource that already has a start address and
 * is not yet in the resource tree is claimed as-is.  Used from the
 * hotplug path (see pcibios_finish_adding_to_bus()).
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip claimed, unset or address-less BARs */
			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: ", pci_name(dev));
			pr_debug("Resource %d: %016llx..%016llx [%x]\n",
				 i, (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1272
1273
1274
1275
1276
1277
1278
1279
/*
 * pcibios_finish_adding_to_bus - final fixups for a hotplug-added bus
 * @bus: freshly scanned bus
 *
 * Allocates bridge windows, claims the resources firmware set up, and
 * registers the new devices with the driver core.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and device resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists; register in proc, sysfs */
	pci_bus_add_devices(bus);


}
1296
/* Enable decoding for the BARs selected by @mask via the generic code. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
1301
/*
 * pcibios_setup_phb_resources - build the root-bus resource list
 * @hose: controller being set up
 * @resources: list to append the root windows to
 *
 * Rebases the I/O window by the virtual-to-ISA offset (masked to 32
 * bits) and appends it plus up to three memory windows.  Missing
 * windows get wide fallback values with a warning so the generic code
 * always sees an I/O and at least one memory resource.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hook up the PHB I/O resource */
	res = &hose->io_resource;

	/* Fold in the I/O space offset, keeping 32-bit values */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %s (domain %d)\n",
			hose->dn->full_name, hose->global_number);
		/* Fallback: span the whole I/O space */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hook up the PHB memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;	/* only window 0 is mandatory */
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %s (domain %d)\n",
				hose->dn->full_name, hose->global_number);

			/* Fallback: span everything above the mem offset */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO offset = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}
1363
/* Return a referenced device-tree node for the PHB that owns @bus. */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}
1370
/*
 * pcibios_scan_phb - scan one host bridge and create its root bus
 * @hose: controller to scan
 *
 * Builds the PHB's resource list, scans the root bus, and records the
 * resulting bus-number range back into @hose.
 */
static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		/* The list was never consumed, free it ourselves */
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}
1394
/*
 * pcibios_init - probe and set up all registered PCI host bridges
 *
 * Scans every hose on hose_list, numbering buses consecutively across
 * domains, then runs the resource survey.  Registered as a
 * subsys_initcall so it runs after the platform code has added its
 * controllers to hose_list.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	return 0;
}

subsys_initcall(pcibios_init);
1418
1419static struct pci_controller *pci_bus_to_hose(int bus)
1420{
1421 struct pci_controller *hose, *tmp;
1422
1423 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1424 if (bus >= hose->first_busno && bus <= hose->last_busno)
1425 return hose;
1426 return NULL;
1427}
1428
1429
1430
1431
1432
1433
1434
/*
 * sys_pciconfig_iobase - return I/O/memory base information to userland
 * @which: one of the IOBASE_* selectors
 * @bus: bus number used to locate the host bridge
 * @devfn: unused
 *
 * Returns the requested value, -ENODEV when @bus matches no host
 * bridge, or -EOPNOTSUPP for an unknown selector.
 */
long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
1459
1460
1461
1462
1463
/*
 * NULL_PCI_OP - generate a per-device config accessor stub that always
 * fails with PCIBIOS_DEVICE_NOT_FOUND.  No expansion is visible in
 * this file; presumably kept for arch code that instantiates it.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}
1470
/*
 * Fallback config-space accessors, used by fake_pci_bus() when a fake
 * bus has no hose: every access fails with PCIBIOS_DEVICE_NOT_FOUND.
 */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
1489
1490
1491
1492
1493
/*
 * fake_pci_bus - build a minimal pci_bus for early config access
 * @hose: controller to route accesses through, may be NULL
 * @busnr: bus number to fake
 *
 * Used by the early_*_config_* helpers before the real pci_bus
 * structures exist.  NOTE: a single static pci_bus is reused, so calls
 * are not reentrant; only suitable for early boot.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	/* Without a hose, fall back to the always-failing accessors */
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
1507
/*
 * EARLY_PCI_OP - generate early_{read,write}_config_{byte,word,dword}()
 * wrappers that route config accesses through a fake bus, for use
 * before the real pci_bus structures have been created.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1522
/* Early (pre-scan) version of pci_bus_find_capability(). */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1528
1529