/*
 * Common PCI / pcibios support code for the PowerPC architecture,
 * shared between 32-bit and 64-bit platforms.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>

static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* Next PHB (domain) number to hand out */
static int global_phb_number;

/* ISA memory base address */
resource_size_t isa_mem_base;

/* Default PCI DMA ops, can be overridden by platforms via set_pci_dma_ops() */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);

/*
 * pcibios_free_controller_deferred - free a pci_controller from a
 * pci_host_bridge release callback.
 *
 * The controller to free is stashed in bridge->release_data, so it is
 * only released once the host bridge itself goes away.  Do not also call
 * pcibios_free_controller() explicitly on the same controller.
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
				     bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);

/*
 * Return the minimum alignment required for memory or IO windows of
 * P2P bridges below @bus.  Platforms may override the default through
 * controller_ops.window_alignment.
 */
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
					 unsigned long type)
{
	struct pci_controller *phb = pci_bus_to_host(bus);

	if (phb->controller_ops.window_alignment)
		return phb->controller_ops.window_alignment(bus, type);

	/*
	 * Returning 1 lets the PCI core pick its default alignment:
	 * 4KiB for IO windows and 1MiB for memory windows.
	 */
	return 1;
}

void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.reset_secondary_bus) {
		phb->controller_ops.reset_secondary_bus(dev);
		return;
	}

	pci_reset_secondary_bus(dev);
}

#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}
#endif

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/* Return the domain number for this bus */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/*
 * Walk up the device tree from @node looking for a PCI host controller
 * whose device node matches; useful early during boot before the PCI
 * bus structures are fully set up.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

/*
 * Read the PCI interrupt pin/line for a device and map it to a Linux
 * virtual IRQ, preferring the device-tree interrupt map when one exists.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif

	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/*
		 * No device-tree mapping: fall back to PCI_INTERRUPT_PIN and
		 * PCI_INTERRUPT_LINE from config space and map the line
		 * through the default interrupt controller.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 of_node_full_name(oirq.controller));

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if (virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s.
 *
 * Adjust vm_pgoff of the VMA so that it becomes the physical page offset
 * corresponding to the PCI bus address requested by the user, and return
 * the device resource that offset falls into (or NULL if it matches none).
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;

	/* For IO space, add on the hose IO offset */
	if (mmap_state == pci_mmap_mem) {
#if 0	/* See the comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* Found it! Construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a
 * PCI device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	/*
	 * Write combining is only honoured for memory space, and is
	 * enabled by default when the resource is prefetchable.
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* Use write combining when requested or safe, plain uncached otherwise */
	if (write_combine)
		return pgprot_noncached_wc(protection);
	else
		return pgprot_noncached(protection);
}

/*
 * Decide the page protection for a physical mapping (/dev/mem, fbdev, ...)
 * that only knows a pfn: if the pfn falls inside a PCI memory BAR, use an
 * uncached mapping (write-combined for prefetchable BARs).
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* We are only interested in memory resources */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region to map is described by
 * vm_start and vm_end of the VMA, the base physical address by vm_pgoff.
 * The pci device structure is provided so that architectures may make
 * mapping decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * Check if the port can be supported by this bus.  We only check the
	 * ranges of the PHB, not the bus itself, as the rules for forwarding
	 * legacy cycles down bridges are not our problem here.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/*
	 * Check if the port can be supported by this bus.  We only check the
	 * ranges of the PHB, not the bus itself, as the rules for forwarding
	 * legacy cycles down bridges are not our problem here.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/*
	 * The generic code always hands us the value as a u32 read from the
	 * caller's buffer; on big-endian PowerPC that puts 1- and 2-byte
	 * quantities in the most significant bytes, hence the shifts below.
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy mmap of a bus's IO/memory space (used by X, etc.) */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/*
		 * Only allow mapping of ISA memory that actually exists.  If
		 * a process asks for legacy memory that isn't there, hand it
		 * an anonymous zero mapping instead so that old X servers
		 * don't fall over.
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/*
	 * For MMIO we hand userland a fully fixed-up CPU physical address
	 * rather than a BAR value, because X expects to be able to use the
	 * returned address with /dev/mem.  That is why the memory offset
	 * adjustment below stays disabled.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * Parse the "ranges" property of a PCI host bridge device node and set up
 * the resource mapping of the controller based on its content.  Only one
 * IO range and up to three memory ranges are supported; extra ranges are
 * skipped with a message.
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	const __be32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);
	int np = pna + 5;
	int memno = 0;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get the ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = of_read_number(ranges, 1);
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;

		/*
		 * Skip ranges that failed translation or that are zero
		 * sized (some firmware hands us bogus empty regions).
		 */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (of_read_number(ranges, 1) != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IO here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif
			/*
			 * pci_io_size and io_base_phys always represent IO
			 * space starting at 0, so the resource can cover the
			 * whole window.
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI memory space */
		case 3:		/* PCI 64 bits memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handle the ISA memory hole case */
			if (pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* Build resource */
			hose->mem_offset[memno] = cpu_addr - pci_addr;
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
		return 0;
	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
		return hose->global_number != 0;
	return 1;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (ppc_md.pcibios_root_bridge_prepare)
		return ppc_md.pcibios_root_bridge_prepare(bridge);

	return 0;
}

/*
 * This header fixup will do the resource fixup for all devices as they
 * are probed.
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/*
		 * If we're re-assigning everything, or if the firmware left
		 * this BAR unassigned (bus address 0), mark the resource as
		 * unset and rebase it to 0 so it gets re-allocated later.
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
					 "is unassigned\n",
					 pci_name(dev), i,
					 (unsigned long long)res->start,
					 (unsigned long long)res->end,
					 (unsigned int)res->flags);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

/*
 * This function tries to figure out if a bridge resource has been
 * initialized by the firmware or not.  It is used to decide whether the
 * window should be cleared out (so the kernel re-assigns it) or kept.
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/*
		 * The BAR is 0, check whether memory decoding is enabled on
		 * the bridge.  If not, consider the window unassigned.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/*
		 * Memory decoding is enabled and the BAR is 0.  If any of
		 * the host bridge memory windows starts at bus address 0,
		 * this could still be a valid assignment.
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/*
		 * No host window starts at bus address 0, so a bridge window
		 * at 0 cannot be valid: consider it unassigned.
		 */
		return 1;
	} else {
		/* If the low 32 bits of the IO window start aren't 0, it's assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/*
		 * The window starts at 0: check whether IO decoding is
		 * enabled on the bridge.  If it is, the window is probably
		 * a legitimate assignment.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* IO decoding is disabled and the window is 0: unassigned */
		return 1;
	}
}

/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		/*
		 * If we're going to reassign everything, mark the window as
		 * unset and shrink it right away.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		/*
		 * Try to detect uninitialized P2P bridge resources and clear
		 * them out so they get re-assigned later.
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
		}
	}
}

void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/* Platform specific bus fixups */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}

static void pcibios_setup_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	arch_dma_init(&dev->dev);

	/*
	 * Fixup NUMA node as it may not be set up yet by the generic code
	 * and is needed by the DMA init.
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);
}

int pcibios_add_device(struct pci_dev *dev)
{
	/*
	 * We can only call pcibios_setup_device() once bus setup is
	 * complete, since some of the platform specific DMA setup code
	 * depends on it.
	 */
	if (dev->bus->is_added)
		pcibios_setup_device(dev);

#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif

	return 0;
}

void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/*
		 * Cardbus can call us to add new devices to a bus, so
		 * ignore those that are already fully discovered.
		 */
		if (dev->is_added)
			continue;

		pcibios_setup_device(dev);
	}
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/*
	 * When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases.  This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

void pci_fixup_cardbus(struct pci_bus *bus)
{
	pcibios_setup_bus_devices(bus);
}

static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
		return 1;
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports and other strange
 * ISA hardware, so we always want IO addresses allocated in the
 * 0x000-0x0ff region modulo 0x400: some old ISA cards only decode the low
 * 10 bits of the IO address, so regions that alias 0x100-0x3ff must be
 * avoided.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of the parent that conflict with res under
 * res, and make res replace those children.
 */
static int reparent_resources(struct resource *parent,
			      struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}

/*
 * Handle resources (routines largely borrowed from the i386 code).
 *
 * Bridge windows and device BARs are walked and requested against their
 * parent resources.  Anything left unassigned by the firmware, or that we
 * have been asked to re-assign via PCI_REASSIGN_ALL_RSRC, is cleared out
 * so that the generic code can assign it later; everything else is
 * claimed so the resource tree reflects the firmware configuration.
 * PCI_PROBE_ONLY keeps the firmware setup untouched.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/*
				 * This happens when the generic PCI code
				 * (wrongly) decides that this bridge is
				 * transparent.
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			 "[0x%x], parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			struct pci_dev *dev = bus->self;

			if (request_resource(pr, res) == 0)
				continue;

			/*
			 * Must be a conflict with an existing entry.  Move
			 * that entry (or entries) under the bridge resource
			 * and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;

			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
			    pci_claim_bridge_resource(dev,
						i + PCI_BRIDGE_RESOURCES) == 0)
				continue;
		}
		pr_warning("PCI: Cannot allocate resource region "
			   "%d of PCI bridge %d, will remap\n", i, bus->number);
	clear_resource:
		/*
		 * The window may still get assigned later based on what the
		 * devices below the bridge need; for now clear it completely.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/*
			 * Pass 0 allocates resources of devices whose decode
			 * is already enabled, pass 1 handles the rest.
			 */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/*
			 * Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/*
	 * Before assigning unassigned resources, try to reserve the low IO
	 * area and the VGA memory area if they intersect the bus resources,
	 * to avoid allocating things on top of them.
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/* Now proceed to assigning things that were left unassigned */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();
}

/*
 * This is used by the PCI hotplug code to claim the resources of newly
 * plugged busses and devices.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: "
				 "Resource %d: %016llx..%016llx [%x]\n",
				 pci_name(dev), i,
				 (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			if (pci_claim_resource(dev, i) == 0)
				continue;

			pci_claim_bridge_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);

/*
 * pcibios_finish_adding_to_bus() is to be called by the hotplug code
 * after devices have been added to a bus: it allocates/claims their
 * resources, sets up EEH, and registers the new devices with the core.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);
	if (!pci_has_flag(PCI_PROBE_ONLY))
		pci_assign_unassigned_bus_resources(bus);

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* sysfs files should only be added after devices are added */
	eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.enable_device_hook)
		if (!phb->controller_ops.enable_device_hook(dev))
			return -EINVAL;

	return pci_enable_resources(dev, mask);
}

void pcibios_disable_device(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.disable_device)
		phb->controller_ops.disable_device(dev);
}

resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}

static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %08llx-%08llx [%lx] off 0x%08llx\n",
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags,
			 (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i == 0)
				printk(KERN_ERR "PCI: Memory resource 0 not set for "
				       "host bridge %s (domain %d)\n",
				       hose->dn->full_name, hose->global_number);
			continue;
		}
		offset = hose->mem_offset[i];

		pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags,
			 (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}

/*
 * Null PCI config access functions, for the case when we can't find a
 * host bridge.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done and all
 * of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == NULL) {
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

extern int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}

/**
 * pcibios_scan_phb - Given a pci_controller, set up and scan the PCI bus
 * @hose: Pointer to the PCI host controller instance structure
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB IO and memory resources */
	pcibios_setup_phb_resources(hose, &resources);

	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && hose->controller_ops.probe_mode)
		mode = hose->controller_ops.probe_mode(bus);
	pr_debug(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/* Platform gets a chance to do some global fixups */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}
}
EXPORT_SYMBOL_GPL(pcibios_scan_phb);

static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* prog_if is checked below to skip bridges configured as PCI agents */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
	    (prog_if == 0) &&
	    (dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);

static void fixup_vga(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
		vga_set_default_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);