/* linux/arch/powerpc/kernel/pci-common.c */
/*
 * Contains common pci routines for ALL ppc platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  18
  19#include <linux/kernel.h>
  20#include <linux/pci.h>
  21#include <linux/string.h>
  22#include <linux/init.h>
  23#include <linux/bootmem.h>
  24#include <linux/export.h>
  25#include <linux/of_address.h>
  26#include <linux/of_pci.h>
  27#include <linux/mm.h>
  28#include <linux/list.h>
  29#include <linux/syscalls.h>
  30#include <linux/irq.h>
  31#include <linux/vmalloc.h>
  32#include <linux/slab.h>
  33
  34#include <asm/processor.h>
  35#include <asm/io.h>
  36#include <asm/prom.h>
  37#include <asm/pci-bridge.h>
  38#include <asm/byteorder.h>
  39#include <asm/machdep.h>
  40#include <asm/ppc-pci.h>
  41#include <asm/eeh.h>
  42
/* Protects hose_list and global_phb_number below */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of every PCI host bridge (PHB) controller in the system */
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;


/* DMA operations used for PCI devices; defaults to direct mapping,
 * platforms may override via set_pci_dma_ops() */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
  54
/*
 * set_pci_dma_ops - install the DMA mapping operations used for PCI devices
 * @dma_ops: new operations table; the caller retains ownership
 */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
  59
/*
 * get_pci_dma_ops - return the DMA mapping operations currently in use
 * for PCI devices (see set_pci_dma_ops() above).
 */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
  65
/*
 * pcibios_alloc_controller - allocate and register a new PCI host bridge
 * @dev: device-tree node of the host bridge (may be NULL)
 *
 * Allocates a zeroed pci_controller (falling back to bootmem when the
 * memory allocator is not up yet), assigns it the next global PHB number
 * and links it onto hose_list under hose_spinlock.  On PPC64 the
 * controller's NUMA node is derived from the device-tree node when one
 * is supplied.
 *
 * Returns the new controller, or NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Record whether this came from the slab or bootmem so
	 * pcibios_free_controller() knows whether kfree() is legal */
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to "no node" when the DT gives none or an
		 * offline one */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
  91
/*
 * pcibios_free_controller - unlink a PCI host bridge and release it
 * @phb: controller previously obtained from pcibios_alloc_controller()
 *
 * The structure is only kfree()d when it was dynamically allocated
 * (is_dynamic); boot-time bootmem allocations are just unlinked.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
 101
/*
 * pcibios_io_size - size of a host bridge's IO space window.
 *
 * PPC64 tracks the size explicitly in pci_io_size; 32-bit derives it
 * from the controller's io_resource range.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
 110
 111int pcibios_vaddr_is_ioport(void __iomem *address)
 112{
 113        int ret = 0;
 114        struct pci_controller *hose;
 115        resource_size_t size;
 116
 117        spin_lock(&hose_spinlock);
 118        list_for_each_entry(hose, &hose_list, list_node) {
 119                size = pcibios_io_size(hose);
 120                if (address >= hose->io_base_virt &&
 121                    address < (hose->io_base_virt + size)) {
 122                        ret = 1;
 123                        break;
 124                }
 125        }
 126        spin_unlock(&hose_spinlock);
 127        return ret;
 128}
 129
 130unsigned long pci_address_to_pio(phys_addr_t address)
 131{
 132        struct pci_controller *hose;
 133        resource_size_t size;
 134        unsigned long ret = ~0;
 135
 136        spin_lock(&hose_spinlock);
 137        list_for_each_entry(hose, &hose_list, list_node) {
 138                size = pcibios_io_size(hose);
 139                if (address >= hose->io_base_phys &&
 140                    address < (hose->io_base_phys + size)) {
 141                        unsigned long base =
 142                                (unsigned long)hose->io_base_virt - _IO_BASE;
 143                        ret = base + (address - hose->io_base_phys);
 144                        break;
 145                }
 146        }
 147        spin_unlock(&hose_spinlock);
 148
 149        return ret;
 150}
 151EXPORT_SYMBOL_GPL(pci_address_to_pio);
 152
/*
 * Return the domain number for this bus.  On powerpc each host bridge
 * forms its own PCI domain, so the controller's global number is used.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);
 163
 164/* This routine is meant to be used early during boot, when the
 165 * PCI bus numbers have not yet been assigned, and you need to
 166 * issue PCI config cycles to an OF device.
 167 * It could also be used to "fix" RTAS config cycles if you want
 168 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 169 * config cycles.
 170 */
 171struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
 172{
 173        while(node) {
 174                struct pci_controller *hose, *tmp;
 175                list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
 176                        if (hose->dn == node)
 177                                return hose;
 178                node = node->parent;
 179        }
 180        return NULL;
 181}
 182
 183static ssize_t pci_show_devspec(struct device *dev,
 184                struct device_attribute *attr, char *buf)
 185{
 186        struct pci_dev *pdev;
 187        struct device_node *np;
 188
 189        pdev = to_pci_dev (dev);
 190        np = pci_device_to_OF_node(pdev);
 191        if (np == NULL || np->full_name == NULL)
 192                return 0;
 193        return sprintf(buf, "%s", np->full_name);
 194}
 195static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
 196
/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	/* Expose the open-firmware path of the device as "devspec" */
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}
 202
/*
 * Reads the interrupt pin to determine if an interrupt is used by the card.
 * If so, resolve the linux virq for it: preferably via the device-tree
 * interrupt map, otherwise falling back to the PCI_INTERRUPT_LINE config
 * register, and store the result in pci_dev->irq.
 *
 * Returns 0 on success, -1 if no usable interrupt could be mapped.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison the structure so partially-filled results stand out */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;	/* device does not use INTx */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;	/* 0xff / 0 mean "no line assigned" */
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 of_node_full_name(oirq.controller));

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
 262
 263/*
 264 * Platform support for /proc/bus/pci/X/Y mmap()s,
 265 * modelled on the sparc64 implementation by Dave Miller.
 266 *  -- paulus.
 267 */
 268
 269/*
 270 * Adjust vm_pgoff of VMA such that it is the physical page offset
 271 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 272 *
 273 * Basically, the user finds the base address for his device which he wishes
 274 * to mmap.  They read the 32-bit value from the config space base register,
 275 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 276 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 277 *
 278 * Returns negative error code on failure, zero on success.
 279 */
 280static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 281                                               resource_size_t *offset,
 282                                               enum pci_mmap_state mmap_state)
 283{
 284        struct pci_controller *hose = pci_bus_to_host(dev->bus);
 285        unsigned long io_offset = 0;
 286        int i, res_bit;
 287
 288        if (hose == 0)
 289                return NULL;            /* should never happen */
 290
 291        /* If memory, add on the PCI bridge address offset */
 292        if (mmap_state == pci_mmap_mem) {
 293#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
 294                *offset += hose->pci_mem_offset;
 295#endif
 296                res_bit = IORESOURCE_MEM;
 297        } else {
 298                io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 299                *offset += io_offset;
 300                res_bit = IORESOURCE_IO;
 301        }
 302
 303        /*
 304         * Check that the offset requested corresponds to one of the
 305         * resources of the device.
 306         */
 307        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 308                struct resource *rp = &dev->resource[i];
 309                int flags = rp->flags;
 310
 311                /* treat ROM as memory (should be already) */
 312                if (i == PCI_ROM_RESOURCE)
 313                        flags |= IORESOURCE_MEM;
 314
 315                /* Active and same type? */
 316                if ((flags & res_bit) == 0)
 317                        continue;
 318
 319                /* In the range of this resource? */
 320                if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
 321                        continue;
 322
 323                /* found it! construct the final physical address */
 324                if (mmap_state == pci_mmap_io)
 325                        *offset += hose->io_base_phys - io_offset;
 326                return rp;
 327        }
 328
 329        return NULL;
 330}
 331
 332/*
 333 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 334 * device mapping.
 335 */
 336static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 337                                      pgprot_t protection,
 338                                      enum pci_mmap_state mmap_state,
 339                                      int write_combine)
 340{
 341        unsigned long prot = pgprot_val(protection);
 342
 343        /* Write combine is always 0 on non-memory space mappings. On
 344         * memory space, if the user didn't pass 1, we check for a
 345         * "prefetchable" resource. This is a bit hackish, but we use
 346         * this to workaround the inability of /sysfs to provide a write
 347         * combine bit
 348         */
 349        if (mmap_state != pci_mmap_mem)
 350                write_combine = 0;
 351        else if (write_combine == 0) {
 352                if (rp->flags & IORESOURCE_PREFETCH)
 353                        write_combine = 1;
 354        }
 355
 356        /* XXX would be nice to have a way to ask for write-through */
 357        if (write_combine)
 358                return pgprot_noncached_wc(prot);
 359        else
 360                return pgprot_noncached(prot);
 361}
 362
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine.
 *
 * Returns @prot unchanged for RAM pages; otherwise a non-cached
 * protection, upgraded to write-combine when the pfn falls inside a
 * prefetchable PCI memory resource.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Scan every PCI device's resources for one covering this address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Drop the device reference for_each_pci_dev() left held
		 * when we broke out of the loop early */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
 411
 412
 413/*
 414 * Perform the actual remap of the pages for a PCI device mapping, as
 415 * appropriate for this architecture.  The region in the process to map
 416 * is described by vm_start and vm_end members of VMA, the base physical
 417 * address is found in vm_pgoff.
 418 * The pci device structure is provided so that architectures may make mapping
 419 * decisions on a per-device or per-bus basis.
 420 *
 421 * Returns a negative error code on failure, zero on success.
 422 */
 423int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 424                        enum pci_mmap_state mmap_state, int write_combine)
 425{
 426        resource_size_t offset =
 427                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
 428        struct resource *rp;
 429        int ret;
 430
 431        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
 432        if (rp == NULL)
 433                return -EINVAL;
 434
 435        vma->vm_pgoff = offset >> PAGE_SHIFT;
 436        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
 437                                                  vma->vm_page_prot,
 438                                                  mmap_state, write_combine);
 439
 440        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 441                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
 442
 443        return ret;
 444}
 445
 446/* This provides legacy IO read access on a bus */
 447int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
 448{
 449        unsigned long offset;
 450        struct pci_controller *hose = pci_bus_to_host(bus);
 451        struct resource *rp = &hose->io_resource;
 452        void __iomem *addr;
 453
 454        /* Check if port can be supported by that bus. We only check
 455         * the ranges of the PHB though, not the bus itself as the rules
 456         * for forwarding legacy cycles down bridges are not our problem
 457         * here. So if the host bridge supports it, we do it.
 458         */
 459        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 460        offset += port;
 461
 462        if (!(rp->flags & IORESOURCE_IO))
 463                return -ENXIO;
 464        if (offset < rp->start || (offset + size) > rp->end)
 465                return -ENXIO;
 466        addr = hose->io_base_virt + port;
 467
 468        switch(size) {
 469        case 1:
 470                *((u8 *)val) = in_8(addr);
 471                return 1;
 472        case 2:
 473                if (port & 1)
 474                        return -EINVAL;
 475                *((u16 *)val) = in_le16(addr);
 476                return 2;
 477        case 4:
 478                if (port & 3)
 479                        return -EINVAL;
 480                *((u32 *)val) = in_le32(addr);
 481                return 4;
 482        }
 483        return -EINVAL;
 484}
 485
 486/* This provides legacy IO write access on a bus */
 487int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
 488{
 489        unsigned long offset;
 490        struct pci_controller *hose = pci_bus_to_host(bus);
 491        struct resource *rp = &hose->io_resource;
 492        void __iomem *addr;
 493
 494        /* Check if port can be supported by that bus. We only check
 495         * the ranges of the PHB though, not the bus itself as the rules
 496         * for forwarding legacy cycles down bridges are not our problem
 497         * here. So if the host bridge supports it, we do it.
 498         */
 499        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
 500        offset += port;
 501
 502        if (!(rp->flags & IORESOURCE_IO))
 503                return -ENXIO;
 504        if (offset < rp->start || (offset + size) > rp->end)
 505                return -ENXIO;
 506        addr = hose->io_base_virt + port;
 507
 508        /* WARNING: The generic code is idiotic. It gets passed a pointer
 509         * to what can be a 1, 2 or 4 byte quantity and always reads that
 510         * as a u32, which means that we have to correct the location of
 511         * the data read within those 32 bits for size 1 and 2
 512         */
 513        switch(size) {
 514        case 1:
 515                out_8(addr, val >> 24);
 516                return 1;
 517        case 2:
 518                if (port & 1)
 519                        return -EINVAL;
 520                out_le16(addr, val >> 16);
 521                return 2;
 522        case 4:
 523                if (port & 3)
 524                        return -EINVAL;
 525                out_le32(addr, val);
 526                return 4;
 527        }
 528        return -EINVAL;
 529}
 530
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		/* Translate the legacy PCI memory offset into a CPU
		 * physical address inside the bridge's ISA memory window */
		offset += hose->isa_mem_phys;
	} else {
		/* IO space: validate the requested window against the
		 * PHB's io_resource before translating to physical */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
 583
/*
 * pci_resource_to_user - convert a resource to the start/end values
 * exposed to userland (via /proc and sysfs).  IO resources are rebased
 * to be relative to the host bridge's IO window.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
 622
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
 *     are setup for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev,
					    int primary)
{
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);	/* parent #address-cells */
	int np = pna + 5;		/* cells per "ranges" entry */
	int memno = 0, isa_hole = -1;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	unsigned long long isa_mb = 0;	/* CPU address of the ISA hole */
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = ranges[0];
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous
		 * in both PCI and CPU space, coalescing them into one range.
		 * (rlen is non-negative here, so the signed/unsigned
		 * comparison against sizeof() is safe.)
		 */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (ranges[0] != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI Memory space */
		case 3:		/* PCI 64 bits Memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (pci_addr == 0) {
				isa_mb = cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the, current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = cpu_addr - pci_addr;
			else if (pci_addr != 0 &&
				 hose->pci_mem_offset != cpu_addr - pci_addr) {
				printk(KERN_INFO
				       " \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		/* Finish populating whichever resource was selected above */
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
 815
 816/* Decide whether to display the domain number in /proc */
 817int pci_proc_domain(struct pci_bus *bus)
 818{
 819        struct pci_controller *hose = pci_bus_to_host(bus);
 820
 821        if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
 822                return 0;
 823        if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
 824                return hose->global_number != 0;
 825        return 1;
 826}
 827
 828/* This header fixup will do the resource fixup for all devices as they are
 829 * probed, but not for bridge ranges
 830 */
 831static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
 832{
 833        struct pci_controller *hose = pci_bus_to_host(dev->bus);
 834        int i;
 835
 836        if (!hose) {
 837                printk(KERN_ERR "No host bridge for PCI dev %s !\n",
 838                       pci_name(dev));
 839                return;
 840        }
 841        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 842                struct resource *res = dev->resource + i;
 843                if (!res->flags)
 844                        continue;
 845
 846                /* If we're going to re-assign everything, we mark all resources
 847                 * as unset (and 0-base them). In addition, we mark BARs starting
 848                 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
 849                 * since in that case, we don't want to re-assign anything
 850                 */
 851                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
 852                    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 853                        /* Only print message if not re-assigning */
 854                        if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 855                                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
 856                                         "is unassigned\n",
 857                                         pci_name(dev), i,
 858                                         (unsigned long long)res->start,
 859                                         (unsigned long long)res->end,
 860                                         (unsigned int)res->flags);
 861                        res->end -= res->start;
 862                        res->start = 0;
 863                        res->flags |= IORESOURCE_UNSET;
 864                        continue;
 865                }
 866
 867                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
 868                         pci_name(dev), i,
 869                         (unsigned long long)res->start,\
 870                         (unsigned long long)res->end,
 871                         (unsigned int)res->flags);
 872        }
 873
 874        /* Call machine specific resource fixup */
 875        if (ppc_md.pcibios_fixup_resources)
 876                ppc_md.pcibios_fixup_resources(dev);
 877}
 878DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
 879
/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges.
 *
 * Returns 1 when the resource looks uninitialized (the caller then clears it
 * so it gets re-assigned later), 0 when firmware appears to have set it up.
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
                                                           struct resource *res)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pci_dev *dev = bus->self;
        resource_size_t offset;
        u16 command;
        int i;

        /* We don't do anything if PCI_PROBE_ONLY is set */
        if (pci_has_flag(PCI_PROBE_ONLY))
                return 0;

        /* Job is a bit different between memory and IO */
        if (res->flags & IORESOURCE_MEM) {
                /* If the BAR is non-0 (res->start != pci_mem_offset, i.e.
                 * PCI address 0) then it's probably been initialized by
                 * somebody
                 */
                if (res->start != hose->pci_mem_offset)
                        return 0;

                /* The BAR is 0, let's check if memory decoding is enabled on
                 * the bridge. If not, we consider it unassigned
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if ((command & PCI_COMMAND_MEMORY) == 0)
                        return 1;

                /* Memory decoding is enabled and the BAR is 0. If any of the
                 * host bridge apertures covers that starting address (0),
                 * then it's good enough for us for memory
                 */
                for (i = 0; i < 3; i++) {
                        if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
                            hose->mem_resources[i].start == hose->pci_mem_offset)
                                return 0;
                }

                /* Well, it starts at 0 and we know it will collide so we may as
                 * well consider it as unassigned. That covers the Apple case.
                 */
                return 1;
        } else {
                /* If the BAR is non-0 (in PCI IO space), we consider it
                 * assigned
                 */
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                if (((res->start - offset) & 0xfffffffful) != 0)
                        return 0;

                /* Here, we are a bit different than memory as typically IO
                 * space starting at low addresses -is- valid. What we do
                 * instead is that we consider as unassigned anything that
                 * doesn't have IO enabled in the PCI command register,
                 * and that's it.
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if (command & PCI_COMMAND_IO)
                        return 0;

                /* It's starting at 0 and IO is disabled in the bridge,
                 * consider it unassigned
                 */
                return 1;
        }
}
 948
 949/* Fixup resources of a PCI<->PCI bridge */
 950static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
 951{
 952        struct resource *res;
 953        int i;
 954
 955        struct pci_dev *dev = bus->self;
 956
 957        pci_bus_for_each_resource(bus, res, i) {
 958                if (!res || !res->flags)
 959                        continue;
 960                if (i >= 3 && bus->self->transparent)
 961                        continue;
 962
 963                /* If we are going to re-assign everything, mark the resource
 964                 * as unset and move it down to 0
 965                 */
 966                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
 967                        res->flags |= IORESOURCE_UNSET;
 968                        res->end -= res->start;
 969                        res->start = 0;
 970                        continue;
 971                }
 972
 973                pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
 974                         pci_name(dev), i,
 975                         (unsigned long long)res->start,\
 976                         (unsigned long long)res->end,
 977                         (unsigned int)res->flags);
 978
 979                /* Try to detect uninitialized P2P bridge resources,
 980                 * and clear them out so they get re-assigned later
 981                 */
 982                if (pcibios_uninitialized_bridge_resource(bus, res)) {
 983                        res->flags = 0;
 984                        pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
 985                }
 986        }
 987}
 988
 989void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
 990{
 991        /* Fix up the bus resources for P2P bridges */
 992        if (bus->self != NULL)
 993                pcibios_fixup_bridge(bus);
 994
 995        /* Platform specific bus fixups. This is currently only used
 996         * by fsl_pci and I'm hoping to get rid of it at some point
 997         */
 998        if (ppc_md.pcibios_fixup_bus)
 999                ppc_md.pcibios_fixup_bus(bus);
1000
1001        /* Setup bus DMA mappings */
1002        if (ppc_md.pci_dma_bus_setup)
1003                ppc_md.pci_dma_bus_setup(bus);
1004}
1005
1006void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1007{
1008        struct pci_dev *dev;
1009
1010        pr_debug("PCI: Fixup bus devices %d (%s)\n",
1011                 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1012
1013        list_for_each_entry(dev, &bus->devices, bus_list) {
1014                /* Cardbus can call us to add new devices to a bus, so ignore
1015                 * those who are already fully discovered
1016                 */
1017                if (dev->is_added)
1018                        continue;
1019
1020                /* Fixup NUMA node as it may not be setup yet by the generic
1021                 * code and is needed by the DMA init
1022                 */
1023                set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1024
1025                /* Hook up default DMA ops */
1026                set_dma_ops(&dev->dev, pci_dma_ops);
1027                set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
1028
1029                /* Additional platform DMA/iommu setup */
1030                if (ppc_md.pci_dma_dev_setup)
1031                        ppc_md.pci_dma_dev_setup(dev);
1032
1033                /* Read default IRQs and fixup if necessary */
1034                pci_read_irq_line(dev);
1035                if (ppc_md.pci_irq_fixup)
1036                        ppc_md.pci_irq_fixup(dev);
1037        }
1038}
1039
void pcibios_set_master(struct pci_dev *dev)
{
        /* Nothing platform specific to do when a device becomes a bus
         * master; the generic PCI code handles everything needed.
         */
}
1044
1045void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1046{
1047        /* When called from the generic PCI probe, read PCI<->PCI bridge
1048         * bases. This is -not- called when generating the PCI tree from
1049         * the OF device-tree.
1050         */
1051        if (bus->self != NULL)
1052                pci_read_bridge_bases(bus);
1053
1054        /* Now fixup the bus bus */
1055        pcibios_setup_bus_self(bus);
1056
1057        /* Now fixup devices on that bus */
1058        pcibios_setup_bus_devices(bus);
1059}
1060EXPORT_SYMBOL(pcibios_fixup_bus);
1061
1062void __devinit pci_fixup_cardbus(struct pci_bus *bus)
1063{
1064        /* Now fixup devices on that bus */
1065        pcibios_setup_bus_devices(bus);
1066}
1067
1068
1069static int skip_isa_ioresource_align(struct pci_dev *dev)
1070{
1071        if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1072            !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1073                return 1;
1074        return 0;
1075}
1076
1077/*
1078 * We need to avoid collisions with `mirrored' VGA ports
1079 * and other strange ISA hardware, so we always want the
1080 * addresses to be allocated in the 0x000-0x0ff region
1081 * modulo 0x400.
1082 *
1083 * Why? Because some silly external IO cards only decode
1084 * the low 10 bits of the IO address. The 0x00-0xff region
1085 * is reserved for motherboard devices that decode all 16
1086 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1087 * but we want to try to avoid allocating at 0x2900-0x2bff
1088 * which might have be mirrored at 0x0100-0x03ff..
1089 */
1090resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1091                                resource_size_t size, resource_size_t align)
1092{
1093        struct pci_dev *dev = data;
1094        resource_size_t start = res->start;
1095
1096        if (res->flags & IORESOURCE_IO) {
1097                if (skip_isa_ioresource_align(dev))
1098                        return start;
1099                if (start & 0x300)
1100                        start = (start + 0x3ff) & ~0x3ff;
1101        }
1102
1103        return start;
1104}
1105EXPORT_SYMBOL(pcibios_align_resource);
1106
1107/*
1108 * Reparent resource children of pr that conflict with res
1109 * under res, and make res replace those children.
1110 */
1111static int reparent_resources(struct resource *parent,
1112                                     struct resource *res)
1113{
1114        struct resource *p, **pp;
1115        struct resource **firstpp = NULL;
1116
1117        for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1118                if (p->end < res->start)
1119                        continue;
1120                if (res->end < p->start)
1121                        break;
1122                if (p->start < res->start || p->end > res->end)
1123                        return -1;      /* not completely contained */
1124                if (firstpp == NULL)
1125                        firstpp = pp;
1126        }
1127        if (firstpp == NULL)
1128                return -1;      /* didn't find any conflicting entries? */
1129        res->parent = parent;
1130        res->child = *firstpp;
1131        res->sibling = *pp;
1132        *firstpp = res;
1133        *pp = NULL;
1134        for (p = res->child; p != NULL; p = p->sibling) {
1135                p->parent = res;
1136                pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1137                         p->name,
1138                         (unsigned long long)p->start,
1139                         (unsigned long long)p->end, res->name);
1140        }
1141        return 0;
1142}
1143
1144/*
1145 *  Handle resources of PCI devices.  If the world were perfect, we could
1146 *  just allocate all the resource regions and do nothing more.  It isn't.
1147 *  On the other hand, we cannot just re-allocate all devices, as it would
1148 *  require us to know lots of host bridge internals.  So we attempt to
1149 *  keep as much of the original configuration as possible, but tweak it
1150 *  when it's found to be wrong.
1151 *
1152 *  Known BIOS problems we have to work around:
1153 *      - I/O or memory regions not configured
1154 *      - regions configured, but not enabled in the command register
1155 *      - bogus I/O addresses above 64K used
1156 *      - expansion ROMs left enabled (this may sound harmless, but given
1157 *        the fact the PCI specs explicitly allow address decoders to be
1158 *        shared between expansion ROMs and other resource regions, it's
1159 *        at least dangerous)
1160 *
1161 *  Our solution:
1162 *      (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1163 *          This gives us fixed barriers on where we can allocate.
1164 *      (2) Allocate resources for all enabled devices.  If there is
1165 *          a collision, just mark the resource as unallocated. Also
1166 *          disable expansion ROMs during this step.
1167 *      (3) Try to allocate resources for disabled devices.  If the
1168 *          resources were assigned correctly, everything goes well,
1169 *          if they weren't, they won't disturb allocation of other
1170 *          resources.
1171 *      (4) Assign new addresses to resources which were either
1172 *          not configured at all or misconfigured.  If explicitly
1173 *          requested by the user, configure expansion ROM address
1174 *          as well.
1175 */
1176
/* Claim the bridge/PHB windows of a bus (and, recursively, of all its
 * children) in the resource tree. Windows that cannot be claimed are
 * zeroed so they get re-assigned later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
        struct pci_bus *b;
        int i;
        struct resource *res, *pr;

        pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
                 pci_domain_nr(bus), bus->number);

        pci_bus_for_each_resource(bus, res, i) {
                /* Skip missing, flagless, backward or already-claimed windows */
                if (!res || !res->flags || res->start > res->end || res->parent)
                        continue;

                /* If the resource was left unset at this point, we clear it */
                if (res->flags & IORESOURCE_UNSET)
                        goto clear_resource;

                /* Root buses allocate from the global IO/MEM pools, other
                 * buses from their parent bridge's windows
                 */
                if (bus->parent == NULL)
                        pr = (res->flags & IORESOURCE_IO) ?
                                &ioport_resource : &iomem_resource;
                else {
                        pr = pci_find_parent_resource(bus->self, res);
                        if (pr == res) {
                                /* this happens when the generic PCI
                                 * code (wrongly) decides that this
                                 * bridge is transparent  -- paulus
                                 */
                                continue;
                        }
                }

                pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
                         "[0x%x], parent %p (%s)\n",
                         bus->self ? pci_name(bus->self) : "PHB",
                         bus->number, i,
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned int)res->flags,
                         pr, (pr && pr->name) ? pr->name : "nil");

                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
                         * Must be a conflict with an existing entry.
                         * Move that entry (or entries) under the
                         * bridge resource and try again.
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
                }
                pr_warning("PCI: Cannot allocate resource region "
                           "%d of PCI bridge %d, will remap\n", i, bus->number);
        clear_resource:
                /* Zero the window so the later survey re-assigns it */
                res->start = res->end = 0;
                res->flags = 0;
        }

        /* Recurse into subordinate buses */
        list_for_each_entry(b, &bus->children, node)
                pcibios_allocate_bus_resources(b);
}
1238
1239static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1240{
1241        struct resource *pr, *r = &dev->resource[idx];
1242
1243        pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1244                 pci_name(dev), idx,
1245                 (unsigned long long)r->start,
1246                 (unsigned long long)r->end,
1247                 (unsigned int)r->flags);
1248
1249        pr = pci_find_parent_resource(dev, r);
1250        if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1251            request_resource(pr, r) < 0) {
1252                printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1253                       " of device %s, will remap\n", idx, pci_name(dev));
1254                if (pr)
1255                        pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
1256                                 pr,
1257                                 (unsigned long long)pr->start,
1258                                 (unsigned long long)pr->end,
1259                                 (unsigned int)pr->flags);
1260                /* We'll assign a new address later */
1261                r->flags |= IORESOURCE_UNSET;
1262                r->end -= r->start;
1263                r->start = 0;
1264        }
1265}
1266
1267static void __init pcibios_allocate_resources(int pass)
1268{
1269        struct pci_dev *dev = NULL;
1270        int idx, disabled;
1271        u16 command;
1272        struct resource *r;
1273
1274        for_each_pci_dev(dev) {
1275                pci_read_config_word(dev, PCI_COMMAND, &command);
1276                for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1277                        r = &dev->resource[idx];
1278                        if (r->parent)          /* Already allocated */
1279                                continue;
1280                        if (!r->flags || (r->flags & IORESOURCE_UNSET))
1281                                continue;       /* Not assigned at all */
1282                        /* We only allocate ROMs on pass 1 just in case they
1283                         * have been screwed up by firmware
1284                         */
1285                        if (idx == PCI_ROM_RESOURCE )
1286                                disabled = 1;
1287                        if (r->flags & IORESOURCE_IO)
1288                                disabled = !(command & PCI_COMMAND_IO);
1289                        else
1290                                disabled = !(command & PCI_COMMAND_MEMORY);
1291                        if (pass == disabled)
1292                                alloc_resource(dev, idx);
1293                }
1294                if (pass)
1295                        continue;
1296                r = &dev->resource[PCI_ROM_RESOURCE];
1297                if (r->flags) {
1298                        /* Turn the ROM off, leave the resource region,
1299                         * but keep it unregistered.
1300                         */
1301                        u32 reg;
1302                        pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1303                        if (reg & PCI_ROM_ADDRESS_ENABLE) {
1304                                pr_debug("PCI: Switching off ROM of %s\n",
1305                                         pci_name(dev));
1306                                r->flags &= ~IORESOURCE_ROM_ENABLE;
1307                                pci_write_config_dword(dev, dev->rom_base_reg,
1308                                                       reg & ~PCI_ROM_ADDRESS_ENABLE);
1309                        }
1310                }
1311        }
1312}
1313
/* Reserve the legacy ISA IO window (first 4KB of IO space) and the VGA
 * frame buffer range (0xa0000-0xbffff) within a PHB's apertures, so the
 * resource allocator never hands those ranges out to devices.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        resource_size_t offset;
        struct resource *res, *pres;
        int i;

        pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

        /* Check for IO */
        if (!(hose->io_resource.flags & IORESOURCE_IO))
                goto no_io;
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        /* Allocation failure this early is fatal, hence the BUG_ON */
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy IO";
        res->flags = IORESOURCE_IO;
        res->start = offset;
        res->end = (offset + 0xfff) & 0xfffffffful;
        pr_debug("Candidate legacy IO: %pR\n", res);
        if (request_resource(&hose->io_resource, res)) {
                /* Not an error: the range may simply fall outside or
                 * conflict with this PHB's IO aperture
                 */
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }

 no_io:
        /* Check for memory: find an aperture that covers the whole VGA
         * range in PCI address space.
         * NOTE(review): the subtractions are unsigned, so an aperture with
         * start below pci_mem_offset wraps and fails the test -- presumably
         * intended, but worth confirming.
         */
        offset = hose->pci_mem_offset;
        pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
        for (i = 0; i < 3; i++) {
                pres = &hose->mem_resources[i];
                if (!(pres->flags & IORESOURCE_MEM))
                        continue;
                pr_debug("hose mem res: %pR\n", pres);
                if ((pres->start - offset) <= 0xa0000 &&
                    (pres->end - offset) >= 0xbffff)
                        break;
        }
        if (i >= 3)
                return;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy VGA memory";
        res->flags = IORESOURCE_MEM;
        res->start = 0xa0000 + offset;
        res->end = 0xbffff + offset;
        pr_debug("Candidate VGA memory: %pR\n", res);
        if (request_resource(pres, res)) {
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }
}
1370
1371void __init pcibios_resource_survey(void)
1372{
1373        struct pci_bus *b;
1374
1375        /* Allocate and assign resources */
1376        list_for_each_entry(b, &pci_root_buses, node)
1377                pcibios_allocate_bus_resources(b);
1378        pcibios_allocate_resources(0);
1379        pcibios_allocate_resources(1);
1380
1381        /* Before we start assigning unassigned resource, we try to reserve
1382         * the low IO area and the VGA memory area if they intersect the
1383         * bus available resources to avoid allocating things on top of them
1384         */
1385        if (!pci_has_flag(PCI_PROBE_ONLY)) {
1386                list_for_each_entry(b, &pci_root_buses, node)
1387                        pcibios_reserve_legacy_regions(b);
1388        }
1389
1390        /* Now, if the platform didn't decide to blindly trust the firmware,
1391         * we proceed to assigning things that were left unassigned
1392         */
1393        if (!pci_has_flag(PCI_PROBE_ONLY)) {
1394                pr_debug("PCI: Assigning unassigned resources...\n");
1395                pci_assign_unassigned_resources();
1396        }
1397
1398        /* Call machine dependent fixup */
1399        if (ppc_md.pcibios_fixup)
1400                ppc_md.pcibios_fixup();
1401}
1402
1403#ifdef CONFIG_HOTPLUG
1404
1405/* This is used by the PCI hotplug driver to allocate resource
1406 * of newly plugged busses. We can try to consolidate with the
1407 * rest of the code later, for now, keep it as-is as our main
1408 * resource allocation function doesn't deal with sub-trees yet.
1409 */
1410void pcibios_claim_one_bus(struct pci_bus *bus)
1411{
1412        struct pci_dev *dev;
1413        struct pci_bus *child_bus;
1414
1415        list_for_each_entry(dev, &bus->devices, bus_list) {
1416                int i;
1417
1418                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1419                        struct resource *r = &dev->resource[i];
1420
1421                        if (r->parent || !r->start || !r->flags)
1422                                continue;
1423
1424                        pr_debug("PCI: Claiming %s: "
1425                                 "Resource %d: %016llx..%016llx [%x]\n",
1426                                 pci_name(dev), i,
1427                                 (unsigned long long)r->start,
1428                                 (unsigned long long)r->end,
1429                                 (unsigned int)r->flags);
1430
1431                        pci_claim_resource(dev, i);
1432                }
1433        }
1434
1435        list_for_each_entry(child_bus, &bus->children, node)
1436                pcibios_claim_one_bus(child_bus);
1437}
1438
1439
1440/* pcibios_finish_adding_to_bus
1441 *
1442 * This is to be called by the hotplug code after devices have been
1443 * added to a bus, this include calling it for a PHB that is just
1444 * being added
1445 */
1446void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1447{
1448        pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1449                 pci_domain_nr(bus), bus->number);
1450
1451        /* Allocate bus and devices resources */
1452        pcibios_allocate_bus_resources(bus);
1453        pcibios_claim_one_bus(bus);
1454
1455        /* Add new devices to global lists.  Register in proc, sysfs. */
1456        pci_bus_add_devices(bus);
1457
1458        /* Fixup EEH */
1459        eeh_add_device_tree_late(bus);
1460}
1461EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1462
1463#endif /* CONFIG_HOTPLUG */
1464
1465int pcibios_enable_device(struct pci_dev *dev, int mask)
1466{
1467        if (ppc_md.pcibios_enable_device_hook)
1468                if (ppc_md.pcibios_enable_device_hook(dev))
1469                        return -EINVAL;
1470
1471        return pci_enable_resources(dev, mask);
1472}
1473
1474resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1475{
1476        return (unsigned long) hose->io_base_virt - _IO_BASE;
1477}
1478
/* Populate the generic layer's resource list for one PHB: hook up the
 * host bridge's IO aperture and up to three memory apertures, applying
 * the CPU<->PCI address offsets the generic code expects.
 */
static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
{
        struct resource *res;
        int i;

        /* Hookup PHB IO resource */
        res = &hose->io_resource;

        if (!res->flags) {
                printk(KERN_WARNING "PCI: I/O resource not set for host"
                       " bridge %s (domain %d)\n",
                       hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
                /* Workaround for lack of IO resource only on 32-bit */
                res->start = (unsigned long)hose->io_base_virt - isa_io_base;
                res->end = res->start + IO_SPACE_LIMIT;
                res->flags = IORESOURCE_IO;
#endif /* CONFIG_PPC32 */
        }

        pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
                 (unsigned long long)res->start,
                 (unsigned long long)res->end,
                 (unsigned long)res->flags);
        /* IO addresses are offset by the virtual mapping base */
        pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));

        /* Hookup PHB Memory resources */
        for (i = 0; i < 3; ++i) {
                res = &hose->mem_resources[i];
                if (!res->flags) {
                        /* Only the first memory window is mandatory */
                        if (i > 0)
                                continue;
                        printk(KERN_ERR "PCI: Memory resource 0 not set for "
                               "host bridge %s (domain %d)\n",
                               hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
                        /* Workaround for lack of MEM resource only on 32-bit */
                        res->start = hose->pci_mem_offset;
                        res->end = (resource_size_t)-1LL;
                        res->flags = IORESOURCE_MEM;
#endif /* CONFIG_PPC32 */
                }

                pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
                         (unsigned long long)res->start,
                         (unsigned long long)res->end,
                         (unsigned long)res->flags);
                pci_add_resource_offset(resources, res, hose->pci_mem_offset);
        }

        pr_debug("PCI: PHB MEM offset     = %016llx\n",
                 (unsigned long long)hose->pci_mem_offset);
        pr_debug("PCI: PHB IO  offset     = %08lx\n",
                 (unsigned long)hose->io_base_virt - _IO_BASE);

}
1535
1536/*
1537 * Null PCI config access functions, for the case when we can't
1538 * find a hose.
1539 */
/* Generator for per-size null config accessors that always fail with
 * PCIBIOS_DEVICE_NOT_FOUND.  NOTE(review): no expansion of this macro
 * is visible in this file chunk; it may be dead code left over from an
 * earlier refactoring -- confirm before removing.
 */
#define NULL_PCI_OP(rw, size, type)                                     \
static int                                                              \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)    \
{                                                                       \
        return PCIBIOS_DEVICE_NOT_FOUND;                                \
}
1546
1547static int
1548null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1549                 int len, u32 *val)
1550{
1551        return PCIBIOS_DEVICE_NOT_FOUND;
1552}
1553
1554static int
1555null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1556                  int len, u32 val)
1557{
1558        return PCIBIOS_DEVICE_NOT_FOUND;
1559}
1560
1561static struct pci_ops null_pci_ops =
1562{
1563        .read = null_read_config,
1564        .write = null_write_config,
1565};
1566
1567/*
1568 * These functions are used early on before PCI scanning is done
1569 * and all of the pci_dev and pci_bus structures have been created.
1570 */
1571static struct pci_bus *
1572fake_pci_bus(struct pci_controller *hose, int busnr)
1573{
1574        static struct pci_bus bus;
1575
1576        if (hose == 0) {
1577                printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1578        }
1579        bus.number = busnr;
1580        bus.sysdata = hose;
1581        bus.ops = hose? hose->ops: &null_pci_ops;
1582        return &bus;
1583}
1584
/* Generate early config accessors (early_read_config_byte() etc.) that
 * route through fake_pci_bus() so they work before bus scanning.
 */
#define EARLY_PCI_OP(rw, size, type)                                    \
int early_##rw##_config_##size(struct pci_controller *hose, int bus,    \
                               int devfn, int offset, type value)       \
{                                                                       \
        return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),    \
                                            devfn, offset, value);      \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1599
/* NOTE(review): this extern prototype belongs in a shared header, not a
 * .c file -- the core already declares it; confirm and drop if so. */
extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
/*
 * early_find_capability - look up a PCI capability before bus scanning,
 * using the fake-bus machinery above.
 */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
                          int cap)
{
        return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1606
1607struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1608{
1609        struct pci_controller *hose = bus->sysdata;
1610
1611        return of_node_get(hose->dn);
1612}
1613
1614/**
1615 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1616 * @hose: Pointer to the PCI host controller instance structure
1617 */
1618void __devinit pcibios_scan_phb(struct pci_controller *hose)
1619{
1620        LIST_HEAD(resources);
1621        struct pci_bus *bus;
1622        struct device_node *node = hose->dn;
1623        int mode;
1624
1625        pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
1626
1627        /* Get some IO space for the new PHB */
1628        pcibios_setup_phb_io_space(hose);
1629
1630        /* Wire up PHB bus resources */
1631        pcibios_setup_phb_resources(hose, &resources);
1632
1633        hose->busn.start = hose->first_busno;
1634        hose->busn.end   = hose->last_busno;
1635        hose->busn.flags = IORESOURCE_BUS;
1636        pci_add_resource(&resources, &hose->busn);
1637
1638        /* Create an empty bus for the toplevel */
1639        bus = pci_create_root_bus(hose->parent, hose->first_busno,
1640                                  hose->ops, hose, &resources);
1641        if (bus == NULL) {
1642                pr_err("Failed to create bus for PCI domain %04x\n",
1643                        hose->global_number);
1644                pci_free_resource_list(&resources);
1645                return;
1646        }
1647        hose->bus = bus;
1648
1649        /* Get probe mode and perform scan */
1650        mode = PCI_PROBE_NORMAL;
1651        if (node && ppc_md.pci_probe_mode)
1652                mode = ppc_md.pci_probe_mode(bus);
1653        pr_debug("    probe mode: %d\n", mode);
1654        if (mode == PCI_PROBE_DEVTREE)
1655                of_scan_bus(node, bus);
1656
1657        if (mode == PCI_PROBE_NORMAL) {
1658                pci_bus_update_busn_res_end(bus, 255);
1659                hose->last_busno = pci_scan_child_bus(bus);
1660                pci_bus_update_busn_res_end(bus, hose->last_busno);
1661        }
1662
1663        /* Platform gets a chance to do some global fixups before
1664         * we proceed to resource allocation
1665         */
1666        if (ppc_md.pcibios_fixup_phb)
1667                ppc_md.pcibios_fixup_phb(hose);
1668
1669        /* Configure PCI Express settings */
1670        if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1671                struct pci_bus *child;
1672                list_for_each_entry(child, &bus->children, node) {
1673                        struct pci_dev *self = child->self;
1674                        if (!self)
1675                                continue;
1676                        pcie_bus_configure_settings(child, self->pcie_mpss);
1677                }
1678        }
1679}
1680
1681static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1682{
1683        int i, class = dev->class >> 8;
1684        /* When configured as agent, programing interface = 1 */
1685        int prog_if = dev->class & 0xf;
1686
1687        if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1688             class == PCI_CLASS_BRIDGE_OTHER) &&
1689                (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1690                (prog_if == 0) &&
1691                (dev->bus->parent == NULL)) {
1692                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1693                        dev->resource[i].start = 0;
1694                        dev->resource[i].end = 0;
1695                        dev->resource[i].flags = 0;
1696                }
1697        }
1698}
1699DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1700DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1701