/* linux/arch/powerpc/kernel/pci-common.c */
/*
 * Contains common pci routines for ALL ppc platform
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  18
  19#include <linux/kernel.h>
  20#include <linux/pci.h>
  21#include <linux/string.h>
  22#include <linux/init.h>
  23#include <linux/bootmem.h>
  24#include <linux/delay.h>
  25#include <linux/export.h>
  26#include <linux/of_address.h>
  27#include <linux/of_pci.h>
  28#include <linux/mm.h>
  29#include <linux/list.h>
  30#include <linux/syscalls.h>
  31#include <linux/irq.h>
  32#include <linux/vmalloc.h>
  33#include <linux/slab.h>
  34#include <linux/vgaarb.h>
  35
  36#include <asm/processor.h>
  37#include <asm/io.h>
  38#include <asm/prom.h>
  39#include <asm/pci-bridge.h>
  40#include <asm/byteorder.h>
  41#include <asm/machdep.h>
  42#include <asm/ppc-pci.h>
  43#include <asm/eeh.h>
  44
/* Protects hose_list and global_phb_number */
static DEFINE_SPINLOCK(hose_spinlock);
/* Every registered PCI host bridge (PHB) on the system */
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* Default DMA ops for PCI devices; platforms override via set_pci_dma_ops() */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
  56
/* Install the DMA mapping operations used for all PCI devices. */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
  61
/* Return the DMA mapping operations currently in use for PCI devices. */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
  67
/*
 * Allocate and register a new PCI controller (PHB).
 *
 * @dev: device-tree node of the host bridge (may be NULL).
 *
 * Assigns the next global PHB number and links the controller onto
 * hose_list under hose_spinlock.  zalloc_maybe_bootmem() lets this be
 * called both before and after the slab allocator is up; is_dynamic
 * records which case applied so pcibios_free_controller() knows
 * whether kfree() is legal.  Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to "no node" if the nid is invalid or offline */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
  94
/*
 * Unlink a controller from hose_list and free it if it was
 * dynamically allocated (allocated after mem_init; see
 * pcibios_alloc_controller()).  Boot-time (bootmem) controllers
 * are only unlinked, never freed.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);
 105
/*
 * This function is used to call pcibios_free_controller()
 * in a deferred manner: a callback from the PCI subsystem.
 *
 * _*DO NOT*_ call pcibios_free_controller() explicitly if
 * this is used (or it may access an invalid *phb pointer).
 *
 * The callback occurs when all references to the root bus
 * are dropped (e.g., child buses/devices and their users).
 *
 * It's called as .release_fn() of 'struct pci_host_bridge'
 * which is associated with the 'struct pci_controller.bus'
 * (root bus) - it expects .release_data to hold a pointer
 * to 'struct pci_controller'.
 *
 * In order to use it, register .release_fn()/release_data
 * like this:
 *
 * pci_set_host_bridge_release(bridge,
 *                             pcibios_free_controller_deferred,
 *                             (void *) phb);
 *
 * e.g. in the pcibios_root_bridge_prepare() callback from
 * pci_create_root_bus().
 */
/* Deferred-release callback: .release_data holds the pci_controller. */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
					 bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
 141
/*
 * The function is used to return the minimal alignment
 * for memory or I/O windows of the associated P2P bridge.
 * By default, 4KiB alignment for I/O windows and 1MiB for
 * memory windows.
 */
 148resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 149                                         unsigned long type)
 150{
 151        struct pci_controller *phb = pci_bus_to_host(bus);
 152
 153        if (phb->controller_ops.window_alignment)
 154                return phb->controller_ops.window_alignment(bus, type);
 155
 156        /*
 157         * PCI core will figure out the default
 158         * alignment: 4KiB for I/O and 1MiB for
 159         * memory window.
 160         */
 161        return 1;
 162}
 163
 164void pcibios_reset_secondary_bus(struct pci_dev *dev)
 165{
 166        struct pci_controller *phb = pci_bus_to_host(dev->bus);
 167
 168        if (phb->controller_ops.reset_secondary_bus) {
 169                phb->controller_ops.reset_secondary_bus(dev);
 170                return;
 171        }
 172
 173        pci_reset_secondary_bus(dev);
 174}
 175
 176#ifdef CONFIG_PCI_IOV
 177resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
 178{
 179        if (ppc_md.pcibios_iov_resource_alignment)
 180                return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
 181
 182        return pci_iov_resource_size(pdev, resno);
 183}
 184#endif /* CONFIG_PCI_IOV */
 185
/*
 * Size of a hose's I/O window.  ppc64 tracks it explicitly in
 * pci_io_size; ppc32 derives it from the io_resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
 194
/*
 * Return 1 if @address falls inside any host bridge's virtual I/O
 * window, 0 otherwise.  Walks hose_list under hose_spinlock.
 */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}
 213
/*
 * Convert a CPU physical address into a PIO token (offset from
 * _IO_BASE usable with inb/outb and friends).  Returns ~0UL when
 * the address lies in no hose's I/O window.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			/* Hose's virtual window, expressed as a PIO base */
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);
 236
 237/*
 238 * Return the domain number for this bus.
 239 */
 240int pci_domain_nr(struct pci_bus *bus)
 241{
 242        struct pci_controller *hose = pci_bus_to_host(bus);
 243
 244        return hose->global_number;
 245}
 246EXPORT_SYMBOL(pci_domain_nr);
 247
/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	/*
	 * NOTE(review): hose_list is walked here without taking
	 * hose_spinlock -- presumably safe because callers run while
	 * the list is stable (early boot); confirm before using from
	 * hotplug paths.
	 */
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		/* No match: try the parent bridge's node */
		node = node->parent;
	}
	return NULL;
}
 266
/*
 * Reads the interrupt pin to determine if an interrupt is used by the
 * card.  If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and PCI config space.
 */
/*
 * Map the device's interrupt to a Linux virq, preferring the
 * device-tree interrupt map and falling back to config space.
 * On success, stores the virq in pci_dev->irq and returns 0;
 * returns -1 on any failure.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison so a partial fill is obvious in the debug print below */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		/* pin == 0 means the function uses no interrupt pin */
		if (pin == 0)
			return -1;
		/* 0 and 0xff both mean "no line assigned" in config space */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* NULL domain == map through the default irq host */
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 of_node_full_name(oirq.controller));

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
 326
 327/*
 328 * Platform support for /proc/bus/pci/X/Y mmap()s,
 329 * modelled on the sparc64 implementation by Dave Miller.
 330 *  -- paulus.
 331 */
 332
 333/*
 334 * Adjust vm_pgoff of VMA such that it is the physical page offset
 335 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 336 *
 337 * Basically, the user finds the base address for his device which he wishes
 338 * to mmap.  They read the 32-bit value from the config space base register,
 339 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 340 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 341 *
 342 * Returns negative error code on failure, zero on success.
 343 */
/*
 * Validate and translate the user-supplied mmap offset for @dev.
 *
 * On entry *offset is the bus-relative offset the user passed to mmap
 * (vm_pgoff << PAGE_SHIFT).  On success, returns the matching BAR
 * resource with *offset rewritten to the CPU physical address to map;
 * returns NULL if the offset hits none of the device's resources.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		/* For I/O, shift into the hose's virtual window first so
		 * the resource comparison below works */
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
 395
 396/*
 397 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 398 * device mapping.
 399 */
 400static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 401                                      pgprot_t protection,
 402                                      enum pci_mmap_state mmap_state,
 403                                      int write_combine)
 404{
 405
 406        /* Write combine is always 0 on non-memory space mappings. On
 407         * memory space, if the user didn't pass 1, we check for a
 408         * "prefetchable" resource. This is a bit hackish, but we use
 409         * this to workaround the inability of /sysfs to provide a write
 410         * combine bit
 411         */
 412        if (mmap_state != pci_mmap_mem)
 413                write_combine = 0;
 414        else if (write_combine == 0) {
 415                if (rp->flags & IORESOURCE_PREFETCH)
 416                        write_combine = 1;
 417        }
 418
 419        /* XXX would be nice to have a way to ask for write-through */
 420        if (write_combine)
 421                return pgprot_noncached_wc(protection);
 422        else
 423                return pgprot_noncached(protection);
 424}
 425
 426/*
 427 * This one is used by /dev/mem and fbdev who have no clue about the
 428 * PCI device, it tries to find the PCI device first and calls the
 429 * above routine
 430 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	/* System RAM keeps its normal (cacheable) protection */
	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Search every PCI device for a memory BAR covering this address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Breaking out of for_each_pci_dev() leaves a reference
		 * held on pdev; drop it.  (On full traversal pdev is NULL
		 * and no put is needed.) */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
 474
 475
 476/*
 477 * Perform the actual remap of the pages for a PCI device mapping, as
 478 * appropriate for this architecture.  The region in the process to map
 479 * is described by vm_start and vm_end members of VMA, the base physical
 480 * address is found in vm_pgoff.
 481 * The pci device structure is provided so that architectures may make mapping
 482 * decisions on a per-device or per-bus basis.
 483 *
 484 * Returns a negative error code on failure, zero on success.
 485 */
 486int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 487                        enum pci_mmap_state mmap_state, int write_combine)
 488{
 489        resource_size_t offset =
 490                ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
 491        struct resource *rp;
 492        int ret;
 493
 494        rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
 495        if (rp == NULL)
 496                return -EINVAL;
 497
 498        vma->vm_pgoff = offset >> PAGE_SHIFT;
 499        vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
 500                                                  vma->vm_page_prot,
 501                                                  mmap_state, write_combine);
 502
 503        ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 504                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
 505
 506        return ret;
 507}
 508
 509/* This provides legacy IO read access on a bus */
/*
 * Legacy I/O read on @bus: reads @size bytes (1, 2 or 4) at @port
 * into *val.  Returns the number of bytes read, or a negative errno
 * (-ENXIO if the port is outside the PHB's I/O window, -EINVAL for a
 * misaligned or unsupported size).
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	/* NOTE(review): rp->end is inclusive, so this also rejects a
	 * transfer that ends exactly on the window's last byte --
	 * confirm that is intended */
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* PCI port I/O is little-endian */
	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
 548
 549/* This provides legacy IO write access on a bus */
/*
 * Legacy I/O write on @bus: writes @size bytes (1, 2 or 4) of @val at
 * @port.  Returns the number of bytes written or a negative errno
 * (-ENXIO if outside the PHB's I/O window, -EINVAL for misaligned or
 * unsupported sizes).
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	/* NOTE(review): rp->end is inclusive, so this also rejects a
	 * transfer that ends exactly on the window's last byte --
	 * confirm that is intended */
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
 593
 594/* This provides legacy IO or memory mmap access on a bus */
/*
 * mmap a bus' legacy I/O or legacy memory space into a user VMA.
 * Legacy memory requests beyond the hose's ISA window are satisfied
 * with anonymous zero pages instead of failing (see hack comment).
 * Returns 0 on success or a negative errno.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Validate against the hose's I/O window before mapping */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
 646
/*
 * Translate a device resource into the start/end values exposed to
 * userland (e.g. via /proc and sysfs).  For I/O resources the hose's
 * PIO offset is subtracted so userland sees bus-relative ports; MMIO
 * is passed through fully fixed up (see BenH's comment below).
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
 685
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	const __be32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);
	int np = pna + 5;	/* cells per entry: 3 PCI addr + pna CPU addr + 2 size */
	int memno = 0;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = of_read_number(ranges, 1);
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (of_read_number(ranges, 1) != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			/* Coalesce only if both address spaces continue
			 * exactly where the previous entry ended */
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI Memory space */
		case 3:		/* PCI 64 bits Memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* Build resource */
			hose->mem_offset[memno] = cpu_addr - pci_addr;
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		/* Common resource finalization for both IO and MEM cases */
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}
 837
 838/* Decide whether to display the domain number in /proc */
 839int pci_proc_domain(struct pci_bus *bus)
 840{
 841        struct pci_controller *hose = pci_bus_to_host(bus);
 842
 843        if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
 844                return 0;
 845        if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
 846                return hose->global_number != 0;
 847        return 1;
 848}
 849
 850int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 851{
 852        if (ppc_md.pcibios_root_bridge_prepare)
 853                return ppc_md.pcibios_root_bridge_prepare(bridge);
 854
 855        return 0;
 856}
 857
 858/* This header fixup will do the resource fixup for all devices as they are
 859 * probed, but not for bridge ranges
 860 */
 861static void pcibios_fixup_resources(struct pci_dev *dev)
 862{
 863        struct pci_controller *hose = pci_bus_to_host(dev->bus);
 864        int i;
 865
 866        if (!hose) {
 867                printk(KERN_ERR "No host bridge for PCI dev %s !\n",
 868                       pci_name(dev));
 869                return;
 870        }
 871
 872        if (dev->is_virtfn)
 873                return;
 874
 875        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
 876                struct resource *res = dev->resource + i;
 877                struct pci_bus_region reg;
 878                if (!res->flags)
 879                        continue;
 880
 881                /* If we're going to re-assign everything, we mark all resources
 882                 * as unset (and 0-base them). In addition, we mark BARs starting
 883                 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
 884                 * since in that case, we don't want to re-assign anything
 885                 */
 886                pcibios_resource_to_bus(dev->bus, &reg, res);
 887                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
 888                    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 889                        /* Only print message if not re-assigning */
 890                        if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 891                                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
 892                                         "is unassigned\n",
 893                                         pci_name(dev), i,
 894                                         (unsigned long long)res->start,
 895                                         (unsigned long long)res->end,
 896                                         (unsigned int)res->flags);
 897                        res->end -= res->start;
 898                        res->start = 0;
 899                        res->flags |= IORESOURCE_UNSET;
 900                        continue;
 901                }
 902
 903                pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
 904                         pci_name(dev), i,
 905                         (unsigned long long)res->start,\
 906                         (unsigned long long)res->end,
 907                         (unsigned int)res->flags);
 908        }
 909
 910        /* Call machine specific resource fixup */
 911        if (ppc_md.pcibios_fixup_resources)
 912                ppc_md.pcibios_fixup_resources(dev);
 913}
 914DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
 915
 916/* This function tries to figure out if a bridge resource has been initialized
 917 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 919 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 920 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
                                                 struct resource *res)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        struct pci_dev *dev = bus->self;
        resource_size_t offset;
        struct pci_bus_region region;
        u16 command;
        int i;

        /* We don't do anything if PCI_PROBE_ONLY is set */
        if (pci_has_flag(PCI_PROBE_ONLY))
                return 0;

        /* Job is a bit different between memory and IO */
        if (res->flags & IORESOURCE_MEM) {
                /* Convert to bus-relative addresses before testing */
                pcibios_resource_to_bus(dev->bus, &region, res);

                /* If the BAR is non-0 then it's probably been initialized */
                if (region.start != 0)
                        return 0;

                /* The BAR is 0, let's check if memory decoding is enabled on
                 * the bridge. If not, we consider it unassigned
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if ((command & PCI_COMMAND_MEMORY) == 0)
                        return 1;

                /* Memory decoding is enabled and the BAR is 0. If any of the
                 * PHB windows starts at bus address 0 (cpu start == offset),
                 * bus address 0 is reachable and the setup is good enough.
                 */
                for (i = 0; i < 3; i++) {
                        if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
                            hose->mem_resources[i].start == hose->mem_offset[i])
                                return 0;
                }

                /* Well, it starts at 0 and we know it will collide so we may as
                 * well consider it as unassigned. That covers the Apple case.
                 */
                return 1;
        } else {
                /* IO window: translate the cpu-side start back to a bus IO
                 * port; if the low 32 bits are non-0 we consider it assigned
                 */
                offset = (unsigned long)hose->io_base_virt - _IO_BASE;
                if (((res->start - offset) & 0xfffffffful) != 0)
                        return 0;

                /* Here, we are a bit different than memory as typically IO space
                 * starting at low addresses -is- valid. What we do instead if that
                 * we consider as unassigned anything that doesn't have IO enabled
                 * in the PCI command register, and that's it.
                 */
                pci_read_config_word(dev, PCI_COMMAND, &command);
                if (command & PCI_COMMAND_IO)
                        return 0;

                /* It's starting at 0 and IO is disabled in the bridge, consider
                 * it unassigned
                 */
                return 1;
        }
}
 985
 986/* Fixup resources of a PCI<->PCI bridge */
 987static void pcibios_fixup_bridge(struct pci_bus *bus)
 988{
 989        struct resource *res;
 990        int i;
 991
 992        struct pci_dev *dev = bus->self;
 993
 994        pci_bus_for_each_resource(bus, res, i) {
 995                if (!res || !res->flags)
 996                        continue;
 997                if (i >= 3 && bus->self->transparent)
 998                        continue;
 999
1000                /* If we're going to reassign everything, we can
1001                 * shrink the P2P resource to have size as being
1002                 * of 0 in order to save space.
1003                 */
1004                if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1005                        res->flags |= IORESOURCE_UNSET;
1006                        res->start = 0;
1007                        res->end = -1;
1008                        continue;
1009                }
1010
1011                pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
1012                         pci_name(dev), i,
1013                         (unsigned long long)res->start,\
1014                         (unsigned long long)res->end,
1015                         (unsigned int)res->flags);
1016
1017                /* Try to detect uninitialized P2P bridge resources,
1018                 * and clear them out so they get re-assigned later
1019                 */
1020                if (pcibios_uninitialized_bridge_resource(bus, res)) {
1021                        res->flags = 0;
1022                        pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
1023                }
1024        }
1025}
1026
1027void pcibios_setup_bus_self(struct pci_bus *bus)
1028{
1029        struct pci_controller *phb;
1030
1031        /* Fix up the bus resources for P2P bridges */
1032        if (bus->self != NULL)
1033                pcibios_fixup_bridge(bus);
1034
1035        /* Platform specific bus fixups. This is currently only used
1036         * by fsl_pci and I'm hoping to get rid of it at some point
1037         */
1038        if (ppc_md.pcibios_fixup_bus)
1039                ppc_md.pcibios_fixup_bus(bus);
1040
1041        /* Setup bus DMA mappings */
1042        phb = pci_bus_to_host(bus);
1043        if (phb->controller_ops.dma_bus_setup)
1044                phb->controller_ops.dma_bus_setup(bus);
1045}
1046
1047static void pcibios_setup_device(struct pci_dev *dev)
1048{
1049        struct pci_controller *phb;
1050
1051        arch_dma_init(&dev->dev);
1052
1053        /* Fixup NUMA node as it may not be setup yet by the generic
1054         * code and is needed by the DMA init
1055         */
1056        set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1057
1058        /* Hook up default DMA ops */
1059        set_dma_ops(&dev->dev, pci_dma_ops);
1060        set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
1061
1062        /* Additional platform DMA/iommu setup */
1063        phb = pci_bus_to_host(dev->bus);
1064        if (phb->controller_ops.dma_dev_setup)
1065                phb->controller_ops.dma_dev_setup(dev);
1066
1067        /* Read default IRQs and fixup if necessary */
1068        pci_read_irq_line(dev);
1069        if (ppc_md.pci_irq_fixup)
1070                ppc_md.pci_irq_fixup(dev);
1071}
1072
1073int pcibios_add_device(struct pci_dev *dev)
1074{
1075        /*
1076         * We can only call pcibios_setup_device() after bus setup is complete,
1077         * since some of the platform specific DMA setup code depends on it.
1078         */
1079        if (dev->bus->is_added)
1080                pcibios_setup_device(dev);
1081
1082#ifdef CONFIG_PCI_IOV
1083        if (ppc_md.pcibios_fixup_sriov)
1084                ppc_md.pcibios_fixup_sriov(dev);
1085#endif /* CONFIG_PCI_IOV */
1086
1087        return 0;
1088}
1089
1090void pcibios_setup_bus_devices(struct pci_bus *bus)
1091{
1092        struct pci_dev *dev;
1093
1094        pr_debug("PCI: Fixup bus devices %d (%s)\n",
1095                 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1096
1097        list_for_each_entry(dev, &bus->devices, bus_list) {
1098                /* Cardbus can call us to add new devices to a bus, so ignore
1099                 * those who are already fully discovered
1100                 */
1101                if (dev->is_added)
1102                        continue;
1103
1104                pcibios_setup_device(dev);
1105        }
1106}
1107
/* Arch hook called by pci_set_master(); powerpc needs no extra work
 * beyond what the generic code already does.
 */
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
1112
void pcibios_fixup_bus(struct pci_bus *bus)
{
        /* Only reached from the generic PCI probe, never when the tree
         * is generated from the OF device-tree: read the PCI<->PCI
         * bridge bases, then fix up the bus itself and its devices.
         */
        pci_read_bridge_bases(bus);
        pcibios_setup_bus_self(bus);
        pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
1128
void pci_fixup_cardbus(struct pci_bus *bus)
{
        /* Cardbus hot-adds devices to an existing bus; run the standard
         * per-device fixups on the newcomers.
         */
        pcibios_setup_bus_devices(bus);
}
1134
1135
1136static int skip_isa_ioresource_align(struct pci_dev *dev)
1137{
1138        if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1139            !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1140                return 1;
1141        return 0;
1142}
1143
1144/*
1145 * We need to avoid collisions with `mirrored' VGA ports
1146 * and other strange ISA hardware, so we always want the
1147 * addresses to be allocated in the 0x000-0x0ff region
1148 * modulo 0x400.
1149 *
1150 * Why? Because some silly external IO cards only decode
1151 * the low 10 bits of the IO address. The 0x00-0xff region
1152 * is reserved for motherboard devices that decode all 16
1153 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1154 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
1156 */
1157resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1158                                resource_size_t size, resource_size_t align)
1159{
1160        struct pci_dev *dev = data;
1161        resource_size_t start = res->start;
1162
1163        if (res->flags & IORESOURCE_IO) {
1164                if (skip_isa_ioresource_align(dev))
1165                        return start;
1166                if (start & 0x300)
1167                        start = (start + 0x3ff) & ~0x3ff;
1168        }
1169
1170        return start;
1171}
1172EXPORT_SYMBOL(pcibios_align_resource);
1173
1174/*
1175 * Reparent resource children of pr that conflict with res
1176 * under res, and make res replace those children.
1177 */
static int reparent_resources(struct resource *parent,
                                     struct resource *res)
{
        struct resource *p, **pp;
        struct resource **firstpp = NULL;

        /* Walk parent's children (address ordered) looking for the run of
         * entries fully contained within [res->start, res->end].
         */
        for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
                if (p->end < res->start)
                        continue;
                if (res->end < p->start)
                        break;
                if (p->start < res->start || p->end > res->end)
                        return -1;      /* not completely contained */
                if (firstpp == NULL)
                        firstpp = pp;
        }
        if (firstpp == NULL)
                return -1;      /* didn't find any conflicting entries? */
        /* Splice res into the tree in place of the conflicting run: the
         * run becomes res's child list and res inherits the first
         * non-conflicting sibling.
         */
        res->parent = parent;
        res->child = *firstpp;
        res->sibling = *pp;
        *firstpp = res;
        *pp = NULL;
        /* Re-point the adopted children at their new parent */
        for (p = res->child; p != NULL; p = p->sibling) {
                p->parent = res;
                pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
                         p->name,
                         (unsigned long long)p->start,
                         (unsigned long long)p->end, res->name);
        }
        return 0;
}
1210
1211/*
1212 *  Handle resources of PCI devices.  If the world were perfect, we could
1213 *  just allocate all the resource regions and do nothing more.  It isn't.
1214 *  On the other hand, we cannot just re-allocate all devices, as it would
1215 *  require us to know lots of host bridge internals.  So we attempt to
1216 *  keep as much of the original configuration as possible, but tweak it
1217 *  when it's found to be wrong.
1218 *
1219 *  Known BIOS problems we have to work around:
1220 *      - I/O or memory regions not configured
1221 *      - regions configured, but not enabled in the command register
1222 *      - bogus I/O addresses above 64K used
1223 *      - expansion ROMs left enabled (this may sound harmless, but given
1224 *        the fact the PCI specs explicitly allow address decoders to be
1225 *        shared between expansion ROMs and other resource regions, it's
1226 *        at least dangerous)
1227 *
1228 *  Our solution:
1229 *      (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1230 *          This gives us fixed barriers on where we can allocate.
1231 *      (2) Allocate resources for all enabled devices.  If there is
1232 *          a collision, just mark the resource as unallocated. Also
1233 *          disable expansion ROMs during this step.
1234 *      (3) Try to allocate resources for disabled devices.  If the
1235 *          resources were assigned correctly, everything goes well,
1236 *          if they weren't, they won't disturb allocation of other
1237 *          resources.
1238 *      (4) Assign new addresses to resources which were either
1239 *          not configured at all or misconfigured.  If explicitly
1240 *          requested by the user, configure expansion ROM address
1241 *          as well.
1242 */
1243
1244void pcibios_allocate_bus_resources(struct pci_bus *bus)
1245{
1246        struct pci_bus *b;
1247        int i;
1248        struct resource *res, *pr;
1249
1250        pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1251                 pci_domain_nr(bus), bus->number);
1252
1253        pci_bus_for_each_resource(bus, res, i) {
1254                if (!res || !res->flags || res->start > res->end || res->parent)
1255                        continue;
1256
1257                /* If the resource was left unset at this point, we clear it */
1258                if (res->flags & IORESOURCE_UNSET)
1259                        goto clear_resource;
1260
1261                if (bus->parent == NULL)
1262                        pr = (res->flags & IORESOURCE_IO) ?
1263                                &ioport_resource : &iomem_resource;
1264                else {
1265                        pr = pci_find_parent_resource(bus->self, res);
1266                        if (pr == res) {
1267                                /* this happens when the generic PCI
1268                                 * code (wrongly) decides that this
1269                                 * bridge is transparent  -- paulus
1270                                 */
1271                                continue;
1272                        }
1273                }
1274
1275                pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1276                         "[0x%x], parent %p (%s)\n",
1277                         bus->self ? pci_name(bus->self) : "PHB",
1278                         bus->number, i,
1279                         (unsigned long long)res->start,
1280                         (unsigned long long)res->end,
1281                         (unsigned int)res->flags,
1282                         pr, (pr && pr->name) ? pr->name : "nil");
1283
1284                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1285                        struct pci_dev *dev = bus->self;
1286
1287                        if (request_resource(pr, res) == 0)
1288                                continue;
1289                        /*
1290                         * Must be a conflict with an existing entry.
1291                         * Move that entry (or entries) under the
1292                         * bridge resource and try again.
1293                         */
1294                        if (reparent_resources(pr, res) == 0)
1295                                continue;
1296
1297                        if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1298                            pci_claim_bridge_resource(dev,
1299                                                i + PCI_BRIDGE_RESOURCES) == 0)
1300                                continue;
1301                }
1302                pr_warning("PCI: Cannot allocate resource region "
1303                           "%d of PCI bridge %d, will remap\n", i, bus->number);
1304        clear_resource:
1305                /* The resource might be figured out when doing
1306                 * reassignment based on the resources required
1307                 * by the downstream PCI devices. Here we set
1308                 * the size of the resource to be 0 in order to
1309                 * save more space.
1310                 */
1311                res->start = 0;
1312                res->end = -1;
1313                res->flags = 0;
1314        }
1315
1316        list_for_each_entry(b, &bus->children, node)
1317                pcibios_allocate_bus_resources(b);
1318}
1319
1320static inline void alloc_resource(struct pci_dev *dev, int idx)
1321{
1322        struct resource *pr, *r = &dev->resource[idx];
1323
1324        pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1325                 pci_name(dev), idx,
1326                 (unsigned long long)r->start,
1327                 (unsigned long long)r->end,
1328                 (unsigned int)r->flags);
1329
1330        pr = pci_find_parent_resource(dev, r);
1331        if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1332            request_resource(pr, r) < 0) {
1333                printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1334                       " of device %s, will remap\n", idx, pci_name(dev));
1335                if (pr)
1336                        pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
1337                                 pr,
1338                                 (unsigned long long)pr->start,
1339                                 (unsigned long long)pr->end,
1340                                 (unsigned int)pr->flags);
1341                /* We'll assign a new address later */
1342                r->flags |= IORESOURCE_UNSET;
1343                r->end -= r->start;
1344                r->start = 0;
1345        }
1346}
1347
/* Claim device BARs in two passes: pass 0 takes the resources whose
 * decode is enabled in the command register, pass 1 the disabled ones.
 * Pass 0 also switches off any ROM left enabled by firmware.
 */
static void __init pcibios_allocate_resources(int pass)
{
        struct pci_dev *dev = NULL;
        int idx, disabled;
        u16 command;
        struct resource *r;

        for_each_pci_dev(dev) {
                pci_read_config_word(dev, PCI_COMMAND, &command);
                for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
                        r = &dev->resource[idx];
                        if (r->parent)          /* Already allocated */
                                continue;
                        if (!r->flags || (r->flags & IORESOURCE_UNSET))
                                continue;       /* Not assigned at all */
                        /* We only allocate ROMs on pass 1 just in case they
                         * have been screwed up by firmware
                         */
                        if (idx == PCI_ROM_RESOURCE )
                                disabled = 1;
                        /* NOTE(review): the assignment above is immediately
                         * overwritten below, so ROM BARs actually follow the
                         * MEM-decode test like any other memory BAR — confirm
                         * whether "else if" was intended here.
                         */
                        if (r->flags & IORESOURCE_IO)
                                disabled = !(command & PCI_COMMAND_IO);
                        else
                                disabled = !(command & PCI_COMMAND_MEMORY);
                        if (pass == disabled)
                                alloc_resource(dev, idx);
                }
                if (pass)
                        continue;
                /* Pass 0 only: disable ROM decoding left on by firmware */
                r = &dev->resource[PCI_ROM_RESOURCE];
                if (r->flags) {
                        /* Turn the ROM off, leave the resource region,
                         * but keep it unregistered.
                         */
                        u32 reg;
                        pci_read_config_dword(dev, dev->rom_base_reg, &reg);
                        if (reg & PCI_ROM_ADDRESS_ENABLE) {
                                pr_debug("PCI: Switching off ROM of %s\n",
                                         pci_name(dev));
                                r->flags &= ~IORESOURCE_ROM_ENABLE;
                                pci_write_config_dword(dev, dev->rom_base_reg,
                                                       reg & ~PCI_ROM_ADDRESS_ENABLE);
                        }
                }
        }
}
1394
/* Reserve the legacy ISA IO ports (first 4K of the PHB IO window) and
 * the legacy VGA memory hole (0xa0000-0xbffff) on a root bus so that
 * resource assignment doesn't allocate devices on top of them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        resource_size_t offset;
        struct resource *res, *pres;
        int i;

        pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

        /* Check for IO */
        if (!(hose->io_resource.flags & IORESOURCE_IO))
                goto no_io;
        offset = (unsigned long)hose->io_base_virt - _IO_BASE;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy IO";
        res->flags = IORESOURCE_IO;
        res->start = offset;
        /* First 4K of the window, bus IO ports 0x000-0xfff */
        res->end = (offset + 0xfff) & 0xfffffffful;
        pr_debug("Candidate legacy IO: %pR\n", res);
        if (request_resource(&hose->io_resource, res)) {
                /* Reservation is best-effort; free on conflict */
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }

 no_io:
        /* Find a PHB memory window covering the VGA hole (bus addresses
         * 0xa0000-0xbffff); offset tracks the matching window.
         */
        for (i = 0; i < 3; i++) {
                pres = &hose->mem_resources[i];
                offset = hose->mem_offset[i];
                if (!(pres->flags & IORESOURCE_MEM))
                        continue;
                pr_debug("hose mem res: %pR\n", pres);
                if ((pres->start - offset) <= 0xa0000 &&
                    (pres->end - offset) >= 0xbffff)
                        break;
        }
        if (i >= 3)
                return;         /* no window covers the VGA hole */
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(res == NULL);
        res->name = "Legacy VGA memory";
        res->flags = IORESOURCE_MEM;
        res->start = 0xa0000 + offset;
        res->end = 0xbffff + offset;
        pr_debug("Candidate VGA memory: %pR\n", res);
        if (request_resource(pres, res)) {
                printk(KERN_DEBUG
                       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
                       pci_domain_nr(bus), bus->number, res);
                kfree(res);
        }
}
1450
1451void __init pcibios_resource_survey(void)
1452{
1453        struct pci_bus *b;
1454
1455        /* Allocate and assign resources */
1456        list_for_each_entry(b, &pci_root_buses, node)
1457                pcibios_allocate_bus_resources(b);
1458        pcibios_allocate_resources(0);
1459        pcibios_allocate_resources(1);
1460
1461        /* Before we start assigning unassigned resource, we try to reserve
1462         * the low IO area and the VGA memory area if they intersect the
1463         * bus available resources to avoid allocating things on top of them
1464         */
1465        if (!pci_has_flag(PCI_PROBE_ONLY)) {
1466                list_for_each_entry(b, &pci_root_buses, node)
1467                        pcibios_reserve_legacy_regions(b);
1468        }
1469
1470        /* Now, if the platform didn't decide to blindly trust the firmware,
1471         * we proceed to assigning things that were left unassigned
1472         */
1473        if (!pci_has_flag(PCI_PROBE_ONLY)) {
1474                pr_debug("PCI: Assigning unassigned resources...\n");
1475                pci_assign_unassigned_resources();
1476        }
1477
1478        /* Call machine dependent fixup */
1479        if (ppc_md.pcibios_fixup)
1480                ppc_md.pcibios_fixup();
1481}
1482
1483/* This is used by the PCI hotplug driver to allocate resource
1484 * of newly plugged busses. We can try to consolidate with the
1485 * rest of the code later, for now, keep it as-is as our main
1486 * resource allocation function doesn't deal with sub-trees yet.
1487 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
        struct pci_dev *dev;
        struct pci_bus *child_bus;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];

                        /* Skip already-claimed and empty resources.
                         * NOTE(review): !r->start also skips resources that
                         * legitimately start at address 0 — presumably those
                         * are treated as unassigned here; confirm.
                         */
                        if (r->parent || !r->start || !r->flags)
                                continue;

                        pr_debug("PCI: Claiming %s: "
                                 "Resource %d: %016llx..%016llx [%x]\n",
                                 pci_name(dev), i,
                                 (unsigned long long)r->start,
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);

                        if (pci_claim_resource(dev, i) == 0)
                                continue;

                        /* Claim failed: ask the core to clip a bridge
                         * window so it fits
                         */
                        pci_claim_bridge_resource(dev, i);
                }
        }

        /* Recurse into child buses */
        list_for_each_entry(child_bus, &bus->children, node)
                pcibios_claim_one_bus(child_bus);
}
1520
1521
1522/* pcibios_finish_adding_to_bus
1523 *
1524 * This is to be called by the hotplug code after devices have been
1525 * added to a bus, this include calling it for a PHB that is just
1526 * being added
1527 */
1528void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1529{
1530        pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1531                 pci_domain_nr(bus), bus->number);
1532
1533        /* Allocate bus and devices resources */
1534        pcibios_allocate_bus_resources(bus);
1535        pcibios_claim_one_bus(bus);
1536        if (!pci_has_flag(PCI_PROBE_ONLY))
1537                pci_assign_unassigned_bus_resources(bus);
1538
1539        /* Fixup EEH */
1540        eeh_add_device_tree_late(bus);
1541
1542        /* Add new devices to global lists.  Register in proc, sysfs. */
1543        pci_bus_add_devices(bus);
1544
1545        /* sysfs files should only be added after devices are added */
1546        eeh_add_sysfs_files(bus);
1547}
1548EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1549
1550int pcibios_enable_device(struct pci_dev *dev, int mask)
1551{
1552        struct pci_controller *phb = pci_bus_to_host(dev->bus);
1553
1554        if (phb->controller_ops.enable_device_hook)
1555                if (!phb->controller_ops.enable_device_hook(dev))
1556                        return -EINVAL;
1557
1558        return pci_enable_resources(dev, mask);
1559}
1560
1561void pcibios_disable_device(struct pci_dev *dev)
1562{
1563        struct pci_controller *phb = pci_bus_to_host(dev->bus);
1564
1565        if (phb->controller_ops.disable_device)
1566                phb->controller_ops.disable_device(dev);
1567}
1568
1569resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1570{
1571        return (unsigned long) hose->io_base_virt - _IO_BASE;
1572}
1573
1574static void pcibios_setup_phb_resources(struct pci_controller *hose,
1575                                        struct list_head *resources)
1576{
1577        struct resource *res;
1578        resource_size_t offset;
1579        int i;
1580
1581        /* Hookup PHB IO resource */
1582        res = &hose->io_resource;
1583
1584        if (!res->flags) {
1585                printk(KERN_WARNING "PCI: I/O resource not set for host"
1586                       " bridge %s (domain %d)\n",
1587                       hose->dn->full_name, hose->global_number);
1588        } else {
1589                offset = pcibios_io_space_offset(hose);
1590
1591                pr_debug("PCI: PHB IO resource    = %08llx-%08llx [%lx] off 0x%08llx\n",
1592                         (unsigned long long)res->start,
1593                         (unsigned long long)res->end,
1594                         (unsigned long)res->flags,
1595                         (unsigned long long)offset);
1596                pci_add_resource_offset(resources, res, offset);
1597        }
1598
1599        /* Hookup PHB Memory resources */
1600        for (i = 0; i < 3; ++i) {
1601                res = &hose->mem_resources[i];
1602                if (!res->flags) {
1603                        if (i == 0)
1604                                printk(KERN_ERR "PCI: Memory resource 0 not set for "
1605                                       "host bridge %s (domain %d)\n",
1606                                       hose->dn->full_name, hose->global_number);
1607                        continue;
1608                }
1609                offset = hose->mem_offset[i];
1610
1611
1612                pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
1613                         (unsigned long long)res->start,
1614                         (unsigned long long)res->end,
1615                         (unsigned long)res->flags,
1616                         (unsigned long long)offset);
1617
1618                pci_add_resource_offset(resources, res, offset);
1619        }
1620}
1621
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 *
 * NULL_PCI_OP generates a stub accessor that fails every config access
 * with PCIBIOS_DEVICE_NOT_FOUND.  NOTE(review): no instantiation of this
 * macro is visible in this file — confirm it is still used before removal.
 */
#define NULL_PCI_OP(rw, size, type)                                     \
static int                                                              \
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)    \
{                                                                       \
        return PCIBIOS_DEVICE_NOT_FOUND;                                \
}
1632
/* Bus-level config read stub: report the device as absent */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
                 int len, u32 *val)
{
        return PCIBIOS_DEVICE_NOT_FOUND;
}
1639
/* Bus-level config write stub: report the device as absent */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
                  int len, u32 val)
{
        return PCIBIOS_DEVICE_NOT_FOUND;
}
1646
/* pci_ops that fail every access; used when no hose can be found */
static struct pci_ops null_pci_ops =
{
        .read = null_read_config,
        .write = null_write_config,
};
1652
1653/*
1654 * These functions are used early on before PCI scanning is done
1655 * and all of the pci_dev and pci_bus structures have been created.
1656 */
1657static struct pci_bus *
1658fake_pci_bus(struct pci_controller *hose, int busnr)
1659{
1660        static struct pci_bus bus;
1661
1662        if (hose == NULL) {
1663                printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1664        }
1665        bus.number = busnr;
1666        bus.sysdata = hose;
1667        bus.ops = hose? hose->ops: &null_pci_ops;
1668        return &bus;
1669}
1670
/*
 * EARLY_PCI_OP generates early_{read,write}_config_{byte,word,dword}(),
 * which perform a config access on a hose before the real pci_bus
 * structures exist, by routing through fake_pci_bus().
 */
#define EARLY_PCI_OP(rw, size, type)                                    \
int early_##rw##_config_##size(struct pci_controller *hose, int bus,    \
                               int devfn, int offset, type value)       \
{                                                                       \
        return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),    \
                                            devfn, offset, value);      \
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1685
/*
 * Early capability lookup: search a device's capability list through a
 * hose, before the real pci_bus structures exist (uses fake_pci_bus()).
 */
extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
                          int cap)
{
        return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1692
/*
 * Return the device-tree node of the bus's host bridge, with a
 * reference held (caller must of_node_put() it).
 */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
        struct pci_controller *hose = bus->sysdata;

        return of_node_get(hose->dn);
}
1699
1700/**
1701 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1702 * @hose: Pointer to the PCI host controller instance structure
1703 */
1704void pcibios_scan_phb(struct pci_controller *hose)
1705{
1706        LIST_HEAD(resources);
1707        struct pci_bus *bus;
1708        struct device_node *node = hose->dn;
1709        int mode;
1710
1711        pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
1712
1713        /* Get some IO space for the new PHB */
1714        pcibios_setup_phb_io_space(hose);
1715
1716        /* Wire up PHB bus resources */
1717        pcibios_setup_phb_resources(hose, &resources);
1718
1719        hose->busn.start = hose->first_busno;
1720        hose->busn.end   = hose->last_busno;
1721        hose->busn.flags = IORESOURCE_BUS;
1722        pci_add_resource(&resources, &hose->busn);
1723
1724        /* Create an empty bus for the toplevel */
1725        bus = pci_create_root_bus(hose->parent, hose->first_busno,
1726                                  hose->ops, hose, &resources);
1727        if (bus == NULL) {
1728                pr_err("Failed to create bus for PCI domain %04x\n",
1729                        hose->global_number);
1730                pci_free_resource_list(&resources);
1731                return;
1732        }
1733        hose->bus = bus;
1734
1735        /* Get probe mode and perform scan */
1736        mode = PCI_PROBE_NORMAL;
1737        if (node && hose->controller_ops.probe_mode)
1738                mode = hose->controller_ops.probe_mode(bus);
1739        pr_debug("    probe mode: %d\n", mode);
1740        if (mode == PCI_PROBE_DEVTREE)
1741                of_scan_bus(node, bus);
1742
1743        if (mode == PCI_PROBE_NORMAL) {
1744                pci_bus_update_busn_res_end(bus, 255);
1745                hose->last_busno = pci_scan_child_bus(bus);
1746                pci_bus_update_busn_res_end(bus, hose->last_busno);
1747        }
1748
1749        /* Platform gets a chance to do some global fixups before
1750         * we proceed to resource allocation
1751         */
1752        if (ppc_md.pcibios_fixup_phb)
1753                ppc_md.pcibios_fixup_phb(hose);
1754
1755        /* Configure PCI Express settings */
1756        if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1757                struct pci_bus *child;
1758                list_for_each_entry(child, &bus->children, node)
1759                        pcie_bus_configure_settings(child);
1760        }
1761}
1762EXPORT_SYMBOL_GPL(pcibios_scan_phb);
1763
1764static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1765{
1766        int i, class = dev->class >> 8;
1767        /* When configured as agent, programing interface = 1 */
1768        int prog_if = dev->class & 0xf;
1769
1770        if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1771             class == PCI_CLASS_BRIDGE_OTHER) &&
1772                (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1773                (prog_if == 0) &&
1774                (dev->bus->parent == NULL)) {
1775                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1776                        dev->resource[i].start = 0;
1777                        dev->resource[i].end = 0;
1778                        dev->resource[i].flags = 0;
1779                }
1780        }
1781}
1782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1784
/*
 * Pick a default VGA device for the VGA arbiter: any card that is
 * actively decoding (I/O or memory enabled in PCI_COMMAND) becomes the
 * default, replacing an earlier fallback; otherwise the first VGA
 * device seen is used so that a default always exists.
 */
static void fixup_vga(struct pci_dev *pdev)
{
        u16 cmd;

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
                vga_set_default_device(pdev);

}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
                              PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
1796