linux/arch/arm/kernel/bios32.c
/*
 *  linux/arch/arm/kernel/bios32.c
 *
 *  PCI bios-type initialisation for PCI machines
 *
 *  Bits taken from various places.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

static int debug_pci;

/*
 * We can't use pci_find_device() here since we are
 * called from interrupt context.
 */
static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
{
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                /*
                 * ignore host bridge - we handle
                 * that separately
                 */
                if (dev->bus->number == 0 && dev->devfn == 0)
                        continue;

                pci_read_config_word(dev, PCI_STATUS, &status);
                if (status == 0xffff)   /* no device, or not responding */
                        continue;

                if ((status & status_mask) == 0)
                        continue;

                /* clear the status errors */
                pci_write_config_word(dev, PCI_STATUS, status & status_mask);

                if (warn)
                        printk("(%s: %04X) ", pci_name(dev), status);
        }

        list_for_each_entry(dev, &bus->devices, bus_list)
                if (dev->subordinate)
                        pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}

void pcibios_report_status(u_int status_mask, int warn)
{
        struct list_head *l;

        list_for_each(l, &pci_root_buses) {
                struct pci_bus *bus = pci_bus_b(l);

                pcibios_bus_report_status(bus, status_mask, warn);
        }
}
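/*
 * Typical use: a platform's PCI error or abort handler can call, say,
 * pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT |
 * PCI_STATUS_SIG_TARGET_ABORT, 1) to print and clear the matching
 * status bits on every device; the 0xffff check above skips devices
 * that no longer respond to config reads.
 */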

/*
 * We don't use this to fix the device, but to initialise it.
 * It's not the intended use of a fixup, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
 * the following areas:
 * 1. park on CPU
 * 2. ISA bridge ping-pong
 * 3. ISA bridge master handling of target RETRY
 *
 * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
 * live with bug 2.
 */
static void pci_fixup_83c553(struct pci_dev *dev)
{
        /*
         * Set memory region to start at address 0, and enable IO
         */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;

        /*
         * All memory requests from ISA are to be channelled to PCI
         */
        pci_write_config_byte(dev, 0x48, 0xff);

        /*
         * Enable ping-pong on bus master to ISA bridge transactions.
         * This improves the sound DMA substantially.  The fixed
         * priority arbiter also helps (see below).
         */
        pci_write_config_byte(dev, 0x42, 0x01);

        /*
         * Enable PCI retry
         */
        pci_write_config_byte(dev, 0x40, 0x22);

        /*
         * We used to set the arbiter to "park on last master" (bit
         * 1 set), but unfortunately the CyberPro does not park the
         * bus.  We must therefore park on CPU.  Unfortunately, this
         * may trigger yet another bug in the 553.
         */
        pci_write_config_byte(dev, 0x83, 0x02);

        /*
         * Make the ISA DMA request lowest priority, and disable
         * rotating priorities completely.
         */
        pci_write_config_byte(dev, 0x80, 0x11);
        pci_write_config_byte(dev, 0x81, 0x00);

        /*
         * Route INTA input to IRQ 11, and set IRQ11 to be level
         * sensitive.
         */
        pci_write_config_word(dev, 0x44, 0xb000);
        outb(0x08, 0x4d1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
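/*
 * DECLARE_PCI_FIXUP_HEADER registers the fixup above with the PCI core;
 * it runs once for each device with a matching vendor/device ID while
 * the device's configuration header is being read during enumeration,
 * i.e. before resources are claimed and before any driver is bound.
 */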

static void pci_fixup_unassign(struct pci_dev *dev)
{
        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);

/*
 * If this device is the host bridge, mark it as such and prevent the
 * PCI layer from seeing the resources allocated to it.  These resources
 * are of no consequence to the PCI layer (they are handled elsewhere).
 */
static void pci_fixup_dec21285(struct pci_dev *dev)
{
        int i;

        if (dev->devfn == 0) {
                dev->class &= 0xff;
                dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);

/*
 * PCI IDE controllers use non-standard I/O port decoding; respect it.
 */
static void pci_fixup_ide_bases(struct pci_dev *dev)
{
        struct resource *r;
        int i;

        if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
                return;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                r = dev->resource + i;
                if ((r->start & ~0x80) == 0x374) {
                        r->start |= 2;
                        r->end = r->start;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
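/*
 * Worked example for the test above: the legacy IDE control blocks are
 * reported at 0x3f4 (primary) and 0x374 (secondary).  Masking off bit 7
 * maps both to 0x374, so both channels are caught; OR-ing in 2 then
 * points the resource at the actual control port (0x3f6 or 0x376) and
 * shrinks it to that single port.
 */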

/*
 * Put the DEC21142 to sleep
 */
static void pci_fixup_dec21142(struct pci_dev *dev)
{
        pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);

/*
 * The CY82C693 needs some rather major fixups to ensure that it does
 * the right thing.  Idea from the Alpha people, with a few additions.
 *
 * We ensure that the IDE base registers are set to 1f0/3f4 for the
 * primary bus, and 170/374 for the secondary bus.  Also, hide them
 * from the PCI subsystem view as well so we won't try to perform
 * our own auto-configuration on them.
 *
 * In addition, we ensure that the PCI IDE interrupts are routed to
 * IRQ 14 and IRQ 15 respectively.
 *
 * The above gets us to a point where the IDE on this device is
 * functional.  However, the CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
static void pci_fixup_cy82c693(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                u32 base0, base1;

                if (dev->class & 0x80) {        /* primary */
                        base0 = 0x1f0;
                        base1 = 0x3f4;
                } else {                        /* secondary */
                        base0 = 0x170;
                        base1 = 0x374;
                }

                pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
                                       base0 | PCI_BASE_ADDRESS_SPACE_IO);
                pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
                                       base1 | PCI_BASE_ADDRESS_SPACE_IO);

                dev->resource[0].start = 0;
                dev->resource[0].end   = 0;
                dev->resource[0].flags = 0;

                dev->resource[1].start = 0;
                dev->resource[1].end   = 0;
                dev->resource[1].flags = 0;
        } else if (PCI_FUNC(dev->devfn) == 0) {
                /*
                 * Set up IDE IRQ routing.
                 */
                pci_write_config_byte(dev, 0x4b, 14);
                pci_write_config_byte(dev, 0x4c, 15);

                /*
                 * Disable FREQACK handshake, enable USB.
                 */
                pci_write_config_byte(dev, 0x4d, 0x41);

                /*
                 * Enable PCI retry, and PCI post-write buffer.
                 */
                pci_write_config_byte(dev, 0x44, 0x17);

                /*
                 * Enable ISA master and DMA post write buffering.
                 */
                pci_write_config_byte(dev, 0x45, 0x03);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);

static void pci_fixup_it8152(struct pci_dev *dev)
{
        int i;
        /* fixup for ITE 8152 devices */
        /* FIXME: add defines for class 0x68000 and 0x80103 */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
            dev->class == 0x68000 ||
            dev->class == 0x80103) {
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);

/*
 * If the bus contains any of these devices, then we must not turn on
 * parity checking of any kind.  Currently these are the CyberPro 20x0
 * and the ITE 8152.
 */
static inline int pdev_bad_for_parity(struct pci_dev *dev)
{
        return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
                 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
                  dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
                (dev->vendor == PCI_VENDOR_ID_ITE &&
                 dev->device == PCI_DEVICE_ID_ITE_8152));
}

/*
 * pcibios_fixup_bus - Called after each bus is probed,
 * but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev;
        u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

        /*
         * Walk the devices on this bus, working out what we can
         * and can't support.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                pci_read_config_word(dev, PCI_STATUS, &status);

                /*
                 * If any device on this bus does not support fast back
                 * to back transfers, then the bus as a whole is not able
                 * to support them.  Having fast back to back transfers
                 * on saves us one PCI cycle per transaction.
                 */
                if (!(status & PCI_STATUS_FAST_BACK))
                        features &= ~PCI_COMMAND_FAST_BACK;

                if (pdev_bad_for_parity(dev))
                        features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_PCI:
                        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
                        status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
                        status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
                        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
                        break;

                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
                        status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
                        pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
                        break;
                }
        }

        /*
         * Now walk the devices again, this time setting them up.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 cmd;

                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                cmd |= features;
                pci_write_config_word(dev, PCI_COMMAND, cmd);

                pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
                                      L1_CACHE_BYTES >> 2);
        }

        /*
         * Propagate the flags to the PCI bridge.
         */
        if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
                if (features & PCI_COMMAND_FAST_BACK)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
                if (features & PCI_COMMAND_PARITY)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
        }

        /*
         * Report what we did for this bus
         */
        printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
                bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
EXPORT_SYMBOL(pcibios_fixup_bus);
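/*
 * Note on the PCI_CACHE_LINE_SIZE write above: the register is
 * programmed in units of 32-bit words, hence the L1_CACHE_BYTES >> 2;
 * a 32-byte cache line, for example, is written as 8.
 */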

/*
 * Swizzle the device pin each time we cross a bridge.  If a platform does
 * not provide a swizzle function, we perform the standard PCI swizzling.
 *
 * The default swizzling walks up the bus tree one level at a time, applying
 * the standard swizzle function at each step, stopping when it finds the PCI
 * root bus.  This will return the slot number of the bridge device on the
 * root bus and the interrupt pin on that device which should correspond
 * with the downstream device interrupt.
 *
 * Platforms may override this, in which case the slot and pin returned
 * depend entirely on the platform code.  However, please note that the
 * PCI standard swizzle is implemented on plug-in cards and Cardbus based
 * PCI extenders, so it cannot be ignored.
 */
static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int slot, oldpin = *pin;

        if (sys->swizzle)
                slot = sys->swizzle(dev, pin);
        else
                slot = pci_common_swizzle(dev, pin);

        if (debug_pci)
                printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
                        pci_name(dev), oldpin, *pin, slot);

        return slot;
}
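/*
 * Worked example of the standard swizzle: at each bridge crossing the
 * pin is rotated by the slot number, pin = ((pin - 1 + slot) % 4) + 1.
 * A device in slot 2 asserting INTA (pin 1) behind a bridge that sits
 * in slot 3 on the root bus therefore presents as pin 3 (INTC) on that
 * bridge, and pcibios_swizzle() returns slot 3, pin 3 for the platform
 * map_irq() callback below to translate.
 */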

/*
 * Map a slot/pin to an IRQ.
 */
static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int irq = -1;

        if (sys->map_irq)
                irq = sys->map_irq(dev, slot, pin);

        if (debug_pci)
                printk("PCI: %s mapping slot %d pin %d => irq %d\n",
                        pci_name(dev), slot, pin, irq);

        return irq;
}

static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
        int ret;
        struct pci_host_bridge_window *window;

        if (list_empty(&sys->resources)) {
                pci_add_resource_offset(&sys->resources,
                                        &iomem_resource, sys->mem_offset);
        }

        list_for_each_entry(window, &sys->resources, list) {
                if (resource_type(window->res) == IORESOURCE_IO)
                        return 0;
        }

        sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
        sys->io_res.end = (busnr + 1) * SZ_64K - 1;
        sys->io_res.flags = IORESOURCE_IO;
        sys->io_res.name = sys->io_res_name;
        sprintf(sys->io_res_name, "PCI%d I/O", busnr);

        ret = request_resource(&ioport_resource, &sys->io_res);
        if (ret) {
                pr_err("PCI: unable to allocate I/O port region (%d)\n", ret);
                return ret;
        }
        pci_add_resource_offset(&sys->resources, &sys->io_res,
                                sys->io_offset);

        return 0;
}
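/*
 * With the defaults above each root bus gets a 64K I/O window: bus 0
 * spans pcibios_min_io..0xffff (the GCC "?:" extension falls back to
 * pcibios_min_io when busnr * SZ_64K is zero), bus 1 spans
 * 0x10000..0x1ffff, and so on.
 */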

static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
{
        struct pci_sys_data *sys = NULL;
        int ret;
        int nr, busnr;

        for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
                sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
                if (!sys)
                        panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_DOMAINS
                sys->domain  = hw->domain;
#endif
                sys->busnr   = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
                INIT_LIST_HEAD(&sys->resources);

                if (hw->private_data)
                        sys->private_data = hw->private_data[nr];

                ret = hw->setup(nr, sys);

                if (ret > 0) {
                        ret = pcibios_init_resources(nr, sys);
                        if (ret) {
                                kfree(sys);
                                break;
                        }

                        if (hw->scan)
                                sys->bus = hw->scan(nr, sys);
                        else
                                sys->bus = pci_scan_root_bus(NULL, sys->busnr,
                                                hw->ops, sys, &sys->resources);

                        if (!sys->bus)
                                panic("PCI: unable to scan bus!");

                        busnr = sys->bus->busn_res.end + 1;

                        list_add(&sys->node, head);
                } else {
                        kfree(sys);
                        if (ret < 0)
                                break;
                }
        }
}

void pci_common_init(struct hw_pci *hw)
{
        struct pci_sys_data *sys;
        LIST_HEAD(head);

        pci_add_flags(PCI_REASSIGN_ALL_RSRC);
        if (hw->preinit)
                hw->preinit();
        pcibios_init_hw(hw, &head);
        if (hw->postinit)
                hw->postinit();

        pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                if (!pci_has_flag(PCI_PROBE_ONLY)) {
                        /*
                         * Size the bridge windows.
                         */
                        pci_bus_size_bridges(bus);

                        /*
                         * Assign resources.
                         */
                        pci_bus_assign_resources(bus);

                        /*
                         * Enable bridges
                         */
                        pci_enable_bridges(bus);
                }

                /*
                 * Tell drivers about devices found.
                 */
                pci_bus_add_devices(bus);
        }
}
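/*
 * Illustrative sketch, not part of this file: a platform port normally
 * fills in a struct hw_pci and hands it to pci_common_init() from its
 * machine initialisation code.  The "foo" names below are hypothetical;
 * the real config-space accessors and the setup/map_irq callbacks come
 * from the platform.
 */
#if 0
static struct hw_pci foo_pci __initdata = {
        .nr_controllers = 1,
        .ops            = &foo_pci_ops,         /* struct pci_ops for config cycles */
        .setup          = foo_pci_setup,        /* claim windows; return 1 on success */
        .map_irq        = foo_pci_map_irq,      /* slot/pin -> Linux IRQ number */
};

static void __init foo_pci_init(void)
{
        pci_common_init(&foo_pci);
}
#endif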

#ifndef CONFIG_PCI_HOST_ITE8152
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
#endif

char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "debug")) {
                debug_pci = 1;
                return NULL;
        } else if (!strcmp(str, "firmware")) {
                pci_add_flags(PCI_PROBE_ONLY);
                return NULL;
        }
        return str;
}
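/*
 * These options arrive via the "pci=" kernel parameter: "pci=debug"
 * enables the verbose swizzle/IRQ messages above, and "pci=firmware"
 * keeps the firmware's resource assignments (PCI_PROBE_ONLY).
 * Returning str unchanged tells the generic parser the option was not
 * recognised here.
 */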

/*
 * From arch/i386/kernel/pci-i386.c:
 *
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                resource_size_t size, resource_size_t align)
{
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO && start & 0x300)
                start = (start + 0x3ff) & ~0x3ff;

        start = (start + align - 1) & ~(align - 1);

        return start;
}
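/*
 * Worked example: an I/O request at 0x2900 has bits set in the 0x300
 * mask, so it is rounded up to the next 1K boundary, 0x2c00, which is
 * back in the safe 0x000-0x0ff range modulo 0x400.  A request at
 * 0x2800 is left alone.
 */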

/**
 * pcibios_enable_device - Enable I/O and memory.
 * @dev: PCI device to be enabled
 * @mask: bitmask of resources (BARs) the caller wants enabled
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, old_cmd;
        int idx;
        struct resource *r;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        old_cmd = cmd;
        for (idx = 0; idx < 6; idx++) {
                /* Only set up the requested stuff */
                if (!(mask & (1 << idx)))
                        continue;

                r = dev->resource + idx;
                if (!r->start && r->end) {
                        printk(KERN_ERR "PCI: Device %s not available because"
                               " of resource collisions\n", pci_name(dev));
                        return -EINVAL;
                }
                if (r->flags & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (r->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }

        /*
         * Bridges (eg, cardbus bridges) need to be fully enabled
         */
        if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
                cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

        if (cmd != old_cmd) {
                printk("PCI: enabling device %s (%04x -> %04x)\n",
                       pci_name(dev), old_cmd, cmd);
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
}
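/*
 * Example: a mask of 0x05 asks for resources 0 and 2 only, so the
 * command register bits are derived from the flags of just those two
 * BARs.  The mask is built by the PCI core from the caller's request
 * (e.g. pci_enable_device() vs pci_enable_device_io()).
 */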

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        struct pci_sys_data *root = dev->sysdata;
        unsigned long phys;

        if (mmap_state == pci_mmap_io) {
                return -EINVAL;
        } else {
                phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
        }

        /*
         * Map the region as uncached device memory.
         */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (remap_pfn_range(vma, vma->vm_start, phys,
                             vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
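/*
 * The code above treats vm_pgoff as a page offset in PCI (bus) memory
 * space and mem_offset as the bus-to-CPU offset.  For example, with a
 * mem_offset of 0x80000000 and an mmap offset of 0x100000, the region
 * is backed by CPU physical address 0x80100000 (pfn 0x80100 with 4K
 * pages).
 */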

void __init pci_map_io_early(unsigned long pfn)
{
        struct map_desc pci_io_desc = {
                .virtual        = PCI_IO_VIRT_BASE,
                .type           = MT_DEVICE,
                .length         = SZ_64K,
        };

        pci_io_desc.pfn = pfn;
        iotable_init(&pci_io_desc, 1);
}
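/*
 * Platforms with a memory-mapped PCI I/O window call this early from
 * their map_io setup, e.g. pci_map_io_early(__phys_to_pfn(FOO_PCI_IO_PHYS))
 * with FOO_PCI_IO_PHYS standing in for the SoC-specific physical base,
 * so that the fixed 64K window at PCI_IO_VIRT_BASE is mapped before PCI
 * enumeration starts.
 */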