/*
 *  linux/arch/arm/kernel/bios32.c
 *
 *  PCI bios-type initialisation for PCI machines
 *
 *  Bits taken from various places.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

static int debug_pci;

/*
 * We can't use pci_find_device() here since we are
 * called from interrupt context.
 */
static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
{
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                /*
                 * ignore host bridge - we handle
                 * that separately
                 */
                if (dev->bus->number == 0 && dev->devfn == 0)
                        continue;

                pci_read_config_word(dev, PCI_STATUS, &status);
                if (status == 0xffff)
                        continue;

                if ((status & status_mask) == 0)
                        continue;

                /* clear the status errors */
                pci_write_config_word(dev, PCI_STATUS, status & status_mask);

                if (warn)
                        printk("(%s: %04X) ", pci_name(dev), status);
        }

        list_for_each_entry(dev, &bus->devices, bus_list)
                if (dev->subordinate)
                        pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}

void pcibios_report_status(u_int status_mask, int warn)
{
        struct list_head *l;

        list_for_each(l, &pci_root_buses) {
                struct pci_bus *bus = pci_bus_b(l);

                pcibios_bus_report_status(bus, status_mask, warn);
        }
}

/*
 * This fixup doesn't repair a fault in the device; we (ab)use it to
 * initialise the device instead.  That isn't the intended use of a
 * header fixup, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
 * the following areas:
 * 1. park on CPU
 * 2. ISA bridge ping-pong
 * 3. ISA bridge master handling of target RETRY
 *
 * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
 * live with bug 2.
 */
static void pci_fixup_83c553(struct pci_dev *dev)
{
        /*
         * Set memory region to start at address 0, and enable IO
         */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;

        /*
         * All memory requests from ISA to be channelled to PCI
         */
        pci_write_config_byte(dev, 0x48, 0xff);

        /*
         * Enable ping-pong on bus master to ISA bridge transactions.
         * This improves the sound DMA substantially.  The fixed
         * priority arbiter also helps (see below).
         */
        pci_write_config_byte(dev, 0x42, 0x01);

        /*
         * Enable PCI retry
         */
        pci_write_config_byte(dev, 0x40, 0x22);

        /*
         * We used to set the arbiter to "park on last master" (bit
         * 1 set), but unfortunately the CyberPro does not park the
         * bus.  We must therefore park on CPU.  Unfortunately, this
         * may trigger yet another bug in the 553.
         */
        pci_write_config_byte(dev, 0x83, 0x02);

        /*
         * Make the ISA DMA request lowest priority, and disable
         * rotating priorities completely.
         */
        pci_write_config_byte(dev, 0x80, 0x11);
        pci_write_config_byte(dev, 0x81, 0x00);

        /*
         * Route INTA input to IRQ 11, and set IRQ11 to be level
         * sensitive.
         */
        pci_write_config_word(dev, 0x44, 0xb000);
        outb(0x08, 0x4d1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);

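/*
 * Rebase the first BAR to zero so the PCI core treats it as unassigned
 * and allocates a fresh region for it.
 */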
static void pci_fixup_unassign(struct pci_dev *dev)
{
        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);

/*
 * Prevent the PCI layer from seeing the resources allocated to this device
 * if it is the host bridge by marking it as such.  These resources are of
 * no consequence to the PCI layer (they are handled elsewhere).
 */
static void pci_fixup_dec21285(struct pci_dev *dev)
{
        int i;

        if (dev->devfn == 0) {
                dev->class &= 0xff;
                dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);

/*
 * PCI IDE controllers use non-standard I/O port decoding, respect it.
 */
static void pci_fixup_ide_bases(struct pci_dev *dev)
{
        struct resource *r;
        int i;

        if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
                return;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                r = dev->resource + i;
                if ((r->start & ~0x80) == 0x374) {
                        r->start |= 2;
                        r->end = r->start;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);

/*
 * Put the DEC21142 to sleep
 */
static void pci_fixup_dec21142(struct pci_dev *dev)
{
        pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);

/*
 * The CY82C693 needs some rather major fixups to ensure that it does
 * the right thing.  Idea from the Alpha people, with a few additions.
 *
 * We ensure that the IDE base registers are set to 1f0/3f4 for the
 * primary bus, and 170/374 for the secondary bus.  We also hide them
 * from the PCI subsystem view so we won't try to perform our own
 * auto-configuration on them.
 *
 * In addition, we ensure that the PCI IDE interrupts are routed to
 * IRQ 14 and IRQ 15 respectively.
 *
 * The above gets us to a point where the IDE on this device is
 * functional.  However, the CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
static void pci_fixup_cy82c693(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                u32 base0, base1;

                if (dev->class & 0x80) {        /* primary */
                        base0 = 0x1f0;
                        base1 = 0x3f4;
                } else {                        /* secondary */
                        base0 = 0x170;
                        base1 = 0x374;
                }

                pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
                                       base0 | PCI_BASE_ADDRESS_SPACE_IO);
                pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
                                       base1 | PCI_BASE_ADDRESS_SPACE_IO);

                dev->resource[0].start = 0;
                dev->resource[0].end   = 0;
                dev->resource[0].flags = 0;

                dev->resource[1].start = 0;
                dev->resource[1].end   = 0;
                dev->resource[1].flags = 0;
        } else if (PCI_FUNC(dev->devfn) == 0) {
                /*
                 * Setup IDE IRQ routing.
                 */
                pci_write_config_byte(dev, 0x4b, 14);
                pci_write_config_byte(dev, 0x4c, 15);

                /*
                 * Disable FREQACK handshake, enable USB.
                 */
                pci_write_config_byte(dev, 0x4d, 0x41);

                /*
                 * Enable PCI retry, and PCI post-write buffer.
                 */
                pci_write_config_byte(dev, 0x44, 0x17);

                /*
                 * Enable ISA master and DMA post write buffering.
                 */
                pci_write_config_byte(dev, 0x45, 0x03);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);

static void pci_fixup_it8152(struct pci_dev *dev)
{
        int i;
        /* fixup for ITE 8152 devices */
        /* FIXME: add defines for class 0x68000 and 0x80103 */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
            dev->class == 0x68000 ||
            dev->class == 0x80103) {
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);

/*
 * If the bus contains any of these devices, then we must not turn on
 * parity checking of any kind.  Currently this is the CyberPro 20x0
 * and the ITE 8152.
 */
static inline int pdev_bad_for_parity(struct pci_dev *dev)
{
        return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
                 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
                  dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
                (dev->vendor == PCI_VENDOR_ID_ITE &&
                 dev->device == PCI_DEVICE_ID_ITE_8152));
}

/*
 * pcibios_fixup_bus - Called after each bus is probed,
 * but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev;
        u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

        /*
         * Walk the devices on this bus, working out what we can
         * and can't support.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                pci_read_config_word(dev, PCI_STATUS, &status);

                /*
                 * If any device on this bus does not support fast back
                 * to back transfers, then the bus as a whole is not able
                 * to support them.  Having fast back to back transfers
                 * on saves us one PCI cycle per transaction.
                 */
                if (!(status & PCI_STATUS_FAST_BACK))
                        features &= ~PCI_COMMAND_FAST_BACK;

                if (pdev_bad_for_parity(dev))
                        features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_PCI:
                        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
                        status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
                        status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
                        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
                        break;

                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
                        status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
                        pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
                        break;
                }
        }

        /*
         * Now walk the devices again, this time setting them up.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 cmd;

                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                cmd |= features;
                pci_write_config_word(dev, PCI_COMMAND, cmd);

                pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
                                      L1_CACHE_BYTES >> 2);
        }

        /*
         * Propagate the flags to the PCI bridge.
         */
        if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
                if (features & PCI_COMMAND_FAST_BACK)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
                if (features & PCI_COMMAND_PARITY)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
        }

        /*
         * Report what we did for this bus
         */
        printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
                bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
EXPORT_SYMBOL(pcibios_fixup_bus);

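/*
 * Forward bus addition/removal notifications to the platform's optional
 * add_bus/remove_bus hooks in struct pci_sys_data.
 */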
void pcibios_add_bus(struct pci_bus *bus)
{
        struct pci_sys_data *sys = bus->sysdata;
        if (sys->add_bus)
                sys->add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        struct pci_sys_data *sys = bus->sysdata;
        if (sys->remove_bus)
                sys->remove_bus(bus);
}

/*
 * Swizzle the device pin each time we cross a bridge.  If a platform does
 * not provide a swizzle function, we perform the standard PCI swizzling.
 *
 * The default swizzling walks up the bus tree one level at a time, applying
 * the standard swizzle function at each step, stopping when it finds the PCI
 * root bus.  This will return the slot number of the bridge device on the
 * root bus and the interrupt pin on that device which should correspond
 * with the downstream device interrupt.
 *
 * Platforms may override this, in which case the slot and pin returned
 * depend entirely on the platform code.  However, please note that the
 * PCI standard swizzle is implemented on plug-in cards and Cardbus based
 * PCI extenders, so it can not be ignored.
 */
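/*
 * For example, with the standard swizzle a device in slot 3 behind a
 * single bridge asserting INTA (pin 1) appears at the bridge as
 * pin ((1 - 1 + 3) % 4) + 1 = 4, i.e. INTD.
 */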
static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int slot, oldpin = *pin;

        if (sys->swizzle)
                slot = sys->swizzle(dev, pin);
        else
                slot = pci_common_swizzle(dev, pin);

        if (debug_pci)
                printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
                        pci_name(dev), oldpin, *pin, slot);

        return slot;
}

/*
 * Map a slot/pin to an IRQ.
 */
static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int irq = -1;

        if (sys->map_irq)
                irq = sys->map_irq(dev, slot, pin);

        if (debug_pci)
                printk("PCI: %s mapping slot %d pin %d => irq %d\n",
                        pci_name(dev), slot, pin, irq);

        return irq;
}

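/*
 * Populate the resource list for a controller: fall back to the whole of
 * iomem_resource if the platform supplied no memory windows, and create a
 * 64K I/O port window if no I/O window was provided.
 */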
static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
        int ret;
        struct pci_host_bridge_window *window;

        if (list_empty(&sys->resources)) {
                pci_add_resource_offset(&sys->resources,
                         &iomem_resource, sys->mem_offset);
        }

        list_for_each_entry(window, &sys->resources, list) {
                if (resource_type(window->res) == IORESOURCE_IO)
                        return 0;
        }

        sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
        sys->io_res.end = (busnr + 1) * SZ_64K - 1;
        sys->io_res.flags = IORESOURCE_IO;
        sys->io_res.name = sys->io_res_name;
        sprintf(sys->io_res_name, "PCI%d I/O", busnr);

        ret = request_resource(&ioport_resource, &sys->io_res);
        if (ret) {
                pr_err("PCI: unable to allocate I/O port region (%d)\n", ret);
                return ret;
        }
        pci_add_resource_offset(&sys->resources, &sys->io_res,
                                sys->io_offset);

        return 0;
}

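/*
 * Allocate one pci_sys_data per controller described by hw_pci, call the
 * platform's setup hook, scan the resulting root bus and collect the
 * per-controller data on the caller's list.
 */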
static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                            struct list_head *head)
{
        struct pci_sys_data *sys = NULL;
        int ret;
        int nr, busnr;

        for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
                sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
                if (!sys)
                        panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_DOMAINS
                sys->domain  = hw->domain;
#endif
                sys->busnr   = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
                sys->align_resource = hw->align_resource;
                sys->add_bus = hw->add_bus;
                sys->remove_bus = hw->remove_bus;
                INIT_LIST_HEAD(&sys->resources);

                if (hw->private_data)
                        sys->private_data = hw->private_data[nr];

                ret = hw->setup(nr, sys);

                if (ret > 0) {
                        ret = pcibios_init_resources(nr, sys);
                        if (ret) {
                                kfree(sys);
                                break;
                        }

                        if (hw->scan)
                                sys->bus = hw->scan(nr, sys);
                        else
                                sys->bus = pci_scan_root_bus(parent, sys->busnr,
                                                hw->ops, sys, &sys->resources);

                        if (!sys->bus)
                                panic("PCI: unable to scan bus!");

                        busnr = sys->bus->busn_res.end + 1;

                        list_add(&sys->node, head);
                } else {
                        kfree(sys);
                        if (ret < 0)
                                break;
                }
        }
}

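/*
 * Common entry point for ARM PCI platforms: run the pre/post-init hooks,
 * probe all controllers, fix up IRQ assignments and, unless PCI_PROBE_ONLY
 * is set, size and assign the bridge windows before adding the devices.
 */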
void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
{
        struct pci_sys_data *sys;
        LIST_HEAD(head);

        pci_add_flags(PCI_REASSIGN_ALL_RSRC);
        if (hw->preinit)
                hw->preinit();
        pcibios_init_hw(parent, hw, &head);
        if (hw->postinit)
                hw->postinit();

        pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                if (!pci_has_flag(PCI_PROBE_ONLY)) {
                        /*
                         * Size the bridge windows.
                         */
                        pci_bus_size_bridges(bus);

                        /*
                         * Assign resources.
                         */
                        pci_bus_assign_resources(bus);
                }

                /*
                 * Tell drivers about devices found.
                 */
                pci_bus_add_devices(bus);
        }
}

#ifndef CONFIG_PCI_HOST_ITE8152
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
#endif

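/*
 * Handle the "pci=" command line options we understand: "debug" enables
 * verbose PCI messages, "firmware" keeps the firmware's resource
 * assignments (PCI_PROBE_ONLY).
 */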
char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "debug")) {
                debug_pci = 1;
                return NULL;
        } else if (!strcmp(str, "firmware")) {
                pci_add_flags(PCI_PROBE_ONLY);
                return NULL;
        }
        return str;
}

/*
 * From arch/i386/kernel/pci-i386.c:
 *
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
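/*
 * For example, a candidate I/O start of 0x2900 lies in the mirrored range
 * (0x2900 & 0x300 != 0), so it is rounded up to the next 0x400 boundary,
 * 0x2c00, before the normal alignment is applied.
 */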
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        struct pci_sys_data *sys = dev->sysdata;
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO && start & 0x300)
                start = (start + 0x3ff) & ~0x3ff;

        start = (start + align - 1) & ~(align - 1);

        if (sys->align_resource)
                return sys->align_resource(dev, res, start, size, align);

        return start;
}

/**
 * pcibios_enable_device - Enable I/O and memory.
 * @dev: PCI device to be enabled
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, old_cmd;
        int idx;
        struct resource *r;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        old_cmd = cmd;
        for (idx = 0; idx < 6; idx++) {
                /* Only set up the requested stuff */
                if (!(mask & (1 << idx)))
                        continue;

                r = dev->resource + idx;
                if (!r->start && r->end) {
                        printk(KERN_ERR "PCI: Device %s not available because"
                               " of resource collisions\n", pci_name(dev));
                        return -EINVAL;
                }
                if (r->flags & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (r->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }

        /*
         * Bridges (eg, cardbus bridges) need to be fully enabled
         */
        if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
                cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

        if (cmd != old_cmd) {
                printk("PCI: enabling device %s (%04x -> %04x)\n",
                       pci_name(dev), old_cmd, cmd);
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
}

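/*
 * Allow userspace to mmap PCI memory space; mapping I/O space is not
 * supported here.
 */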
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        struct pci_sys_data *root = dev->sysdata;
        unsigned long phys;

        if (mmap_state == pci_mmap_io) {
                return -EINVAL;
        } else {
                phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
        }

        /*
         * Map the region as non-cacheable device memory
         */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (remap_pfn_range(vma, vma->vm_start, phys,
                             vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

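/*
 * Create the static 64K mapping for the PCI I/O space at PCI_IO_VIRT_BASE.
 * This must be called early, from the machine's map_io hook, while
 * iotable_init() is still usable.
 */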
void __init pci_map_io_early(unsigned long pfn)
{
        struct map_desc pci_io_desc = {
                .virtual        = PCI_IO_VIRT_BASE,
                .type           = MT_DEVICE,
                .length         = SZ_64K,
        };

        pci_io_desc.pfn = pfn;
        iotable_init(&pci_io_desc, 1);
}