linux/arch/arm/kernel/bios32.c
/*
 *  linux/arch/arm/kernel/bios32.c
 *
 *  PCI bios-type initialisation for PCI machines
 *
 *  Bits taken from various places.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

static int debug_pci;

#ifdef CONFIG_PCI_MSI
struct msi_controller *pcibios_msi_controller(struct pci_dev *dev)
{
        struct pci_sys_data *sysdata = dev->bus->sysdata;

        return sysdata->msi_ctrl;
}
#endif

/*
 * We can't use pci_get_device() here since we are
 * called from interrupt context.
 */
static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
{
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                /*
                 * ignore host bridge - we handle
                 * that separately
                 */
                if (dev->bus->number == 0 && dev->devfn == 0)
                        continue;

                pci_read_config_word(dev, PCI_STATUS, &status);
                if (status == 0xffff)
                        continue;

                if ((status & status_mask) == 0)
                        continue;

                /* clear the status errors */
                pci_write_config_word(dev, PCI_STATUS, status & status_mask);

                if (warn)
                        printk("(%s: %04X) ", pci_name(dev), status);
        }

        list_for_each_entry(dev, &bus->devices, bus_list)
                if (dev->subordinate)
                        pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}

void pcibios_report_status(u_int status_mask, int warn)
{
        struct pci_bus *bus;

        list_for_each_entry(bus, &pci_root_buses, node)
                pcibios_bus_report_status(bus, status_mask, warn);
}

/*
 * We don't use this to fix the device, but to initialise it.  That is not
 * the intended use of a fixup, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
 * the following areas:
 * 1. park on CPU
 * 2. ISA bridge ping-pong
 * 3. ISA bridge master handling of target RETRY
 *
 * Bug 3 is responsible for the sound DMA grinding to a halt.  We now
 * live with bug 2.
 */
static void pci_fixup_83c553(struct pci_dev *dev)
{
        /*
         * Set memory region to start at address 0, and enable IO
         */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;

        /*
         * All memory requests from ISA are to be channelled to PCI
         */
        pci_write_config_byte(dev, 0x48, 0xff);

        /*
         * Enable ping-pong on bus master to ISA bridge transactions.
         * This improves the sound DMA substantially.  The fixed
         * priority arbiter also helps (see below).
         */
        pci_write_config_byte(dev, 0x42, 0x01);

        /*
         * Enable PCI retry
         */
        pci_write_config_byte(dev, 0x40, 0x22);

        /*
         * We used to set the arbiter to "park on last master" (bit
         * 1 set), but unfortunately the CyberPro does not park the
         * bus.  We must therefore park on CPU.  Unfortunately, this
         * may trigger yet another bug in the 553.
         */
        pci_write_config_byte(dev, 0x83, 0x02);

        /*
         * Make the ISA DMA request lowest priority, and disable
         * rotating priorities completely.
         */
        pci_write_config_byte(dev, 0x80, 0x11);
        pci_write_config_byte(dev, 0x81, 0x00);

        /*
         * Route INTA input to IRQ 11, and set IRQ11 to be level
         * sensitive.
         */
        pci_write_config_word(dev, 0x44, 0xb000);
        outb(0x08, 0x4d1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);

static void pci_fixup_unassign(struct pci_dev *dev)
{
        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);

/*
 * Prevent the PCI layer from seeing the resources allocated to this device
 * if it is the host bridge by marking it as such.  These resources are of
 * no consequence to the PCI layer (they are handled elsewhere).
 */
static void pci_fixup_dec21285(struct pci_dev *dev)
{
        int i;

        if (dev->devfn == 0) {
                dev->class &= 0xff;
                dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);

/*
 * PCI IDE controllers use non-standard I/O port decoding, respect it.
 */
static void pci_fixup_ide_bases(struct pci_dev *dev)
{
        struct resource *r;
        int i;

        if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
                return;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                r = dev->resource + i;
                if ((r->start & ~0x80) == 0x374) {
                        r->start |= 2;
                        r->end = r->start;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);

/*
 * Put the DEC21142 to sleep
 */
static void pci_fixup_dec21142(struct pci_dev *dev)
{
        pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);

/*
 * The CY82C693 needs some rather major fixups to ensure that it does
 * the right thing.  Idea from the Alpha people, with a few additions.
 *
 * We ensure that the IDE base registers are set to 1f0/3f4 for the
 * primary bus, and 170/374 for the secondary bus.  Also, hide them
 * from the PCI subsystem's view so we won't try to perform our own
 * auto-configuration on them.
 *
 * In addition, we ensure that the PCI IDE interrupts are routed to
 * IRQ 14 and IRQ 15 respectively.
 *
 * The above gets us to a point where the IDE on this device is
 * functional.  However, the CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
static void pci_fixup_cy82c693(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                u32 base0, base1;

                if (dev->class & 0x80) {        /* primary */
                        base0 = 0x1f0;
                        base1 = 0x3f4;
                } else {                        /* secondary */
                        base0 = 0x170;
                        base1 = 0x374;
                }

                pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
                                       base0 | PCI_BASE_ADDRESS_SPACE_IO);
                pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
                                       base1 | PCI_BASE_ADDRESS_SPACE_IO);

                dev->resource[0].start = 0;
                dev->resource[0].end   = 0;
                dev->resource[0].flags = 0;

                dev->resource[1].start = 0;
                dev->resource[1].end   = 0;
                dev->resource[1].flags = 0;
        } else if (PCI_FUNC(dev->devfn) == 0) {
                /*
                 * Set up the IDE IRQ routing.
                 */
                pci_write_config_byte(dev, 0x4b, 14);
                pci_write_config_byte(dev, 0x4c, 15);

                /*
                 * Disable FREQACK handshake, enable USB.
                 */
                pci_write_config_byte(dev, 0x4d, 0x41);

                /*
                 * Enable PCI retry, and PCI post-write buffer.
                 */
                pci_write_config_byte(dev, 0x44, 0x17);

                /*
                 * Enable ISA master and DMA post write buffering.
                 */
                pci_write_config_byte(dev, 0x45, 0x03);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);

static void pci_fixup_it8152(struct pci_dev *dev)
{
        int i;
        /* fixup for ITE 8152 devices */
        /* FIXME: add defines for class 0x68000 and 0x80103 */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
            dev->class == 0x68000 ||
            dev->class == 0x80103) {
                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        dev->resource[i].start = 0;
                        dev->resource[i].end   = 0;
                        dev->resource[i].flags = 0;
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);

/*
 * If the bus contains any of these devices, then we must not turn on
 * parity checking of any kind.  Currently these are the CyberPro 20x0
 * and the ITE 8152.
 */
static inline int pdev_bad_for_parity(struct pci_dev *dev)
{
        return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
                 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
                  dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
                (dev->vendor == PCI_VENDOR_ID_ITE &&
                 dev->device == PCI_DEVICE_ID_ITE_8152));
}

/*
 * pcibios_fixup_bus - Called after each bus is probed,
 * but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev;
        u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

        /*
         * Walk the devices on this bus, working out what we can
         * and can't support.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                pci_read_config_word(dev, PCI_STATUS, &status);

                /*
                 * If any device on this bus does not support fast back
                 * to back transfers, then the bus as a whole is not able
                 * to support them.  Having fast back to back transfers
                 * on saves us one PCI cycle per transaction.
                 */
                if (!(status & PCI_STATUS_FAST_BACK))
                        features &= ~PCI_COMMAND_FAST_BACK;

                if (pdev_bad_for_parity(dev))
                        features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_PCI:
                        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
                        status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
                        status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
                        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
                        break;

                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
                        status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
                        pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
                        break;
                }
        }

        /*
         * Now walk the devices again, this time setting them up.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 cmd;

                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                cmd |= features;
                pci_write_config_word(dev, PCI_COMMAND, cmd);

                pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
                                      L1_CACHE_BYTES >> 2);
        }

        /*
         * Propagate the flags to the PCI bridge.
         */
        if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
                if (features & PCI_COMMAND_FAST_BACK)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
                if (features & PCI_COMMAND_PARITY)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
        }

        /*
         * Report what we did for this bus
         */
        pr_info("PCI: bus%d: Fast back to back transfers %sabled\n",
                bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
EXPORT_SYMBOL(pcibios_fixup_bus);

/*
 * Swizzle the device pin each time we cross a bridge.  If a platform does
 * not provide a swizzle function, we perform the standard PCI swizzling.
 *
 * The default swizzling walks up the bus tree one level at a time, applying
 * the standard swizzle function at each step, stopping when it finds the PCI
 * root bus.  This will return the slot number of the bridge device on the
 * root bus and the interrupt pin on that device which should correspond
 * with the downstream device interrupt.
 *
 * Platforms may override this, in which case the slot and pin returned
 * depend entirely on the platform code.  However, please note that the
 * PCI standard swizzle is implemented on plug-in cards and Cardbus based
 * PCI extenders, so it can not be ignored.
 */
static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int slot, oldpin = *pin;

        if (sys->swizzle)
                slot = sys->swizzle(dev, pin);
        else
                slot = pci_common_swizzle(dev, pin);

        if (debug_pci)
                printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
                        pci_name(dev), oldpin, *pin, slot);

        return slot;
}

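/*
 * For reference, the standard swizzle applied by pci_common_swizzle() is
 * equivalent to the following sketch (illustrative only, not the exact
 * core-PCI implementation): walk up towards the root bus, rotating the
 * pin by the slot number at each bridge crossed.
 *
 *	while (!pci_is_root_bus(dev->bus)) {
 *		pin = (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
 *		dev = dev->bus->self;
 *	}
 *	return PCI_SLOT(dev->devfn);
 */
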
/*
 * Map a slot/pin to an IRQ.
 */
static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        struct pci_sys_data *sys = dev->sysdata;
        int irq = -1;

        if (sys->map_irq)
                irq = sys->map_irq(dev, slot, pin);

        if (debug_pci)
                printk("PCI: %s mapping slot %d pin %d => irq %d\n",
                        pci_name(dev), slot, pin, irq);

        return irq;
}

static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
{
        int ret;
        struct resource_entry *window;

        if (list_empty(&sys->resources)) {
                pci_add_resource_offset(&sys->resources,
                                        &iomem_resource, sys->mem_offset);
        }

        resource_list_for_each_entry(window, &sys->resources)
                if (resource_type(window->res) == IORESOURCE_IO)
                        return 0;

        /*
         * No I/O window was provided by the controller, so give each root
         * bus its own 64K slice of I/O space; bus 0 starts at
         * pcibios_min_io rather than port 0.
         */
        sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
        sys->io_res.end = (busnr + 1) * SZ_64K - 1;
        sys->io_res.flags = IORESOURCE_IO;
        sys->io_res.name = sys->io_res_name;
        sprintf(sys->io_res_name, "PCI%d I/O", busnr);

        ret = request_resource(&ioport_resource, &sys->io_res);
        if (ret) {
                pr_err("PCI: unable to allocate I/O port region (%d)\n", ret);
                return ret;
        }
        pci_add_resource_offset(&sys->resources, &sys->io_res,
                                sys->io_offset);

        return 0;
}

static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                            struct list_head *head)
{
        struct pci_sys_data *sys = NULL;
        int ret;
        int nr, busnr;

        for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
                sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
                if (!sys)
                        panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_MSI
                sys->msi_ctrl = hw->msi_ctrl;
#endif
                sys->busnr   = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
                sys->align_resource = hw->align_resource;
                INIT_LIST_HEAD(&sys->resources);

                if (hw->private_data)
                        sys->private_data = hw->private_data[nr];

                ret = hw->setup(nr, sys);

                if (ret > 0) {
                        ret = pcibios_init_resources(nr, sys);
                        if (ret) {
                                kfree(sys);
                                break;
                        }

                        if (hw->scan)
                                sys->bus = hw->scan(nr, sys);
                        else
                                sys->bus = pci_scan_root_bus(parent, sys->busnr,
                                                hw->ops, sys, &sys->resources);

                        if (!sys->bus)
                                panic("PCI: unable to scan bus!");

                        busnr = sys->bus->busn_res.end + 1;

                        list_add(&sys->node, head);
                } else {
                        kfree(sys);
                        if (ret < 0)
                                break;
                }
        }
}

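/*
 * pci_common_init_dev() is the entry point used by a machine's PCI
 * implementation.  A minimal, purely illustrative sketch of a caller
 * (the foo_* names are hypothetical; pci_common_init(hw) is the
 * pci_common_init_dev(NULL, hw) wrapper, and the hw_pci fields shown are
 * the ones consumed by pcibios_init_hw() above):
 *
 *	static struct hw_pci foo_pci __initdata = {
 *		.nr_controllers	= 1,
 *		.ops		= &foo_pci_ops,
 *		.setup		= foo_pci_setup,
 *		.map_irq	= foo_map_irq,
 *	};
 *
 *	static int __init foo_pci_init(void)
 *	{
 *		if (machine_is_foo())
 *			pci_common_init(&foo_pci);
 *		return 0;
 *	}
 *	subsys_initcall(foo_pci_init);
 */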
void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
{
        struct pci_sys_data *sys;
        LIST_HEAD(head);

        pci_add_flags(PCI_REASSIGN_ALL_RSRC);
        if (hw->preinit)
                hw->preinit();
        pcibios_init_hw(parent, hw, &head);
        if (hw->postinit)
                hw->postinit();

        pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                if (!pci_has_flag(PCI_PROBE_ONLY)) {
                        /*
                         * Size the bridge windows.
                         */
                        pci_bus_size_bridges(bus);

                        /*
                         * Assign resources.
                         */
                        pci_bus_assign_resources(bus);
                }

                /*
                 * Tell drivers about devices found.
                 */
                pci_bus_add_devices(bus);
        }

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                /* Configure PCI Express settings */
                if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
                        struct pci_bus *child;

                        list_for_each_entry(child, &bus->children, node)
                                pcie_bus_configure_settings(child);
                }
        }
}

#ifndef CONFIG_PCI_HOST_ITE8152
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
#endif

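/*
 * Handle ARM-specific "pci=" kernel command-line options (reached via the
 * generic "pci=" option parser): "pci=debug" turns on the verbose
 * swizzle/IRQ-mapping output above, while "pci=firmware" keeps the
 * firmware-assigned resources (PCI_PROBE_ONLY) instead of letting the
 * kernel reassign them.
 */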
char * __init pcibios_setup(char *str)
{
        if (!strcmp(str, "debug")) {
                debug_pci = 1;
                return NULL;
        } else if (!strcmp(str, "firmware")) {
                pci_add_flags(PCI_PROBE_ONLY);
                return NULL;
        }
        return str;
}

/*
 * From arch/i386/kernel/pci-i386.c:
 *
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
                                resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        struct pci_sys_data *sys = dev->sysdata;
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO && start & 0x300)
                start = (start + 0x3ff) & ~0x3ff;

        start = (start + align - 1) & ~(align - 1);

        if (sys->align_resource)
                return sys->align_resource(dev, res, start, size, align);

        return start;
}

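/*
 * A worked example of the check above (illustration only): an I/O
 * resource with a candidate start of 0x2900 has bits 0x300 set, so it is
 * rounded up to (0x2900 + 0x3ff) & ~0x3ff = 0x2c00, which lands back in
 * the safe 0x000-0x0ff-modulo-0x400 region; a start of 0x2800 is already
 * safe and is left where it is.
 */
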
/**
 * pcibios_enable_device - Enable I/O and memory.
 * @dev: PCI device to be enabled
 * @mask: bitmask of BARs to enable
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        if (pci_has_flag(PCI_PROBE_ONLY))
                return 0;

        return pci_enable_resources(dev, mask);
}

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        if (mmap_state == pci_mmap_io)
                return -EINVAL;

        /*
         * Mark this as IO
         */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                             vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

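/*
 * Map the platform's PCI I/O space (starting at the given page frame
 * number) to the fixed virtual address PCI_IO_VIRT_BASE as a 64K device
 * mapping.  This is meant to be called early, from a platform's
 * map_io-time setup, since iotable_init() installs static mappings and
 * must run during that early phase (descriptive note; see the individual
 * platforms for exact usage).
 */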
void __init pci_map_io_early(unsigned long pfn)
{
        struct map_desc pci_io_desc = {
                .virtual        = PCI_IO_VIRT_BASE,
                .type           = MT_DEVICE,
                .length         = SZ_64K,
        };

        pci_io_desc.pfn = pfn;
        iotable_init(&pci_io_desc, 1);
}