linux/arch/ia64/pci/pci.c
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)           \
        (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)       \
        (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

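/*
 * Read @len bytes of config space for (@seg, @bus, @devfn) at offset @reg
 * into @value.  The compatibility encoding is used when both the segment
 * and the register number fit in 8 bits; otherwise the SAL 3.2 extended
 * encoding is required.  Returns 0 on success, -EINVAL on a bad address
 * or a SAL failure.
 */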
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
              int reg, int len, u32 *value)
{
        u64 addr, data = 0;
        int mode, result;

        if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }

        result = ia64_sal_pci_config_read(addr, mode, len, &data);
        if (result != 0)
                return -EINVAL;

        *value = (u32) data;
        return 0;
}

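/* Write counterpart of raw_pci_read(); same address encoding and checks. */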
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
               int reg, int len, u32 value)
{
        u64 addr;
        int mode, result;

        if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }
        result = ia64_sal_pci_config_write(addr, mode, len, value);
        if (result != 0)
                return -EINVAL;
        return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                  devfn, where, size, value);
}

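/*
 * struct pci_ops for every root bus we scan; wraps the raw accessors
 * with the bus's domain and bus number.
 */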
struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
        struct pci_controller *controller;

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                return NULL;

        controller->segment = seg;
        controller->node = -1;
        return controller;
}

struct pci_root_info {
        struct acpi_device *bridge;
        struct pci_controller *controller;
        char *name;
};

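/*
 * Find (or allocate) the io_space[] entry for the port space at @phys_base
 * with the given sparseness.  Returns the space number, or ~0 when the
 * io_space[] table is full.
 */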
static unsigned int
new_space (u64 phys_base, int sparse)
{
        u64 mmio_base;
        int i;

        if (phys_base == 0)
                return 0;       /* legacy I/O port space */

        mmio_base = (u64) ioremap(phys_base, 0);
        for (i = 0; i < num_io_spaces; i++)
                if (io_space[i].mmio_base == mmio_base &&
                    io_space[i].sparse == sparse)
                        return i;

        if (num_io_spaces == MAX_IO_SPACES) {
                printk(KERN_ERR "PCI: Too many IO port spaces "
                        "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
                return ~0;
        }

        i = num_io_spaces++;
        io_space[i].mmio_base = mmio_base;
        io_space[i].sparse = sparse;

        return i;
}

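/*
 * Register the MMIO alias for an ACPI I/O port window and insert it into
 * the iomem resource tree.  Returns the base port number for the window,
 * or ~0 on failure.
 */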
static u64 __devinit
add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
{
        struct resource *resource;
        char *name;
        unsigned long base, min, max, base_port;
        unsigned int sparse = 0, space_nr, len;

        resource = kzalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource) {
                printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
                        info->name);
                goto out;
        }

        len = strlen(info->name) + 32;
        name = kzalloc(len, GFP_KERNEL);
        if (!name) {
                printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
                        info->name);
                goto free_resource;
        }

        min = addr->minimum;
        max = min + addr->address_length - 1;
        if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
                sparse = 1;

        space_nr = new_space(addr->translation_offset, sparse);
        if (space_nr == ~0)
                goto free_name;

        base = __pa(io_space[space_nr].mmio_base);
        base_port = IO_SPACE_BASE(space_nr);
        snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
                base_port + min, base_port + max);

        /*
         * The SDM guarantees the legacy 0-64K space is sparse, but if the
         * mapping is done by the processor (not the bridge), ACPI may not
         * mark it as sparse.
         */
        if (space_nr == 0)
                sparse = 1;

        resource->name  = name;
        resource->flags = IORESOURCE_MEM;
        resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
        resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
        insert_resource(&iomem_resource, resource);

        return base_port;

free_name:
        kfree(name);
free_resource:
        kfree(resource);
out:
        return ~0;
}

static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
        struct acpi_resource_address64 *addr)
{
        acpi_status status;

        /*
         * We're only interested in _CRS descriptors that are
         *      - address space descriptors for memory or I/O space
         *      - non-zero size
         *      - producers, i.e., the address space is routed downstream,
         *        not consumed by the bridge itself
         */
        status = acpi_resource_to_address64(resource, addr);
        if (ACPI_SUCCESS(status) &&
            (addr->resource_type == ACPI_MEMORY_RANGE ||
             addr->resource_type == ACPI_IO_RANGE) &&
            addr->address_length &&
            addr->producer_consumer == ACPI_PRODUCER)
                return AE_OK;

        return AE_ERROR;
}

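/*
 * acpi_walk_resources() callback: count the _CRS windows so that
 * pci_acpi_scan_root() can size controller->window[] before filling it.
 */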
static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
        unsigned int *windows = (unsigned int *) data;
        struct acpi_resource_address64 addr;
        acpi_status status;

        status = resource_to_window(resource, &addr);
        if (ACPI_SUCCESS(status))
                (*windows)++;

        return AE_OK;
}

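/*
 * acpi_walk_resources() callback: turn a _CRS window into a struct
 * pci_window on the controller and insert it into the resource tree.
 */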
static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
        struct pci_root_info *info = data;
        struct pci_window *window;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags, offset = 0;
        struct resource *root;

        /* Return AE_OK for non-window resources to keep scanning for more */
        status = resource_to_window(res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                flags = IORESOURCE_MEM;
                root = &iomem_resource;
                offset = addr.translation_offset;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                flags = IORESOURCE_IO;
                root = &ioport_resource;
                offset = add_io_space(info, &addr);
                if (offset == ~0)
                        return AE_OK;
        } else
                return AE_OK;

        window = &info->controller->window[info->controller->windows++];
        window->resource.name = info->name;
        window->resource.flags = flags;
        window->resource.start = addr.minimum + offset;
        window->resource.end = window->resource.start + addr.address_length - 1;
        window->resource.child = NULL;
        window->offset = offset;

        if (insert_resource(root, &window->resource)) {
                dev_err(&info->bridge->dev,
                        "can't allocate host bridge window %pR\n",
                        &window->resource);
        } else {
                if (offset)
                        dev_info(&info->bridge->dev, "host bridge window %pR "
                                 "(PCI address [%#llx-%#llx])\n",
                                 &window->resource,
                                 window->resource.start - offset,
                                 window->resource.end - offset);
                else
                        dev_info(&info->bridge->dev,
                                 "host bridge window %pR\n",
                                 &window->resource);
        }

        return AE_OK;
}

static void __devinit
pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
{
        int i;

        pci_bus_remove_resources(bus);
        for (i = 0; i < ctrl->windows; i++) {
                struct resource *res = &ctrl->window[i].resource;
                /* HP's firmware has a hack to work around a Windows bug.
                 * Ignore these tiny memory ranges. */
                if ((res->flags & IORESOURCE_MEM) &&
                    (res->end - res->start < 16))
                        continue;
                pci_bus_add_resource(bus, res, 0);
        }
}

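/*
 * ACPI entry point for each host bridge: allocate a pci_controller, parse
 * the bridge's _CRS windows, and scan the root bus with pci_root_ops.
 */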
struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        int domain = root->segment;
        int bus = root->secondary.start;
        struct pci_controller *controller;
        unsigned int windows = 0;
        struct pci_bus *pbus;
        char *name;
        int pxm;

        controller = alloc_pci_controller(domain);
        if (!controller)
                goto out1;

        controller->acpi_handle = device->handle;

        pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
        if (pxm >= 0)
                controller->node = pxm_to_node(pxm);
#endif

        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
                        &windows);
        if (windows) {
                struct pci_root_info info;

                controller->window =
                        kmalloc_node(sizeof(*controller->window) * windows,
                                     GFP_KERNEL, controller->node);
                if (!controller->window)
                        goto out2;

                name = kmalloc(16, GFP_KERNEL);
                if (!name)
                        goto out3;

                sprintf(name, "PCI Bus %04x:%02x", domain, bus);
                info.bridge = device;
                info.controller = controller;
                info.name = name;
                acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                        add_window, &info);
        }
        /*
         * See arch/x86/pci/acpi.c.
         * The desired pci bus might already have been scanned in a quirk.
         * We should handle that case here, but it appears that IA64 has no
         * such quirk, so we just ignore it for now.
         */
        pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);

        return pbus;

out3:
        kfree(controller->window);
out2:
        kfree(controller);
out1:
        return NULL;
}

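/*
 * Translate a CPU-side resource to the PCI bus address the device sees by
 * subtracting the offset of the enclosing host bridge window.
 */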
void pcibios_resource_to_bus(struct pci_dev *dev,
                struct pci_bus_region *region, struct resource *res)
{
        struct pci_controller *controller = PCI_CONTROLLER(dev);
        unsigned long offset = 0;
        int i;

        for (i = 0; i < controller->windows; i++) {
                struct pci_window *window = &controller->window[i];
                if (!(window->resource.flags & res->flags))
                        continue;
                if (window->resource.start > res->start)
                        continue;
                if (window->resource.end < res->end)
                        continue;
                offset = window->offset;
                break;
        }

        region->start = res->start - offset;
        region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

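/* Inverse of pcibios_resource_to_bus(): PCI bus address to CPU resource. */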
void pcibios_bus_to_resource(struct pci_dev *dev,
                struct resource *res, struct pci_bus_region *region)
{
        struct pci_controller *controller = PCI_CONTROLLER(dev);
        unsigned long offset = 0;
        int i;

        for (i = 0; i < controller->windows; i++) {
                struct pci_window *window = &controller->window[i];
                if (!(window->resource.flags & res->flags))
                        continue;
                if (window->resource.start - window->offset > region->start)
                        continue;
                if (window->resource.end - window->offset < region->end)
                        continue;
                offset = window->offset;
                break;
        }

        res->start = region->start + offset;
        res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);

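/*
 * A device resource is considered valid only when its start is non-zero
 * and it lies entirely within one of its bus's windows of the same type.
 */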
static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
{
        unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
        struct resource *devr = &dev->resource[idx], *busr;

        if (!dev->bus)
                return 0;

        pci_bus_for_each_resource(dev->bus, busr, i) {
                if (!busr || ((busr->flags ^ devr->flags) & type_mask))
                        continue;
                if ((devr->start) && (devr->start >= busr->start) &&
                                (devr->end <= busr->end))
                        return 1;
        }
        return 0;
}

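/*
 * Convert the firmware-assigned bus addresses of resources [start, limit)
 * to CPU addresses and claim the ones that look valid.
 */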
static void __devinit
pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
        struct pci_bus_region region;
        int i;

        for (i = start; i < limit; i++) {
                if (!dev->resource[i].flags)
                        continue;
                region.start = dev->resource[i].start;
                region.end = dev->resource[i].end;
                pcibios_bus_to_resource(dev, &dev->resource[i], &region);
                if ((is_valid_resource(dev, i)))
                        pci_claim_resource(dev, i);
        }
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
        pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
        pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
        struct pci_dev *dev;

        if (b->self) {
                pci_read_bridge_bases(b);
                pcibios_fixup_bridge_resources(b->self);
        } else {
                pcibios_setup_root_windows(b, b->sysdata);
        }
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
        platform_pci_fixup_bus(b);

        return;
}

void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

        /* ??? FIXME -- record old value for shutdown.  */
}

int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
        int ret;

        ret = pci_enable_resources(dev, mask);
        if (ret < 0)
                return ret;

        if (!dev->msi_enabled)
                return acpi_pci_irq_enable(dev);
        return 0;
}

void
pcibios_disable_device (struct pci_dev *dev)
{
        BUG_ON(atomic_read(&dev->enable_cnt));
        if (!dev->msi_enabled)
                acpi_pci_irq_disable(dev);
}

resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
                        resource_size_t size, resource_size_t align)
{
        return res->start;
}

/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup (char *str)
{
        return str;
}

int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
                     enum pci_mmap_state mmap_state, int write_combine)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;

        /*
         * I/O space cannot be accessed via normal processor loads and
         * stores on this platform.
         */
        if (mmap_state == pci_mmap_io)
                /*
                 * XXX we could relax this for I/O spaces for which ACPI
                 * indicates that the space is 1-to-1 mapped.  But at the
                 * moment, we don't support multiple PCI address spaces and
                 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
                 */
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        /*
         * If the user requested WC, the kernel already maps this region
         * UC or WC, and the chipset supports WC, then we can use WC.
         * Otherwise, we have to use the same attribute the kernel uses.
         */
        if (write_combine &&
            ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
             (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
            efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
        return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: only pci_mmap_mem (legacy memory space) is supported
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                           enum pci_mmap_state mmap_state)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;
        char *addr;

        /* We only support mmap'ing of legacy memory space */
        if (mmap_state != pci_mmap_mem)
                return -ENOSYS;

        /*
         * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
         * for more details.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                *val = inb(port);
                break;
        case 2:
                *val = inw(port);
                break;
        case 4:
                *val = inl(port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                outb(val, port);
                break;
        case 2:
                outw(val, port);
                break;
        case 4:
                outl(val, port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
        unsigned long levels, unique_caches;
        long status;
        pal_cache_config_info_t cci;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
                        "(status=%ld)\n", __func__, status);
                return;
        }

        status = ia64_pal_cache_config_info(levels - 1,
                                /* cache_type (data_or_unified)= */ 2, &cci);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
                        "(status=%ld)\n", __func__, status);
                return;
        }
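        /*
         * pcci_line_size is the log2 line size in bytes; the PCI cache
         * line size register counts 32-bit words, hence the divide by 4.
         */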
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

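/*
 * Compute the smallest DMA mask that covers all of RAM, handling the low
 * and high 32-bit halves of the highest physical address separately.
 */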
u64 ia64_dma_get_required_mask(struct device *dev)
{
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
        u64 mask;

        if (!high_totalram) {
                /* convert to mask just covering totalram */
                low_totalram = (1 << (fls(low_totalram) - 1));
                low_totalram += low_totalram - 1;
                mask = low_totalram;
        } else {
                high_totalram = (1 << (fls(high_totalram) - 1));
                high_totalram += high_totalram - 1;
                mask = (((u64)high_totalram) << 32) + 0xffffffff;
        }
        return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);

u64 dma_get_required_mask(struct device *dev)
{
        return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init pcibios_init(void)
{
        set_pci_dfl_cacheline_size();
        return 0;
}

subsys_initcall(pcibios_init);