/* linux/arch/ia64/pci/pci.c */
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
  13
  14#include <linux/acpi.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/pci.h>
  18#include <linux/pci-acpi.h>
  19#include <linux/init.h>
  20#include <linux/ioport.h>
  21#include <linux/slab.h>
  22#include <linux/spinlock.h>
  23#include <linux/bootmem.h>
  24#include <linux/export.h>
  25
  26#include <asm/machvec.h>
  27#include <asm/page.h>
  28#include <asm/io.h>
  29#include <asm/sal.h>
  30#include <asm/smp.h>
  31#include <asm/irq.h>
  32#include <asm/hw_irq.h>
  33
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

/* Classic SAL config address: 16-bit seg, 8-bit bus/devfn/reg fields. */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)           \
        (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

/* Extended layout: 12-bit register, so every field shifts up by 4 bits. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)       \
        (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
  47
  48int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
  49              int reg, int len, u32 *value)
  50{
  51        u64 addr, data = 0;
  52        int mode, result;
  53
  54        if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
  55                return -EINVAL;
  56
  57        if ((seg | reg) <= 255) {
  58                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
  59                mode = 0;
  60        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
  61                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
  62                mode = 1;
  63        } else {
  64                return -EINVAL;
  65        }
  66
  67        result = ia64_sal_pci_config_read(addr, mode, len, &data);
  68        if (result != 0)
  69                return -EINVAL;
  70
  71        *value = (u32) data;
  72        return 0;
  73}
  74
  75int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
  76               int reg, int len, u32 value)
  77{
  78        u64 addr;
  79        int mode, result;
  80
  81        if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
  82                return -EINVAL;
  83
  84        if ((seg | reg) <= 255) {
  85                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
  86                mode = 0;
  87        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
  88                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
  89                mode = 1;
  90        } else {
  91                return -EINVAL;
  92        }
  93        result = ia64_sal_pci_config_write(addr, mode, len, value);
  94        if (result != 0)
  95                return -EINVAL;
  96        return 0;
  97}
  98
/* pci_ops ->read hook: route config reads through the SAL accessor. */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                 devfn, where, size, value);
}
 105
/* pci_ops ->write hook: route config writes through the SAL accessor. */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                  devfn, where, size, value);
}
 112
/* Config-space accessors installed on every ia64 PCI root bus. */
struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};
 117
 118/* Called by ACPI when it finds a new root bus.  */
 119
 120static struct pci_controller *alloc_pci_controller(int seg)
 121{
 122        struct pci_controller *controller;
 123
 124        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 125        if (!controller)
 126                return NULL;
 127
 128        controller->segment = seg;
 129        controller->node = -1;
 130        return controller;
 131}
 132
/* Scratch state threaded through the ACPI _CRS walk for one root bridge. */
struct pci_root_info {
        struct acpi_device *bridge;             /* host bridge ACPI device */
        struct pci_controller *controller;      /* per-bridge ia64 state */
        struct list_head resources;             /* windows for pci_create_root_bus() */
        char *name;                             /* "PCI Bus dddd:bb" label */
};
 139
 140static unsigned int
 141new_space (u64 phys_base, int sparse)
 142{
 143        u64 mmio_base;
 144        int i;
 145
 146        if (phys_base == 0)
 147                return 0;       /* legacy I/O port space */
 148
 149        mmio_base = (u64) ioremap(phys_base, 0);
 150        for (i = 0; i < num_io_spaces; i++)
 151                if (io_space[i].mmio_base == mmio_base &&
 152                    io_space[i].sparse == sparse)
 153                        return i;
 154
 155        if (num_io_spaces == MAX_IO_SPACES) {
 156                printk(KERN_ERR "PCI: Too many IO port spaces "
 157                        "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
 158                return ~0;
 159        }
 160
 161        i = num_io_spaces++;
 162        io_space[i].mmio_base = mmio_base;
 163        io_space[i].sparse = sparse;
 164
 165        return i;
 166}
 167
/*
 * Register the I/O port window described by @addr: obtain (or reuse) an
 * io_space[] slot for its MMIO alias, insert the CPU-side MMIO range
 * into iomem_resource, and return the port-number base for the space
 * (IO_SPACE_BASE(space_nr)), or ~0 on failure.
 */
static u64 add_io_space(struct pci_root_info *info,
                        struct acpi_resource_address64 *addr)
{
        struct resource *resource;
        char *name;
        unsigned long base, min, max, base_port;
        unsigned int sparse = 0, space_nr, len;

        resource = kzalloc(sizeof(*resource), GFP_KERNEL);
        if (!resource) {
                printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
                        info->name);
                goto out;
        }

        /* Room for the "%s I/O Ports %08lx-%08lx" string built below. */
        len = strlen(info->name) + 32;
        name = kzalloc(len, GFP_KERNEL);
        if (!name) {
                printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
                        info->name);
                goto free_resource;
        }

        /* Port range of the window, relative to the space's own base. */
        min = addr->minimum;
        max = min + addr->address_length - 1;
        if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
                sparse = 1;

        space_nr = new_space(addr->translation_offset, sparse);
        if (space_nr == ~0)
                goto free_name;

        base = __pa(io_space[space_nr].mmio_base);
        base_port = IO_SPACE_BASE(space_nr);
        snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
                base_port + min, base_port + max);

        /*
         * The SDM guarantees the legacy 0-64K space is sparse, but if the
         * mapping is done by the processor (not the bridge), ACPI may not
         * mark it as sparse.
         */
        if (space_nr == 0)
                sparse = 1;

        /* Describe the CPU-side MMIO alias of the port range. */
        resource->name  = name;
        resource->flags = IORESOURCE_MEM;
        resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
        resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
        insert_resource(&iomem_resource, resource);

        return base_port;

free_name:
        kfree(name);
free_resource:
        kfree(resource);
out:
        return ~0;
}
 228
 229static acpi_status resource_to_window(struct acpi_resource *resource,
 230                                      struct acpi_resource_address64 *addr)
 231{
 232        acpi_status status;
 233
 234        /*
 235         * We're only interested in _CRS descriptors that are
 236         *      - address space descriptors for memory or I/O space
 237         *      - non-zero size
 238         *      - producers, i.e., the address space is routed downstream,
 239         *        not consumed by the bridge itself
 240         */
 241        status = acpi_resource_to_address64(resource, addr);
 242        if (ACPI_SUCCESS(status) &&
 243            (addr->resource_type == ACPI_MEMORY_RANGE ||
 244             addr->resource_type == ACPI_IO_RANGE) &&
 245            addr->address_length &&
 246            addr->producer_consumer == ACPI_PRODUCER)
 247                return AE_OK;
 248
 249        return AE_ERROR;
 250}
 251
 252static acpi_status count_window(struct acpi_resource *resource, void *data)
 253{
 254        unsigned int *windows = (unsigned int *) data;
 255        struct acpi_resource_address64 addr;
 256        acpi_status status;
 257
 258        status = resource_to_window(resource, &addr);
 259        if (ACPI_SUCCESS(status))
 260                (*windows)++;
 261
 262        return AE_OK;
 263}
 264
/*
 * acpi_walk_resources() callback: turn one _CRS window descriptor into
 * a pci_window on info->controller and insert it into the matching
 * resource tree.  Always returns AE_OK so the walk continues past
 * non-window or failed entries.
 */
static acpi_status add_window(struct acpi_resource *res, void *data)
{
        struct pci_root_info *info = data;
        struct pci_window *window;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags, offset = 0;
        struct resource *root;

        /* Return AE_OK for non-window resources to keep scanning for more */
        status = resource_to_window(res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                flags = IORESOURCE_MEM;
                root = &iomem_resource;
                offset = addr.translation_offset;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                flags = IORESOURCE_IO;
                root = &ioport_resource;
                /* Registers the port space; returns its port-number base. */
                offset = add_io_space(info, &addr);
                if (offset == ~0)
                        return AE_OK;
        } else
                return AE_OK;

        /* count_window() sized controller->window[]; take the next slot. */
        window = &info->controller->window[info->controller->windows++];
        window->resource.name = info->name;
        window->resource.flags = flags;
        /* Stored in CPU address space: PCI address plus translation offset. */
        window->resource.start = addr.minimum + offset;
        window->resource.end = window->resource.start + addr.address_length - 1;
        window->offset = offset;

        if (insert_resource(root, &window->resource)) {
                dev_err(&info->bridge->dev,
                        "can't allocate host bridge window %pR\n",
                        &window->resource);
        } else {
                if (offset)
                        dev_info(&info->bridge->dev, "host bridge window %pR "
                                 "(PCI address [%#llx-%#llx])\n",
                                 &window->resource,
                                 window->resource.start - offset,
                                 window->resource.end - offset);
                else
                        dev_info(&info->bridge->dev,
                                 "host bridge window %pR\n",
                                 &window->resource);
        }

        /* HP's firmware has a hack to work around a Windows bug.
         * Ignore these tiny memory ranges */
        if (!((window->resource.flags & IORESOURCE_MEM) &&
              (window->resource.end - window->resource.start < 16)))
                pci_add_resource_offset(&info->resources, &window->resource,
                                        window->offset);

        return AE_OK;
}
 325
 326struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 327{
 328        struct acpi_device *device = root->device;
 329        int domain = root->segment;
 330        int bus = root->secondary.start;
 331        struct pci_controller *controller;
 332        unsigned int windows = 0;
 333        struct pci_root_info info;
 334        struct pci_bus *pbus;
 335        char *name;
 336        int pxm;
 337
 338        controller = alloc_pci_controller(domain);
 339        if (!controller)
 340                goto out1;
 341
 342        controller->acpi_handle = device->handle;
 343
 344        pxm = acpi_get_pxm(controller->acpi_handle);
 345#ifdef CONFIG_NUMA
 346        if (pxm >= 0)
 347                controller->node = pxm_to_node(pxm);
 348#endif
 349
 350        INIT_LIST_HEAD(&info.resources);
 351        /* insert busn resource at first */
 352        pci_add_resource(&info.resources, &root->secondary);
 353        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
 354                        &windows);
 355        if (windows) {
 356                controller->window =
 357                        kzalloc_node(sizeof(*controller->window) * windows,
 358                                     GFP_KERNEL, controller->node);
 359                if (!controller->window)
 360                        goto out2;
 361
 362                name = kmalloc(16, GFP_KERNEL);
 363                if (!name)
 364                        goto out3;
 365
 366                sprintf(name, "PCI Bus %04x:%02x", domain, bus);
 367                info.bridge = device;
 368                info.controller = controller;
 369                info.name = name;
 370                acpi_walk_resources(device->handle, METHOD_NAME__CRS,
 371                        add_window, &info);
 372        }
 373        /*
 374         * See arch/x86/pci/acpi.c.
 375         * The desired pci bus might already be scanned in a quirk. We
 376         * should handle the case here, but it appears that IA64 hasn't
 377         * such quirk. So we just ignore the case now.
 378         */
 379        pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
 380                                   &info.resources);
 381        if (!pbus) {
 382                pci_free_resource_list(&info.resources);
 383                return NULL;
 384        }
 385
 386        pci_scan_child_bus(pbus);
 387        return pbus;
 388
 389out3:
 390        kfree(controller->window);
 391out2:
 392        kfree(controller);
 393out1:
 394        return NULL;
 395}
 396
/* Attach the controller's ACPI handle to the new host bridge device. */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
        struct pci_controller *controller = bridge->bus->sysdata;

        ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
        return 0;
}
 404
 405static int is_valid_resource(struct pci_dev *dev, int idx)
 406{
 407        unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
 408        struct resource *devr = &dev->resource[idx], *busr;
 409
 410        if (!dev->bus)
 411                return 0;
 412
 413        pci_bus_for_each_resource(dev->bus, busr, i) {
 414                if (!busr || ((busr->flags ^ devr->flags) & type_mask))
 415                        continue;
 416                if ((devr->start) && (devr->start >= busr->start) &&
 417                                (devr->end <= busr->end))
 418                        return 1;
 419        }
 420        return 0;
 421}
 422
 423static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
 424{
 425        int i;
 426
 427        for (i = start; i < limit; i++) {
 428                if (!dev->resource[i].flags)
 429                        continue;
 430                if ((is_valid_resource(dev, i)))
 431                        pci_claim_resource(dev, i);
 432        }
 433}
 434
/* Claim a device's BAR resources (everything below the bridge windows). */
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
        pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
 440
/* Claim a bridge's window resources (the PCI_BRIDGE_RESOURCES range). */
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
        pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
 445
/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        /* For a bridge bus, read its windows and claim them first. */
        if (b->self) {
                pci_read_bridge_bases(b);
                pcibios_fixup_bridge_resources(b->self);
        }
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
        /* Give the machine vector a chance at platform-specific fixups. */
        platform_pci_fixup_bus(b);
}
 461
/* Arch hook: notify ACPI that a PCI bus was added. */
void pcibios_add_bus(struct pci_bus *bus)
{
        acpi_pci_add_bus(bus);
}
 466
/* Arch hook: notify ACPI that a PCI bus is being removed. */
void pcibios_remove_bus(struct pci_bus *bus)
{
        acpi_pci_remove_bus(bus);
}
 471
/* Arch hook for enabling bus mastering; nothing extra needed on ia64. */
void pcibios_set_master (struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
 476
 477int
 478pcibios_enable_device (struct pci_dev *dev, int mask)
 479{
 480        int ret;
 481
 482        ret = pci_enable_resources(dev, mask);
 483        if (ret < 0)
 484                return ret;
 485
 486        if (!dev->msi_enabled)
 487                return acpi_pci_irq_enable(dev);
 488        return 0;
 489}
 490
/* Undo pcibios_enable_device(): tear down the ACPI-routed legacy IRQ. */
void
pcibios_disable_device (struct pci_dev *dev)
{
        /* Caller must have balanced enable/disable counts already. */
        BUG_ON(atomic_read(&dev->enable_cnt));
        if (!dev->msi_enabled)
                acpi_pci_irq_disable(dev);
}
 498
/* Arch hook for resource alignment: no extra alignment required. */
resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
                        resource_size_t size, resource_size_t align)
{
        return res->start;
}
 505
/*
 * Map PCI memory space of @dev into userspace.  I/O space mmap is
 * rejected because ia64 cannot reach I/O ports with ordinary loads and
 * stores.  Returns 0 on success or a negative errno.
 */
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
                     enum pci_mmap_state mmap_state, int write_combine)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;

        /*
         * I/O space cannot be accessed via normal processor loads and
         * stores on this platform.
         */
        if (mmap_state == pci_mmap_io)
                /*
                 * XXX we could relax this for I/O spaces for which ACPI
                 * indicates that the space is 1-to-1 mapped.  But at the
                 * moment, we don't support multiple PCI address spaces and
                 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
                 */
                return -EINVAL;

        /* Reject ranges that would alias kernel memory attributes. */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        /*
         * If the user requested WC, the kernel uses UC or WC for this region,
         * and the chipset supports WC, we can use WC. Otherwise, we have to
         * use the same attribute the kernel uses.
         */
        if (write_combine &&
            ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
             (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
            efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
 551
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
        /* Generic case: legacy space starts at the uncached identity map. */
        return (char *)__IA64_UNCACHED_OFFSET;
}
 568
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.  Returns 0 on success or a negative
 * errno (-ENOSYS for I/O space requests, which are unsupported here).
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                           enum pci_mmap_state mmap_state)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;
        char *addr;

        /* We only support mmap'ing of legacy memory space */
        if (mmap_state != pci_mmap_mem)
                return -ENOSYS;

        /*
         * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
         * for more details.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        /* Machine-vector hook; may return an ERR_PTR on failure. */
        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        /* Rebase the user's offset onto the legacy memory base. */
        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
 611
 612/**
 613 * ia64_pci_legacy_read - read from legacy I/O space
 614 * @bus: bus to read
 615 * @port: legacy port value
 616 * @val: caller allocated storage for returned value
 617 * @size: number of bytes to read
 618 *
 619 * Simply reads @size bytes from @port and puts the result in @val.
 620 *
 621 * Again, this (and the write routine) are generic versions that can be
 622 * overridden by the platform.  This is necessary on platforms that don't
 623 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 624 */
 625int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
 626{
 627        int ret = size;
 628
 629        switch (size) {
 630        case 1:
 631                *val = inb(port);
 632                break;
 633        case 2:
 634                *val = inw(port);
 635                break;
 636        case 4:
 637                *val = inl(port);
 638                break;
 639        default:
 640                ret = -EINVAL;
 641                break;
 642        }
 643
 644        return ret;
 645}
 646
 647/**
 648 * ia64_pci_legacy_write - perform a legacy I/O write
 649 * @bus: bus pointer
 650 * @port: port to write
 651 * @val: value to write
 652 * @size: number of bytes to write from @val
 653 *
 654 * Simply writes @size bytes of @val to @port.
 655 */
 656int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 657{
 658        int ret = size;
 659
 660        switch (size) {
 661        case 1:
 662                outb(val, port);
 663                break;
 664        case 2:
 665                outw(val, port);
 666                break;
 667        case 4:
 668                outl(val, port);
 669                break;
 670        default:
 671                ret = -EINVAL;
 672                break;
 673        }
 674
 675        return ret;
 676}
 677
/**
 * set_pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
        unsigned long levels, unique_caches;
        long status;
        pal_cache_config_info_t cci;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
                        "(status=%ld)\n", __func__, status);
                return;         /* leave the kernel default in place */
        }

        /* Query the outermost (last-level) data/unified cache. */
        status = ia64_pal_cache_config_info(levels - 1,
                                /* cache_type (data_or_unified)= */ 2, &cci);
        if (status != 0) {
                printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
                        "(status=%ld)\n", __func__, status);
                return;
        }
        /* pcci_line_size is log2 of bytes; PCI wants units of 4 bytes. */
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
 708
/*
 * Compute the narrowest DMA mask that still covers all of RAM.  The
 * highest RAM byte address (derived from max_pfn) is split into low and
 * high 32-bit halves, and the relevant half is rounded up to an
 * all-ones mask.
 */
u64 ia64_dma_get_required_mask(struct device *dev)
{
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
        u64 mask;

        if (!high_totalram) {
                /* convert to mask just covering totalram */
                /*
                 * NOTE(review): if low_totalram were 0 here (max_pfn == 1),
                 * fls() returns 0 and the shift below would be by -1;
                 * presumably unreachable on real configurations — verify.
                 */
                low_totalram = (1 << (fls(low_totalram) - 1));
                low_totalram += low_totalram - 1;
                mask = low_totalram;
        } else {
                high_totalram = (1 << (fls(high_totalram) - 1));
                high_totalram += high_totalram - 1;
                /* High half rounded up, low half fully set. */
                mask = (((u64)high_totalram) << 32) + 0xffffffff;
        }
        return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
 728
/* Delegate to the machine vector (SN2 and generic differ on ia64). */
u64 dma_get_required_mask(struct device *dev)
{
        return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
 734
/* Subsystem init: pick the default PCI cacheline size from PAL info. */
static int __init pcibios_init(void)
{
        set_pci_dfl_cacheline_size();
        return 0;
}

subsys_initcall(pcibios_init);
 742