linux/arch/ia64/pci/pci.c
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)           \
        (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)       \
        (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
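
/*
 * Worked example (illustrative values): seg 0, bus 2, devfn
 * PCI_DEVFN(3, 0) == 0x18, reg 0x40 packs to
 * (0 << 24) | (2 << 16) | (0x18 << 8) | 0x40 == 0x00021840; reaching
 * extended config space, e.g. reg 0x100 on the same device, requires
 * the SAL 3.2 layout: (0 << 28) | (2 << 20) | (0x18 << 12) | 0x100 ==
 * 0x00218100.
 */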

int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
              int reg, int len, u32 *value)
{
        u64 addr, data = 0;
        int mode, result;

        if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }

        result = ia64_sal_pci_config_read(addr, mode, len, &data);
        if (result != 0)
                return -EINVAL;

        *value = (u32) data;
        return 0;
}
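
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * read the 32-bit vendor/device ID dword of 0000:00:00.0:
 *
 *      u32 id;
 *
 *      if (raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id) == 0)
 *              pr_info("host bridge ID %08x\n", id);
 */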

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
               int reg, int len, u32 value)
{
        u64 addr;
        int mode, result;

        if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }
        result = ia64_sal_pci_config_write(addr, mode, len, value);
        if (result != 0)
                return -EINVAL;
        return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

struct pci_root_info {
        struct acpi_pci_root_info common;
        struct pci_controller controller;
        struct list_head io_resources;
};
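
/*
 * Embedding acpi_pci_root_info as 'common' lets the callbacks below
 * recover the enclosing structure from the pointer ACPI hands back:
 *
 *      struct pci_root_info *info =
 *              container_of(ci, struct pci_root_info, common);
 */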

static unsigned int new_space(u64 phys_base, int sparse)
{
        u64 mmio_base;
        int i;

        if (phys_base == 0)
                return 0;       /* legacy I/O port space */

        mmio_base = (u64) ioremap(phys_base, 0);
        for (i = 0; i < num_io_spaces; i++)
                if (io_space[i].mmio_base == mmio_base &&
                    io_space[i].sparse == sparse)
                        return i;

        if (num_io_spaces == MAX_IO_SPACES) {
                pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
                        MAX_IO_SPACES);
                return ~0;
        }

        i = num_io_spaces++;
        io_space[i].mmio_base = mmio_base;
        io_space[i].sparse = sparse;

        return i;
}

static int add_io_space(struct device *dev, struct pci_root_info *info,
                        struct resource_entry *entry)
{
        struct resource_entry *iospace;
        struct resource *resource, *res = entry->res;
        char *name;
        unsigned long base, min, max, base_port;
        unsigned int sparse = 0, space_nr, len;

        len = strlen(info->common.name) + 32;
        iospace = resource_list_create_entry(NULL, len);
        if (!iospace) {
                dev_err(dev, "PCI: No memory for %s I/O port space\n",
                        info->common.name);
                return -ENOMEM;
        }

        if (res->flags & IORESOURCE_IO_SPARSE)
                sparse = 1;
        space_nr = new_space(entry->offset, sparse);
        if (space_nr == ~0)
                goto free_resource;

        name = (char *)(iospace + 1);
        min = res->start - entry->offset;
        max = res->end - entry->offset;
        base = __pa(io_space[space_nr].mmio_base);
        base_port = IO_SPACE_BASE(space_nr);
        snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
                 base_port + min, base_port + max);

        /*
         * The SDM guarantees the legacy 0-64K space is sparse, but if the
         * mapping is done by the processor (not the bridge), ACPI may not
         * mark it as sparse.
         */
        if (space_nr == 0)
                sparse = 1;

        resource = iospace->res;
        resource->name  = name;
        resource->flags = IORESOURCE_MEM;
        resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
        resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
        if (insert_resource(&iomem_resource, resource)) {
                dev_err(dev,
                        "can't allocate host bridge io space resource %pR\n",
                        resource);
                goto free_resource;
        }

        entry->offset = base_port;
        res->start = min + base_port;
        res->end = max + base_port;
        resource_list_add_tail(iospace, &info->io_resources);

        return 0;

free_resource:
        resource_list_free_entry(iospace);
        return -ENOSPC;
}
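
/*
 * A sparse I/O space spreads each 4-byte port granule onto its own 4K
 * page. Assuming the IO_SPACE_SPARSE_ENCODING() definition in asm/io.h,
 * (((port >> 2) << 12) | (port & 0xfff)), port 0x1000 of a sparse
 * space, for example, ends up at MMIO offset (0x1000 >> 2) << 12 =
 * 0x400000 rather than 0x1000.
 */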

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
        return (res->flags & IORESOURCE_IO) &&
                res->start == 0xCF8 && res->end == 0xCFF;
}

static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
        struct device *dev = &ci->bridge->dev;
        struct pci_root_info *info;
        struct resource *res;
        struct resource_entry *entry, *tmp;
        int status;

        status = acpi_pci_probe_root_resources(ci);
        if (status > 0) {
                info = container_of(ci, struct pci_root_info, common);
                resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
                        res = entry->res;
                        if (res->flags & IORESOURCE_MEM) {
                                /*
                                 * HP's firmware has a hack to work around a
                                 * Windows bug. Ignore these tiny memory ranges.
                                 */
                                if (resource_size(res) <= 16) {
                                        resource_list_del(entry);
                                        insert_resource(&iomem_resource,
                                                        entry->res);
                                        resource_list_add_tail(entry,
                                                        &info->io_resources);
                                }
                        } else if (res->flags & IORESOURCE_IO) {
                                if (resource_is_pcicfg_ioport(entry->res))
                                        resource_list_destroy_entry(entry);
                                else if (add_io_space(dev, info, entry))
                                        resource_list_destroy_entry(entry);
                        }
                }
        }

        return status;
}

static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
        struct pci_root_info *info;
        struct resource_entry *entry, *tmp;

        info = container_of(ci, struct pci_root_info, common);
        resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
                release_resource(entry->res);
                resource_list_destroy_entry(entry);
        }
        kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
        .pci_ops = &pci_root_ops,
        .release_info = pci_acpi_root_release_info,
        .prepare_resources = pci_acpi_root_prepare_resources,
};
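
/*
 * acpi_pci_root_create() invokes .prepare_resources while evaluating the
 * host bridge _CRS, before the root bus is scanned; .release_info runs
 * when the bridge goes away and undoes the I/O space bookkeeping.
 */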

struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        struct pci_root_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&device->dev,
                        "pci_bus %04x:%02x: ignored (out of memory)\n",
                        root->segment, (int)root->secondary.start);
                return NULL;
        }

        info->controller.segment = root->segment;
        info->controller.companion = device;
        info->controller.node = acpi_get_node(device->handle);
        INIT_LIST_HEAD(&info->io_resources);
        return acpi_pci_root_create(root, &pci_acpi_root_ops,
                                    &info->common, &info->controller);
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
        /*
         * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
         * here, pci_create_root_bus() has been called by someone else and
         * sysdata is likely to be different from what we expect.  Let it go in
         * that case.
         */
        if (!bridge->dev.parent) {
                struct pci_controller *controller = bridge->bus->sysdata;
                ACPI_COMPANION_SET(&bridge->dev, controller->companion);
        }
        return 0;
}

void pcibios_fixup_device_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_resource(dev, idx);
        }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_bridge_resource(dev, idx);
        }
}

/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        if (b->self) {
                pci_read_bridge_bases(b);
                pcibios_fixup_bridge_resources(b->self);
        }
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
        platform_pci_fixup_bus(b);
}

void pcibios_add_bus(struct pci_bus *bus)
{
        acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        acpi_pci_remove_bus(bus);
}

void pcibios_set_master (struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}

int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
        int ret;

        ret = pci_enable_resources(dev, mask);
        if (ret < 0)
                return ret;

        if (!pci_dev_msi_enabled(dev))
                return acpi_pci_irq_enable(dev);
        return 0;
}

void
pcibios_disable_device (struct pci_dev *dev)
{
        BUG_ON(atomic_read(&dev->enable_cnt));
        if (!pci_dev_msi_enabled(dev))
                acpi_pci_irq_disable(dev);
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
        return (char *)__IA64_UNCACHED_OFFSET;
}
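
/*
 * __IA64_UNCACHED_OFFSET is the base of the uncached identity-mapped
 * region, so a legacy bus address such as 0xA0000 (the VGA frame
 * buffer) is reached at __IA64_UNCACHED_OFFSET + 0xA0000.
 */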

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                           enum pci_mmap_state mmap_state)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;
        char *addr;

        /* We only support mmap'ing of legacy memory space */
        if (mmap_state != pci_mmap_mem)
                return -ENOSYS;

        /*
         * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
         * for more details.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                *val = inb(port);
                break;
        case 2:
                *val = inw(port);
                break;
        case 4:
                *val = inl(port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
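
/*
 * Illustrative only: a hypothetical caller reading the VGA miscellaneous
 * output register (legacy port 0x3CC) through this interface; note the
 * routine returns the byte count on success:
 *
 *      u32 misc;
 *
 *      if (ia64_pci_legacy_read(bus, 0x3CC, &misc, 1) == 1)
 *              pr_debug("VGA misc output: %02x\n", misc);
 */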

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                outb(val, port);
                break;
        case 2:
                outw(val, port);
                break;
        case 4:
                outl(val, port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
        unsigned long levels, unique_caches;
        long status;
        pal_cache_config_info_t cci;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                        __func__, status);
                return;
        }

        status = ia64_pal_cache_config_info(levels - 1,
                                /* cache_type (data_or_unified)= */ 2, &cci);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
                        __func__, status);
                return;
        }
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
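
/*
 * pcci_line_size is the log2 of the line size in bytes, while
 * pci_dfl_cache_line_size is counted in 32-bit words: a 128-byte
 * outermost line (pcci_line_size == 7) gives (1 << 7) / 4 = 32.
 */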

static int __init pcibios_init(void)
{
        set_pci_dfl_cacheline_size();
        return 0;
}

subsys_initcall(pcibios_init);