linux/arch/ia64/pci/pci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)           \
        (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)       \
        (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

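/*
 * In the compact encoding, segment, bus, devfn, and register each take
 * 8 bits (register offsets 0-255).  The extended encoding widens the
 * register field to 12 bits (offsets up to 4095) and the segment field
 * to 16 bits, as extended config space requires.
 */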
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
              int reg, int len, u32 *value)
{
        u64 addr, data = 0;
        int mode, result;

        if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }

        result = ia64_sal_pci_config_read(addr, mode, len, &data);
        if (result != 0)
                return -EINVAL;

        *value = (u32) data;
        return 0;
}

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
               int reg, int len, u32 value)
{
        u64 addr;
        int mode, result;

        if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }
        result = ia64_sal_pci_config_write(addr, mode, len, value);
        if (result != 0)
                return -EINVAL;
        return 0;
}

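/*
 * pci_ops wrappers: extract the segment and bus number from the
 * pci_bus and defer to the raw SAL accessors above.
 */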
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};

struct pci_root_info {
        struct acpi_pci_root_info common;
        struct pci_controller controller;
        struct list_head io_resources;
};

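/*
 * Find (or create) the io_space[] entry for the given port-space MMIO
 * base and sparseness, returning its index.  Index 0 is the legacy
 * I/O port space; ~0 is returned when io_space[] is full.
 */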
static unsigned int new_space(u64 phys_base, int sparse)
{
        u64 mmio_base;
        int i;

        if (phys_base == 0)
                return 0;       /* legacy I/O port space */

        mmio_base = (u64) ioremap(phys_base, 0);
        for (i = 0; i < num_io_spaces; i++)
                if (io_space[i].mmio_base == mmio_base &&
                    io_space[i].sparse == sparse)
                        return i;

        if (num_io_spaces == MAX_IO_SPACES) {
                pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
                       MAX_IO_SPACES);
                return ~0;
        }

        i = num_io_spaces++;
        io_space[i].mmio_base = mmio_base;
        io_space[i].sparse = sparse;

        return i;
}

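/*
 * Translate an ACPI I/O port window into an MMIO resource, insert it
 * into iomem_resource, and rewrite the entry so the PCI core sees
 * port numbers relative to this space's IO_SPACE_BASE().
 */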
static int add_io_space(struct device *dev, struct pci_root_info *info,
                        struct resource_entry *entry)
{
        struct resource_entry *iospace;
        struct resource *resource, *res = entry->res;
        char *name;
        unsigned long base, min, max, base_port;
        unsigned int sparse = 0, space_nr, len;

        len = strlen(info->common.name) + 32;
        iospace = resource_list_create_entry(NULL, len);
        if (!iospace) {
                dev_err(dev, "PCI: No memory for %s I/O port space\n",
                        info->common.name);
                return -ENOMEM;
        }

        if (res->flags & IORESOURCE_IO_SPARSE)
                sparse = 1;
        space_nr = new_space(entry->offset, sparse);
        if (space_nr == ~0)
                goto free_resource;

        name = (char *)(iospace + 1);
        min = res->start - entry->offset;
        max = res->end - entry->offset;
        base = __pa(io_space[space_nr].mmio_base);
        base_port = IO_SPACE_BASE(space_nr);
        snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
                 base_port + min, base_port + max);

        /*
         * The SDM guarantees the legacy 0-64K space is sparse, but if the
         * mapping is done by the processor (not the bridge), ACPI may not
         * mark it as sparse.
         */
        if (space_nr == 0)
                sparse = 1;

        resource = iospace->res;
        resource->name  = name;
        resource->flags = IORESOURCE_MEM;
        resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
        resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
        if (insert_resource(&iomem_resource, resource)) {
                dev_err(dev,
                        "can't allocate host bridge io space resource %pR\n",
                        resource);
                goto free_resource;
        }

        entry->offset = base_port;
        res->start = min + base_port;
        res->end = max + base_port;
        resource_list_add_tail(iospace, &info->io_resources);

        return 0;

free_resource:
        resource_list_free_entry(iospace);
        return -ENOSPC;
}

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
        return (res->flags & IORESOURCE_IO) &&
                res->start == 0xCF8 && res->end == 0xCFF;
}

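/*
 * Filter the windows reported by ACPI before handing them to the PCI
 * core: tiny MMIO ranges (an HP firmware workaround) are claimed here
 * instead, the 0xCF8-0xCFF config ports are dropped, and each remaining
 * I/O port window is mapped via add_io_space().
 */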
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
        struct device *dev = &ci->bridge->dev;
        struct pci_root_info *info;
        struct resource *res;
        struct resource_entry *entry, *tmp;
        int status;

        status = acpi_pci_probe_root_resources(ci);
        if (status > 0) {
                info = container_of(ci, struct pci_root_info, common);
                resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
                        res = entry->res;
                        if (res->flags & IORESOURCE_MEM) {
                                /*
                                 * HP's firmware has a hack to work around a
                                 * Windows bug. Ignore these tiny memory ranges.
                                 */
                                if (resource_size(res) <= 16) {
                                        resource_list_del(entry);
                                        insert_resource(&iomem_resource,
                                                        entry->res);
                                        resource_list_add_tail(entry,
                                                        &info->io_resources);
                                }
                        } else if (res->flags & IORESOURCE_IO) {
                                if (resource_is_pcicfg_ioport(entry->res))
                                        resource_list_destroy_entry(entry);
                                else if (add_io_space(dev, info, entry))
                                        resource_list_destroy_entry(entry);
                        }
                }
        }

        return status;
}

static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
        struct pci_root_info *info;
        struct resource_entry *entry, *tmp;

        info = container_of(ci, struct pci_root_info, common);
        resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
                release_resource(entry->res);
                resource_list_destroy_entry(entry);
        }
        kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
        .pci_ops = &pci_root_ops,
        .release_info = pci_acpi_root_release_info,
        .prepare_resources = pci_acpi_root_prepare_resources,
};

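/*
 * Entry point from the ACPI core: allocate the per-bridge bookkeeping
 * and let acpi_pci_root_create() build and scan the root bus.
 */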
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        struct pci_root_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&device->dev,
                        "pci_bus %04x:%02x: ignored (out of memory)\n",
                        root->segment, (int)root->secondary.start);
                return NULL;
        }

        info->controller.segment = root->segment;
        info->controller.companion = device;
        info->controller.node = acpi_get_node(device->handle);
        INIT_LIST_HEAD(&info->io_resources);
        return acpi_pci_root_create(root, &pci_acpi_root_ops,
                                    &info->common, &info->controller);
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
        /*
         * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
         * here, pci_create_root_bus() has been called by someone else and
         * sysdata is likely to be different from what we expect.  Let it go in
         * that case.
         */
        if (!bridge->dev.parent) {
                struct pci_controller *controller = bridge->bus->sysdata;
                ACPI_COMPANION_SET(&bridge->dev, controller->companion);
        }
        return 0;
}

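/*
 * Claim BAR resources that firmware has already assigned so they end
 * up in the resource tree; BARs with no address set are left for the
 * PCI core to assign later.
 */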
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_resource(dev, idx);
        }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

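/* Same as above, but for the window resources of a PCI-to-PCI bridge. */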
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_bridge_resource(dev, idx);
        }
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        if (b->self) {
                pci_read_bridge_bases(b);
                pcibios_fixup_bridge_resources(b->self);
        }
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
        platform_pci_fixup_bus(b);
}

void pcibios_add_bus(struct pci_bus *bus)
{
        acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}

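/*
 * Enable the device's resources and, unless MSI is already in use,
 * route its legacy interrupt through ACPI.
 */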
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        int ret;

        ret = pci_enable_resources(dev, mask);
        if (ret < 0)
                return ret;

        if (!pci_dev_msi_enabled(dev))
                return acpi_pci_irq_enable(dev);
        return 0;
}

void pcibios_disable_device(struct pci_dev *dev)
{
        BUG_ON(atomic_read(&dev->enable_cnt));
        if (!pci_dev_msi_enabled(dev))
                acpi_pci_irq_disable(dev);
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
        return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space mapping requested
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                               enum pci_mmap_state mmap_state)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;
        char *addr;

        /* We only support mmap'ing of legacy memory space */
        if (mmap_state != pci_mmap_mem)
                return -ENOSYS;

        /*
         * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
         * for more details.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                *val = inb(port);
                break;
        case 2:
                *val = inw(port);
                break;
        case 4:
                *val = inl(port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                outb(val, port);
                break;
        case 2:
                outw(val, port);
                break;
        case 4:
                outl(val, port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
        unsigned long levels, unique_caches;
        long status;
        pal_cache_config_info_t cci;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                       __func__, status);
                return;
        }

        status = ia64_pal_cache_config_info(levels - 1,
                                /* cache_type (data_or_unified)= */ 2, &cci);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
                       __func__, status);
                return;
        }
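        /*
         * pcci_line_size is log2 of the line size in bytes; the PCI
         * cache line size register counts 32-bit words, hence the /4.
         */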
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

static int __init pcibios_init(void)
{
        set_pci_dfl_cacheline_size();
        return 0;
}

subsys_initcall(pcibios_init);