linux/arch/ia64/pci/pci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)           \
        (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)       \
        (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
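
/*
 * Worked example (illustrative numbers): for seg 0, bus 2, devfn 0x18
 * (device 3, function 0), config offset 0x40 fits the compact format:
 * (2 << 16) | (0x18 << 8) | 0x40 = 0x21840.  An extended offset such as
 * 0x100 no longer fits in 8 bits, so it needs the SAL 3.2 encoding:
 * (2 << 20) | (0x18 << 12) | 0x100 = 0x218100.  raw_pci_read() and
 * raw_pci_write() below choose between the two formats with the
 * "(seg | reg) <= 255" test.
 */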

int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
              int reg, int len, u32 *value)
{
        u64 addr, data = 0;
        int mode, result;

        if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }

        result = ia64_sal_pci_config_read(addr, mode, len, &data);
        if (result != 0)
                return -EINVAL;

        *value = (u32) data;
        return 0;
}

int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
               int reg, int len, u32 value)
{
        u64 addr;
        int mode, result;

        if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;

        if ((seg | reg) <= 255) {
                addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
                mode = 0;
        } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
                addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
                mode = 1;
        } else {
                return -EINVAL;
        }
        result = ia64_sal_pci_config_write(addr, mode, len, value);
        if (result != 0)
                return -EINVAL;
        return 0;
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 *value)
{
        return raw_pci_read(pci_domain_nr(bus), bus->number,
                                 devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
                                                        int size, u32 value)
{
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
        .read = pci_read,
        .write = pci_write,
};
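
/*
 * pci_root_ops is plugged into the ACPI core through pci_acpi_root_ops
 * below; once acpi_pci_root_create() registers a host bridge with it,
 * the PCI core routes all config space accesses for buses under that
 * root through pci_read()/pci_write(), and hence through SAL.
 */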

struct pci_root_info {
        struct acpi_pci_root_info common;
        struct pci_controller controller;
        struct list_head io_resources;
};

static unsigned int new_space(u64 phys_base, int sparse)
{
        u64 mmio_base;
        int i;

        if (phys_base == 0)
                return 0;       /* legacy I/O port space */

        mmio_base = (u64) ioremap(phys_base, 0);
        for (i = 0; i < num_io_spaces; i++)
                if (io_space[i].mmio_base == mmio_base &&
                    io_space[i].sparse == sparse)
                        return i;

        if (num_io_spaces == MAX_IO_SPACES) {
                pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
                        MAX_IO_SPACES);
                return ~0;
        }

        i = num_io_spaces++;
        io_space[i].mmio_base = mmio_base;
        io_space[i].sparse = sparse;

        return i;
}

static int add_io_space(struct device *dev, struct pci_root_info *info,
                        struct resource_entry *entry)
{
        struct resource_entry *iospace;
        struct resource *resource, *res = entry->res;
        char *name;
        unsigned long base, min, max, base_port;
        unsigned int sparse = 0, space_nr, len;

        len = strlen(info->common.name) + 32;
        iospace = resource_list_create_entry(NULL, len);
        if (!iospace) {
                dev_err(dev, "PCI: No memory for %s I/O port space\n",
                        info->common.name);
                return -ENOMEM;
        }

        if (res->flags & IORESOURCE_IO_SPARSE)
                sparse = 1;
        space_nr = new_space(entry->offset, sparse);
        if (space_nr == ~0)
                goto free_resource;

        name = (char *)(iospace + 1);
        min = res->start - entry->offset;
        max = res->end - entry->offset;
        base = __pa(io_space[space_nr].mmio_base);
        base_port = IO_SPACE_BASE(space_nr);
        snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
                 base_port + min, base_port + max);

        /*
         * The SDM guarantees the legacy 0-64K space is sparse, but if the
         * mapping is done by the processor (not the bridge), ACPI may not
         * mark it as sparse.
         */
        if (space_nr == 0)
                sparse = 1;

        resource = iospace->res;
        resource->name  = name;
        resource->flags = IORESOURCE_MEM;
        resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
        resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
        if (insert_resource(&iomem_resource, resource)) {
                dev_err(dev,
                        "can't allocate host bridge io space resource %pR\n",
                        resource);
                goto free_resource;
        }

        entry->offset = base_port;
        res->start = min + base_port;
        res->end = max + base_port;
        resource_list_add_tail(iospace, &info->io_resources);

        return 0;

free_resource:
        resource_list_free_entry(iospace);
        return -ENOSPC;
}
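
/*
 * Sparse encoding example: IO_SPACE_SPARSE_ENCODING(p) is defined in
 * arch/ia64/include/asm/io.h as ((((p) >> 2) << 12) | ((p) & 0xfff)),
 * so port 0x1000 lands at MMIO offset (0x400 << 12) | 0 = 0x400000.
 * A sparse space therefore spreads the 64K port range across 64MB of
 * MMIO, while a dense space maps port p to offset p directly.
 */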

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out the PCI CFG IO ports [0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
        return (res->flags & IORESOURCE_IO) &&
                res->start == 0xCF8 && res->end == 0xCFF;
}

static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
        struct device *dev = &ci->bridge->dev;
        struct pci_root_info *info;
        struct resource *res;
        struct resource_entry *entry, *tmp;
        int status;

        status = acpi_pci_probe_root_resources(ci);
        if (status > 0) {
                info = container_of(ci, struct pci_root_info, common);
                resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
                        res = entry->res;
                        if (res->flags & IORESOURCE_MEM) {
                                /*
                                 * HP's firmware has a hack to work around a
                                 * Windows bug. Ignore these tiny memory ranges.
                                 */
                                if (resource_size(res) <= 16) {
                                        resource_list_del(entry);
                                        insert_resource(&iomem_resource,
                                                        entry->res);
                                        resource_list_add_tail(entry,
                                                        &info->io_resources);
                                }
                        } else if (res->flags & IORESOURCE_IO) {
                                if (resource_is_pcicfg_ioport(entry->res))
                                        resource_list_destroy_entry(entry);
                                else if (add_io_space(dev, info, entry))
                                        resource_list_destroy_entry(entry);
                        }
                }
        }

        return status;
}

static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
        struct pci_root_info *info;
        struct resource_entry *entry, *tmp;

        info = container_of(ci, struct pci_root_info, common);
        resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
                release_resource(entry->res);
                resource_list_destroy_entry(entry);
        }
        kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
        .pci_ops = &pci_root_ops,
        .release_info = pci_acpi_root_release_info,
        .prepare_resources = pci_acpi_root_prepare_resources,
};

struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        struct pci_root_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&device->dev,
                        "pci_bus %04x:%02x: ignored (out of memory)\n",
                        root->segment, (int)root->secondary.start);
                return NULL;
        }

        info->controller.segment = root->segment;
        info->controller.companion = device;
        info->controller.node = acpi_get_node(device->handle);
        INIT_LIST_HEAD(&info->io_resources);
        return acpi_pci_root_create(root, &pci_acpi_root_ops,
                                    &info->common, &info->controller);
}
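
/*
 * Note the ownership hand-off above: info is not freed on the failure
 * paths inside acpi_pci_root_create(); teardown happens through the
 * .release_info callback (pci_acpi_root_release_info()), which also
 * releases any I/O space resources queued on info->io_resources.
 */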

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
        /*
         * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
         * here, pci_create_root_bus() has been called by someone else and
         * sysdata is likely to be different from what we expect.  Let it go in
         * that case.
         */
        if (!bridge->dev.parent) {
                struct pci_controller *controller = bridge->bus->sysdata;
                ACPI_COMPANION_SET(&bridge->dev, controller->companion);
        }
        return 0;
}

void pcibios_fixup_device_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_resource(dev, idx);
        }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
        int idx;

        if (!dev->bus)
                return;

        for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
                struct resource *r = &dev->resource[idx];

                if (!r->flags || r->parent || !r->start)
                        continue;

                pci_claim_bridge_resource(dev, idx);
        }
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
        struct pci_dev *dev;

        if (b->self) {
                pci_read_bridge_bases(b);
                pcibios_fixup_bridge_resources(b->self);
        }
        list_for_each_entry(dev, &b->devices, bus_list)
                pcibios_fixup_device_resources(dev);
}

void pcibios_add_bus(struct pci_bus *bus)
{
        acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
        acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        int ret;

        ret = pci_enable_resources(dev, mask);
        if (ret < 0)
                return ret;

        if (!pci_dev_msi_enabled(dev))
                return acpi_pci_irq_enable(dev);
        return 0;
}

void pcibios_disable_device(struct pci_dev *dev)
{
        BUG_ON(atomic_read(&dev->enable_cnt));
        if (!pci_dev_msi_enabled(dev))
                acpi_pci_irq_disable(dev);
}

/**
 * pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *pci_get_legacy_mem(struct pci_bus *bus)
{
        return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: mapping type requested; only pci_mmap_mem is supported here
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
                               enum pci_mmap_state mmap_state)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        pgprot_t prot;
        char *addr;

        /* We only support mmap'ing of legacy memory space */
        if (mmap_state != pci_mmap_mem)
                return -ENOSYS;

        /*
         * Avoid attribute aliasing.  See Documentation/ia64/aliasing.rst
         * for more details.
         */
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;
        prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
                                    vma->vm_page_prot);

        addr = pci_get_legacy_mem(bus);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
        vma->vm_page_prot = prot;

        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}
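
/*
 * Userspace reaches this through the "legacy_mem" file that the PCI
 * core creates for each bus on platforms defining HAVE_PCI_LEGACY.
 * A minimal sketch (illustrative only, error handling omitted, path
 * assumed to be the usual sysfs location):
 *
 *	int fd = open("/sys/class/pci_bus/0000:00/legacy_mem", O_RDWR);
 *	void *p = mmap(NULL, 0xC0000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	// the VGA frame buffer would then appear at p + 0xA0000
 */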

/**
 * pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                *val = inb(port);
                break;
        case 2:
                *val = inw(port);
                break;
        case 4:
                *val = inl(port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

/**
 * pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;

        switch (size) {
        case 1:
                outb(val, port);
                break;
        case 2:
                outw(val, port);
                break;
        case 4:
                outl(val, port);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
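
/*
 * These two back the companion "legacy_io" sysfs file.  ia64 has no
 * port I/O instructions, so the inb()/outb() family used above expands
 * to uncached loads/stores into the MMIO port window registered in
 * add_io_space()/new_space().
 */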

/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
        unsigned long levels, unique_caches;
        long status;
        pal_cache_config_info_t cci;

        status = ia64_pal_cache_summary(&levels, &unique_caches);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
                        __func__, status);
                return;
        }

        status = ia64_pal_cache_config_info(levels - 1,
                                /* cache_type (data_or_unified)= */ 2, &cci);
        if (status != 0) {
                pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
                        __func__, status);
                return;
        }
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
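
/*
 * Example: PAL reports pcci_line_size as log2 of the line size in
 * bytes, and the PCI cache line size register counts 32-bit words,
 * so a 128-byte outermost cache line (pcci_line_size == 7) yields
 * (1 << 7) / 4 = 32.
 */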

static int __init pcibios_init(void)
{
        set_pci_dfl_cacheline_size();
        return 0;
}

subsys_initcall(pcibios_init);