linux/arch/powerpc/kernel/eeh_cache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI address cache; allows the lookup of PCI devices based on I/O address
 *
 * Copyright IBM Corporation 2004
 * Copyright Linas Vepstas <linas@austin.ibm.com> 2004
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>


/**
 * DOC: Overview
 *
 * The pci address cache subsystem.  This subsystem places
 * PCI device address resources into a red-black tree, sorted
 * according to the address range, so that given only an i/o
 * address, the corresponding PCI device can be **quickly**
 * found. It is safe to perform an address lookup in an interrupt
 * context; this ability is an important feature.
 *
 * Currently, the only customer of this code is the EEH subsystem;
 * thus, this code has been somewhat tailored to suit EEH better.
 * In particular, the cache does *not* hold the addresses of devices
 * for which EEH is not enabled.
 *
 * (Implementation Note: The RB tree seems to be better/faster
 * than any hash algo I could think of for this problem, even
 * with the penalty of slow pointer chases for d-cache misses).
 */
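
/*
 * Illustrative sketch (hypothetical addresses): each node keys an eeh_dev by
 * the [addr_lo, addr_hi] window of one device resource, so a lookup is an
 * interval search.  If a device's BAR 0 spans 0x3fe080000000-0x3fe08000ffff,
 * any address inside that window resolves to the same eeh_dev:
 *
 *        edev = eeh_addr_cache_get_dev(0x3fe080001234);
 */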

struct pci_io_addr_range {
        struct rb_node rb_node;
        resource_size_t addr_lo;
        resource_size_t addr_hi;
        struct eeh_dev *edev;
        struct pci_dev *pcidev;
        unsigned long flags;
};

static struct pci_io_addr_cache {
        struct rb_root rb_root;
        spinlock_t piar_lock;
} pci_io_addr_cache_root;

static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
        struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;

        while (n) {
                struct pci_io_addr_range *piar;
                piar = rb_entry(n, struct pci_io_addr_range, rb_node);

                if (addr < piar->addr_lo)
                        n = n->rb_left;
                else if (addr > piar->addr_hi)
                        n = n->rb_right;
                else
                        return piar->edev;
        }

        return NULL;
}

/**
 * eeh_addr_cache_get_dev - Get device, given only address
 * @addr: mmio (PIO) phys address or i/o port number
 *
 * Given an mmio phys address, or a port number, find a pci device
 * that implements this address.  I/O port numbers are assumed to be offset
 * from zero (that is, they do *not* have pci_io_addr added in).
 * It is safe to call this function within an interrupt.
 */
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
        struct eeh_dev *edev;
        unsigned long flags;

        spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
        edev = __eeh_addr_cache_get_device(addr);
        spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
        return edev;
}
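
/*
 * Sketch of the expected caller (assumed, based on the EEH error-detection
 * path): when a driver suspects a frozen device, the failing physical
 * address can be mapped back to its eeh_dev and handed to the EEH core,
 * roughly like:
 *
 *        struct eeh_dev *edev = eeh_addr_cache_get_dev(phys_addr);
 *
 *        if (edev)
 *                eeh_dev_check_failure(edev);
 */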

#ifdef DEBUG
/*
 * Handy-dandy debug print routine, does nothing more
 * than print out the contents of our addr cache.
 */
static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
        struct rb_node *n;
        int cnt = 0;

        n = rb_first(&cache->rb_root);
        while (n) {
                struct pci_io_addr_range *piar;
                piar = rb_entry(n, struct pci_io_addr_range, rb_node);
                pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n",
                       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
                       &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
                cnt++;
                n = rb_next(n);
        }
}
#endif

/* Insert address range into the rb tree. */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
                      resource_size_t ahi, unsigned long flags)
{
        struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct pci_io_addr_range *piar;

        /* Walk tree, find a place to insert into tree */
        while (*p) {
                parent = *p;
                piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
                if (ahi < piar->addr_lo) {
                        p = &parent->rb_left;
                } else if (alo > piar->addr_hi) {
                        p = &parent->rb_right;
                } else {
                        if (dev != piar->pcidev ||
                            alo != piar->addr_lo || ahi != piar->addr_hi) {
                                pr_warn("PIAR: overlapping address range\n");
                        }
                        return piar;
                }
        }
        piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
        if (!piar)
                return NULL;

        piar->addr_lo = alo;
        piar->addr_hi = ahi;
        piar->edev = pci_dev_to_eeh_dev(dev);
        piar->pcidev = dev;
        piar->flags = flags;

        eeh_edev_dbg(piar->edev, "PIAR: insert range=[%pap:%pap]\n",
                 &alo, &ahi);

        rb_link_node(&piar->rb_node, parent, p);
        rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

        return piar;
}
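
/*
 * Note on the overlap branch above: re-inserting the exact range already
 * cached for the same pci_dev just returns the existing node, while a range
 * that only partially overlaps an existing entry leaves the tree untouched
 * and logs the "overlapping address range" warning.  For example
 * (hypothetical values):
 *
 *        eeh_addr_cache_insert(dev, 0x1000, 0x1fff, IORESOURCE_MEM);
 *        eeh_addr_cache_insert(dev, 0x1000, 0x1fff, IORESOURCE_MEM); // silently returns first node
 *        eeh_addr_cache_insert(dev, 0x1800, 0x27ff, IORESOURCE_MEM); // warns, returns first node
 */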

static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
        struct eeh_dev *edev;
        int i;

        edev = pci_dev_to_eeh_dev(dev);
        if (!edev) {
                pr_warn("PCI: no EEH dev found for %s\n",
                        pci_name(dev));
                return;
        }

        /* Skip any devices for which EEH is not enabled. */
        if (!edev->pe) {
                dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
                return;
        }

        /*
         * Walk resources on this device, poke the first 7 (6 normal BAR and 1
         * ROM BAR) into the tree.
         */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                resource_size_t start = pci_resource_start(dev, i);
                resource_size_t end = pci_resource_end(dev, i);
                unsigned long flags = pci_resource_flags(dev, i);

                /* We are interested only in bus addresses, not DMA or other stuff */
                if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
                        continue;
                if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
                        continue;
                eeh_addr_cache_insert(dev, start, end, flags);
        }
}

/**
 * eeh_addr_cache_insert_dev - Add a device to the address cache
 * @dev: PCI device whose I/O addresses we are interested in.
 *
 * In order to support the fast lookup of devices based on addresses,
 * we maintain a cache of devices that can be quickly searched.
 * This routine adds a device to that cache.
 */
void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
        __eeh_addr_cache_insert_dev(dev);
        spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
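
/*
 * Sketch of intended usage (assumed): the EEH core is expected to call this
 * once a device's eeh_dev and PE have been set up, e.g. from its probe path:
 *
 *        eeh_addr_cache_insert_dev(pdev);
 *
 * A device whose edev->pe is still NULL is skipped by the helper above (with
 * only a dev_dbg note), so nothing is cached for devices without EEH support.
 */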

static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
        struct rb_node *n;

restart:
        n = rb_first(&pci_io_addr_cache_root.rb_root);
        while (n) {
                struct pci_io_addr_range *piar;
                piar = rb_entry(n, struct pci_io_addr_range, rb_node);

                if (piar->pcidev == dev) {
                        eeh_edev_dbg(piar->edev, "PIAR: remove range=[%pap:%pap]\n",
                                 &piar->addr_lo, &piar->addr_hi);
                        rb_erase(n, &pci_io_addr_cache_root.rb_root);
                        kfree(piar);
                        goto restart;
                }
                n = rb_next(n);
        }
}

/**
 * eeh_addr_cache_rmv_dev - remove pci device from addr cache
 * @dev: device to remove
 *
 * Remove a device from the addr-cache tree.
 * This is potentially expensive, since it will walk
 * the tree multiple times (once per resource).
 * But so what; device removal doesn't need to be that fast.
 */
void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
        __eeh_addr_cache_rmv_dev(dev);
        spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
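
/*
 * Sketch of intended usage (assumed): removal mirrors the insert and is
 * expected to run from the PCI device teardown path, before the pci_dev goes
 * away, so no tree node is left pointing at a freed device:
 *
 *        eeh_addr_cache_rmv_dev(pdev);
 */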

/**
 * eeh_addr_cache_init - Initialize a cache of I/O addresses
 *
 * Initialize a cache of pci i/o addresses.  This cache will be used to
 * find the pci device that corresponds to a given address.
 */
void eeh_addr_cache_init(void)
{
        spin_lock_init(&pci_io_addr_cache_root.piar_lock);
}

static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
        struct pci_io_addr_range *piar;
        struct rb_node *n;
        unsigned long flags;

        spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
        for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
                piar = rb_entry(n, struct pci_io_addr_range, rb_node);

                seq_printf(s, "%s addr range [%pap-%pap]: %s\n",
                       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
                       &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
        }
        spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);

void eeh_cache_debugfs_init(void)
{
        debugfs_create_file_unsafe("eeh_address_cache", 0400,
                        arch_debugfs_dir, NULL,
                        &eeh_addr_cache_fops);
}
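
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug and
 * arch_debugfs_dir is the "powerpc" directory there): the output of
 * eeh_addr_cache_show() can then be read with
 *
 *        # cat /sys/kernel/debug/powerpc/eeh_address_cache
 *        mem addr range [0x3fe080000000-0x3fe08000ffff]: 0000:01:00.0
 *
 * where the address range and device name are hypothetical.
 */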