linux/drivers/pci/access.c
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */

DEFINE_RAW_SPINLOCK(pci_lock);

/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_bus->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{                                                                       \
        int res;                                                        \
        unsigned long flags;                                            \
        u32 data = 0;                                                   \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;       \
        raw_spin_lock_irqsave(&pci_lock, flags);                        \
        res = bus->ops->read(bus, devfn, pos, len, &data);              \
        *value = (type)data;                                            \
        raw_spin_unlock_irqrestore(&pci_lock, flags);           \
        return res;                                                     \
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
        (struct pci_bus *bus, unsigned int devfn, int pos, type value)  \
{                                                                       \
        int res;                                                        \
        unsigned long flags;                                            \
        if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;       \
        raw_spin_lock_irqsave(&pci_lock, flags);                        \
        res = bus->ops->write(bus, devfn, pos, len, value);             \
        raw_spin_unlock_irqrestore(&pci_lock, flags);           \
        return res;                                                     \
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
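
/*
 * A minimal usage sketch: device drivers normally call the
 * pci_read_config_*()/pci_write_config_*() wrappers from <linux/pci.h>,
 * which supply dev->bus and dev->devfn and end up in the accessors
 * generated above.  "pdev" is a hypothetical struct pci_dev * already
 * bound to a driver:
 *
 *      u16 vendor;
 *      u8 irq_line;
 *
 *      pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *      pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq_line);
 *
 *      // equivalent bus-level call made by the first wrapper above:
 *      pci_bus_read_config_word(pdev->bus, pdev->devfn, PCI_VENDOR_ID,
 *                               &vendor);
 */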

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
                            int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (size == 1)
                *val = readb(addr);
        else if (size == 2)
                *val = readw(addr);
        else
                *val = readl(addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 1)
                writeb(val, addr);
        else if (size == 2)
                writew(val, addr);
        else
                writel(val, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);
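
/*
 * A minimal sketch of how a host controller driver might use these
 * helpers: it implements only .map_bus() and reuses the generic
 * accessors for the MMIO itself.  "foo_pcie", "cfg_base" and the ECAM
 * offset math are illustrative, not part of this file:
 *
 *      static void __iomem *foo_pcie_map_bus(struct pci_bus *bus,
 *                                            unsigned int devfn, int where)
 *      {
 *              struct foo_pcie *pcie = bus->sysdata;
 *
 *              return pcie->cfg_base + ((bus->number << 20) |
 *                                       (devfn << 12) | where);
 *      }
 *
 *      static struct pci_ops foo_pcie_ops = {
 *              .map_bus = foo_pcie_map_bus,
 *              .read    = pci_generic_config_read,
 *              .write   = pci_generic_config_write,
 *      };
 */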

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
                              int where, int size, u32 *val)
{
        void __iomem *addr;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr) {
                *val = ~0;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        *val = readl(addr);

        if (size <= 2)
                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
                               int where, int size, u32 val)
{
        void __iomem *addr;
        u32 mask, tmp;

        addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
        if (!addr)
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (size == 4) {
                writel(val, addr);
                return PCIBIOS_SUCCESSFUL;
        } else {
                mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
        }

        tmp = readl(addr) & mask;
        tmp |= val << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);
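
/*
 * Worked example of the read-modify-write above: a 1-byte write of 0x40
 * to where == 0x52 hits the aligned dword at 0x50:
 *
 *      where & 0x3                     == 2
 *      mask = ~(0xff << (2 * 8))       == 0xff00ffff
 *      tmp  = (readl(addr) & mask) | (0x40 << 16)
 *
 * Only byte 2 changes in tmp, but the writel() stores all four bytes, so
 * the neighbouring bytes are rewritten with the values just read back -
 * worth remembering when write-1-to-clear bits share the same dword.
 */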

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:        pci bus struct
 * @ops:        new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
        struct pci_ops *old_ops;
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);
        old_ops = bus->ops;
        bus->ops = ops;
        raw_spin_unlock_irqrestore(&pci_lock, flags);
        return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
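
/*
 * A minimal sketch of the intended save/restore pattern; "bus" and
 * "my_intercept_ops" are illustrative:
 *
 *      static struct pci_ops *orig_ops;
 *
 *      orig_ops = pci_bus_set_ops(bus, &my_intercept_ops);
 *      ...     // config accesses on this bus now go through the new ops
 *      pci_bus_set_ops(bus, orig_ops);
 *
 * Only the pointer swap itself is protected by pci_lock; the caller must
 * keep the new ops valid until they have been swapped back out.
 */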

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
        DECLARE_WAITQUEUE(wait, current);

        __add_wait_queue(&pci_cfg_wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irq(&pci_lock);
                schedule();
                raw_spin_lock_irq(&pci_lock);
        } while (dev->block_cfg_access);
        __remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)                                        \
int pci_user_read_config_##size                                         \
        (struct pci_dev *dev, int pos, type *val)                       \
{                                                                       \
        int ret = PCIBIOS_SUCCESSFUL;                                   \
        u32 data = -1;                                                  \
        if (PCI_##size##_BAD)                                           \
                return -EINVAL;                                         \
        raw_spin_lock_irq(&pci_lock);                           \
        if (unlikely(dev->block_cfg_access))                            \
                pci_wait_cfg(dev);                                      \
        ret = dev->bus->ops->read(dev->bus, dev->devfn,                 \
                                        pos, sizeof(type), &data);      \
        raw_spin_unlock_irq(&pci_lock);                         \
        *val = (type)data;                                              \
        return pcibios_err_to_errno(ret);                               \
}                                                                       \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)                               \
int pci_user_write_config_##size                                        \
        (struct pci_dev *dev, int pos, type val)                        \
{                                                                       \
        int ret = PCIBIOS_SUCCESSFUL;                                   \
        if (PCI_##size##_BAD)                                           \
                return -EINVAL;                                         \
        raw_spin_lock_irq(&pci_lock);                           \
        if (unlikely(dev->block_cfg_access))                            \
                pci_wait_cfg(dev);                                      \
        ret = dev->bus->ops->write(dev->bus, dev->devfn,                \
                                        pos, sizeof(type), val);        \
        raw_spin_unlock_irq(&pci_lock);                         \
        return pcibios_err_to_errno(ret);                               \
}                                                                       \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
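
/*
 * A rough sketch of how these differ from the pci_bus_*_config_*()
 * wrappers: the pci_user_*() variants honour dev->block_cfg_access, so a
 * userspace-facing path (e.g. the sysfs "config" file) sleeps while the
 * device is locked instead of racing with a reset.  "dev" is whatever
 * struct pci_dev * the caller resolved:
 *
 *      u16 cmd;
 *      int err;
 *
 *      err = pci_user_read_config_word(dev, PCI_COMMAND, &cmd);
 *      if (err)        // already a negative errno, not a PCIBIOS_* code
 *              return err;
 */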

/* VPD access through PCI 2.2+ VPD capability */

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:        pci device struct
 * @pos:        offset in vpd space
 * @count:      number of bytes to read
 * @buf:        pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);
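
/*
 * A minimal sketch of driver-side use; "pdev" and the buffer size are
 * illustrative.  The return value is the number of bytes actually read
 * (possibly fewer than requested) or a negative errno:
 *
 *      u8 vpd_buf[64];
 *      ssize_t len;
 *
 *      len = pci_read_vpd(pdev, 0, sizeof(vpd_buf), vpd_buf);
 *      if (len < 0)
 *              return len;     // e.g. -ENODEV if there is no VPD capability
 *      // on a well-formed image, vpd_buf now starts with the
 *      // Identifier String resource tag
 */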

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:        pci device struct
 * @pos:        offset in vpd space
 * @count:      number of bytes to write
 * @buf:        buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);

/**
 * pci_set_vpd_size - Set size of Vital Product Data space
 * @dev:        pci device struct
 * @len:        size of vpd space
 */
int pci_set_vpd_size(struct pci_dev *dev, size_t len)
{
        if (!dev->vpd || !dev->vpd->ops)
                return -ENODEV;
        return dev->vpd->ops->set_size(dev, len);
}
EXPORT_SYMBOL(pci_set_vpd_size);

#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:        pci device struct
 * @old_size:   current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
        size_t off = 0;
        unsigned char header[1+2];      /* 1 byte tag, 2 bytes length */

        while (off < old_size &&
               pci_read_vpd(dev, off, 1, header) == 1) {
                unsigned char tag;

                if (header[0] & PCI_VPD_LRDT) {
                        /* Large Resource Data Type Tag */
                        tag = pci_vpd_lrdt_tag(header);
                        /* Only read length from known tag items */
                        if ((tag == PCI_VPD_LTIN_ID_STRING) ||
                            (tag == PCI_VPD_LTIN_RO_DATA) ||
                            (tag == PCI_VPD_LTIN_RW_DATA)) {
                                if (pci_read_vpd(dev, off+1, 2,
                                                 &header[1]) != 2) {
                                        dev_warn(&dev->dev,
                                                 "invalid large VPD tag %02x size at offset %zu",
                                                 tag, off + 1);
                                        return 0;
                                }
                                off += PCI_VPD_LRDT_TAG_SIZE +
                                        pci_vpd_lrdt_size(header);
                        }
                } else {
                        /* Short Resource Data Type Tag */
                        off += PCI_VPD_SRDT_TAG_SIZE +
                                pci_vpd_srdt_size(header);
                        tag = pci_vpd_srdt_tag(header);
                }

                if (tag == PCI_VPD_STIN_END)    /* End tag descriptor */
                        return off;

                if ((tag != PCI_VPD_LTIN_ID_STRING) &&
                    (tag != PCI_VPD_LTIN_RO_DATA) &&
                    (tag != PCI_VPD_LTIN_RW_DATA)) {
                        dev_warn(&dev->dev,
                                 "invalid %s VPD tag %02x at offset %zu",
                                 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
                                 tag, off);
                        return 0;
                }
        }
        return 0;
}
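
/*
 * Sketch of the layout the walk above expects from a well-formed VPD
 * image (offsets and lengths are illustrative):
 *
 *      offset  bytes                   resource
 *      0x00    0x82 len_lo len_hi      Identifier String (large tag 0x02)
 *      ...     0x90 len_lo len_hi      VPD-R read-only fields (large tag 0x10)
 *      ...     0x91 len_lo len_hi      VPD-W read/write fields (large tag 0x11)
 *      ...     0x78                    End tag (small tag 0x0f), ends the walk
 *
 * Anything else makes the function give up and report a size of 0.
 */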

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
        struct pci_vpd *vpd = dev->vpd;
        unsigned long timeout = jiffies + msecs_to_jiffies(50);
        unsigned long max_sleep = 16;
        u16 status;
        int ret;

        if (!vpd->busy)
                return 0;

        while (time_before(jiffies, timeout)) {
                ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                &status);
                if (ret < 0)
                        return ret;

                if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
                        vpd->busy = 0;
                        return 0;
                }

                if (fatal_signal_pending(current))
                        return -EINTR;

                usleep_range(10, max_sleep);
                if (max_sleep < 1024)
                        max_sleep *= 2;
        }

        dev_warn(&dev->dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
        return -ETIMEDOUT;
}
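
/*
 * The handshake being polled, as implied by the callers below: a read
 * writes the address with PCI_VPD_ADDR_F clear and waits for the device
 * to set F (vpd->flag == PCI_VPD_ADDR_F) before reading PCI_VPD_DATA; a
 * write stores the dword to PCI_VPD_DATA, writes the address with F set,
 * then waits for the device to clear F (vpd->flag == 0).  The backoff
 * above starts at 10-16us and doubles the upper bound up to ~1ms, all
 * bounded by the 50ms timeout.
 */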

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
                            void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        int ret;
        loff_t end = pos + count;
        u8 *buf = arg;

        if (pos < 0)
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (pos > vpd->len)
                return 0;

        if (end > vpd->len) {
                end = vpd->len;
                count = end - pos;
        }

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;
                unsigned int i, skip;

                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos & ~3);
                if (ret < 0)
                        break;
                vpd->busy = 1;
                vpd->flag = PCI_VPD_ADDR_F;
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
                if (ret < 0)
                        break;

                skip = pos & 3;
                for (i = 0;  i < sizeof(u32); i++) {
                        if (i >= skip) {
                                *buf++ = val;
                                if (++pos == end)
                                        break;
                        }
                        val >>= 8;
                }
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
                             const void *arg)
{
        struct pci_vpd *vpd = dev->vpd;
        const u8 *buf = arg;
        loff_t end = pos + count;
        int ret = 0;

        if (pos < 0 || (pos & 3) || (count & 3))
                return -EINVAL;

        if (!vpd->valid) {
                vpd->valid = 1;
                vpd->len = pci_vpd_size(dev, vpd->len);
        }

        if (vpd->len == 0)
                return -EIO;

        if (end > vpd->len)
                return -EINVAL;

        if (mutex_lock_killable(&vpd->lock))
                return -EINTR;

        ret = pci_vpd_wait(dev);
        if (ret < 0)
                goto out;

        while (pos < end) {
                u32 val;

                val = *buf++;
                val |= *buf++ << 8;
                val |= *buf++ << 16;
                val |= *buf++ << 24;

                ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
                if (ret < 0)
                        break;
                ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
                                                 pos | PCI_VPD_ADDR_F);
                if (ret < 0)
                        break;

                vpd->busy = 1;
                vpd->flag = 0;
                ret = pci_vpd_wait(dev);
                if (ret < 0)
                        break;

                pos += sizeof(u32);
        }
out:
        mutex_unlock(&vpd->lock);
        return ret ? ret : count;
}

static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
        struct pci_vpd *vpd = dev->vpd;

        if (len == 0 || len > PCI_VPD_MAX_SIZE)
                return -EIO;

        vpd->valid = 1;
        vpd->len = len;

        return 0;
}

static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
        .set_size = pci_vpd_set_size,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
                               void *arg)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_read_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
                                const void *arg)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_write_vpd(tdev, pos, count, arg);
        pci_dev_put(tdev);
        return ret;
}

static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
{
        struct pci_dev *tdev = pci_get_slot(dev->bus,
                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        int ret;

        if (!tdev)
                return -ENODEV;

        ret = pci_set_vpd_size(tdev, len);
        pci_dev_put(tdev);
        return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
        .set_size = pci_vpd_f0_set_size,
};

int pci_vpd_init(struct pci_dev *dev)
{
        struct pci_vpd *vpd;
        u8 cap;

        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        if (!cap)
                return -ENODEV;

        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
        if (!vpd)
                return -ENOMEM;

        vpd->len = PCI_VPD_MAX_SIZE;
        if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
                vpd->ops = &pci_vpd_f0_ops;
        else
                vpd->ops = &pci_vpd_ops;
        mutex_init(&vpd->lock);
        vpd->cap = cap;
        vpd->busy = 0;
        vpd->valid = 0;
        dev->vpd = vpd;
        return 0;
}

void pci_vpd_release(struct pci_dev *dev)
{
        kfree(dev->vpd);
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:        pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
        might_sleep();

        raw_spin_lock_irq(&pci_lock);
        if (dev->block_cfg_access)
                pci_wait_cfg(dev);
        dev->block_cfg_access = 1;
        raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:        pci device struct
 *
 * Same as pci_cfg_access_lock(), but returns false if access is
 * already locked and true otherwise.  This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
        unsigned long flags;
        bool locked = true;

        raw_spin_lock_irqsave(&pci_lock, flags);
        if (dev->block_cfg_access)
                locked = false;
        else
                dev->block_cfg_access = 1;
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:        pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pci_lock, flags);

        /* This indicates a problem in the caller, but we don't need
         * to kill them, unlike a double-block above. */
        WARN_ON(!dev->block_cfg_access);

        dev->block_cfg_access = 0;
        wake_up_all(&pci_cfg_wait);
        raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
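
/*
 * A minimal sketch of the intended pattern - quiesce config space around
 * an operation (a reset, BIST, a D-state transition) that must not see
 * concurrent userspace config accesses; the middle section is the
 * caller's own:
 *
 *      pci_cfg_access_lock(dev);       // may sleep behind another locker
 *      ...                             // e.g. trigger the reset and wait
 *      pci_cfg_access_unlock(dev);     // wakes waiters on pci_cfg_wait
 *
 * From atomic context, pci_cfg_access_trylock() can be used instead, and
 * the caller must handle it returning false.
 */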

static inline int pcie_cap_version(const struct pci_dev *dev)
{
        return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_DOWNSTREAM;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ENDPOINT ||
               type == PCI_EXP_TYPE_LEG_END ||
               type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_UPSTREAM ||
               type == PCI_EXP_TYPE_DOWNSTREAM ||
               type == PCI_EXP_TYPE_PCI_BRIDGE ||
               type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
        return pcie_downstream_port(dev) &&
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
        int type = pci_pcie_type(dev);

        return type == PCI_EXP_TYPE_ROOT_PORT ||
               type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
        if (!pci_is_pcie(dev))
                return false;

        switch (pos) {
        case PCI_EXP_FLAGS:
                return true;
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_DEVSTA:
                return true;
        case PCI_EXP_LNKCAP:
        case PCI_EXP_LNKCTL:
        case PCI_EXP_LNKSTA:
                return pcie_cap_has_lnkctl(dev);
        case PCI_EXP_SLTCAP:
        case PCI_EXP_SLTCTL:
        case PCI_EXP_SLTSTA:
                return pcie_cap_has_sltctl(dev);
        case PCI_EXP_RTCTL:
        case PCI_EXP_RTCAP:
        case PCI_EXP_RTSTA:
                return pcie_cap_has_rtctl(dev);
        case PCI_EXP_DEVCAP2:
        case PCI_EXP_DEVCTL2:
        case PCI_EXP_LNKCAP2:
        case PCI_EXP_LNKCTL2:
        case PCI_EXP_LNKSTA2:
                return pcie_cap_version(dev) > 1;
        default:
                return false;
        }
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
        int ret;

        *val = 0;
        if (pos & 1)
                return -EINVAL;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_word() fails; it may
                 * have been set to 0xFFFF if a hardware error occurred
                 * during the read.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        /*
         * For Functions that do not implement the Slot Capabilities,
         * Slot Status, and Slot Control registers, these spaces must
         * be hardwired to 0b, with the exception of the Presence Detect
         * State bit in the Slot Status register of Downstream Ports,
         * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
         */
        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
        int ret;

        *val = 0;
        if (pos & 3)
                return -EINVAL;

        if (pcie_capability_reg_implemented(dev, pos)) {
                ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
                /*
                 * Reset *val to 0 if pci_read_config_dword() fails; it may
                 * have been set to 0xFFFFFFFF if a hardware error occurred
                 * during the read.
                 */
                if (ret)
                        *val = 0;
                return ret;
        }

        if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
            pos == PCI_EXP_SLTSTA)
                *val = PCI_EXP_SLTSTA_PDS;

        return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
        if (pos & 1)
                return -EINVAL;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
        if (pos & 3)
                return -EINVAL;

        if (!pcie_capability_reg_implemented(dev, pos))
                return 0;

        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
                                       u16 clear, u16 set)
{
        int ret;
        u16 val;

        ret = pcie_capability_read_word(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_word(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
                                        u32 clear, u32 set)
{
        int ret;
        u32 val;

        ret = pcie_capability_read_dword(dev, pos, &val);
        if (!ret) {
                val &= ~clear;
                val |= set;
                ret = pcie_capability_write_dword(dev, pos, val);
        }

        return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
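
/*
 * A minimal sketch of driver-side use; the register choices are
 * illustrative and "pdev" is a hypothetical struct pci_dev *.  "pos" is
 * always an offset within the PCI Express Capability, never an absolute
 * config space offset:
 *
 *      u16 lnksta;
 *
 *      pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
 *      // current link speed field: lnksta & PCI_EXP_LNKSTA_CLS
 *
 *      // read-modify-write, e.g. clear the Relaxed Ordering enable bit:
 *      pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 *                                         PCI_EXP_DEVCTL_RELAX_EN, 0);
 */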