/* linux/drivers/pci/access.c */
   1#include <linux/delay.h>
   2#include <linux/pci.h>
   3#include <linux/module.h>
   4#include <linux/sched.h>
   5#include <linux/slab.h>
   6#include <linux/ioport.h>
   7#include <linux/wait.h>
   8
   9#include "pci.h"
  10
/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.  It is taken with interrupts disabled
 * (irqsave/irq variants) by every accessor below, so a config
 * cycle is never interleaved with another on the same CPU.
 */

DEFINE_RAW_SPINLOCK(pci_lock);
  17
/*
 *  Wrappers for all PCI configuration access functions.  They just check
 *  alignment, do locking and call the low-level functions pointed to
 *  by pci_dev->ops.
 */

/*
 * Alignment predicates used inside the accessor macros below, where a
 * register offset named 'pos' is in scope: a word access must be 2-byte
 * aligned, a dword access 4-byte aligned; byte accesses are always OK.
 */
#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)
  27
/*
 * PCI_OP_READ() - expand to pci_bus_read_config_{byte,word,dword}().
 *
 * The generated function rejects misaligned offsets, then performs the
 * low-level ->read() under pci_lock with interrupts disabled.  @data is
 * pre-zeroed, so *value is well defined even if ->read() does not store
 * anything on failure.
 */
#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}
  42
/*
 * PCI_OP_WRITE() - expand to pci_bus_write_config_{byte,word,dword}().
 *
 * Mirror image of PCI_OP_READ(): alignment check, then the low-level
 * ->write() under pci_lock with interrupts disabled.
 */
#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}
  55
/* Instantiate and export the six bus-level config accessors. */
PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
  69
  70/**
  71 * pci_bus_set_ops - Set raw operations of pci bus
  72 * @bus:        pci bus struct
  73 * @ops:        new raw operations
  74 *
  75 * Return previous raw operations
  76 */
  77struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
  78{
  79        struct pci_ops *old_ops;
  80        unsigned long flags;
  81
  82        raw_spin_lock_irqsave(&pci_lock, flags);
  83        old_ops = bus->ops;
  84        bus->ops = ops;
  85        raw_spin_unlock_irqrestore(&pci_lock, flags);
  86        return old_ops;
  87}
  88EXPORT_SYMBOL(pci_bus_set_ops);
  89
/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

/*
 * Sleep until dev->block_cfg_access is cleared.
 *
 * Called with pci_lock held and interrupts disabled; drops and re-takes
 * the lock around schedule() so pci_cfg_access_unlock() can run.  The
 * low-level __add_wait_queue()/__remove_wait_queue() are used because
 * the waker (wake_up_all() in pci_cfg_access_unlock()) also runs under
 * pci_lock, which already serializes the wait-queue manipulation.
 */
static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}
 113
/* Returns 0 on success, negative values indicate error. */
/*
 * PCI_USER_READ_CONFIG() - expand to pci_user_read_config_{byte,word,dword}().
 *
 * Userspace-facing variant: sleeps in pci_wait_cfg() while the device has
 * config access blocked (BIST / D-state transition).  @data starts as
 * all-ones, so on a failed ->read() *val reads as ~0 — the same pattern a
 * failed hardware config read produces.
 */
#define PCI_USER_READ_CONFIG(size, type)					\
int pci_user_read_config_##size						\
	(struct pci_dev *dev, int pos, type *val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	u32 data = -1;							\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,			\
					pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);					\
	*val = (type)data;						\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
 133
/* Returns 0 on success, negative values indicate error. */
/*
 * PCI_USER_WRITE_CONFIG() - expand to pci_user_write_config_{byte,word,dword}().
 *
 * Write-side counterpart of PCI_USER_READ_CONFIG(); also sleeps while
 * dev->block_cfg_access is set before issuing the low-level ->write().
 */
#define PCI_USER_WRITE_CONFIG(size, type)				\
int pci_user_write_config_##size					\
	(struct pci_dev *dev, int pos, type val)			\
{									\
	int ret = PCIBIOS_SUCCESSFUL;					\
	if (PCI_##size##_BAD)						\
		return -EINVAL;						\
	raw_spin_lock_irq(&pci_lock);					\
	if (unlikely(dev->block_cfg_access))				\
		pci_wait_cfg(dev);					\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,		\
					pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);					\
	return pcibios_err_to_errno(ret);				\
}									\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
 151
/* Instantiate the six user-facing config accessors (exported by the macros). */
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
 158
 159/* VPD access through PCI 2.2+ VPD capability */
 160
 161/**
 162 * pci_read_vpd - Read one entry from Vital Product Data
 163 * @dev:        pci device struct
 164 * @pos:        offset in vpd space
 165 * @count:      number of bytes to read
 166 * @buf:        pointer to where to store result
 167 */
 168ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
 169{
 170        if (!dev->vpd || !dev->vpd->ops)
 171                return -ENODEV;
 172        return dev->vpd->ops->read(dev, pos, count, buf);
 173}
 174EXPORT_SYMBOL(pci_read_vpd);
 175
 176/**
 177 * pci_write_vpd - Write entry to Vital Product Data
 178 * @dev:        pci device struct
 179 * @pos:        offset in vpd space
 180 * @count:      number of bytes to write
 181 * @buf:        buffer containing write data
 182 */
 183ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
 184{
 185        if (!dev->vpd || !dev->vpd->ops)
 186                return -ENODEV;
 187        return dev->vpd->ops->write(dev, pos, count, buf);
 188}
 189EXPORT_SYMBOL(pci_write_vpd);
 190
 191/**
 192 * pci_set_vpd_size - Set size of Vital Product Data space
 193 * @dev:        pci device struct
 194 * @len:        size of vpd space
 195 */
 196int pci_set_vpd_size(struct pci_dev *dev, size_t len)
 197{
 198        if (!dev->vpd || !dev->vpd->ops)
 199                return -ENODEV;
 200        return dev->vpd->ops->set_size(dev, len);
 201}
 202EXPORT_SYMBOL(pci_set_vpd_size);
 203
#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:	pci device struct
 * @old_size:	current assumed size, also maximum allowed size
 *
 * Walks the resource tag list from offset 0 until an End tag, a
 * malformed/unknown tag, a failed read, or @old_size is reached.
 * Returns the offset just past the End tag on success, 0 on any
 * failure (callers treat 0 as "no usable VPD").
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				/* advance past tag byte, length bytes and payload */
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
			/* unknown large tag: off unchanged; rejected below */
		} else {
			/* Short Resource Data Type Tag: length is in the tag byte */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		/* Any tag other than the three known large tags is invalid here. */
		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	/* ran off the end without seeing an End tag */
	return 0;
}
 259
 260/*
 261 * Wait for last operation to complete.
 262 * This code has to spin since there is no other notification from the PCI
 263 * hardware. Since the VPD is often implemented by serial attachment to an
 264 * EEPROM, it may take many milliseconds to complete.
 265 *
 266 * Returns 0 on success, negative values indicate error.
 267 */
 268static int pci_vpd_wait(struct pci_dev *dev)
 269{
 270        struct pci_vpd *vpd = dev->vpd;
 271        unsigned long timeout = jiffies + msecs_to_jiffies(50);
 272        unsigned long max_sleep = 16;
 273        u16 status;
 274        int ret;
 275
 276        if (!vpd->busy)
 277                return 0;
 278
 279        while (time_before(jiffies, timeout)) {
 280                ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
 281                                                &status);
 282                if (ret < 0)
 283                        return ret;
 284
 285                if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
 286                        vpd->busy = 0;
 287                        return 0;
 288                }
 289
 290                if (fatal_signal_pending(current))
 291                        return -EINTR;
 292
 293                usleep_range(10, max_sleep);
 294                if (max_sleep < 1024)
 295                        max_sleep *= 2;
 296        }
 297
 298        dev_warn(&dev->dev, "VPD access failed.  This is likely a firmware bug on this device.  Contact the card vendor for a firmware update\n");
 299        return -ETIMEDOUT;
 300}
 301
/*
 * pci_vpd_read - backend for pci_read_vpd() on devices with a working
 * VPD capability.
 *
 * Reads are performed 32 bits at a time through the VPD address/data
 * register pair: write the dword-aligned address (with flag F clear),
 * wait for the device to set F, then read the data register.  Returns
 * the number of bytes read, or a negative errno.
 */
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	/* First access: probe the real VPD size (0 means broken/absent). */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	/* Clamp the read to the end of the VPD area. */
	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any operation a previous caller left in flight. */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		/* Arm a read: address with F clear; device sets F when done. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		/* Copy out byte-wise, little-endian, skipping bytes below pos. */
		skip = pos & 3;
		for (i = 0;  i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
 368
/*
 * pci_vpd_write - backend for pci_write_vpd().
 *
 * Writes must be dword-aligned and a whole number of dwords.  Each dword
 * goes: write data register, then write address with flag F set; the
 * device clears F when the write has been committed.  Returns the number
 * of bytes written, or a negative errno.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* First access: probe the real VPD size (0 means broken/absent). */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	/* Unlike reads, writes past the end are rejected, not clamped. */
	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Drain any operation a previous caller left in flight. */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble the dword little-endian from the byte stream. */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Arm the write: address with F set; device clears F when done. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}
 426
 427static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
 428{
 429        struct pci_vpd *vpd = dev->vpd;
 430
 431        if (len == 0 || len > PCI_VPD_MAX_SIZE)
 432                return -EIO;
 433
 434        vpd->valid = 1;
 435        vpd->len = len;
 436
 437        return 0;
 438}
 439
/* Default VPD backend: direct access through the device's own capability. */
static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
	.set_size = pci_vpd_set_size,
};
 445
 446static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
 447                               void *arg)
 448{
 449        struct pci_dev *tdev = pci_get_slot(dev->bus,
 450                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 451        ssize_t ret;
 452
 453        if (!tdev)
 454                return -ENODEV;
 455
 456        ret = pci_read_vpd(tdev, pos, count, arg);
 457        pci_dev_put(tdev);
 458        return ret;
 459}
 460
 461static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
 462                                const void *arg)
 463{
 464        struct pci_dev *tdev = pci_get_slot(dev->bus,
 465                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 466        ssize_t ret;
 467
 468        if (!tdev)
 469                return -ENODEV;
 470
 471        ret = pci_write_vpd(tdev, pos, count, arg);
 472        pci_dev_put(tdev);
 473        return ret;
 474}
 475
 476static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
 477{
 478        struct pci_dev *tdev = pci_get_slot(dev->bus,
 479                                            PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
 480        int ret;
 481
 482        if (!tdev)
 483                return -ENODEV;
 484
 485        ret = pci_set_vpd_size(tdev, len);
 486        pci_dev_put(tdev);
 487        return ret;
 488}
 489
/* VPD backend that forwards everything to function 0 of the same slot. */
static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
	.set_size = pci_vpd_f0_set_size,
};
 495
 496int pci_vpd_init(struct pci_dev *dev)
 497{
 498        struct pci_vpd *vpd;
 499        u8 cap;
 500
 501        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
 502        if (!cap)
 503                return -ENODEV;
 504
 505        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
 506        if (!vpd)
 507                return -ENOMEM;
 508
 509        vpd->len = PCI_VPD_MAX_SIZE;
 510        if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
 511                vpd->ops = &pci_vpd_f0_ops;
 512        else
 513                vpd->ops = &pci_vpd_ops;
 514        mutex_init(&vpd->lock);
 515        vpd->cap = cap;
 516        vpd->busy = 0;
 517        vpd->valid = 0;
 518        dev->vpd = vpd;
 519        return 0;
 520}
 521
/*
 * pci_vpd_release - free the state allocated by pci_vpd_init().
 * Safe on devices without VPD: kfree(NULL) is a no-op.
 */
void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}
 526
/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	/* Another holder: sleep until it calls pci_cfg_access_unlock(). */
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
 546
 547/**
 548 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 549 * @dev:        pci device struct
 550 *
 551 * Same as pci_cfg_access_lock, but will return 0 if access is
 552 * already locked, 1 otherwise. This function can be used from
 553 * atomic contexts.
 554 */
 555bool pci_cfg_access_trylock(struct pci_dev *dev)
 556{
 557        unsigned long flags;
 558        bool locked = true;
 559
 560        raw_spin_lock_irqsave(&pci_lock, flags);
 561        if (dev->block_cfg_access)
 562                locked = false;
 563        else
 564                dev->block_cfg_access = 1;
 565        raw_spin_unlock_irqrestore(&pci_lock, flags);
 566
 567        return locked;
 568}
 569EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
 570
/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above. */
	WARN_ON(!dev->block_cfg_access);

	/* Clear the flag and wake every sleeper in pci_wait_cfg(). */
	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
 592
/**
 * pci_pcie_type - get the PCIe device/port type
 * @dev: PCI device
 *
 * Extracts the PCI_EXP_FLAGS_TYPE field from the cached PCIe
 * capability flags; the >> 4 shift aligns the field to bit 0.
 */
int pci_pcie_type(const struct pci_dev *dev)
{
	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
EXPORT_SYMBOL_GPL(pci_pcie_type);
 602
/**
 * pci_pcie_cap - get the saved PCIe capability offset
 * @dev: PCI device
 *
 * PCIe capability offset is calculated at PCI device initialization
 * time and saved in the data structure. This function returns saved
 * PCIe capability offset. Using this instead of pci_find_capability()
 * reduces unnecessary search in the PCI configuration space. If you
 * need to calculate PCIe capability offset from raw device for some
 * reasons, please use pci_find_capability() instead.
 */
int pci_pcie_cap(struct pci_dev *dev)
{
	return dev->pcie_cap;
}
EXPORT_SYMBOL_GPL(pci_pcie_cap);
 619
 620/**
 621 * pci_is_pcie - check if the PCI device is PCI Express capable
 622 * @dev: PCI device
 623 *
 624 * Returns: true if the PCI device is PCI Express capable, false otherwise.
 625 */
 626bool pci_is_pcie(struct pci_dev *dev)
 627{
 628        return pci_pcie_cap(dev);
 629}
 630EXPORT_SYMBOL_GPL(pci_is_pcie);
 631
/* Capability version field from the cached PCIe capability flags. */
static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
 636
 637static bool pcie_downstream_port(const struct pci_dev *dev)
 638{
 639        int type = pci_pcie_type(dev);
 640
 641        return type == PCI_EXP_TYPE_ROOT_PORT ||
 642               type == PCI_EXP_TYPE_DOWNSTREAM;
 643}
 644
 645bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
 646{
 647        int type = pci_pcie_type(dev);
 648
 649        return type == PCI_EXP_TYPE_ENDPOINT ||
 650               type == PCI_EXP_TYPE_LEG_END ||
 651               type == PCI_EXP_TYPE_ROOT_PORT ||
 652               type == PCI_EXP_TYPE_UPSTREAM ||
 653               type == PCI_EXP_TYPE_DOWNSTREAM ||
 654               type == PCI_EXP_TYPE_PCI_BRIDGE ||
 655               type == PCI_EXP_TYPE_PCIE_BRIDGE;
 656}
 657
 658static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
 659{
 660        return pcie_downstream_port(dev) &&
 661               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
 662}
 663
 664static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
 665{
 666        int type = pci_pcie_type(dev);
 667
 668        return type == PCI_EXP_TYPE_ROOT_PORT ||
 669               type == PCI_EXP_TYPE_RC_EC;
 670}
 671
/*
 * Whether register @pos inside the PCI Express Capability structure is
 * implemented on @dev, per device/port type and capability version.
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		/* Device registers are mandatory for every PCIe function. */
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		/* The "2" register block only exists from capability v2 on. */
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
 706
/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
/*
 * Read a 16-bit register of the PCI Express Capability.  Unimplemented
 * registers read as 0 (with one spec-mandated exception below) and the
 * call still succeeds.
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
 746
/*
 * 32-bit counterpart of pcie_capability_read_word(); same emulation of
 * unimplemented registers, including the hardwired Presence Detect
 * State bit on Downstream Ports.
 */
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/* See the PCIe Base Spec 3.0 sec 7.8 note in the word variant. */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
 774
 775int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
 776{
 777        if (pos & 1)
 778                return -EINVAL;
 779
 780        if (!pcie_capability_reg_implemented(dev, pos))
 781                return 0;
 782
 783        return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
 784}
 785EXPORT_SYMBOL(pcie_capability_write_word);
 786
 787int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
 788{
 789        if (pos & 3)
 790                return -EINVAL;
 791
 792        if (!pcie_capability_reg_implemented(dev, pos))
 793                return 0;
 794
 795        return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
 796}
 797EXPORT_SYMBOL(pcie_capability_write_dword);
 798
/* Set bits in a 16-bit PCIe Capability register (read-modify-write). */
int pcie_capability_set_word(struct pci_dev *dev, int pos, u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}
EXPORT_SYMBOL(pcie_capability_set_word);
 804
/* Clear bits in a 16-bit PCIe Capability register (read-modify-write). */
int pcie_capability_clear_word(struct pci_dev *dev, int pos, u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}
EXPORT_SYMBOL(pcie_capability_clear_word);
 810
 811int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
 812                                       u16 clear, u16 set)
 813{
 814        int ret;
 815        u16 val;
 816
 817        ret = pcie_capability_read_word(dev, pos, &val);
 818        if (!ret) {
 819                val &= ~clear;
 820                val |= set;
 821                ret = pcie_capability_write_word(dev, pos, val);
 822        }
 823
 824        return ret;
 825}
 826EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
 827
 828int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
 829                                        u32 clear, u32 set)
 830{
 831        int ret;
 832        u32 val;
 833
 834        ret = pcie_capability_read_dword(dev, pos, &val);
 835        if (!ret) {
 836                val &= ~clear;
 837                val |= set;
 838                ret = pcie_capability_write_dword(dev, pos, val);
 839        }
 840
 841        return ret;
 842}
 843EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
 844