/* linux/drivers/pci/pci.c */
   1/*
   2 *      PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *      Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *      David Mosberger-Tang
   6 *
   7 *      Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/dmi.h>
  13#include <linux/init.h>
  14#include <linux/pci.h>
  15#include <linux/pm.h>
  16#include <linux/slab.h>
  17#include <linux/module.h>
  18#include <linux/spinlock.h>
  19#include <linux/string.h>
  20#include <linux/log2.h>
  21#include <linux/pci-aspm.h>
  22#include <linux/pm_wakeup.h>
  23#include <linux/interrupt.h>
  24#include <linux/device.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pci_hotplug.h>
  27#include <asm/setup.h>
  28#include <linux/aer.h>
  29#include "pci.h"
  30
/*
 * Human-readable names for pci_power_t values.
 * NOTE(review): presumably indexed with a +1 offset so that
 * PCI_POWER_ERROR (-1) maps to "error" — confirm against pci_power_name().
 */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* NOTE(review): presumably set by platform/quirk code when an ISA DMA
 * bridge is known to be broken — confirm against the quirk tables. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* NOTE(review): presumably a bitmask of chipset problems filled in by
 * quirks — confirm against PCIPCI_* users. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Lower bound (ms) for the delay around D3hot transitions; enforced by
 * pci_dev_d3_sleep() below. */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/*
 * List of devices handled by the delayed pci_pme_work scan (run by
 * pci_pme_list_scan(), defined elsewhere in this file); the mutex
 * guards the list.
 */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* One entry on pci_pme_list per tracked device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
  56
  57static void pci_dev_d3_sleep(struct pci_dev *dev)
  58{
  59        unsigned int delay = dev->d3_delay;
  60
  61        if (delay < pci_pm_d3_delay)
  62                delay = pci_pm_d3_delay;
  63
  64        msleep(delay);
  65}
  66
#ifdef CONFIG_PCI_DOMAINS
/* Non-zero when this platform supports multiple PCI domains (segments). */
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/* NOTE(review): presumably the number of bus numbers reserved under a
 * hotplug bridge — confirm against the bus-sizing code. */
#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

/* NOTE(review): appears to select the PCIe MPS configuration policy
 * (pcie_bus_* command-line options) — confirm against
 * pcie_bus_configure_settings(). */
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;
 110
 111static int __init pcie_port_pm_setup(char *str)
 112{
 113        if (!strcmp(str, "off"))
 114                pci_bridge_d3_disable = true;
 115        else if (!strcmp(str, "force"))
 116                pci_bridge_d3_force = true;
 117        return 1;
 118}
 119__setup("pcie_port_pm=", pcie_port_pm_setup);
 120
 121/**
 122 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 123 * @bus: pointer to PCI bus structure to search
 124 *
 125 * Given a PCI bus, returns the highest PCI bus number present in the set
 126 * including the given PCI bus and its list of child PCI buses.
 127 */
 128unsigned char pci_bus_max_busnr(struct pci_bus *bus)
 129{
 130        struct pci_bus *tmp;
 131        unsigned char max, n;
 132
 133        max = bus->busn_res.end;
 134        list_for_each_entry(tmp, &bus->children, node) {
 135                n = pci_bus_max_busnr(tmp);
 136                if (n > max)
 137                        max = n;
 138        }
 139        return max;
 140}
 141EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 142
 143#ifdef CONFIG_HAS_IOMEM
 144void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 145{
 146        struct resource *res = &pdev->resource[bar];
 147
 148        /*
 149         * Make sure the BAR is actually a memory resource, not an IO resource
 150         */
 151        if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
 152                dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
 153                return NULL;
 154        }
 155        return ioremap_nocache(res->start, resource_size(res));
 156}
 157EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 158#endif
 159
 160
/*
 * Walk the conventional capability list starting from the "next"
 * pointer stored at config offset @pos, looking for capability ID
 * @cap.  @ttl caps the number of entries examined so a malformed
 * (circular) list cannot loop forever; it is decremented in place so
 * callers can continue a bounded walk.  Returns the offset of the
 * matching capability, or 0 if none is found.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	/* Load the pointer to the first/next capability. */
	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		/* Offsets inside the standard header terminate the list. */
		if (pos < 0x40)
			break;
		pos &= ~3;	/* capability pointers are dword-aligned */
		/* One word fetches both the ID byte and the next pointer. */
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)		/* absent device / invalid entry */
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}
 184
 185static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 186                               u8 pos, int cap)
 187{
 188        int ttl = PCI_FIND_CAP_TTL;
 189
 190        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 191}
 192
 193int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 194{
 195        return __pci_find_next_cap(dev->bus, dev->devfn,
 196                                   pos + PCI_CAP_LIST_NEXT, cap);
 197}
 198EXPORT_SYMBOL_GPL(pci_find_next_capability);
 199
 200static int __pci_bus_find_cap_start(struct pci_bus *bus,
 201                                    unsigned int devfn, u8 hdr_type)
 202{
 203        u16 status;
 204
 205        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 206        if (!(status & PCI_STATUS_CAP_LIST))
 207                return 0;
 208
 209        switch (hdr_type) {
 210        case PCI_HEADER_TYPE_NORMAL:
 211        case PCI_HEADER_TYPE_BRIDGE:
 212                return PCI_CAPABILITY_LIST;
 213        case PCI_HEADER_TYPE_CARDBUS:
 214                return PCI_CB_CAPABILITY_LIST;
 215        }
 216
 217        return 0;
 218}
 219
 220/**
 221 * pci_find_capability - query for devices' capabilities
 222 * @dev: PCI device to query
 223 * @cap: capability code
 224 *
 225 * Tell if a device supports a given PCI capability.
 226 * Returns the address of the requested capability structure within the
 227 * device's PCI configuration space or 0 in case the device does not
 228 * support it.  Possible values for @cap:
 229 *
 230 *  %PCI_CAP_ID_PM           Power Management
 231 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 232 *  %PCI_CAP_ID_VPD          Vital Product Data
 233 *  %PCI_CAP_ID_SLOTID       Slot Identification
 234 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 235 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 236 *  %PCI_CAP_ID_PCIX         PCI-X
 237 *  %PCI_CAP_ID_EXP          PCI Express
 238 */
 239int pci_find_capability(struct pci_dev *dev, int cap)
 240{
 241        int pos;
 242
 243        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 244        if (pos)
 245                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 246
 247        return pos;
 248}
 249EXPORT_SYMBOL(pci_find_capability);
 250
 251/**
 252 * pci_bus_find_capability - query for devices' capabilities
 253 * @bus:   the PCI bus to query
 254 * @devfn: PCI device to query
 255 * @cap:   capability code
 256 *
 257 * Like pci_find_capability() but works for pci devices that do not have a
 258 * pci_dev structure set up yet.
 259 *
 260 * Returns the address of the requested capability structure within the
 261 * device's PCI configuration space or 0 in case the device does not
 262 * support it.
 263 */
 264int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 265{
 266        int pos;
 267        u8 hdr_type;
 268
 269        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 270
 271        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 272        if (pos)
 273                pos = __pci_find_next_cap(bus, devfn, pos, cap);
 274
 275        return pos;
 276}
 277EXPORT_SYMBOL(pci_bus_find_capability);
 278
 279/**
 280 * pci_find_next_ext_capability - Find an extended capability
 281 * @dev: PCI device to query
 282 * @start: address at which to start looking (0 to start at beginning of list)
 283 * @cap: capability code
 284 *
 285 * Returns the address of the next matching extended capability structure
 286 * within the device's PCI configuration space or 0 if the device does
 287 * not support it.  Some capabilities can occur several times, e.g., the
 288 * vendor-specific capability, and this provides a way to find them all.
 289 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* Extended capabilities live above the first 256 bytes. */
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		/* "pos != start" skips the capability the caller resumed from. */
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		/* A next pointer inside the legacy region ends the list. */
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
 330
 331/**
 332 * pci_find_ext_capability - Find an extended capability
 333 * @dev: PCI device to query
 334 * @cap: capability code
 335 *
 336 * Returns the address of the requested extended capability structure
 337 * within the device's PCI configuration space or 0 if the device does
 338 * not support it.  Possible values for @cap:
 339 *
 340 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 341 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 342 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 343 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 344 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	/* First-match search: start the walk from the list head. */
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 350
/*
 * Scan the HyperTransport capability blocks (PCI_CAP_ID_HT) of @dev,
 * starting at config offset @pos, for one whose capability-type field
 * matches @ht_cap.  Returns its offset, or 0 on read failure / no match.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	/* Host/slave capability types use a 3-bit type field; others 5-bit. */
	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		/* The HT capability-type field lives in byte 3 of the block. */
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* ttl carries over so the whole walk stays bounded. */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
 378/**
 379 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 380 * @dev: PCI device to query
 381 * @pos: Position from which to continue searching
 382 * @ht_cap: Hypertransport capability code
 383 *
 384 * To be used in conjunction with pci_find_ht_capability() to search for
 385 * all capabilities matching @ht_cap. @pos should always be a value returned
 386 * from pci_find_ht_capability().
 387 *
 388 * NB. To be 100% safe against broken PCI devices, the caller should take
 389 * steps to avoid an infinite loop.
 390 */
 391int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 392{
 393        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 394}
 395EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 396
 397/**
 398 * pci_find_ht_capability - query a device's Hypertransport capabilities
 399 * @dev: PCI device to query
 400 * @ht_cap: Hypertransport capability code
 401 *
 402 * Tell if a device supports a given Hypertransport capability.
 403 * Returns an address within the device's PCI configuration space
 404 * or 0 in case the device does not support the request capability.
 405 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 406 * which has a Hypertransport capability matching @ht_cap.
 407 */
 408int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 409{
 410        int pos;
 411
 412        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 413        if (pos)
 414                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 415
 416        return pos;
 417}
 418EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 419
 420/**
 421 * pci_find_parent_resource - return resource region of parent bus of given region
 422 * @dev: PCI device structure contains resources to be searched
 423 * @res: child resource record for which parent is sought
 424 *
 425 *  For given resource region of given device, return the resource
 426 *  region of parent bus the given region is contained in.
 427 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* Unassigned (start == 0) resources have no parent window. */
		if (res->start && resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);
 462
 463/**
 464 * pci_find_pcie_root_port - return PCIe Root Port
 465 * @dev: PCI device to query
 466 *
 467 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 468 * for a given PCI Device.
 469 */
 470struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
 471{
 472        struct pci_dev *bridge, *highest_pcie_bridge = dev;
 473
 474        bridge = pci_upstream_bridge(dev);
 475        while (bridge && pci_is_pcie(bridge)) {
 476                highest_pcie_bridge = bridge;
 477                bridge = pci_upstream_bridge(bridge);
 478        }
 479
 480        if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
 481                return NULL;
 482
 483        return highest_pcie_bridge;
 484}
 485EXPORT_SYMBOL(pci_find_pcie_root_port);
 486
 487/**
 488 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 489 * @dev: the PCI device to operate on
 490 * @pos: config space offset of status word
 491 * @mask: mask of bit(s) to care about in status word
 492 *
 493 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 494 */
 495int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
 496{
 497        int i;
 498
 499        /* Wait for Transaction Pending bit clean */
 500        for (i = 0; i < 4; i++) {
 501                u16 status;
 502                if (i)
 503                        msleep((1 << (i - 1)) * 100);
 504
 505                pci_read_config_word(dev, pos, &status);
 506                if (!(status & mask))
 507                        return 1;
 508        }
 509
 510        return 0;
 511}
 512
 513/**
 514 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 515 * @dev: PCI device to have its BARs restored
 516 *
 517 * Restore the BAR values for a given device, so as to make it
 518 * accessible by its driver.
 519 */
 520static void pci_restore_bars(struct pci_dev *dev)
 521{
 522        int i;
 523
 524        /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
 525        if (dev->is_virtfn)
 526                return;
 527
 528        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 529                pci_update_resource(dev, i);
 530}
 531
 532static struct pci_platform_pm_ops *pci_platform_pm;
 533
 534int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 535{
 536        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 537            || !ops->sleep_wake)
 538                return -EINVAL;
 539        pci_platform_pm = ops;
 540        return 0;
 541}
 542
 543static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 544{
 545        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 546}
 547
 548static inline int platform_pci_set_power_state(struct pci_dev *dev,
 549                                               pci_power_t t)
 550{
 551        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 552}
 553
 554static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 555{
 556        return pci_platform_pm ?
 557                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 558}
 559
 560static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 561{
 562        return pci_platform_pm ?
 563                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 564}
 565
 566static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 567{
 568        return pci_platform_pm ?
 569                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 570}
 571
 572/**
 573 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 574 *                           given PCI device
 575 * @dev: PCI device to handle.
 576 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 577 *
 578 * RETURN VALUE:
 579 * -EINVAL if the requested state is invalid.
 580 * -EIO if device does not support PCI PM or its PM capabilities register has a
 581 * wrong version, or device doesn't support the requested state.
 582 * 0 if device already is in the requested state.
 583 * 0 if device's power state has been successfully changed.
 584 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* No PM capability means no PMCSR register to program. */
	if (!dev->pm_cap)
		return -EIO;

	/* Only D0..D3hot are reachable through PMCSR; D3cold is not. */
	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* Without No_Soft_Reset, leaving D3hot may reset the device,
		 * so remember to restore its BARs afterwards. */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back what the device actually accepted and cache it. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Let ASPM react to the upstream link partner's state change. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
 678
 679/**
 680 * pci_update_current_state - Read PCI power state of given device from its
 681 *                            PCI PM registers and cache it
 682 * @dev: PCI device to handle.
 683 * @state: State to cache in case the device doesn't have the PM capability
 684 */
 685void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 686{
 687        if (dev->pm_cap) {
 688                u16 pmcsr;
 689
 690                /*
 691                 * Configuration space is not accessible for device in
 692                 * D3cold, so just keep or set D3cold for safety
 693                 */
 694                if (dev->current_state == PCI_D3cold)
 695                        return;
 696                if (state == PCI_D3cold) {
 697                        dev->current_state = PCI_D3cold;
 698                        return;
 699                }
 700                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 701                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 702        } else {
 703                dev->current_state = state;
 704        }
 705}
 706
 707/**
 708 * pci_power_up - Put the given device into D0 forcibly
 709 * @dev: PCI device to power up
 710 */
void pci_power_up(struct pci_dev *dev)
{
	/* Let platform firmware act first, if it manages this device. */
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	/* Then force D0 through the native PM registers and re-cache. */
	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}
 719
 720/**
 721 * pci_platform_power_transition - Use platform to change device power state
 722 * @dev: PCI device to handle.
 723 * @state: State to put the device into.
 724 */
 725static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 726{
 727        int error;
 728
 729        if (platform_pci_power_manageable(dev)) {
 730                error = platform_pci_set_power_state(dev, state);
 731                if (!error)
 732                        pci_update_current_state(dev, state);
 733        } else
 734                error = -ENODEV;
 735
 736        if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
 737                dev->current_state = PCI_D0;
 738
 739        return error;
 740}
 741
 742/**
 743 * pci_wakeup - Wake up a PCI device
 744 * @pci_dev: Device to handle.
 745 * @ign: ignored parameter
 746 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	/* Signal a wakeup event, then ask the PM core to resume the device. */
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}
 753
 754/**
 755 * pci_wakeup_bus - Walk given bus and wake up devices on it
 756 * @bus: Top bus of the subtree to walk.
 757 */
static void pci_wakeup_bus(struct pci_bus *bus)
{
	/* NULL bus (e.g. a device with no subordinate bus) is a no-op. */
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}
 763
 764/**
 765 * __pci_start_power_transition - Start power transition of a PCI device
 766 * @dev: PCI device to handle.
 767 * @state: State to put the device into.
 768 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	/* Only transitions back to D0 need platform help up front. */
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}
 792
 793/**
 794 * __pci_dev_set_current_state - Set current state of a PCI device
 795 * @dev: Device to handle
 796 * @data: pointer to state to be set
 797 */
 798static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
 799{
 800        pci_power_t state = *(pci_power_t *)data;
 801
 802        dev->current_state = state;
 803        return 0;
 804}
 805
 806/**
 807 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 808 * @bus: Top bus of the subtree to walk.
 809 * @state: state to be set
 810 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	/* NULL bus is a no-op; otherwise tag every device below it. */
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
 816
 817/**
 818 * __pci_complete_power_transition - Complete power transition of a PCI device
 819 * @dev: PCI device to handle.
 820 * @state: State to put the device into.
 821 *
 822 * This function should not be called directly by device drivers.
 823 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	/* Only low-power targets are completed here; D0 is handled earlier. */
	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Power off the bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 837
 838/**
 839 * pci_set_power_state - Set the power state of a PCI device
 840 * @dev: PCI device to handle.
 841 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 842 *
 843 * Transition a device to a new power state, using the platform firmware and/or
 844 * the device's PCI PM registers.
 845 *
 846 * RETURN VALUE:
 847 * -EINVAL if the requested state is invalid.
 848 * -EIO if device does not support PCI PM or its PM capabilities register has a
 849 * wrong version, or device doesn't support the requested state.
 850 * 0 if device already is in the requested state.
 851 * 0 if device's power state has been successfully changed.
 852 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* Platform-side preparation (D0 wakeup, D3cold resume delays). */
	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	/* A successful platform transition supersedes any native failure. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
 894
 895/**
 896 * pci_choose_state - Choose the power state of a PCI device
 897 * @dev: PCI device to be suspended
 898 * @state: target sleep state for the whole system. This is the value
 899 *      that is passed to suspend() function.
 900 *
 901 * Returns PCI power state suitable for given device and given system
 902 * message.
 903 */
 904
 905pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 906{
 907        pci_power_t ret;
 908
 909        if (!dev->pm_cap)
 910                return PCI_D0;
 911
 912        ret = platform_pci_choose_state(dev);
 913        if (ret != PCI_POWER_ERROR)
 914                return ret;
 915
 916        switch (state.event) {
 917        case PM_EVENT_ON:
 918                return PCI_D0;
 919        case PM_EVENT_FREEZE:
 920        case PM_EVENT_PRETHAW:
 921                /* REVISIT both freeze and pre-thaw "should" use D0 */
 922        case PM_EVENT_SUSPEND:
 923        case PM_EVENT_HIBERNATE:
 924                return PCI_D3hot;
 925        default:
 926                dev_info(&dev->dev, "unrecognized suspend event %d\n",
 927                         state.event);
 928                BUG();
 929        }
 930        return PCI_D0;
 931}
 932EXPORT_SYMBOL(pci_choose_state);
 933
 934#define PCI_EXP_SAVE_REGS       7
 935
 936static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
 937                                                       u16 cap, bool extended)
 938{
 939        struct pci_cap_saved_state *tmp;
 940
 941        hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
 942                if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
 943                        return tmp;
 944        }
 945        return NULL;
 946}
 947
/* Look up the saved copy of an ordinary (PCI 3.0) capability, if any */
struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
        return _pci_find_saved_cap(dev, cap, false);
}

/* Look up the saved copy of an extended (PCIe) capability, if any */
struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
        return _pci_find_saved_cap(dev, cap, true);
}
 957
 958static int pci_save_pcie_state(struct pci_dev *dev)
 959{
 960        int i = 0;
 961        struct pci_cap_saved_state *save_state;
 962        u16 *cap;
 963
 964        if (!pci_is_pcie(dev))
 965                return 0;
 966
 967        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 968        if (!save_state) {
 969                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 970                return -ENOMEM;
 971        }
 972
 973        cap = (u16 *)&save_state->cap.data[0];
 974        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
 975        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
 976        pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
 977        pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
 978        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
 979        pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
 980        pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
 981
 982        return 0;
 983}
 984
 985static void pci_restore_pcie_state(struct pci_dev *dev)
 986{
 987        int i = 0;
 988        struct pci_cap_saved_state *save_state;
 989        u16 *cap;
 990
 991        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 992        if (!save_state)
 993                return;
 994
 995        cap = (u16 *)&save_state->cap.data[0];
 996        pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
 997        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
 998        pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
 999        pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1000        pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1001        pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1002        pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1003}
1004
1005
1006static int pci_save_pcix_state(struct pci_dev *dev)
1007{
1008        int pos;
1009        struct pci_cap_saved_state *save_state;
1010
1011        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1012        if (!pos)
1013                return 0;
1014
1015        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1016        if (!save_state) {
1017                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
1018                return -ENOMEM;
1019        }
1020
1021        pci_read_config_word(dev, pos + PCI_X_CMD,
1022                             (u16 *)save_state->cap.data);
1023
1024        return 0;
1025}
1026
1027static void pci_restore_pcix_state(struct pci_dev *dev)
1028{
1029        int i = 0, pos;
1030        struct pci_cap_saved_state *save_state;
1031        u16 *cap;
1032
1033        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1034        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1035        if (!save_state || !pos)
1036                return;
1037        cap = (u16 *)&save_state->cap.data[0];
1038
1039        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1040}
1041
1042
1043/**
1044 * pci_save_state - save the PCI configuration space of a device before suspending
1045 * @dev: - PCI device that we're dealing with
1046 */
1047int pci_save_state(struct pci_dev *dev)
1048{
1049        int i;
1050        /* XXX: 100% dword access ok here? */
1051        for (i = 0; i < 16; i++)
1052                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1053        dev->state_saved = true;
1054
1055        i = pci_save_pcie_state(dev);
1056        if (i != 0)
1057                return i;
1058
1059        i = pci_save_pcix_state(dev);
1060        if (i != 0)
1061                return i;
1062
1063        return pci_save_vc_state(dev);
1064}
1065EXPORT_SYMBOL(pci_save_state);
1066
/*
 * Write @saved_val to config space @offset if it differs from what is
 * there, re-reading and re-writing up to @retry more times until the
 * read-back matches.  With @retry == 0 the value is written once
 * without verification.
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
                                     u32 saved_val, int retry)
{
        u32 val;

        pci_read_config_dword(pdev, offset, &val);
        if (val == saved_val)
                return;

        for (;;) {
                dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
                        offset, val, saved_val);
                pci_write_config_dword(pdev, offset, saved_val);
                if (retry-- <= 0)
                        return;

                pci_read_config_dword(pdev, offset, &val);
                if (val == saved_val)
                        return;

                /* Give the device a moment before checking again */
                mdelay(1);
        }
}
1090
1091static void pci_restore_config_space_range(struct pci_dev *pdev,
1092                                           int start, int end, int retry)
1093{
1094        int index;
1095
1096        for (index = end; index >= start; index--)
1097                pci_restore_config_dword(pdev, 4 * index,
1098                                         pdev->saved_config_space[index],
1099                                         retry);
1100}
1101
1102static void pci_restore_config_space(struct pci_dev *pdev)
1103{
1104        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1105                pci_restore_config_space_range(pdev, 10, 15, 0);
1106                /* Restore BARs before the command register. */
1107                pci_restore_config_space_range(pdev, 4, 9, 10);
1108                pci_restore_config_space_range(pdev, 0, 3, 0);
1109        } else {
1110                pci_restore_config_space_range(pdev, 0, 15, 0);
1111        }
1112}
1113
1114/**
1115 * pci_restore_state - Restore the saved state of a PCI device
1116 * @dev: - PCI device that we're dealing with
1117 */
1118void pci_restore_state(struct pci_dev *dev)
1119{
1120        if (!dev->state_saved)
1121                return;
1122
1123        /* PCI Express register must be restored first */
1124        pci_restore_pcie_state(dev);
1125        pci_restore_ats_state(dev);
1126        pci_restore_vc_state(dev);
1127
1128        pci_cleanup_aer_error_status_regs(dev);
1129
1130        pci_restore_config_space(dev);
1131
1132        pci_restore_pcix_state(dev);
1133        pci_restore_msi_state(dev);
1134
1135        /* Restore ACS and IOV configuration state */
1136        pci_enable_acs(dev);
1137        pci_restore_iov_state(dev);
1138
1139        dev->state_saved = false;
1140}
1141EXPORT_SYMBOL(pci_restore_state);
1142
1143struct pci_saved_state {
1144        u32 config_space[16];
1145        struct pci_cap_saved_data cap[0];
1146};
1147
1148/**
1149 * pci_store_saved_state - Allocate and return an opaque struct containing
1150 *                         the device saved state.
1151 * @dev: PCI device that we're dealing with
1152 *
1153 * Return NULL if no state or error.
1154 */
1155struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1156{
1157        struct pci_saved_state *state;
1158        struct pci_cap_saved_state *tmp;
1159        struct pci_cap_saved_data *cap;
1160        size_t size;
1161
1162        if (!dev->state_saved)
1163                return NULL;
1164
1165        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1166
1167        hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1168                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1169
1170        state = kzalloc(size, GFP_KERNEL);
1171        if (!state)
1172                return NULL;
1173
1174        memcpy(state->config_space, dev->saved_config_space,
1175               sizeof(state->config_space));
1176
1177        cap = state->cap;
1178        hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1179                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1180                memcpy(cap, &tmp->cap, len);
1181                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1182        }
1183        /* Empty cap_save terminates list */
1184
1185        return state;
1186}
1187EXPORT_SYMBOL_GPL(pci_store_saved_state);
1188
1189/**
1190 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1191 * @dev: PCI device that we're dealing with
1192 * @state: Saved state returned from pci_store_saved_state()
1193 */
1194int pci_load_saved_state(struct pci_dev *dev,
1195                         struct pci_saved_state *state)
1196{
1197        struct pci_cap_saved_data *cap;
1198
1199        dev->state_saved = false;
1200
1201        if (!state)
1202                return 0;
1203
1204        memcpy(dev->saved_config_space, state->config_space,
1205               sizeof(state->config_space));
1206
1207        cap = state->cap;
1208        while (cap->size) {
1209                struct pci_cap_saved_state *tmp;
1210
1211                tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1212                if (!tmp || tmp->cap.size != cap->size)
1213                        return -EINVAL;
1214
1215                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1216                cap = (struct pci_cap_saved_data *)((u8 *)cap +
1217                       sizeof(struct pci_cap_saved_data) + cap->size);
1218        }
1219
1220        dev->state_saved = true;
1221        return 0;
1222}
1223EXPORT_SYMBOL_GPL(pci_load_saved_state);
1224
1225/**
1226 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1227 *                                 and free the memory allocated for it.
1228 * @dev: PCI device that we're dealing with
1229 * @state: Pointer to saved state returned from pci_store_saved_state()
1230 */
1231int pci_load_and_free_saved_state(struct pci_dev *dev,
1232                                  struct pci_saved_state **state)
1233{
1234        int ret = pci_load_saved_state(dev, *state);
1235        kfree(*state);
1236        *state = NULL;
1237        return ret;
1238}
1239EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1240
/*
 * Default (weak) implementation: just enable the requested resources.
 * Architectures may override this to do additional setup.
 */
int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
        return pci_enable_resources(dev, bars);
}
1245
/*
 * Low-level enable: power the device up to D0, have the arch enable the
 * resources in @bars, run enable-time fixups, and make sure INTx is not
 * left masked when the device will rely on legacy interrupts.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
        int err;
        struct pci_dev *bridge;
        u16 cmd;
        u8 pin;

        /* -EIO (no PM capability) is tolerated: device may already be in D0 */
        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;

        /* Re-evaluate ASPM policy on the upstream link, if there is one */
        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pcie_aspm_powersave_config_link(bridge);

        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
        pci_fixup_device(pci_fixup_enable, dev);

        /* With MSI/MSI-X in use, the INTx disable bit doesn't matter */
        if (dev->msi_enabled || dev->msix_enabled)
                return 0;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (pin) {
                /* Clear a stale INTx-disable so legacy interrupts can fire */
                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                if (cmd & PCI_COMMAND_INTX_DISABLE)
                        pci_write_config_word(dev, PCI_COMMAND,
                                              cmd & ~PCI_COMMAND_INTX_DISABLE);
        }

        return 0;
}
1279
/* Non-zero iff pci_enable_device() calls currently outnumber disables */
int pci_is_enabled(struct pci_dev *pdev)
{
        return (atomic_read(&pdev->enable_cnt) > 0);
}
EXPORT_SYMBOL(pci_is_enabled);
1285
1286/**
1287 * pci_reenable_device - Resume abandoned device
1288 * @dev: PCI device to be resumed
1289 *
1290 *  Note this function is a backend of pci_default_resume and is not supposed
1291 *  to be called by normal code, write proper resume handler and use it instead.
1292 */
1293int pci_reenable_device(struct pci_dev *dev)
1294{
1295        if (pci_is_enabled(dev))
1296                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1297        return 0;
1298}
1299EXPORT_SYMBOL(pci_reenable_device);
1300
/*
 * Recursively enable @dev and every bridge above it, and turn on bus
 * mastering so transactions from devices below can be forwarded.
 */
static void pci_enable_bridge(struct pci_dev *dev)
{
        struct pci_dev *bridge;
        int retval;

        /* Walk upwards first: upstream bridges must be enabled before us */
        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        if (pci_is_enabled(dev)) {
                if (!dev->is_busmaster)
                        pci_set_master(dev);
                return;
        }

        retval = pci_enable_device(dev);
        if (retval)
                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
                        retval);
        /* Best effort: set bus mastering even if the enable failed */
        pci_set_master(dev);
}
1322
/*
 * Enable @dev and the resources whose flags intersect @flags
 * (IORESOURCE_IO and/or IORESOURCE_MEM).  Reference counted via
 * dev->enable_cnt: only the first caller touches the hardware.
 */
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
        struct pci_dev *bridge;
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        if (atomic_inc_return(&dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* Bridges above must be enabled (and bus-mastering) before us */
        bridge = pci_upstream_bridge(dev);
        if (bridge)
                pci_enable_bridge(bridge);

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                atomic_dec(&dev->enable_cnt);
        return err;
}
1361
1362/**
1363 * pci_enable_device_io - Initialize a device for use with IO space
1364 * @dev: PCI device to be initialized
1365 *
1366 *  Initialize device before it's used by a driver. Ask low-level code
1367 *  to enable I/O resources. Wake up the device if it was suspended.
1368 *  Beware, this function can fail.
1369 */
1370int pci_enable_device_io(struct pci_dev *dev)
1371{
1372        return pci_enable_device_flags(dev, IORESOURCE_IO);
1373}
1374EXPORT_SYMBOL(pci_enable_device_io);
1375
1376/**
1377 * pci_enable_device_mem - Initialize a device for use with Memory space
1378 * @dev: PCI device to be initialized
1379 *
1380 *  Initialize device before it's used by a driver. Ask low-level code
1381 *  to enable Memory resources. Wake up the device if it was suspended.
1382 *  Beware, this function can fail.
1383 */
1384int pci_enable_device_mem(struct pci_dev *dev)
1385{
1386        return pci_enable_device_flags(dev, IORESOURCE_MEM);
1387}
1388EXPORT_SYMBOL(pci_enable_device_mem);
1389
1390/**
1391 * pci_enable_device - Initialize device before it's used by a driver.
1392 * @dev: PCI device to be initialized
1393 *
1394 *  Initialize device before it's used by a driver. Ask low-level code
1395 *  to enable I/O and memory. Wake up the device if it was suspended.
1396 *  Beware, this function can fail.
1397 *
1398 *  Note we don't actually enable the device many times if we call
1399 *  this function repeatedly (we just increment the count).
1400 */
1401int pci_enable_device(struct pci_dev *dev)
1402{
1403        return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1404}
1405EXPORT_SYMBOL(pci_enable_device);
1406
1407/*
1408 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1409 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1410 * there's no need to track it separately.  pci_devres is initialized
1411 * when a device is enabled using managed PCI device enable interface.
1412 */
1413struct pci_devres {
1414        unsigned int enabled:1;
1415        unsigned int pinned:1;
1416        unsigned int orig_intx:1;
1417        unsigned int restore_intx:1;
1418        u32 region_mask;
1419};
1420
1421static void pcim_release(struct device *gendev, void *res)
1422{
1423        struct pci_dev *dev = to_pci_dev(gendev);
1424        struct pci_devres *this = res;
1425        int i;
1426
1427        if (dev->msi_enabled)
1428                pci_disable_msi(dev);
1429        if (dev->msix_enabled)
1430                pci_disable_msix(dev);
1431
1432        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1433                if (this->region_mask & (1 << i))
1434                        pci_release_region(dev, i);
1435
1436        if (this->restore_intx)
1437                pci_intx(dev, this->orig_intx);
1438
1439        if (this->enabled && !this->pinned)
1440                pci_disable_device(dev);
1441}
1442
1443static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1444{
1445        struct pci_devres *dr, *new_dr;
1446
1447        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1448        if (dr)
1449                return dr;
1450
1451        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1452        if (!new_dr)
1453                return NULL;
1454        return devres_get(&pdev->dev, new_dr, NULL, NULL);
1455}
1456
1457static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1458{
1459        if (pci_is_managed(pdev))
1460                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1461        return NULL;
1462}
1463
1464/**
1465 * pcim_enable_device - Managed pci_enable_device()
1466 * @pdev: PCI device to be initialized
1467 *
1468 * Managed pci_enable_device().
1469 */
1470int pcim_enable_device(struct pci_dev *pdev)
1471{
1472        struct pci_devres *dr;
1473        int rc;
1474
1475        dr = get_pci_dr(pdev);
1476        if (unlikely(!dr))
1477                return -ENOMEM;
1478        if (dr->enabled)
1479                return 0;
1480
1481        rc = pci_enable_device(pdev);
1482        if (!rc) {
1483                pdev->is_managed = 1;
1484                dr->enabled = 1;
1485        }
1486        return rc;
1487}
1488EXPORT_SYMBOL(pcim_enable_device);
1489
1490/**
1491 * pcim_pin_device - Pin managed PCI device
1492 * @pdev: PCI device to pin
1493 *
1494 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1495 * driver detach.  @pdev must have been enabled with
1496 * pcim_enable_device().
1497 */
1498void pcim_pin_device(struct pci_dev *pdev)
1499{
1500        struct pci_devres *dr;
1501
1502        dr = find_pci_dr(pdev);
1503        WARN_ON(!dr || !dr->enabled);
1504        if (dr)
1505                dr->pinned = 1;
1506}
1507EXPORT_SYMBOL(pcim_pin_device);
1508
1509/*
1510 * pcibios_add_device - provide arch specific hooks when adding device dev
1511 * @dev: the PCI device being added
1512 *
1513 * Permits the platform to provide architecture specific functionality when
1514 * devices are added. This is the default implementation. Architecture
1515 * implementations can override this.
1516 */
1517int __weak pcibios_add_device(struct pci_dev *dev)
1518{
1519        return 0;
1520}
1521
1522/**
1523 * pcibios_release_device - provide arch specific hooks when releasing device dev
1524 * @dev: the PCI device being released
1525 *
1526 * Permits the platform to provide architecture specific functionality when
1527 * devices are released. This is the default implementation. Architecture
1528 * implementations can override this.
1529 */
1530void __weak pcibios_release_device(struct pci_dev *dev) {}
1531
1532/**
1533 * pcibios_disable_device - disable arch specific PCI resources for device dev
1534 * @dev: the PCI device to disable
1535 *
1536 * Disables architecture specific PCI resources for the device. This
1537 * is the default implementation. Architecture implementations can
1538 * override this.
1539 */
1540void __weak pcibios_disable_device(struct pci_dev *dev) {}
1541
1542/**
1543 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1544 * @irq: ISA IRQ to penalize
1545 * @active: IRQ active or not
1546 *
1547 * Permits the platform to provide architecture-specific functionality when
1548 * penalizing ISA IRQs. This is the default implementation. Architecture
1549 * implementations can override this.
1550 */
1551void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1552
1553static void do_pci_disable_device(struct pci_dev *dev)
1554{
1555        u16 pci_command;
1556
1557        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1558        if (pci_command & PCI_COMMAND_MASTER) {
1559                pci_command &= ~PCI_COMMAND_MASTER;
1560                pci_write_config_word(dev, PCI_COMMAND, pci_command);
1561        }
1562
1563        pcibios_disable_device(dev);
1564}
1565
1566/**
1567 * pci_disable_enabled_device - Disable device without updating enable_cnt
1568 * @dev: PCI device to disable
1569 *
1570 * NOTE: This function is a backend of PCI power management routines and is
1571 * not supposed to be called drivers.
1572 */
1573void pci_disable_enabled_device(struct pci_dev *dev)
1574{
1575        if (pci_is_enabled(dev))
1576                do_pci_disable_device(dev);
1577}
1578
1579/**
1580 * pci_disable_device - Disable PCI device after use
1581 * @dev: PCI device to be disabled
1582 *
1583 * Signal to the system that the PCI device is not in use by the system
1584 * anymore.  This only involves disabling PCI bus-mastering, if active.
1585 *
1586 * Note we don't actually disable the device until all callers of
1587 * pci_enable_device() have called pci_disable_device().
1588 */
1589void pci_disable_device(struct pci_dev *dev)
1590{
1591        struct pci_devres *dr;
1592
1593        dr = find_pci_dr(dev);
1594        if (dr)
1595                dr->enabled = 0;
1596
1597        dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1598                      "disabling already-disabled device");
1599
1600        if (atomic_dec_return(&dev->enable_cnt) != 0)
1601                return;
1602
1603        do_pci_disable_device(dev);
1604
1605        dev->is_busmaster = 0;
1606}
1607EXPORT_SYMBOL(pci_disable_device);
1608
1609/**
1610 * pcibios_set_pcie_reset_state - set reset state for device dev
1611 * @dev: the PCIe device reset
1612 * @state: Reset state to enter into
1613 *
1614 *
1615 * Sets the PCIe reset state for the device. This is the default
1616 * implementation. Architecture implementations can override this.
1617 */
1618int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1619                                        enum pcie_reset_state state)
1620{
1621        return -EINVAL;
1622}
1623
1624/**
1625 * pci_set_pcie_reset_state - set reset state for device dev
1626 * @dev: the PCIe device reset
1627 * @state: Reset state to enter into
1628 *
1629 *
1630 * Sets the PCI reset state for the device.
1631 */
1632int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1633{
1634        return pcibios_set_pcie_reset_state(dev, state);
1635}
1636EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1637
1638/**
1639 * pci_check_pme_status - Check if given device has generated PME.
1640 * @dev: Device to check.
1641 *
1642 * Check the PME status of the device and if set, clear it and clear PME enable
1643 * (if set).  Return 'true' if PME status and PME enable were both set or
1644 * 'false' otherwise.
1645 */
1646bool pci_check_pme_status(struct pci_dev *dev)
1647{
1648        int pmcsr_pos;
1649        u16 pmcsr;
1650        bool ret = false;
1651
1652        if (!dev->pm_cap)
1653                return false;
1654
1655        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1656        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1657        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1658                return false;
1659
1660        /* Clear PME status. */
1661        pmcsr |= PCI_PM_CTRL_PME_STATUS;
1662        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1663                /* Disable PME to avoid interrupt flood. */
1664                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1665                ret = true;
1666        }
1667
1668        pci_write_config_word(dev, pmcsr_pos, pmcsr);
1669
1670        return ret;
1671}
1672
1673/**
1674 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1675 * @dev: Device to handle.
1676 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1677 *
1678 * Check if @dev has generated PME and queue a resume request for it in that
1679 * case.
1680 */
1681static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1682{
1683        if (pme_poll_reset && dev->pme_poll)
1684                dev->pme_poll = false;
1685
1686        if (pci_check_pme_status(dev)) {
1687                pci_wakeup_event(dev);
1688                pm_request_resume(&dev->dev);
1689        }
1690        return 0;
1691}
1692
1693/**
1694 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1695 * @bus: Top bus of the subtree to walk.
1696 */
1697void pci_pme_wakeup_bus(struct pci_bus *bus)
1698{
1699        if (bus)
1700                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1701}
1702
1703
1704/**
1705 * pci_pme_capable - check the capability of PCI device to generate PME#
1706 * @dev: PCI device to handle.
1707 * @state: PCI state from which device will issue PME#.
1708 */
1709bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1710{
1711        if (!dev->pm_cap)
1712                return false;
1713
1714        return !!(dev->pme_support & (1 << state));
1715}
1716EXPORT_SYMBOL(pci_pme_capable);
1717
/*
 * Periodic worker: poll the PME status of devices on pci_pme_list whose
 * PME# signalling is unreliable.  Runs under pci_pme_list_mutex.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
        struct pci_pme_device *pme_dev, *n;

        mutex_lock(&pci_pme_list_mutex);
        list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
                if (pme_dev->dev->pme_poll) {
                        struct pci_dev *bridge;

                        bridge = pme_dev->dev->bus->self;
                        /*
                         * If bridge is in low power state, the
                         * configuration space of subordinate devices
                         * may be not accessible
                         */
                        if (bridge && bridge->current_state != PCI_D0)
                                continue;
                        pci_pme_wakeup(pme_dev->dev, NULL);
                } else {
                        /* Polling no longer requested: drop the entry */
                        list_del(&pme_dev->list);
                        kfree(pme_dev);
                }
        }
        /* Re-arm the work only while there is something left to poll */
        if (!list_empty(&pci_pme_list))
                schedule_delayed_work(&pci_pme_work,
                                      msecs_to_jiffies(PME_TIMEOUT));
        mutex_unlock(&pci_pme_list_mutex);
}
1746
1747/**
1748 * pci_pme_active - enable or disable PCI device's PME# function
1749 * @dev: PCI device to handle.
1750 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1751 *
1752 * The caller must verify that the device is capable of generating PME# before
1753 * calling this function with @enable equal to 'true'.
1754 */
1755void pci_pme_active(struct pci_dev *dev, bool enable)
1756{
1757        u16 pmcsr;
1758
1759        if (!dev->pme_support)
1760                return;
1761
1762        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1763        /* Clear PME_Status by writing 1 to it and enable PME# */
1764        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1765        if (!enable)
1766                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1767
1768        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1769
1770        /*
1771         * PCI (as opposed to PCIe) PME requires that the device have
1772         * its PME# line hooked up correctly. Not all hardware vendors
1773         * do this, so the PME never gets delivered and the device
1774         * remains asleep. The easiest way around this is to
1775         * periodically walk the list of suspended devices and check
1776         * whether any have their PME flag set. The assumption is that
1777         * we'll wake up often enough anyway that this won't be a huge
1778         * hit, and the power savings from the devices will still be a
1779         * win.
1780         *
1781         * Although PCIe uses in-band PME message instead of PME# line
1782         * to report PME, PME does not work for some PCIe devices in
1783         * reality.  For example, there are devices that set their PME
1784         * status bits, but don't really bother to send a PME message;
1785         * there are PCI Express Root Ports that don't bother to
1786         * trigger interrupts when they receive PME messages from the
1787         * devices below.  So PME poll is used for PCIe devices too.
1788         */
1789
1790        if (dev->pme_poll) {
1791                struct pci_pme_device *pme_dev;
1792                if (enable) {
1793                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
1794                                          GFP_KERNEL);
1795                        if (!pme_dev) {
1796                                dev_warn(&dev->dev, "can't enable PME#\n");
1797                                return;
1798                        }
1799                        pme_dev->dev = dev;
1800                        mutex_lock(&pci_pme_list_mutex);
1801                        list_add(&pme_dev->list, &pci_pme_list);
1802                        if (list_is_singular(&pci_pme_list))
1803                                schedule_delayed_work(&pci_pme_work,
1804                                                      msecs_to_jiffies(PME_TIMEOUT));
1805                        mutex_unlock(&pci_pme_list_mutex);
1806                } else {
1807                        mutex_lock(&pci_pme_list_mutex);
1808                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
1809                                if (pme_dev->dev == dev) {
1810                                        list_del(&pme_dev->list);
1811                                        kfree(pme_dev);
1812                                        break;
1813                                }
1814                        }
1815                        mutex_unlock(&pci_pme_list_mutex);
1816                }
1817        }
1818
1819        dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1820}
1821EXPORT_SYMBOL(pci_pme_active);
1822
/**
 * pci_enable_wake - change wakeup settings for a PCI device
 * @dev: Target PCI device
 * @state: PCI state from which the device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * Convenience wrapper around __pci_enable_wake() for system sleep
 * (i.e. non-runtime) wake-up configuration.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
	return __pci_enable_wake(dev, state, false, enable);
}
EXPORT_SYMBOL(pci_enable_wake);
1828
1829/**
1830 * __pci_enable_wake - enable PCI device as wakeup event source
1831 * @dev: PCI device affected
1832 * @state: PCI state from which device will issue wakeup events
1833 * @runtime: True if the events are to be generated at run time
1834 * @enable: True to enable event generation; false to disable
1835 *
1836 * This enables the device as a wakeup event source, or disables it.
1837 * When such events involves platform-specific hooks, those hooks are
1838 * called automatically by this routine.
1839 *
1840 * Devices with legacy power management (no standard PCI PM capabilities)
1841 * always require such platform hooks.
1842 *
1843 * RETURN VALUE:
1844 * 0 is returned on success
1845 * -EINVAL is returned if device is not supposed to wake up the system
1846 * Error code depending on the platform is returned if both the platform and
1847 * the native mechanism fail to enable the generation of wake-up events
1848 */
1849int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1850                      bool runtime, bool enable)
1851{
1852        int ret = 0;
1853
1854        if (enable && !runtime && !device_may_wakeup(&dev->dev))
1855                return -EINVAL;
1856
1857        /* Don't do the same thing twice in a row for one device. */
1858        if (!!enable == !!dev->wakeup_prepared)
1859                return 0;
1860
1861        /*
1862         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1863         * Anderson we should be doing PME# wake enable followed by ACPI wake
1864         * enable.  To disable wake-up we call the platform first, for symmetry.
1865         */
1866
1867        if (enable) {
1868                int error;
1869
1870                if (pci_pme_capable(dev, state))
1871                        pci_pme_active(dev, true);
1872                else
1873                        ret = 1;
1874                error = runtime ? platform_pci_run_wake(dev, true) :
1875                                        platform_pci_sleep_wake(dev, true);
1876                if (ret)
1877                        ret = error;
1878                if (!ret)
1879                        dev->wakeup_prepared = true;
1880        } else {
1881                if (runtime)
1882                        platform_pci_run_wake(dev, false);
1883                else
1884                        platform_pci_sleep_wake(dev, false);
1885                pci_pme_active(dev, false);
1886                dev->wakeup_prepared = false;
1887        }
1888
1889        return ret;
1890}
1891EXPORT_SYMBOL(__pci_enable_wake);
1892
1893/**
1894 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1895 * @dev: PCI device to prepare
1896 * @enable: True to enable wake-up event generation; false to disable
1897 *
1898 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1899 * and this function allows them to set that up cleanly - pci_enable_wake()
1900 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1901 * ordering constraints.
1902 *
1903 * This function only returns error code if the device is not capable of
1904 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1905 * enable wake-up power for it.
1906 */
1907int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1908{
1909        return pci_pme_capable(dev, PCI_D3cold) ?
1910                        pci_enable_wake(dev, PCI_D3cold, enable) :
1911                        pci_enable_wake(dev, PCI_D3hot, enable);
1912}
1913EXPORT_SYMBOL(pci_wake_from_d3);
1914
1915/**
1916 * pci_target_state - find an appropriate low power state for a given PCI dev
1917 * @dev: PCI device
1918 *
1919 * Use underlying platform code to find a supported low power state for @dev.
1920 * If the platform can't manage @dev, return the deepest state from which it
1921 * can generate wake events, based on any available PME info.
1922 */
1923static pci_power_t pci_target_state(struct pci_dev *dev)
1924{
1925        pci_power_t target_state = PCI_D3hot;
1926
1927        if (platform_pci_power_manageable(dev)) {
1928                /*
1929                 * Call the platform to choose the target state of the device
1930                 * and enable wake-up from this state if supported.
1931                 */
1932                pci_power_t state = platform_pci_choose_state(dev);
1933
1934                switch (state) {
1935                case PCI_POWER_ERROR:
1936                case PCI_UNKNOWN:
1937                        break;
1938                case PCI_D1:
1939                case PCI_D2:
1940                        if (pci_no_d1d2(dev))
1941                                break;
1942                default:
1943                        target_state = state;
1944                }
1945        } else if (!dev->pm_cap) {
1946                target_state = PCI_D0;
1947        } else if (device_may_wakeup(&dev->dev)) {
1948                /*
1949                 * Find the deepest state from which the device can generate
1950                 * wake-up events, make it the target state and enable device
1951                 * to generate PME#.
1952                 */
1953                if (dev->pme_support) {
1954                        while (target_state
1955                              && !(dev->pme_support & (1 << target_state)))
1956                                target_state--;
1957                }
1958        }
1959
1960        return target_state;
1961}
1962
1963/**
1964 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1965 * @dev: Device to handle.
1966 *
1967 * Choose the power state appropriate for the device depending on whether
1968 * it can wake up the system and/or is power manageable by the platform
1969 * (PCI_D3hot is the default) and put the device into that state.
1970 */
1971int pci_prepare_to_sleep(struct pci_dev *dev)
1972{
1973        pci_power_t target_state = pci_target_state(dev);
1974        int error;
1975
1976        if (target_state == PCI_POWER_ERROR)
1977                return -EIO;
1978
1979        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1980
1981        error = pci_set_power_state(dev, target_state);
1982
1983        if (error)
1984                pci_enable_wake(dev, target_state, false);
1985
1986        return error;
1987}
1988EXPORT_SYMBOL(pci_prepare_to_sleep);
1989
1990/**
1991 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1992 * @dev: Device to handle.
1993 *
1994 * Disable device's system wake-up capability and put it into D0.
1995 */
1996int pci_back_from_sleep(struct pci_dev *dev)
1997{
1998        pci_enable_wake(dev, PCI_D0, false);
1999        return pci_set_power_state(dev, PCI_D0);
2000}
2001EXPORT_SYMBOL(pci_back_from_sleep);
2002
2003/**
2004 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2005 * @dev: PCI device being suspended.
2006 *
2007 * Prepare @dev to generate wake-up events at run time and put it into a low
2008 * power state.
2009 */
2010int pci_finish_runtime_suspend(struct pci_dev *dev)
2011{
2012        pci_power_t target_state = pci_target_state(dev);
2013        int error;
2014
2015        if (target_state == PCI_POWER_ERROR)
2016                return -EIO;
2017
2018        dev->runtime_d3cold = target_state == PCI_D3cold;
2019
2020        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
2021
2022        error = pci_set_power_state(dev, target_state);
2023
2024        if (error) {
2025                __pci_enable_wake(dev, target_state, true, false);
2026                dev->runtime_d3cold = false;
2027        }
2028
2029        return error;
2030}
2031
2032/**
2033 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2034 * @dev: Device to check.
2035 *
2036 * Return true if the device itself is capable of generating wake-up events
2037 * (through the platform or using the native PCIe PME) or if the device supports
2038 * PME and one of its upstream bridges can generate wake-up events.
2039 */
2040bool pci_dev_run_wake(struct pci_dev *dev)
2041{
2042        struct pci_bus *bus = dev->bus;
2043
2044        if (device_run_wake(&dev->dev))
2045                return true;
2046
2047        if (!dev->pme_support)
2048                return false;
2049
2050        while (bus->parent) {
2051                struct pci_dev *bridge = bus->self;
2052
2053                if (device_run_wake(&bridge->dev))
2054                        return true;
2055
2056                bus = bus->parent;
2057        }
2058
2059        /* We have reached the root bus. */
2060        if (bus->bridge)
2061                return device_run_wake(bus->bridge);
2062
2063        return false;
2064}
2065EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2066
/**
 * pci_config_pm_runtime_get - ensure config space is accessible
 * @pdev: PCI device whose config space is about to be accessed
 *
 * Take runtime PM references on @pdev (and its parent) and resume the
 * device if it is in D3cold, so that subsequent config space accesses
 * work.  Balanced by pci_config_pm_runtime_put().
 */
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}
2088
/**
 * pci_config_pm_runtime_put - drop references taken by pci_config_pm_runtime_get
 * @pdev: PCI device whose config space access has finished
 *
 * Release the runtime PM references on @pdev and its parent in the
 * reverse order of pci_config_pm_runtime_get().
 */
void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}
2098
2099/**
2100 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2101 * @bridge: Bridge to check
2102 *
2103 * This function checks if it is possible to move the bridge to D3.
2104 * Currently we only allow D3 for recent enough PCIe ports.
2105 */
2106bool pci_bridge_d3_possible(struct pci_dev *bridge)
2107{
2108        unsigned int year;
2109
2110        if (!pci_is_pcie(bridge))
2111                return false;
2112
2113        switch (pci_pcie_type(bridge)) {
2114        case PCI_EXP_TYPE_ROOT_PORT:
2115        case PCI_EXP_TYPE_UPSTREAM:
2116        case PCI_EXP_TYPE_DOWNSTREAM:
2117                if (pci_bridge_d3_disable)
2118                        return false;
2119
2120                /*
2121                 * Hotplug interrupts cannot be delivered if the link is down,
2122                 * so parents of a hotplug port must stay awake. In addition,
2123                 * hotplug ports handled by firmware in System Management Mode
2124                 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2125                 * For simplicity, disallow in general for now.
2126                 */
2127                if (bridge->is_hotplug_bridge)
2128                        return false;
2129
2130                if (pci_bridge_d3_force)
2131                        return true;
2132
2133                /*
2134                 * It should be safe to put PCIe ports from 2015 or newer
2135                 * to D3.
2136                 */
2137                if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
2138                    year >= 2015) {
2139                        return true;
2140                }
2141                break;
2142        }
2143
2144        return false;
2145}
2146
2147static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2148{
2149        bool *d3cold_ok = data;
2150
2151        if (/* The device needs to be allowed to go D3cold ... */
2152            dev->no_d3cold || !dev->d3cold_allowed ||
2153
2154            /* ... and if it is wakeup capable to do so from D3cold. */
2155            (device_may_wakeup(&dev->dev) &&
2156             !pci_pme_capable(dev, PCI_D3cold)) ||
2157
2158            /* If it is a bridge it must be allowed to go to D3. */
2159            !pci_power_manageable(dev))
2160
2161                *d3cold_ok = false;
2162
2163        return !*d3cold_ok;
2164}
2165
2166/*
2167 * pci_bridge_d3_update - Update bridge D3 capabilities
2168 * @dev: PCI device which is changed
2169 *
2170 * Update upstream bridge PM capabilities accordingly depending on if the
2171 * device PM configuration was changed or the device is being removed.  The
2172 * change is also propagated upstream.
2173 */
2174void pci_bridge_d3_update(struct pci_dev *dev)
2175{
2176        bool remove = !device_is_registered(&dev->dev);
2177        struct pci_dev *bridge;
2178        bool d3cold_ok = true;
2179
2180        bridge = pci_upstream_bridge(dev);
2181        if (!bridge || !pci_bridge_d3_possible(bridge))
2182                return;
2183
2184        /*
2185         * If D3 is currently allowed for the bridge, removing one of its
2186         * children won't change that.
2187         */
2188        if (remove && bridge->bridge_d3)
2189                return;
2190
2191        /*
2192         * If D3 is currently allowed for the bridge and a child is added or
2193         * changed, disallowance of D3 can only be caused by that child, so
2194         * we only need to check that single device, not any of its siblings.
2195         *
2196         * If D3 is currently not allowed for the bridge, checking the device
2197         * first may allow us to skip checking its siblings.
2198         */
2199        if (!remove)
2200                pci_dev_check_d3cold(dev, &d3cold_ok);
2201
2202        /*
2203         * If D3 is currently not allowed for the bridge, this may be caused
2204         * either by the device being changed/removed or any of its siblings,
2205         * so we need to go through all children to find out if one of them
2206         * continues to block D3.
2207         */
2208        if (d3cold_ok && !bridge->bridge_d3)
2209                pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2210                             &d3cold_ok);
2211
2212        if (bridge->bridge_d3 != d3cold_ok) {
2213                bridge->bridge_d3 = d3cold_ok;
2214                /* Propagate change to upstream bridges */
2215                pci_bridge_d3_update(bridge);
2216        }
2217}
2218
2219/**
2220 * pci_d3cold_enable - Enable D3cold for device
2221 * @dev: PCI device to handle
2222 *
2223 * This function can be used in drivers to enable D3cold from the device
2224 * they handle.  It also updates upstream PCI bridge PM capabilities
2225 * accordingly.
2226 */
2227void pci_d3cold_enable(struct pci_dev *dev)
2228{
2229        if (dev->no_d3cold) {
2230                dev->no_d3cold = false;
2231                pci_bridge_d3_update(dev);
2232        }
2233}
2234EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2235
2236/**
2237 * pci_d3cold_disable - Disable D3cold for device
2238 * @dev: PCI device to handle
2239 *
2240 * This function can be used in drivers to disable D3cold from the device
2241 * they handle.  It also updates upstream PCI bridge PM capabilities
2242 * accordingly.
2243 */
2244void pci_d3cold_disable(struct pci_dev *dev)
2245{
2246        if (!dev->no_d3cold) {
2247                dev->no_d3cold = true;
2248                pci_bridge_d3_update(dev);
2249        }
2250}
2251EXPORT_SYMBOL_GPL(pci_d3cold_disable);
2252
2253/**
2254 * pci_pm_init - Initialize PM functions of given PCI device
2255 * @dev: PCI device to handle.
2256 */
2257void pci_pm_init(struct pci_dev *dev)
2258{
2259        int pm;
2260        u16 pmc;
2261
2262        pm_runtime_forbid(&dev->dev);
2263        pm_runtime_set_active(&dev->dev);
2264        pm_runtime_enable(&dev->dev);
2265        device_enable_async_suspend(&dev->dev);
2266        dev->wakeup_prepared = false;
2267
2268        dev->pm_cap = 0;
2269        dev->pme_support = 0;
2270
2271        /* find PCI PM capability in list */
2272        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2273        if (!pm)
2274                return;
2275        /* Check device's ability to generate PME# */
2276        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2277
2278        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2279                dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2280                        pmc & PCI_PM_CAP_VER_MASK);
2281                return;
2282        }
2283
2284        dev->pm_cap = pm;
2285        dev->d3_delay = PCI_PM_D3_WAIT;
2286        dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2287        dev->bridge_d3 = pci_bridge_d3_possible(dev);
2288        dev->d3cold_allowed = true;
2289
2290        dev->d1_support = false;
2291        dev->d2_support = false;
2292        if (!pci_no_d1d2(dev)) {
2293                if (pmc & PCI_PM_CAP_D1)
2294                        dev->d1_support = true;
2295                if (pmc & PCI_PM_CAP_D2)
2296                        dev->d2_support = true;
2297
2298                if (dev->d1_support || dev->d2_support)
2299                        dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2300                                   dev->d1_support ? " D1" : "",
2301                                   dev->d2_support ? " D2" : "");
2302        }
2303
2304        pmc &= PCI_PM_CAP_PME_MASK;
2305        if (pmc) {
2306                dev_printk(KERN_DEBUG, &dev->dev,
2307                         "PME# supported from%s%s%s%s%s\n",
2308                         (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2309                         (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2310                         (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2311                         (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2312                         (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2313                dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2314                dev->pme_poll = true;
2315                /*
2316                 * Make device's PM flags reflect the wake-up capability, but
2317                 * let the user space enable it to wake up the system as needed.
2318                 */
2319                device_set_wakeup_capable(&dev->dev, true);
2320                /* Disable the PME# generation functionality */
2321                pci_pme_active(dev, false);
2322        }
2323}
2324
2325static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2326{
2327        unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2328
2329        switch (prop) {
2330        case PCI_EA_P_MEM:
2331        case PCI_EA_P_VF_MEM:
2332                flags |= IORESOURCE_MEM;
2333                break;
2334        case PCI_EA_P_MEM_PREFETCH:
2335        case PCI_EA_P_VF_MEM_PREFETCH:
2336                flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2337                break;
2338        case PCI_EA_P_IO:
2339                flags |= IORESOURCE_IO;
2340                break;
2341        default:
2342                return 0;
2343        }
2344
2345        return flags;
2346}
2347
2348static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2349                                            u8 prop)
2350{
2351        if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2352                return &dev->resource[bei];
2353#ifdef CONFIG_PCI_IOV
2354        else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2355                 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2356                return &dev->resource[PCI_IOV_RESOURCES +
2357                                      bei - PCI_EA_BEI_VF_BAR0];
2358#endif
2359        else if (bei == PCI_EA_BEI_ROM)
2360                return &dev->resource[PCI_ROM_RESOURCE];
2361        else
2362                return NULL;
2363}
2364
/*
 * Read an Enhanced Allocation (EA) entry starting at config offset @offset
 * and, if valid, fill in the corresponding struct resource of @dev.
 * Returns the config offset of the next EA entry.
 */
static int pci_ea_read(struct pci_dev *dev, int offset)
{
	struct resource *res;
	int ent_size, ent_offset = offset;
	resource_size_t start, end;
	unsigned long flags;
	u32 dw0, bei, base, max_offset;
	u8 prop;
	bool support_64 = (sizeof(resource_size_t) >= 8);

	pci_read_config_dword(dev, ent_offset, &dw0);
	ent_offset += 4;

	/* Entry size field indicates DWORDs after 1st */
	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;

	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
		goto out;

	bei = (dw0 & PCI_EA_BEI) >> 4;
	prop = (dw0 & PCI_EA_PP) >> 8;

	/*
	 * If the Property is in the reserved range, try the Secondary
	 * Property instead.
	 */
	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
		prop = (dw0 & PCI_EA_SP) >> 16;
	if (prop > PCI_EA_P_BRIDGE_IO)
		goto out;

	res = pci_ea_get_resource(dev, bei, prop);
	if (!res) {
		dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
		goto out;
	}

	flags = pci_ea_flags(dev, prop);
	if (!flags) {
		dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
		goto out;
	}

	/* Read Base */
	pci_read_config_dword(dev, ent_offset, &base);
	start = (base & PCI_EA_FIELD_MASK);
	ent_offset += 4;

	/* Read MaxOffset */
	pci_read_config_dword(dev, ent_offset, &max_offset);
	ent_offset += 4;

	/* Read Base MSBs (if 64-bit entry) */
	if (base & PCI_EA_IS_64) {
		u32 base_upper;

		pci_read_config_dword(dev, ent_offset, &base_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry starts above 32-bit boundary, can't use */
		if (!support_64 && base_upper)
			goto out;

		if (support_64)
			start |= ((u64)base_upper << 32);
	}

	/* Low two MaxOffset bits are reserved; treated as set per EA layout. */
	end = start + (max_offset | 0x03);

	/* Read MaxOffset MSBs (if 64-bit entry) */
	if (max_offset & PCI_EA_IS_64) {
		u32 max_offset_upper;

		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
		ent_offset += 4;

		flags |= IORESOURCE_MEM_64;

		/* entry too big, can't use */
		if (!support_64 && max_offset_upper)
			goto out;

		if (support_64)
			end += ((u64)max_offset_upper << 32);
	}

	/* end wrapped around: the entry describes an impossible range. */
	if (end < start) {
		dev_err(&dev->dev, "EA Entry crosses address boundary\n");
		goto out;
	}

	/* Sanity check: DWORDs consumed must match the advertised size. */
	if (ent_size != ent_offset - offset) {
		dev_err(&dev->dev,
			"EA Entry Size (%d) does not match length read (%d)\n",
			ent_size, ent_offset - offset);
		goto out;
	}

	res->name = pci_name(dev);
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (bei <= PCI_EA_BEI_BAR5)
		dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);
	else if (bei == PCI_EA_BEI_ROM)
		dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
			   res, prop);
	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
		dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
	else
		dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
			   bei, res, prop);

out:
	return offset + ent_size;
}
2487
2488/* Enhanced Allocation Initialization */
2489void pci_ea_init(struct pci_dev *dev)
2490{
2491        int ea;
2492        u8 num_ent;
2493        int offset;
2494        int i;
2495
2496        /* find PCI EA capability in list */
2497        ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2498        if (!ea)
2499                return;
2500
2501        /* determine the number of entries */
2502        pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2503                                        &num_ent);
2504        num_ent &= PCI_EA_NUM_ENT_MASK;
2505
2506        offset = ea + PCI_EA_FIRST_ENT;
2507
2508        /* Skip DWORD 2 for type 1 functions */
2509        if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2510                offset += 4;
2511
2512        /* parse each EA entry */
2513        for (i = 0; i < num_ent; ++i)
2514                offset = pci_ea_read(dev, offset);
2515}
2516
/* Link a saved-capability buffer onto the device's saved_cap_space list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
2522
2523/**
2524 * _pci_add_cap_save_buffer - allocate buffer for saving given
2525 *                            capability registers
2526 * @dev: the PCI device
2527 * @cap: the capability to allocate the buffer for
2528 * @extended: Standard or Extended capability ID
2529 * @size: requested size of the buffer
2530 */
2531static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2532                                    bool extended, unsigned int size)
2533{
2534        int pos;
2535        struct pci_cap_saved_state *save_state;
2536
2537        if (extended)
2538                pos = pci_find_ext_capability(dev, cap);
2539        else
2540                pos = pci_find_capability(dev, cap);
2541
2542        if (!pos)
2543                return 0;
2544
2545        save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2546        if (!save_state)
2547                return -ENOMEM;
2548
2549        save_state->cap.cap_nr = cap;
2550        save_state->cap.cap_extended = extended;
2551        save_state->cap.size = size;
2552        pci_add_saved_cap(dev, save_state);
2553
2554        return 0;
2555}
2556
/*
 * Allocate a save buffer for a standard (non-extended) capability.
 * NOTE(review): @cap is declared char although capability IDs are u8 and
 * _pci_add_cap_save_buffer() takes u16 — presumably historical; confirm
 * before changing the signature.
 */
int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, false, size);
}
2561
/* Allocate a save buffer for an extended capability of @dev. */
int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
{
	return _pci_add_cap_save_buffer(dev, cap, true, size);
}
2566
2567/**
2568 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2569 * @dev: the PCI device
2570 */
2571void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2572{
2573        int error;
2574
2575        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2576                                        PCI_EXP_SAVE_REGS * sizeof(u16));
2577        if (error)
2578                dev_err(&dev->dev,
2579                        "unable to preallocate PCI Express save buffer\n");
2580
2581        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2582        if (error)
2583                dev_err(&dev->dev,
2584                        "unable to preallocate PCI-X save buffer\n");
2585
2586        pci_allocate_vc_save_buffers(dev);
2587}
2588
/* Free every saved-capability buffer hanging off @dev. */
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *n;

	/* _safe variant: each entry is freed while walking the list. */
	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
2597
2598/**
2599 * pci_configure_ari - enable or disable ARI forwarding
2600 * @dev: the PCI device
2601 *
2602 * If @dev and its upstream bridge both support ARI, enable ARI in the
2603 * bridge.  Otherwise, disable ARI in the bridge.
2604 */
2605void pci_configure_ari(struct pci_dev *dev)
2606{
2607        u32 cap;
2608        struct pci_dev *bridge;
2609
2610        if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2611                return;
2612
2613        bridge = dev->bus->self;
2614        if (!bridge)
2615                return;
2616
2617        pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2618        if (!(cap & PCI_EXP_DEVCAP2_ARI))
2619                return;
2620
2621        if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2622                pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2623                                         PCI_EXP_DEVCTL2_ARI);
2624                bridge->ari_enabled = 1;
2625        } else {
2626                pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2627                                           PCI_EXP_DEVCTL2_ARI);
2628                bridge->ari_enabled = 0;
2629        }
2630}
2631
2632static int pci_acs_enable;
2633
2634/**
2635 * pci_request_acs - ask for ACS to be enabled if supported
2636 */
2637void pci_request_acs(void)
2638{
2639        pci_acs_enable = 1;
2640}
2641
2642/**
2643 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites
2644 * @dev: the PCI device
2645 */
2646static void pci_std_enable_acs(struct pci_dev *dev)
2647{
2648        int pos;
2649        u16 cap;
2650        u16 ctrl;
2651
2652        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2653        if (!pos)
2654                return;
2655
2656        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2657        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2658
2659        /* Source Validation */
2660        ctrl |= (cap & PCI_ACS_SV);
2661
2662        /* P2P Request Redirect */
2663        ctrl |= (cap & PCI_ACS_RR);
2664
2665        /* P2P Completion Redirect */
2666        ctrl |= (cap & PCI_ACS_CR);
2667
2668        /* Upstream Forwarding */
2669        ctrl |= (cap & PCI_ACS_UF);
2670
2671        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2672}
2673
2674/**
2675 * pci_enable_acs - enable ACS if hardware support it
2676 * @dev: the PCI device
2677 */
2678void pci_enable_acs(struct pci_dev *dev)
2679{
2680        if (!pci_acs_enable)
2681                return;
2682
2683        if (!pci_dev_specific_enable_acs(dev))
2684                return;
2685
2686        pci_std_enable_acs(dev);
2687}
2688
2689static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2690{
2691        int pos;
2692        u16 cap, ctrl;
2693
2694        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2695        if (!pos)
2696                return false;
2697
2698        /*
2699         * Except for egress control, capabilities are either required
2700         * or only required if controllable.  Features missing from the
2701         * capability field can therefore be assumed as hard-wired enabled.
2702         */
2703        pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2704        acs_flags &= (cap | PCI_ACS_EC);
2705
2706        pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2707        return (ctrl & acs_flags) == acs_flags;
2708}
2709
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 *
 * Note that this interface checks the effective ACS capabilities of the
 * device rather than the actual capabilities.  For instance, most single
 * function endpoints are not required to support ACS because they have no
 * opportunity for peer-to-peer access.  We therefore return 'true'
 * regardless of whether the device exposes an ACS capability.  This makes
 * it much easier for callers of this function to ignore the actual type
 * or topology of the device when testing ACS support.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
        int ret;

        /* Device-specific quirks get first say; <0 means "no opinion" */
        ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
        if (ret >= 0)
                return ret > 0;

        /*
         * Conventional PCI and PCI-X devices never support ACS, either
         * effectively or actually.  The shared bus topology implies that
         * any device on the bus can receive or snoop DMA.
         */
        if (!pci_is_pcie(pdev))
                return false;

        switch (pci_pcie_type(pdev)) {
        /*
         * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
         * but since their primary interface is PCI/X, we conservatively
         * handle them as we would a non-PCIe device.
         */
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        /*
         * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
         * applicable... must never implement an ACS Extended Capability...".
         * This seems arbitrary, but we take a conservative interpretation
         * of this statement.
         */
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
                return false;
        /*
         * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
         * implement ACS in order to indicate their peer-to-peer capabilities,
         * regardless of whether they are single- or multi-function devices.
         */
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_ROOT_PORT:
                return pci_acs_flags_enabled(pdev, acs_flags);
        /*
         * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
         * implemented by the remaining PCIe types to indicate peer-to-peer
         * capabilities, but only when they are part of a multifunction
         * device.  The footnote for section 6.12 indicates the specific
         * PCIe types included here.
         */
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
                /* Single-function: fall out of the switch and return true */
                if (!pdev->multifunction)
                        break;

                return pci_acs_flags_enabled(pdev, acs_flags);
        }

        /*
         * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
         * to single function devices with the exception of downstream ports.
         */
        return true;
}
2789
/**
 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
                          struct pci_dev *end, u16 acs_flags)
{
        struct pci_dev *pdev, *parent = start;

        do {
                pdev = parent;

                if (!pci_acs_enabled(pdev, acs_flags))
                        return false;

                /*
                 * Reaching the root bus before seeing @end only succeeds
                 * when the caller asked us to walk all the way up (NULL).
                 */
                if (pci_is_root_bus(pdev->bus))
                        return (end == NULL);

                parent = pdev->bus->self;
        } while (pdev != end);

        return true;
}
2818
2819/**
2820 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2821 * @dev: the PCI device
2822 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2823 *
2824 * Perform INTx swizzling for a device behind one level of bridge.  This is
2825 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2826 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2827 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2828 * the PCI Express Base Specification, Revision 2.1)
2829 */
2830u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2831{
2832        int slot;
2833
2834        if (pci_ari_enabled(dev->bus))
2835                slot = 0;
2836        else
2837                slot = PCI_SLOT(dev->devfn);
2838
2839        return (((pin - 1) + slot) % 4) + 1;
2840}
2841
2842int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2843{
2844        u8 pin;
2845
2846        pin = dev->pin;
2847        if (!pin)
2848                return -1;
2849
2850        while (!pci_is_root_bus(dev->bus)) {
2851                pin = pci_swizzle_interrupt_pin(dev, pin);
2852                dev = dev->bus->self;
2853        }
2854        *bridge = dev;
2855        return pin;
2856}
2857
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 *
 * Returns the slot number of the device on the root bus; the swizzled pin
 * is stored back through @pinp.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
        u8 pin = *pinp;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        *pinp = pin;
        return PCI_SLOT(dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_common_swizzle);
2878
2879/**
2880 *      pci_release_region - Release a PCI bar
2881 *      @pdev: PCI device whose resources were previously reserved by pci_request_region
2882 *      @bar: BAR to release
2883 *
2884 *      Releases the PCI I/O and memory resources previously reserved by a
2885 *      successful call to pci_request_region.  Call this function only
2886 *      after all use of the PCI regions has ceased.
2887 */
2888void pci_release_region(struct pci_dev *pdev, int bar)
2889{
2890        struct pci_devres *dr;
2891
2892        if (pci_resource_len(pdev, bar) == 0)
2893                return;
2894        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2895                release_region(pci_resource_start(pdev, bar),
2896                                pci_resource_len(pdev, bar));
2897        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2898                release_mem_region(pci_resource_start(pdev, bar),
2899                                pci_resource_len(pdev, bar));
2900
2901        dr = find_pci_dr(pdev);
2902        if (dr)
2903                dr->region_mask &= ~(1 << bar);
2904}
2905EXPORT_SYMBOL(pci_release_region);
2906
/**
 *      __pci_request_region - Reserve PCI I/O and memory resource
 *      @pdev: PCI device whose resources are to be reserved
 *      @bar: BAR to be reserved
 *      @res_name: Name to be associated with resource.
 *      @exclusive: whether the region access is exclusive or not
 *
 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
 *      being reserved by owner @res_name.  Do not access any
 *      address inside the PCI regions unless this call returns
 *      successfully.
 *
 *      If @exclusive is set, then the region is marked so that userspace
 *      is explicitly not allowed to map the resource via /dev/mem or
 *      sysfs MMIO access.
 *
 *      Returns 0 on success, or %EBUSY on error.  A warning
 *      message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar,
                                const char *res_name, int exclusive)
{
        struct pci_devres *dr;

        /* Zero-length BARs are unimplemented; treat as trivially reserved */
        if (pci_resource_len(pdev, bar) == 0)
                return 0;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
                if (!request_region(pci_resource_start(pdev, bar),
                            pci_resource_len(pdev, bar), res_name))
                        goto err_out;
        } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                if (!__request_mem_region(pci_resource_start(pdev, bar),
                                        pci_resource_len(pdev, bar), res_name,
                                        exclusive))
                        goto err_out;
        }

        /* Record the BAR in the managed-device bookkeeping, if any */
        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask |= 1 << bar;

        return 0;

err_out:
        dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
                 &pdev->resource[bar]);
        return -EBUSY;
}
2956
/**
 *      pci_request_region - Reserve PCI I/O and memory resource
 *      @pdev: PCI device whose resources are to be reserved
 *      @bar: BAR to be reserved
 *      @res_name: Name to be associated with resource
 *
 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
 *      being reserved by owner @res_name.  Do not access any
 *      address inside the PCI regions unless this call returns
 *      successfully.
 *
 *      Returns 0 on success, or %EBUSY on error.  A warning
 *      message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
        /* Non-exclusive variant: userspace may still map the resource */
        return __pci_request_region(pdev, bar, res_name, 0);
}
EXPORT_SYMBOL(pci_request_region);
2976
/**
 *      pci_request_region_exclusive - Reserve PCI I/O and memory resource
 *      @pdev: PCI device whose resources are to be reserved
 *      @bar: BAR to be reserved
 *      @res_name: Name to be associated with resource.
 *
 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
 *      being reserved by owner @res_name.  Do not access any
 *      address inside the PCI regions unless this call returns
 *      successfully.
 *
 *      Returns 0 on success, or %EBUSY on error.  A warning
 *      message is also printed on failure.
 *
 *      The key difference that _exclusive makes is that userspace is
 *      explicitly not allowed to map the resource via /dev/mem or
 *      sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
                                 const char *res_name)
{
        return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_region_exclusive);
3001
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
        int bar;

        /* Iterate over the six standard BARs (0-5) */
        for (bar = 0; bar < 6; bar++) {
                if (bars & (1 << bar))
                        pci_release_region(pdev, bar);
        }
}
EXPORT_SYMBOL(pci_release_selected_regions);
3019
3020static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3021                                          const char *res_name, int excl)
3022{
3023        int i;
3024
3025        for (i = 0; i < 6; i++)
3026                if (bars & (1 << i))
3027                        if (__pci_request_region(pdev, i, res_name, excl))
3028                                goto err_out;
3029        return 0;
3030
3031err_out:
3032        while (--i >= 0)
3033                if (bars & (1 << i))
3034                        pci_release_region(pdev, i);
3035
3036        return -EBUSY;
3037}
3038
3039
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
                                 const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
3052
/*
 * Like pci_request_selected_regions(), but additionally marks the regions
 * exclusive so userspace cannot map them via /dev/mem or sysfs.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
                                           const char *res_name)
{
        return __pci_request_selected_regions(pdev, bars, res_name,
                        IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3060
/**
 *      pci_release_regions - Release reserved PCI I/O and memory resources
 *      @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 *      Releases all PCI I/O and memory resources previously reserved by a
 *      successful call to pci_request_regions.  Call this function only
 *      after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
        /* (1 << 6) - 1 selects all six standard BARs (0-5) */
        pci_release_selected_regions(pdev, (1 << 6) - 1);
}
EXPORT_SYMBOL(pci_release_regions);
3075
/**
 *      pci_request_regions - Reserve PCI I/O and memory resources
 *      @pdev: PCI device whose resources are to be reserved
 *      @res_name: Name to be associated with resource.
 *
 *      Mark all PCI regions associated with PCI device @pdev as
 *      being reserved by owner @res_name.  Do not access any
 *      address inside the PCI regions unless this call returns
 *      successfully.
 *
 *      Returns 0 on success, or %EBUSY on error.  A warning
 *      message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
        /* (1 << 6) - 1 selects all six standard BARs (0-5) */
        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);
3094
/**
 *      pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 *      @pdev: PCI device whose resources are to be reserved
 *      @res_name: Name to be associated with resource.
 *
 *      Mark all PCI regions associated with PCI device @pdev as
 *      being reserved by owner @res_name.  Do not access any
 *      address inside the PCI regions unless this call returns
 *      successfully.
 *
 *      pci_request_regions_exclusive() will mark the region so that
 *      /dev/mem and the sysfs MMIO access will not be allowed.
 *
 *      Returns 0 on success, or %EBUSY on error.  A warning
 *      message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
        /* (1 << 6) - 1 selects all six standard BARs (0-5) */
        return pci_request_selected_regions_exclusive(pdev,
                                        ((1 << 6) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
3117
/**
 *      pci_remap_iospace - Remap the memory mapped I/O space
 *      @res: Resource describing the I/O space
 *      @phys_addr: physical address of range to be mapped
 *
 *      Remap the memory mapped I/O space described by the @res
 *      and the CPU physical address @phys_addr into virtual address space.
 *      Only architectures that have memory mapped IO functions defined
 *      (and the PCI_IOBASE value defined) should call this function.
 *
 *      Returns 0 on success, a negative errno on failure.
 */
int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
        /* I/O port offsets are linear within the PCI_IOBASE virtual window */
        unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;

        if (!(res->flags & IORESOURCE_IO))
                return -EINVAL;

        if (res->end > IO_SPACE_LIMIT)
                return -EINVAL;

        return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
                                  pgprot_device(PAGE_KERNEL));
#else
        /* this architecture does not have memory mapped I/O space,
           so this function should never be called */
        WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
        return -ENODEV;
#endif
}
3148
3149static void __pci_set_master(struct pci_dev *dev, bool enable)
3150{
3151        u16 old_cmd, cmd;
3152
3153        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3154        if (enable)
3155                cmd = old_cmd | PCI_COMMAND_MASTER;
3156        else
3157                cmd = old_cmd & ~PCI_COMMAND_MASTER;
3158        if (cmd != old_cmd) {
3159                dev_dbg(&dev->dev, "%s bus mastering\n",
3160                        enable ? "enabling" : "disabling");
3161                pci_write_config_word(dev, PCI_COMMAND, cmd);
3162        }
3163        dev->is_busmaster = enable;
3164}
3165
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 *
 * The default simply returns @str unconsumed.
 */
char * __weak __init pcibios_setup(char *str)
{
        return str;
}
3177
3178/**
3179 * pcibios_set_master - enable PCI bus-mastering for device dev
3180 * @dev: the PCI device to enable
3181 *
3182 * Enables PCI bus-mastering for the device.  This is the default
3183 * implementation.  Architecture specific implementations can override
3184 * this if necessary.
3185 */
3186void __weak pcibios_set_master(struct pci_dev *dev)
3187{
3188        u8 lat;
3189
3190        /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3191        if (pci_is_pcie(dev))
3192                return;
3193
3194        pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3195        if (lat < 16)
3196                lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3197        else if (lat > pcibios_max_latency)
3198                lat = pcibios_max_latency;
3199        else
3200                return;
3201
3202        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3203}
3204
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
        __pci_set_master(dev, true);
        pcibios_set_master(dev);
}
EXPORT_SYMBOL(pci_set_master);
3218
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 *
 * Clears the Bus Master bit; no arch-specific hook is needed on disable.
 */
void pci_clear_master(struct pci_dev *dev)
{
        __pci_set_master(dev, false);
}
EXPORT_SYMBOL(pci_clear_master);
3228
3229/**
3230 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3231 * @dev: the PCI device for which MWI is to be enabled
3232 *
3233 * Helper function for pci_set_mwi.
3234 * Originally copied from drivers/net/acenic.c.
3235 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3236 *
3237 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3238 */
3239int pci_set_cacheline_size(struct pci_dev *dev)
3240{
3241        u8 cacheline_size;
3242
3243        if (!pci_cache_line_size)
3244                return -EINVAL;
3245
3246        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3247           equal to or multiple of the right value. */
3248        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3249        if (cacheline_size >= pci_cache_line_size &&
3250            (cacheline_size % pci_cache_line_size) == 0)
3251                return 0;
3252
3253        /* Write the correct value. */
3254        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3255        /* Read it back. */
3256        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3257        if (cacheline_size == pci_cache_line_size)
3258                return 0;
3259
3260        dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3261                   pci_cache_line_size << 2);
3262
3263        return -EINVAL;
3264}
3265EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3266
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
        /* Arch opted out of MWI entirely; report success without touching HW */
        return 0;
#else
        int rc;
        u16 cmd;

        /* MWI requires a correctly programmed cache line size register */
        rc = pci_set_cacheline_size(dev);
        if (rc)
                return rc;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_INVALIDATE)) {
                dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
                cmd |= PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
#endif
}
EXPORT_SYMBOL(pci_set_mwi);
3297
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
#ifdef PCI_DISABLE_MWI
        return 0;
#else
        return pci_set_mwi(dev);
#endif
}
EXPORT_SYMBOL(pci_try_set_mwi);
3316
3317/**
3318 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3319 * @dev: the PCI device to disable
3320 *
3321 * Disables PCI Memory-Write-Invalidate transaction on the device
3322 */
3323void pci_clear_mwi(struct pci_dev *dev)
3324{
3325#ifndef PCI_DISABLE_MWI
3326        u16 cmd;
3327
3328        pci_read_config_word(dev, PCI_COMMAND, &cmd);
3329        if (cmd & PCI_COMMAND_INVALIDATE) {
3330                cmd &= ~PCI_COMMAND_INVALIDATE;
3331                pci_write_config_word(dev, PCI_COMMAND, cmd);
3332        }
3333#endif
3334}
3335EXPORT_SYMBOL(pci_clear_mwi);
3336
3337/**
3338 * pci_intx - enables/disables PCI INTx for device dev
3339 * @pdev: the PCI device to operate on
3340 * @enable: boolean: whether to enable or disable PCI INTx
3341 *
3342 * Enables/disables PCI INTx for device dev
3343 */
3344void pci_intx(struct pci_dev *pdev, int enable)
3345{
3346        u16 pci_command, new;
3347
3348        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3349
3350        if (enable)
3351                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3352        else
3353                new = pci_command | PCI_COMMAND_INTX_DISABLE;
3354
3355        if (new != pci_command) {
3356                struct pci_devres *dr;
3357
3358                pci_write_config_word(pdev, PCI_COMMAND, new);
3359
3360                dr = find_pci_dr(pdev);
3361                if (dr && !dr->restore_intx) {
3362                        dr->restore_intx = 1;
3363                        dr->orig_intx = !enable;
3364                }
3365        }
3366}
3367EXPORT_SYMBOL_GPL(pci_intx);
3368
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev support INTx masking via the config space
 * command word.
 *
 * Probes by toggling the INTx disable bit and reading it back; the
 * original command word is restored when masking is supported.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
        bool mask_supported = false;
        u16 orig, new;

        /* Quirked devices are known to misbehave; don't even probe */
        if (dev->broken_intx_masking)
                return false;

        /* Serialize against concurrent config accessors during the toggle */
        pci_cfg_access_lock(dev);

        pci_read_config_word(dev, PCI_COMMAND, &orig);
        pci_write_config_word(dev, PCI_COMMAND,
                              orig ^ PCI_COMMAND_INTX_DISABLE);
        pci_read_config_word(dev, PCI_COMMAND, &new);

        /*
         * There's no way to protect against hardware bugs or detect them
         * reliably, but as long as we know what the value should be, let's
         * go ahead and check it.
         */
        if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
                dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
                        orig, new);
        } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
                mask_supported = true;
                pci_write_config_word(dev, PCI_COMMAND, orig);
        }

        pci_cfg_access_unlock(dev);
        return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3408
/*
 * Atomically check the INTERRUPT status bit and update the INTx disable bit
 * accordingly: mask when @mask and an interrupt is pending, unmask when
 * !@mask and none is pending.  Returns true if the mask state matches the
 * request, false if the pending-interrupt state prevented the update.
 * Runs under pci_lock with IRQs off so the check-and-set is atomic with
 * respect to other config accesses.
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
        struct pci_bus *bus = dev->bus;
        bool mask_updated = true;
        u32 cmd_status_dword;
        u16 origcmd, newcmd;
        unsigned long flags;
        bool irq_pending;

        /*
         * We do a single dword read to retrieve both command and status.
         * Document assumptions that make this possible.
         */
        BUILD_BUG_ON(PCI_COMMAND % 4);
        BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

        raw_spin_lock_irqsave(&pci_lock, flags);

        bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

        /* PCI_STATUS lives in the upper half of the dword just read */
        irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

        /*
         * Check interrupt status register to see whether our device
         * triggered the interrupt (when masking) or the next IRQ is
         * already pending (when unmasking).
         */
        if (mask != irq_pending) {
                mask_updated = false;
                goto done;
        }

        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
        if (mask)
                newcmd |= PCI_COMMAND_INTX_DISABLE;
        /* 2-byte write touches only PCI_COMMAND, leaving PCI_STATUS alone */
        if (newcmd != origcmd)
                bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
        raw_spin_unlock_irqrestore(&pci_lock, flags);

        return mask_updated;
}
3453
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3467
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true. False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
        return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3481
/**
 * pci_wait_for_pending_transaction - waits for pending transaction
 * @dev: the PCI device to operate on
 *
 * Waits for the device's Transaction Pending bit to clear.
 *
 * Return: 1 if no transactions are pending (or the device is not PCIe),
 * 0 if the wait gave up with transactions still pending.
 */
int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
        /* Conventional PCI has no Transaction Pending bit to wait on */
        if (!pci_is_pcie(dev))
                return 1;

        return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
                                    PCI_EXP_DEVSTA_TRPND);
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3497
/*
 * We should only need to wait 100ms after FLR, but some devices take longer.
 * Wait for up to 1000ms for config space to return something other than -1.
 * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
 * dword because VFs don't implement the 1st dword.
 */
static void pci_flr_wait(struct pci_dev *dev)
{
        int i = 0;
        u32 id;

        /* Poll in 100ms steps, up to 10 tries, until config space responds */
        do {
                msleep(100);
                pci_read_config_dword(dev, PCI_COMMAND, &id);
        } while (i++ < 10 && id == ~0);

        if (id == ~0)
                dev_warn(&dev->dev, "Failed to return from FLR\n");
        else if (i > 1)
                dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
                         (i - 1) * 100);
}
3520
3521/**
3522 * pcie_has_flr - check if a device supports function level resets
3523 * @dev:        device to check
3524 *
3525 * Returns true if the device advertises support for PCIe function level
3526 * resets.
3527 */
3528static bool pcie_has_flr(struct pci_dev *dev)
3529{
3530        u32 cap;
3531
3532        if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
3533                return false;
3534
3535        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3536        return cap & PCI_EXP_DEVCAP_FLR;
3537}
3538
/**
 * pcie_flr - initiate a PCIe function level reset
 * @dev:        device to reset
 *
 * Initiate a function level reset on @dev.  The caller should ensure the
 * device supports FLR before calling this function, e.g. by using the
 * pcie_has_flr() helper.
 */
void pcie_flr(struct pci_dev *dev)
{
        /* Proceed even on timeout; the reset will abort stuck transactions */
        if (!pci_wait_for_pending_transaction(dev))
                dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");

        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
        pci_flr_wait(dev);
}
EXPORT_SYMBOL_GPL(pcie_flr);
3556
/*
 * pci_af_flr - perform an Advanced Features function level reset
 * @dev: device to reset
 * @probe: if set, only check whether the device supports AF FLR
 *
 * Returns 0 on success (or when @probe confirms support), -ENOTTY when the
 * device has no usable AF FLR capability.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int pos;
	u8 cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
		return -ENOTTY;

	/* Both Transactions Pending and FLR support must be advertised */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/*
	 * Wait for Transaction Pending bit to clear.  A word-aligned test
	 * is used, so we use the control offset rather than status and shift
	 * the test bit to match.
	 */
	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
				 PCI_AF_STATUS_TP << 8))
		dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");

	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	pci_flr_wait(dev);
	return 0;
}
3589
3590/**
3591 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3592 * @dev: Device to reset.
3593 * @probe: If set, only check if the device can be reset this way.
3594 *
3595 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3596 * unset, it will be reinitialized internally when going from PCI_D3hot to
3597 * PCI_D0.  If that's the case and the device is not in a low-power state
3598 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3599 *
3600 * NOTE: This causes the caller to sleep for twice the device power transition
3601 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3602 * by default (i.e. unless the @dev's d3_delay field has a different value).
3603 * Moreover, only devices in D0 can be reset by this function.
3604 */
3605static int pci_pm_reset(struct pci_dev *dev, int probe)
3606{
3607        u16 csr;
3608
3609        if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3610                return -ENOTTY;
3611
3612        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3613        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3614                return -ENOTTY;
3615
3616        if (probe)
3617                return 0;
3618
3619        if (dev->current_state != PCI_D0)
3620                return -EINVAL;
3621
3622        csr &= ~PCI_PM_CTRL_STATE_MASK;
3623        csr |= PCI_D3hot;
3624        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3625        pci_dev_d3_sleep(dev);
3626
3627        csr &= ~PCI_PM_CTRL_STATE_MASK;
3628        csr |= PCI_D0;
3629        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3630        pci_dev_d3_sleep(dev);
3631
3632        return 0;
3633}
3634
3635void pci_reset_secondary_bus(struct pci_dev *dev)
3636{
3637        u16 ctrl;
3638
3639        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3640        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3641        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3642        /*
3643         * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
3644         * this to 2ms to ensure that we meet the minimum requirement.
3645         */
3646        msleep(2);
3647
3648        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3649        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3650
3651        /*
3652         * Trhfa for conventional PCI is 2^25 clock cycles.
3653         * Assuming a minimum 33MHz clock this results in a 1s
3654         * delay before we can consider subordinate devices to
3655         * be re-initialized.  PCIe has some ways to shorten this,
3656         * but we don't make use of them yet.
3657         */
3658        ssleep(1);
3659}
3660
/* Weak default; architecture code may override with its own implementation */
void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	pci_reset_secondary_bus(dev);
}
3665
3666/**
3667 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3668 * @dev: Bridge device
3669 *
3670 * Use the bridge control register to assert reset on the secondary bus.
3671 * Devices on the secondary bus are left in power-on state.
3672 */
3673void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3674{
3675        pcibios_reset_secondary_bus(dev);
3676}
3677EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3678
3679static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3680{
3681        struct pci_dev *pdev;
3682
3683        if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3684            !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3685                return -ENOTTY;
3686
3687        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3688                if (pdev != dev)
3689                        return -ENOTTY;
3690
3691        if (probe)
3692                return 0;
3693
3694        pci_reset_bridge_secondary_bus(dev->bus->self);
3695
3696        return 0;
3697}
3698
3699static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3700{
3701        int rc = -ENOTTY;
3702
3703        if (!hotplug || !try_module_get(hotplug->ops->owner))
3704                return rc;
3705
3706        if (hotplug->ops->reset_slot)
3707                rc = hotplug->ops->reset_slot(hotplug, probe);
3708
3709        module_put(hotplug->ops->owner);
3710
3711        return rc;
3712}
3713
3714static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3715{
3716        struct pci_dev *pdev;
3717
3718        if (dev->subordinate || !dev->slot ||
3719            dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3720                return -ENOTTY;
3721
3722        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3723                if (pdev != dev && pdev->slot == dev->slot)
3724                        return -ENOTTY;
3725
3726        return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3727}
3728
/*
 * Try the available reset methods in order of preference: device-specific
 * quirk, PCIe FLR, AF FLR, PM reset, hotplug slot reset, and finally a
 * parent bus reset.  The first method that does not report -ENOTTY
 * determines the result.  Caller holds the device lock when !probe.
 */
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	/* FLR has no probe-only variant; support was checked separately */
	if (pcie_has_flr(dev)) {
		if (!probe)
			pcie_flr(dev);
		rc = 0;
		goto done;
	}

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_dev_reset_slot_function(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}
3762
/*
 * Block config space access first, then take the device lock.  The
 * ordering is significant: pci_dev_unlock() releases in reverse order.
 */
static void pci_dev_lock(struct pci_dev *dev)
{
	pci_cfg_access_lock(dev);
	/* block PM suspend, driver probe, etc. */
	device_lock(&dev->dev);
}
3769
3770/* Return 1 on successful lock, 0 on contention */
3771static int pci_dev_trylock(struct pci_dev *dev)
3772{
3773        if (pci_cfg_access_trylock(dev)) {
3774                if (device_trylock(&dev->dev))
3775                        return 1;
3776                pci_cfg_access_unlock(dev);
3777        }
3778
3779        return 0;
3780}
3781
/* Release in reverse order of pci_dev_lock() */
static void pci_dev_unlock(struct pci_dev *dev)
{
	device_unlock(&dev->dev);
	pci_cfg_access_unlock(dev);
}
3787
3788/**
3789 * pci_reset_notify - notify device driver of reset
3790 * @dev: device to be notified of reset
3791 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
3792 *           completed
3793 *
3794 * Must be called prior to device access being disabled and after device
3795 * access is restored.
3796 */
3797static void pci_reset_notify(struct pci_dev *dev, bool prepare)
3798{
3799        const struct pci_error_handlers *err_handler =
3800                        dev->driver ? dev->driver->err_handler : NULL;
3801        if (err_handler && dev->driver->pci_driver_rh
3802            && dev->driver->pci_driver_rh->reset_notify)
3803                dev->driver->pci_driver_rh->reset_notify(dev, prepare);
3804}
3805
/*
 * Save @dev's config space and quiesce it in preparation for a reset.
 * The bound driver (if any) is notified first, while the device is still
 * accessible.
 */
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
	pci_reset_notify(dev, true);

	/*
	 * Wake-up device prior to save.  PM registers default to D0 after
	 * reset and a simple register restore doesn't reliably return
	 * to a non-D0 state anyway.
	 */
	pci_set_power_state(dev, PCI_D0);

	pci_save_state(dev);
	/*
	 * Disable the device by clearing the Command register, except for
	 * INTx-disable which is set.  This not only disables MMIO and I/O port
	 * BARs, but also prevents the device from being Bus Master, preventing
	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
	 * compliant devices, INTx-disable prevents legacy interrupts.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
}
3827
/*
 * Undo pci_dev_save_and_disable(): restore the saved config space and
 * notify the driver that the reset attempt has completed.
 */
static void pci_dev_restore(struct pci_dev *dev)
{
	pci_restore_state(dev);
	pci_reset_notify(dev, false);
}
3833
/*
 * Reset @dev (or, with @probe set, just check that it can be reset).
 * A real reset is performed under the device lock; a probe needs none.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int ret;

	if (probe)
		return __pci_dev_reset(dev, probe);

	pci_dev_lock(dev);
	ret = __pci_dev_reset(dev, probe);
	pci_dev_unlock(dev);

	return ret;
}
3848
3849/**
3850 * __pci_reset_function - reset a PCI device function
3851 * @dev: PCI device to reset
3852 *
3853 * Some devices allow an individual function to be reset without affecting
3854 * other functions in the same device.  The PCI device must be responsive
3855 * to PCI config space in order to use this function.
3856 *
3857 * The device function is presumed to be unused when this function is called.
3858 * Resetting the device will make the contents of PCI configuration space
3859 * random, so any caller of this must be prepared to reinitialise the
3860 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3861 * etc.
3862 *
3863 * Returns 0 if the device function was successfully reset or negative if the
3864 * device doesn't support resetting a single function.
3865 */
3866int __pci_reset_function(struct pci_dev *dev)
3867{
3868        return pci_dev_reset(dev, 0);
3869}
3870EXPORT_SYMBOL_GPL(__pci_reset_function);
3871
3872/**
3873 * __pci_reset_function_locked - reset a PCI device function while holding
3874 * the @dev mutex lock.
3875 * @dev: PCI device to reset
3876 *
3877 * Some devices allow an individual function to be reset without affecting
3878 * other functions in the same device.  The PCI device must be responsive
3879 * to PCI config space in order to use this function.
3880 *
3881 * The device function is presumed to be unused and the caller is holding
3882 * the device mutex lock when this function is called.
3883 * Resetting the device will make the contents of PCI configuration space
3884 * random, so any caller of this must be prepared to reinitialise the
3885 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3886 * etc.
3887 *
3888 * Returns 0 if the device function was successfully reset or negative if the
3889 * device doesn't support resetting a single function.
3890 */
3891int __pci_reset_function_locked(struct pci_dev *dev)
3892{
3893        return __pci_dev_reset(dev, 0);
3894}
3895EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3896
3897/**
3898 * pci_probe_reset_function - check whether the device can be safely reset
3899 * @dev: PCI device to reset
3900 *
3901 * Some devices allow an individual function to be reset without affecting
3902 * other functions in the same device.  The PCI device must be responsive
3903 * to PCI config space in order to use this function.
3904 *
3905 * Returns 0 if the device function can be reset or negative if the
3906 * device doesn't support resetting a single function.
3907 */
3908int pci_probe_reset_function(struct pci_dev *dev)
3909{
3910        return pci_dev_reset(dev, 1);
3911}
3912
3913/**
3914 * pci_reset_function - quiesce and reset a PCI device function
3915 * @dev: PCI device to reset
3916 *
3917 * Some devices allow an individual function to be reset without affecting
3918 * other functions in the same device.  The PCI device must be responsive
3919 * to PCI config space in order to use this function.
3920 *
3921 * This function does not just reset the PCI portion of a device, but
3922 * clears all the state associated with the device.  This function differs
3923 * from __pci_reset_function in that it saves and restores device state
3924 * over the reset.
3925 *
3926 * Returns 0 if the device function was successfully reset or negative if the
3927 * device doesn't support resetting a single function.
3928 */
3929int pci_reset_function(struct pci_dev *dev)
3930{
3931        int rc;
3932
3933        rc = pci_dev_reset(dev, 1);
3934        if (rc)
3935                return rc;
3936
3937        pci_dev_save_and_disable(dev);
3938
3939        rc = pci_dev_reset(dev, 0);
3940
3941        pci_dev_restore(dev);
3942
3943        return rc;
3944}
3945EXPORT_SYMBOL_GPL(pci_reset_function);
3946
3947/**
3948 * pci_try_reset_function - quiesce and reset a PCI device function
3949 * @dev: PCI device to reset
3950 *
3951 * Same as above, except return -EAGAIN if unable to lock device.
3952 */
3953int pci_try_reset_function(struct pci_dev *dev)
3954{
3955        int rc;
3956
3957        rc = pci_dev_reset(dev, 1);
3958        if (rc)
3959                return rc;
3960
3961        pci_dev_save_and_disable(dev);
3962
3963        if (pci_dev_trylock(dev)) {
3964                rc = __pci_dev_reset(dev, 0);
3965                pci_dev_unlock(dev);
3966        } else
3967                rc = -EAGAIN;
3968
3969        pci_dev_restore(dev);
3970
3971        return rc;
3972}
3973EXPORT_SYMBOL_GPL(pci_try_reset_function);
3974
3975/* Do any devices on or below this bus prevent a bus reset? */
3976static bool pci_bus_resetable(struct pci_bus *bus)
3977{
3978        struct pci_dev *dev;
3979
3980        list_for_each_entry(dev, &bus->devices, bus_list) {
3981                if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3982                    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3983                        return false;
3984        }
3985
3986        return true;
3987}
3988
3989/* Lock devices from the top of the tree down */
3990static void pci_bus_lock(struct pci_bus *bus)
3991{
3992        struct pci_dev *dev;
3993
3994        list_for_each_entry(dev, &bus->devices, bus_list) {
3995                pci_dev_lock(dev);
3996                if (dev->subordinate)
3997                        pci_bus_lock(dev->subordinate);
3998        }
3999}
4000
4001/* Unlock devices from the bottom of the tree up */
4002static void pci_bus_unlock(struct pci_bus *bus)
4003{
4004        struct pci_dev *dev;
4005
4006        list_for_each_entry(dev, &bus->devices, bus_list) {
4007                if (dev->subordinate)
4008                        pci_bus_unlock(dev->subordinate);
4009                pci_dev_unlock(dev);
4010        }
4011}
4012
/* Return 1 on successful lock, 0 on contention */
static int pci_bus_trylock(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				/* Drop the lock just taken before unwinding */
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	/*
	 * Contention: release everything already locked (list entries
	 * strictly before @dev), subordinate buses first.
	 */
	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}
4038
4039/* Do any devices on or below this slot prevent a bus reset? */
4040static bool pci_slot_resetable(struct pci_slot *slot)
4041{
4042        struct pci_dev *dev;
4043
4044        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4045                if (!dev->slot || dev->slot != slot)
4046                        continue;
4047                if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4048                    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4049                        return false;
4050        }
4051
4052        return true;
4053}
4054
4055/* Lock devices from the top of the tree down */
4056static void pci_slot_lock(struct pci_slot *slot)
4057{
4058        struct pci_dev *dev;
4059
4060        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4061                if (!dev->slot || dev->slot != slot)
4062                        continue;
4063                pci_dev_lock(dev);
4064                if (dev->subordinate)
4065                        pci_bus_lock(dev->subordinate);
4066        }
4067}
4068
4069/* Unlock devices from the bottom of the tree up */
4070static void pci_slot_unlock(struct pci_slot *slot)
4071{
4072        struct pci_dev *dev;
4073
4074        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4075                if (!dev->slot || dev->slot != slot)
4076                        continue;
4077                if (dev->subordinate)
4078                        pci_bus_unlock(dev->subordinate);
4079                pci_dev_unlock(dev);
4080        }
4081}
4082
/* Return 1 on successful lock, 0 on contention */
static int pci_slot_trylock(struct pci_slot *slot)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
		/* Only functions that belong to @slot are of interest */
		if (!dev->slot || dev->slot != slot)
			continue;
		if (!pci_dev_trylock(dev))
			goto unlock;
		if (dev->subordinate) {
			if (!pci_bus_trylock(dev->subordinate)) {
				/* Drop the lock just taken before unwinding */
				pci_dev_unlock(dev);
				goto unlock;
			}
		}
	}
	return 1;

unlock:
	/*
	 * Contention: release the slot functions already locked (list
	 * entries strictly before @dev), subordinate buses first.
	 */
	list_for_each_entry_continue_reverse(dev,
					     &slot->bus->devices, bus_list) {
		if (!dev->slot || dev->slot != slot)
			continue;
		if (dev->subordinate)
			pci_bus_unlock(dev->subordinate);
		pci_dev_unlock(dev);
	}
	return 0;
}
4113
4114/* Save and disable devices from the top of the tree down */
4115static void pci_bus_save_and_disable(struct pci_bus *bus)
4116{
4117        struct pci_dev *dev;
4118
4119        list_for_each_entry(dev, &bus->devices, bus_list) {
4120                pci_dev_save_and_disable(dev);
4121                if (dev->subordinate)
4122                        pci_bus_save_and_disable(dev->subordinate);
4123        }
4124}
4125
4126/*
4127 * Restore devices from top of the tree down - parent bridges need to be
4128 * restored before we can get to subordinate devices.
4129 */
4130static void pci_bus_restore(struct pci_bus *bus)
4131{
4132        struct pci_dev *dev;
4133
4134        list_for_each_entry(dev, &bus->devices, bus_list) {
4135                pci_dev_restore(dev);
4136                if (dev->subordinate)
4137                        pci_bus_restore(dev->subordinate);
4138        }
4139}
4140
4141/* Save and disable devices from the top of the tree down */
4142static void pci_slot_save_and_disable(struct pci_slot *slot)
4143{
4144        struct pci_dev *dev;
4145
4146        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4147                if (!dev->slot || dev->slot != slot)
4148                        continue;
4149                pci_dev_save_and_disable(dev);
4150                if (dev->subordinate)
4151                        pci_bus_save_and_disable(dev->subordinate);
4152        }
4153}
4154
4155/*
4156 * Restore devices from top of the tree down - parent bridges need to be
4157 * restored before we can get to subordinate devices.
4158 */
4159static void pci_slot_restore(struct pci_slot *slot)
4160{
4161        struct pci_dev *dev;
4162
4163        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4164                if (!dev->slot || dev->slot != slot)
4165                        continue;
4166                pci_dev_restore(dev);
4167                if (dev->subordinate)
4168                        pci_bus_restore(dev->subordinate);
4169        }
4170}
4171
4172static int pci_slot_reset(struct pci_slot *slot, int probe)
4173{
4174        int rc;
4175
4176        if (!slot || !pci_slot_resetable(slot))
4177                return -ENOTTY;
4178
4179        if (!probe)
4180                pci_slot_lock(slot);
4181
4182        might_sleep();
4183
4184        rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4185
4186        if (!probe)
4187                pci_slot_unlock(slot);
4188
4189        return rc;
4190}
4191
4192/**
4193 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4194 * @slot: PCI slot to probe
4195 *
4196 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4197 */
4198int pci_probe_reset_slot(struct pci_slot *slot)
4199{
4200        return pci_slot_reset(slot, 1);
4201}
4202EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4203
4204/**
4205 * pci_reset_slot - reset a PCI slot
4206 * @slot: PCI slot to reset
4207 *
4208 * A PCI bus may host multiple slots, each slot may support a reset mechanism
4209 * independent of other slots.  For instance, some slots may support slot power
4210 * control.  In the case of a 1:1 bus to slot architecture, this function may
4211 * wrap the bus reset to avoid spurious slot related events such as hotplug.
4212 * Generally a slot reset should be attempted before a bus reset.  All of the
4213 * function of the slot and any subordinate buses behind the slot are reset
4214 * through this function.  PCI config space of all devices in the slot and
4215 * behind the slot is saved before and restored after reset.
4216 *
4217 * Return 0 on success, non-zero on error.
4218 */
4219int pci_reset_slot(struct pci_slot *slot)
4220{
4221        int rc;
4222
4223        rc = pci_slot_reset(slot, 1);
4224        if (rc)
4225                return rc;
4226
4227        pci_slot_save_and_disable(slot);
4228
4229        rc = pci_slot_reset(slot, 0);
4230
4231        pci_slot_restore(slot);
4232
4233        return rc;
4234}
4235EXPORT_SYMBOL_GPL(pci_reset_slot);
4236
4237/**
4238 * pci_try_reset_slot - Try to reset a PCI slot
4239 * @slot: PCI slot to reset
4240 *
4241 * Same as above except return -EAGAIN if the slot cannot be locked
4242 */
4243int pci_try_reset_slot(struct pci_slot *slot)
4244{
4245        int rc;
4246
4247        rc = pci_slot_reset(slot, 1);
4248        if (rc)
4249                return rc;
4250
4251        pci_slot_save_and_disable(slot);
4252
4253        if (pci_slot_trylock(slot)) {
4254                might_sleep();
4255                rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4256                pci_slot_unlock(slot);
4257        } else
4258                rc = -EAGAIN;
4259
4260        pci_slot_restore(slot);
4261
4262        return rc;
4263}
4264EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4265
4266static int pci_bus_reset(struct pci_bus *bus, int probe)
4267{
4268        if (!bus->self || !pci_bus_resetable(bus))
4269                return -ENOTTY;
4270
4271        if (probe)
4272                return 0;
4273
4274        pci_bus_lock(bus);
4275
4276        might_sleep();
4277
4278        pci_reset_bridge_secondary_bus(bus->self);
4279
4280        pci_bus_unlock(bus);
4281
4282        return 0;
4283}
4284
4285/**
4286 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4287 * @bus: PCI bus to probe
4288 *
4289 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4290 */
4291int pci_probe_reset_bus(struct pci_bus *bus)
4292{
4293        return pci_bus_reset(bus, 1);
4294}
4295EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4296
4297/**
4298 * pci_reset_bus - reset a PCI bus
4299 * @bus: top level PCI bus to reset
4300 *
4301 * Do a bus reset on the given bus and any subordinate buses, saving
4302 * and restoring state of all devices.
4303 *
4304 * Return 0 on success, non-zero on error.
4305 */
4306int pci_reset_bus(struct pci_bus *bus)
4307{
4308        int rc;
4309
4310        rc = pci_bus_reset(bus, 1);
4311        if (rc)
4312                return rc;
4313
4314        pci_bus_save_and_disable(bus);
4315
4316        rc = pci_bus_reset(bus, 0);
4317
4318        pci_bus_restore(bus);
4319
4320        return rc;
4321}
4322EXPORT_SYMBOL_GPL(pci_reset_bus);
4323
4324/**
4325 * pci_try_reset_bus - Try to reset a PCI bus
4326 * @bus: top level PCI bus to reset
4327 *
4328 * Same as above except return -EAGAIN if the bus cannot be locked
4329 */
4330int pci_try_reset_bus(struct pci_bus *bus)
4331{
4332        int rc;
4333
4334        rc = pci_bus_reset(bus, 1);
4335        if (rc)
4336                return rc;
4337
4338        pci_bus_save_and_disable(bus);
4339
4340        if (pci_bus_trylock(bus)) {
4341                might_sleep();
4342                pci_reset_bridge_secondary_bus(bus->self);
4343                pci_bus_unlock(bus);
4344        } else
4345                rc = -EAGAIN;
4346
4347        pci_bus_restore(bus);
4348
4349        return rc;
4350}
4351EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4352
4353/**
4354 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4355 * @dev: PCI device to query
4356 *
4357 * Returns mmrbc: maximum designed memory read count in bytes
4358 *    or appropriate error value.
4359 */
4360int pcix_get_max_mmrbc(struct pci_dev *dev)
4361{
4362        int cap;
4363        u32 stat;
4364
4365        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4366        if (!cap)
4367                return -EINVAL;
4368
4369        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4370                return -EINVAL;
4371
4372        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4373}
4374EXPORT_SYMBOL(pcix_get_max_mmrbc);
4375
4376/**
4377 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4378 * @dev: PCI device to query
4379 *
4380 * Returns mmrbc: maximum memory read count in bytes
4381 *    or appropriate error value.
4382 */
4383int pcix_get_mmrbc(struct pci_dev *dev)
4384{
4385        int cap;
4386        u16 cmd;
4387
4388        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4389        if (!cap)
4390                return -EINVAL;
4391
4392        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4393                return -EINVAL;
4394
4395        return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4396}
4397EXPORT_SYMBOL(pcix_get_mmrbc);
4398
4399/**
4400 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4401 * @dev: PCI device to query
4402 * @mmrbc: maximum memory read count in bytes
4403 *    valid values are 512, 1024, 2048, 4096
4404 *
4405 * If possible sets maximum memory read byte count, some bridges have erratas
4406 * that prevent this.
4407 */
4408int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4409{
4410        int cap;
4411        u32 stat, v, o;
4412        u16 cmd;
4413
4414        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4415                return -EINVAL;
4416
4417        v = ffs(mmrbc) - 10;
4418
4419        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4420        if (!cap)
4421                return -EINVAL;
4422
4423        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4424                return -EINVAL;
4425
4426        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4427                return -E2BIG;
4428
4429        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4430                return -EINVAL;
4431
4432        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4433        if (o != v) {
4434                if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4435                        return -EIO;
4436
4437                cmd &= ~PCI_X_CMD_MAX_READ;
4438                cmd |= v << 2;
4439                if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4440                        return -EIO;
4441        }
4442        return 0;
4443}
4444EXPORT_SYMBOL(pcix_set_mmrbc);
4445
4446/**
4447 * pcie_get_readrq - get PCI Express read request size
4448 * @dev: PCI device to query
4449 *
4450 * Returns maximum memory read request in bytes
4451 *    or appropriate error value.
4452 */
4453int pcie_get_readrq(struct pci_dev *dev)
4454{
4455        u16 ctl;
4456
4457        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4458
4459        return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4460}
4461EXPORT_SYMBOL(pcie_get_readrq);
4462
4463/**
4464 * pcie_set_readrq - set PCI Express maximum memory read request
4465 * @dev: PCI device to query
4466 * @rq: maximum memory read count in bytes
4467 *    valid values are 128, 256, 512, 1024, 2048, 4096
4468 *
4469 * If possible sets maximum memory read request in bytes
4470 */
4471int pcie_set_readrq(struct pci_dev *dev, int rq)
4472{
4473        u16 v;
4474
4475        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4476                return -EINVAL;
4477
4478        /*
4479         * If using the "performance" PCIe config, we clamp the
4480         * read rq size to the max packet size to prevent the
4481         * host bridge generating requests larger than we can
4482         * cope with
4483         */
4484        if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4485                int mps = pcie_get_mps(dev);
4486
4487                if (mps < rq)
4488                        rq = mps;
4489        }
4490
4491        v = (ffs(rq) - 8) << 12;
4492
4493        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4494                                                  PCI_EXP_DEVCTL_READRQ, v);
4495}
4496EXPORT_SYMBOL(pcie_set_readrq);
4497
4498/**
4499 * pcie_get_mps - get PCI Express maximum payload size
4500 * @dev: PCI device to query
4501 *
4502 * Returns maximum payload size in bytes
4503 */
4504int pcie_get_mps(struct pci_dev *dev)
4505{
4506        u16 ctl;
4507
4508        pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4509
4510        return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4511}
4512EXPORT_SYMBOL(pcie_get_mps);
4513
4514/**
4515 * pcie_set_mps - set PCI Express maximum payload size
4516 * @dev: PCI device to query
4517 * @mps: maximum payload size in bytes
4518 *    valid values are 128, 256, 512, 1024, 2048, 4096
4519 *
4520 * If possible sets maximum payload size
4521 */
4522int pcie_set_mps(struct pci_dev *dev, int mps)
4523{
4524        u16 v;
4525
4526        if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4527                return -EINVAL;
4528
4529        v = ffs(mps) - 8;
4530        if (v > dev->pcie_mpss)
4531                return -EINVAL;
4532        v <<= 5;
4533
4534        return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4535                                                  PCI_EXP_DEVCTL_PAYLOAD, v);
4536}
4537EXPORT_SYMBOL(pcie_set_mps);
4538
4539/**
4540 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4541 * @dev: PCI device to query
4542 * @speed: storage for minimum speed
4543 * @width: storage for minimum width
4544 *
4545 * This function will walk up the PCI device chain and determine the minimum
4546 * link width and speed of the device.
4547 */
4548int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4549                          enum pcie_link_width *width)
4550{
4551        int ret;
4552
4553        *speed = PCI_SPEED_UNKNOWN;
4554        *width = PCIE_LNK_WIDTH_UNKNOWN;
4555
4556        while (dev) {
4557                u16 lnksta;
4558                enum pci_bus_speed next_speed;
4559                enum pcie_link_width next_width;
4560
4561                ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4562                if (ret)
4563                        return ret;
4564
4565                next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4566                next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4567                        PCI_EXP_LNKSTA_NLW_SHIFT;
4568
4569                if (next_speed < *speed)
4570                        *speed = next_speed;
4571
4572                if (next_width < *width)
4573                        *width = next_width;
4574
4575                dev = dev->bus->self;
4576        }
4577
4578        return 0;
4579}
4580EXPORT_SYMBOL(pcie_get_minimum_link);
4581
4582/**
4583 * pci_select_bars - Make BAR mask from the type of resource
4584 * @dev: the PCI device for which BAR mask is made
4585 * @flags: resource type mask to be selected
4586 *
4587 * This helper routine makes bar mask from the type of resource.
4588 */
4589int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4590{
4591        int i, bars = 0;
4592        for (i = 0; i < PCI_NUM_RESOURCES; i++)
4593                if (pci_resource_flags(dev, i) & flags)
4594                        bars |= (1 << i);
4595        return bars;
4596}
4597EXPORT_SYMBOL(pci_select_bars);
4598
4599/**
4600 * pci_resource_bar - get position of the BAR associated with a resource
4601 * @dev: the PCI device
4602 * @resno: the resource number
4603 * @type: the BAR type to be filled in
4604 *
4605 * Returns BAR position in config space, or 0 if the BAR is invalid.
4606 */
4607int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4608{
4609        int reg;
4610
4611        if (resno < PCI_ROM_RESOURCE) {
4612                *type = pci_bar_unknown;
4613                return PCI_BASE_ADDRESS_0 + 4 * resno;
4614        } else if (resno == PCI_ROM_RESOURCE) {
4615                *type = pci_bar_mem32;
4616                return dev->rom_base_reg;
4617        } else if (resno < PCI_BRIDGE_RESOURCES) {
4618                /* device specific resource */
4619                *type = pci_bar_unknown;
4620                reg = pci_iov_resource_bar(dev, resno);
4621                if (reg)
4622                        return reg;
4623        }
4624
4625        dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
4626        return 0;
4627}
4628
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

/*
 * pci_register_set_vga_state - install the arch hook called by
 * pci_set_vga_state(); passing NULL removes any previously set hook.
 */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}
4636
4637static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4638                                  unsigned int command_bits, u32 flags)
4639{
4640        if (arch_set_vga_state)
4641                return arch_set_vga_state(dev, decode, command_bits,
4642                                                flags);
4643        return 0;
4644}
4645
4646/**
4647 * pci_set_vga_state - set VGA decode state on device and parents if requested
4648 * @dev: the PCI device
4649 * @decode: true = enable decoding, false = disable decoding
4650 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4651 * @flags: traverse ancestors and change bridges
4652 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
4653 */
4654int pci_set_vga_state(struct pci_dev *dev, bool decode,
4655                      unsigned int command_bits, u32 flags)
4656{
4657        struct pci_bus *bus;
4658        struct pci_dev *bridge;
4659        u16 cmd;
4660        int rc;
4661
4662        WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
4663
4664        /* ARCH specific VGA enables */
4665        rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4666        if (rc)
4667                return rc;
4668
4669        if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4670                pci_read_config_word(dev, PCI_COMMAND, &cmd);
4671                if (decode == true)
4672                        cmd |= command_bits;
4673                else
4674                        cmd &= ~command_bits;
4675                pci_write_config_word(dev, PCI_COMMAND, cmd);
4676        }
4677
4678        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4679                return 0;
4680
4681        bus = dev->bus;
4682        while (bus) {
4683                bridge = bus->self;
4684                if (bridge) {
4685                        pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4686                                             &cmd);
4687                        if (decode == true)
4688                                cmd |= PCI_BRIDGE_CTL_VGA;
4689                        else
4690                                cmd &= ~PCI_BRIDGE_CTL_VGA;
4691                        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4692                                              cmd);
4693                }
4694                bus = bus->parent;
4695        }
4696        return 0;
4697}
4698
4699/**
4700 * pci_add_dma_alias - Add a DMA devfn alias for a device
4701 * @dev: the PCI device for which alias is added
4702 * @devfn: alias slot and function
4703 *
4704 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
4705 * It should be called early, preferably as PCI fixup header quirk.
4706 */
4707void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
4708{
4709        if (!dev->pci_dev_rh->dma_alias_mask)
4710                dev->pci_dev_rh->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
4711                                                          sizeof(long), GFP_KERNEL);
4712        if (!dev->pci_dev_rh->dma_alias_mask) {
4713                dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
4714                return;
4715        }
4716        set_bit(devfn, dev->pci_dev_rh->dma_alias_mask);
4717        dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
4718                 PCI_SLOT(devfn), PCI_FUNC(devfn));
4719}
4720
4721bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
4722{
4723        return (dev1->pci_dev_rh->dma_alias_mask &&
4724                test_bit(dev2->devfn, dev1->pci_dev_rh->dma_alias_mask)) ||
4725                (dev2->pci_dev_rh->dma_alias_mask &&
4726                 test_bit(dev1->devfn, dev2->pci_dev_rh->dma_alias_mask));
4727}
4728
4729bool pci_device_is_present(struct pci_dev *pdev)
4730{
4731        u32 v;
4732
4733        return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4734}
4735EXPORT_SYMBOL_GPL(pci_device_is_present);
4736
4737void pci_ignore_hotplug(struct pci_dev *dev)
4738{
4739        struct pci_dev *bridge = dev->bus->self;
4740
4741        dev->ignore_hotplug = 1;
4742        /* Propagate the "ignore hotplug" setting to the parent bridge. */
4743        if (bridge)
4744                bridge->ignore_hotplug = 1;
4745}
4746EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
4747
/*
 * Backing store for the "pci=resource_alignment=" boot parameter and the
 * matching sysfs bus attribute; the spinlock guards concurrent access.
 */
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
4751
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * Scans resource_alignment_param, a ';'- or ','-separated list of
 * "[<order>@][<seg>:]<bus>:<slot>.<func>" entries, for one that matches
 * @dev.
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix requests a 2^order alignment. */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Try "seg:bus:slot.func" first, then fall back to
		 * "bus:slot.func" with segment 0. */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1)
				align = PAGE_SIZE;
			else
				/*
				 * NOTE(review): align_order is unbounded; an
				 * order >= 32 would overflow this shift —
				 * confirm the parameter source is trusted.
				 */
				align = 1 << align_order;
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
4807
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	/* check if specified PCI is target device to reassign */
	align = pci_specified_resource_alignment(dev);
	if (!align)
		return;

	/* Host bridge windows cannot be reassigned; refuse to touch them. */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	/* Stop the device decoding memory before its BARs are released. */
	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	/*
	 * Release each MEM BAR: round its size up to the requested
	 * alignment, mark it unset, and rebase it to [0, size-1] so the
	 * core will assign a fresh, suitably aligned range later.
	 */
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->flags |= IORESOURCE_UNSET;
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->flags |= IORESOURCE_UNSET;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
4872
4873static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
4874{
4875        if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4876                count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4877        spin_lock(&resource_alignment_lock);
4878        strncpy(resource_alignment_param, buf, count);
4879        resource_alignment_param[count] = '\0';
4880        spin_unlock(&resource_alignment_lock);
4881        return count;
4882}
4883
4884static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
4885{
4886        size_t count;
4887        spin_lock(&resource_alignment_lock);
4888        count = snprintf(buf, size, "%s", resource_alignment_param);
4889        spin_unlock(&resource_alignment_lock);
4890        return count;
4891}
4892
/* sysfs "resource_alignment" bus attribute: report the current setting. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
4897
/* sysfs "resource_alignment" bus attribute: replace the current setting. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}
4903
/* /sys/bus/pci/resource_alignment: world-readable, root-writable. */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

/* Register the attribute late, once the PCI bus type is fully set up. */
static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}
late_initcall(pci_resource_alignment_sysfs_init);
4913
/* Disable PCI domain support (handler for the "pci=nodomains" option). */
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
4920
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	/* Default: assume extended config space is accessible. */
	return 1;
}
4932
/*
 * Weak no-op stub: architectures needing CardBus bus fixups override this.
 */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
4937
/*
 * Parse the comma-separated "pci=" kernel command-line parameter.  Each
 * token is first offered to the architecture via pcibios_setup(); only
 * tokens it does not consume are matched against the generic options.
 */
static int __init pci_setup(char *str)
{
	while (str) {
		/* Split off the next comma-separated token. */
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				/* Must stay after the "realloc=" match above. */
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "hpbussize=", 10)) {
				pci_hotplug_bus_size =
					simple_strtoul(str + 10, &str, 0);
				/* Bus numbers are 8-bit; fall back on nonsense. */
				if (pci_hotplug_bus_size > 0xff)
					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
4995