/* linux/drivers/pci/pci.c */
   1/*
   2 *      PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *      Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *      David Mosberger-Tang
   6 *
   7 *      Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/log2.h>
  20#include <linux/pci-aspm.h>
  21#include <linux/pm_wakeup.h>
  22#include <linux/interrupt.h>
  23#include <linux/device.h>
  24#include <linux/pm_runtime.h>
  25#include <asm/setup.h>
  26#include "pci.h"
  27
  28const char *pci_power_names[] = {
  29        "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  30};
  31EXPORT_SYMBOL_GPL(pci_power_names);
  32
  33int isa_dma_bridge_buggy;
  34EXPORT_SYMBOL(isa_dma_bridge_buggy);
  35
  36int pci_pci_problems;
  37EXPORT_SYMBOL(pci_pci_problems);
  38
  39unsigned int pci_pm_d3_delay;
  40
  41static void pci_pme_list_scan(struct work_struct *work);
  42
  43static LIST_HEAD(pci_pme_list);
  44static DEFINE_MUTEX(pci_pme_list_mutex);
  45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  46
  47struct pci_pme_device {
  48        struct list_head list;
  49        struct pci_dev *dev;
  50};
  51
  52#define PME_TIMEOUT 1000 /* How long between PME checks */
  53
  54static void pci_dev_d3_sleep(struct pci_dev *dev)
  55{
  56        unsigned int delay = dev->d3_delay;
  57
  58        if (delay < pci_pm_d3_delay)
  59                delay = pci_pm_d3_delay;
  60
  61        msleep(delay);
  62}
  63
  64#ifdef CONFIG_PCI_DOMAINS
  65int pci_domains_supported = 1;
  66#endif
  67
  68#define DEFAULT_CARDBUS_IO_SIZE         (256)
  69#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
  70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  73
  74#define DEFAULT_HOTPLUG_IO_SIZE         (256)
  75#define DEFAULT_HOTPLUG_MEM_SIZE        (2*1024*1024)
  76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  77unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  79
  80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
  81
  82/*
  83 * The default CLS is used if arch didn't set CLS explicitly and not
  84 * all pci devices agree on the same value.  Arch can override either
  85 * the dfl or actual value as it sees fit.  Don't forget this is
  86 * measured in 32-bit words, not bytes.
  87 */
  88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
  89u8 pci_cache_line_size;
  90
  91/**
  92 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  93 * @bus: pointer to PCI bus structure to search
  94 *
  95 * Given a PCI bus, returns the highest PCI bus number present in the set
  96 * including the given PCI bus and its list of child PCI buses.
  97 */
  98unsigned char pci_bus_max_busnr(struct pci_bus* bus)
  99{
 100        struct list_head *tmp;
 101        unsigned char max, n;
 102
 103        max = bus->subordinate;
 104        list_for_each(tmp, &bus->children) {
 105                n = pci_bus_max_busnr(pci_bus_b(tmp));
 106                if(n > max)
 107                        max = n;
 108        }
 109        return max;
 110}
 111EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 112
 113#ifdef CONFIG_HAS_IOMEM
 114void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 115{
 116        /*
 117         * Make sure the BAR is actually a memory resource, not an IO resource
 118         */
 119        if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 120                WARN_ON(1);
 121                return NULL;
 122        }
 123        return ioremap_nocache(pci_resource_start(pdev, bar),
 124                                     pci_resource_len(pdev, bar));
 125}
 126EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 127#endif
 128
/* NOTE: compiled out -- kept for reference only. */
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	/* Walk every root/child bus in the system, tracking the maximum. */
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */
 152
 153#define PCI_FIND_CAP_TTL        48
 154
/*
 * __pci_find_next_cap_ttl - walk the conventional capability list
 * @bus: bus to access config space through
 * @devfn: device/function whose config space is scanned
 * @pos: offset of the "next capability" pointer to start from
 * @cap: capability ID (PCI_CAP_ID_*) to look for
 * @ttl: remaining hops; decremented here so callers can bound a scan
 *       that spans several calls (guards against malformed/looping lists)
 *
 * Returns the config-space offset of the matching capability, or 0.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)		/* pointers below 0x40 are invalid */
			break;
		pos &= ~3;		/* capability structures are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)		/* all-ones: no device / end of list */
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
 175
 176static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 177                               u8 pos, int cap)
 178{
 179        int ttl = PCI_FIND_CAP_TTL;
 180
 181        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 182}
 183
 184int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 185{
 186        return __pci_find_next_cap(dev->bus, dev->devfn,
 187                                   pos + PCI_CAP_LIST_NEXT, cap);
 188}
 189EXPORT_SYMBOL_GPL(pci_find_next_capability);
 190
 191static int __pci_bus_find_cap_start(struct pci_bus *bus,
 192                                    unsigned int devfn, u8 hdr_type)
 193{
 194        u16 status;
 195
 196        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 197        if (!(status & PCI_STATUS_CAP_LIST))
 198                return 0;
 199
 200        switch (hdr_type) {
 201        case PCI_HEADER_TYPE_NORMAL:
 202        case PCI_HEADER_TYPE_BRIDGE:
 203                return PCI_CAPABILITY_LIST;
 204        case PCI_HEADER_TYPE_CARDBUS:
 205                return PCI_CB_CAPABILITY_LIST;
 206        default:
 207                return 0;
 208        }
 209
 210        return 0;
 211}
 212
 213/**
 214 * pci_find_capability - query for devices' capabilities 
 215 * @dev: PCI device to query
 216 * @cap: capability code
 217 *
 218 * Tell if a device supports a given PCI capability.
 219 * Returns the address of the requested capability structure within the
 220 * device's PCI configuration space or 0 in case the device does not
 221 * support it.  Possible values for @cap:
 222 *
 223 *  %PCI_CAP_ID_PM           Power Management 
 224 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 225 *  %PCI_CAP_ID_VPD          Vital Product Data 
 226 *  %PCI_CAP_ID_SLOTID       Slot Identification 
 227 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 228 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 229 *  %PCI_CAP_ID_PCIX         PCI-X
 230 *  %PCI_CAP_ID_EXP          PCI Express
 231 */
 232int pci_find_capability(struct pci_dev *dev, int cap)
 233{
 234        int pos;
 235
 236        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 237        if (pos)
 238                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 239
 240        return pos;
 241}
 242
 243/**
 244 * pci_bus_find_capability - query for devices' capabilities 
 245 * @bus:   the PCI bus to query
 246 * @devfn: PCI device to query
 247 * @cap:   capability code
 248 *
 249 * Like pci_find_capability() but works for pci devices that do not have a
 250 * pci_dev structure set up yet. 
 251 *
 252 * Returns the address of the requested capability structure within the
 253 * device's PCI configuration space or 0 in case the device does not
 254 * support it.
 255 */
 256int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 257{
 258        int pos;
 259        u8 hdr_type;
 260
 261        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 262
 263        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 264        if (pos)
 265                pos = __pci_find_next_cap(bus, devfn, pos, cap);
 266
 267        return pos;
 268}
 269
 270/**
 271 * pci_find_ext_capability - Find an extended capability
 272 * @dev: PCI device to query
 273 * @cap: capability code
 274 *
 275 * Returns the address of the requested extended capability structure
 276 * within the device's PCI configuration space or 0 if the device does
 277 * not support it.  Possible values for @cap:
 278 *
 279 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 280 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 281 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 282 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 283 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;	/* extended caps start at 0x100 */

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	/* device has no extended config space at all */
	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		/* a next pointer below 0x100 terminates the list */
		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 321
 322/**
 323 * pci_bus_find_ext_capability - find an extended capability
 324 * @bus:   the PCI bus to query
 325 * @devfn: PCI device to query
 326 * @cap:   capability code
 327 *
 328 * Like pci_find_ext_capability() but works for pci devices that do not have a
 329 * pci_dev structure set up yet.
 330 *
 331 * Returns the address of the requested capability structure within the
 332 * device's PCI configuration space or 0 in case the device does not
 333 * support it.
 334 */
 335int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
 336                                int cap)
 337{
 338        u32 header;
 339        int ttl;
 340        int pos = PCI_CFG_SPACE_SIZE;
 341
 342        /* minimum 8 bytes per capability */
 343        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 344
 345        if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 346                return 0;
 347        if (header == 0xffffffff || header == 0)
 348                return 0;
 349
 350        while (ttl-- > 0) {
 351                if (PCI_EXT_CAP_ID(header) == cap)
 352                        return pos;
 353
 354                pos = PCI_EXT_CAP_NEXT(header);
 355                if (pos < PCI_CFG_SPACE_SIZE)
 356                        break;
 357
 358                if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 359                        break;
 360        }
 361
 362        return 0;
 363}
 364
/*
 * __pci_find_next_ht_cap - scan PCI_CAP_ID_HT capabilities for @ht_cap
 * @dev: device to scan
 * @pos: offset to resume the capability-list walk from
 * @ht_cap: HyperTransport capability type (HT_CAPTYPE_*)
 *
 * HT encodes a sub-type in byte 3 of each HT capability; SLAVE/HOST use
 * a 3-bit type field, all other types a 5-bit one -- hence the two masks.
 * Returns the offset of the matching HT capability, or 0.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* ttl is shared across calls so the whole scan is bounded */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
 392/**
 393 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 394 * @dev: PCI device to query
 395 * @pos: Position from which to continue searching
 396 * @ht_cap: Hypertransport capability code
 397 *
 398 * To be used in conjunction with pci_find_ht_capability() to search for
 399 * all capabilities matching @ht_cap. @pos should always be a value returned
 400 * from pci_find_ht_capability().
 401 *
 402 * NB. To be 100% safe against broken PCI devices, the caller should take
 403 * steps to avoid an infinite loop.
 404 */
 405int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 406{
 407        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 408}
 409EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 410
 411/**
 412 * pci_find_ht_capability - query a device's Hypertransport capabilities
 413 * @dev: PCI device to query
 414 * @ht_cap: Hypertransport capability code
 415 *
 416 * Tell if a device supports a given Hypertransport capability.
 417 * Returns an address within the device's PCI configuration space
 418 * or 0 in case the device does not support the request capability.
 419 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 420 * which has a Hypertransport capability matching @ht_cap.
 421 */
 422int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 423{
 424        int pos;
 425
 426        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 427        if (pos)
 428                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 429
 430        return pos;
 431}
 432EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 433
 434/**
 435 * pci_find_parent_resource - return resource region of parent bus of given region
 436 * @dev: PCI device structure contains resources to be searched
 437 * @res: child resource record for which parent is sought
 438 *
 439 *  For given resource region of given device, return the resource
 440 *  region of parent bus the given region is contained in or where
 441 *  it should be allocated from.
 442 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/* Containment check is skipped for unassigned (start==0) */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;	/* remember the first acceptable fallback */
	}
	return best;
}
 468
 469/**
 470 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
 471 * @dev: PCI device to have its BARs restored
 472 *
 473 * Restore the BAR values for a given device, so as to make it
 474 * accessible by its driver.
 475 */
 476static void
 477pci_restore_bars(struct pci_dev *dev)
 478{
 479        int i;
 480
 481        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 482                pci_update_resource(dev, i);
 483}
 484
 485static struct pci_platform_pm_ops *pci_platform_pm;
 486
 487int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 488{
 489        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 490            || !ops->sleep_wake || !ops->can_wakeup)
 491                return -EINVAL;
 492        pci_platform_pm = ops;
 493        return 0;
 494}
 495
 496static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 497{
 498        return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 499}
 500
 501static inline int platform_pci_set_power_state(struct pci_dev *dev,
 502                                                pci_power_t t)
 503{
 504        return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 505}
 506
 507static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 508{
 509        return pci_platform_pm ?
 510                        pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 511}
 512
 513static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
 514{
 515        return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
 516}
 517
 518static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 519{
 520        return pci_platform_pm ?
 521                        pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 522}
 523
 524static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 525{
 526        return pci_platform_pm ?
 527                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 528}
 529
 530/**
 531 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 532 *                           given PCI device
 533 * @dev: PCI device to handle.
 534 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 535 *
 536 * RETURN VALUE:
 537 * -EINVAL if the requested state is invalid.
 538 * -EIO if device does not support PCI PM or its PM capabilities register has a
 539 * wrong version, or device doesn't support the requested state.
 540 * 0 if device already is in the requested state.
 541 * 0 if device's power state has been successfully changed.
 542 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* No PCI PM capability: cannot change the state natively. */
	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* Devices without No_Soft_Reset may lose their BARs when
		 * leaving D3hot; remember to restore them below. */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back and cache whatever state the device actually entered. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Tell ASPM the upstream link's power state may have changed. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
 635
 636/**
 637 * pci_update_current_state - Read PCI power state of given device from its
 638 *                            PCI PM registers and cache it
 639 * @dev: PCI device to handle.
 640 * @state: State to cache in case the device doesn't have the PM capability
 641 */
 642void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 643{
 644        if (dev->pm_cap) {
 645                u16 pmcsr;
 646
 647                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 648                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 649        } else {
 650                dev->current_state = state;
 651        }
 652}
 653
 654/**
 655 * pci_platform_power_transition - Use platform to change device power state
 656 * @dev: PCI device to handle.
 657 * @state: State to put the device into.
 658 */
 659static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 660{
 661        int error;
 662
 663        if (platform_pci_power_manageable(dev)) {
 664                error = platform_pci_set_power_state(dev, state);
 665                if (!error)
 666                        pci_update_current_state(dev, state);
 667                /* Fall back to PCI_D0 if native PM is not supported */
 668                if (!dev->pm_cap)
 669                        dev->current_state = PCI_D0;
 670        } else {
 671                error = -ENODEV;
 672                /* Fall back to PCI_D0 if native PM is not supported */
 673                if (!dev->pm_cap)
 674                        dev->current_state = PCI_D0;
 675        }
 676
 677        return error;
 678}
 679
 680/**
 681 * __pci_start_power_transition - Start power transition of a PCI device
 682 * @dev: PCI device to handle.
 683 * @state: State to put the device into.
 684 */
 685static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 686{
 687        if (state == PCI_D0)
 688                pci_platform_power_transition(dev, PCI_D0);
 689}
 690
 691/**
 692 * __pci_complete_power_transition - Complete power transition of a PCI device
 693 * @dev: PCI device to handle.
 694 * @state: State to put the device into.
 695 *
 696 * This function should not be called directly by device drivers.
 697 */
 698int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 699{
 700        return state >= PCI_D0 ?
 701                        pci_platform_power_transition(dev, state) : -EINVAL;
 702}
 703EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 704
 705/**
 706 * pci_set_power_state - Set the power state of a PCI device
 707 * @dev: PCI device to handle.
 708 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 709 *
 710 * Transition a device to a new power state, using the platform firmware and/or
 711 * the device's PCI PM registers.
 712 *
 713 * RETURN VALUE:
 714 * -EINVAL if the requested state is invalid.
 715 * -EIO if device does not support PCI PM or its PM capabilities register has a
 716 * wrong version, or device doesn't support the requested state.
 717 * 0 if device already is in the requested state.
 718 * 0 if device's power state has been successfully changed.
 719 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Give the platform firmware the first word (acts only for D0). */
	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* A successful platform transition overrides a native PM failure. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
 757
 758/**
 759 * pci_choose_state - Choose the power state of a PCI device
 760 * @dev: PCI device to be suspended
 761 * @state: target sleep state for the whole system. This is the value
 762 *      that is passed to suspend() function.
 763 *
 764 * Returns PCI power state suitable for given device and given system
 765 * message.
 766 */
 767
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* Devices without a PM capability can only stay in D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	/* Prefer the platform firmware's suggestion when it has one. */
	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
		/* fallthrough */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		/* An unknown event is a caller bug -- complain loudly. */
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;	/* not reached; keeps the compiler happy after BUG() */
}

EXPORT_SYMBOL(pci_choose_state);
 797
/* Number of u16 control registers cached per PCIe capability. */
#define PCI_EXP_SAVE_REGS	7

/*
 * Predicates telling which PCIe capability control registers exist for a
 * given port: a v2 capability (PCI_EXP_FLAGS_VERS > 1) always has the
 * full set, a v1 capability only the registers its port type calls for.
 * pci_save_pcie_state()/pci_restore_pcie_state() must use these in the
 * same order so saved and restored registers line up.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
 821
/*
 * pci_save_pcie_state - cache the PCIe capability control registers
 * @dev: device whose registers are saved
 *
 * Reads each control register that pcie_cap_has_*() says exists into the
 * pre-allocated save buffer.  The read order here must match the write
 * order in pci_restore_pcie_state().  Returns 0 on success, -ENOMEM if
 * no save buffer was allocated.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;	/* not a PCIe device: nothing to save */

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
 859
/*
 * pci_restore_pcie_state - write back the registers cached by
 * pci_save_pcie_state(); the write order must mirror the save order so
 * cap[] indices line up.  Silently does nothing if there is no save
 * buffer or no PCIe capability.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
 890
 891
 892static int pci_save_pcix_state(struct pci_dev *dev)
 893{
 894        int pos;
 895        struct pci_cap_saved_state *save_state;
 896
 897        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 898        if (pos <= 0)
 899                return 0;
 900
 901        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 902        if (!save_state) {
 903                dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 904                return -ENOMEM;
 905        }
 906
 907        pci_read_config_word(dev, pos + PCI_X_CMD,
 908                             (u16 *)save_state->cap.data);
 909
 910        return 0;
 911}
 912
 913static void pci_restore_pcix_state(struct pci_dev *dev)
 914{
 915        int i = 0, pos;
 916        struct pci_cap_saved_state *save_state;
 917        u16 *cap;
 918
 919        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 920        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 921        if (!save_state || pos <= 0)
 922                return;
 923        cap = (u16 *)&save_state->cap.data[0];
 924
 925        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 926}
 927
 928
 929/**
 930 * pci_save_state - save the PCI configuration space of a device before suspending
 931 * @dev: - PCI device that we're dealing with
 932 */
 933int
 934pci_save_state(struct pci_dev *dev)
 935{
 936        int i;
 937        /* XXX: 100% dword access ok here? */
 938        for (i = 0; i < 16; i++)
 939                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 940        dev->state_saved = true;
 941        if ((i = pci_save_pcie_state(dev)) != 0)
 942                return i;
 943        if ((i = pci_save_pcix_state(dev)) != 0)
 944                return i;
 945        return 0;
 946}
 947
 948/** 
 949 * pci_restore_state - Restore the saved state of a PCI device
 950 * @dev: - PCI device that we're dealing with
 951 */
 952void pci_restore_state(struct pci_dev *dev)
 953{
 954        int i;
 955        u32 val;
 956
 957        if (!dev->state_saved)
 958                return;
 959
 960        /* PCI Express register must be restored first */
 961        pci_restore_pcie_state(dev);
 962
 963        /*
 964         * The Base Address register should be programmed before the command
 965         * register(s)
 966         */
 967        for (i = 15; i >= 0; i--) {
 968                pci_read_config_dword(dev, i * 4, &val);
 969                if (val != dev->saved_config_space[i]) {
 970                        dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
 971                                "space at offset %#x (was %#x, writing %#x)\n",
 972                                i, val, (int)dev->saved_config_space[i]);
 973                        pci_write_config_dword(dev,i * 4,
 974                                dev->saved_config_space[i]);
 975                }
 976        }
 977        pci_restore_pcix_state(dev);
 978        pci_restore_msi_state(dev);
 979        pci_restore_iov_state(dev);
 980
 981        dev->state_saved = false;
 982}
 983
/*
 * Opaque container returned by pci_store_saved_state(): a copy of the
 * standard config header followed by a packed run of capability blobs,
 * terminated by an entry with size == 0.
 */
struct pci_saved_state {
        u32 config_space[16];           /* standard 64-byte config header */
        struct pci_cap_saved_data cap[0];       /* variable-length cap blobs */
};
 988
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *                         the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
        struct pci_saved_state *state;
        struct pci_cap_saved_state *tmp;
        struct pci_cap_saved_data *cap;
        struct hlist_node *pos;
        size_t size;

        if (!dev->state_saved)
                return NULL;

        /* One extra zeroed pci_cap_saved_data acts as the list terminator. */
        size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
                size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

        state = kzalloc(size, GFP_KERNEL);
        if (!state)
                return NULL;

        memcpy(state->config_space, dev->saved_config_space,
               sizeof(state->config_space));

        /* Pack each saved capability (header + payload) back to back. */
        cap = state->cap;
        hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
                size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
                memcpy(cap, &tmp->cap, len);
                cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
        }
        /* Empty cap_save terminates list */

        return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1030
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 *
 * Returns 0 on success (or when @state is NULL), -EINVAL if a saved
 * capability has no matching buffer or a mismatched size on @dev.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
        struct pci_cap_saved_data *cap;

        /* Invalidate until the whole state has been copied back successfully. */
        dev->state_saved = false;

        if (!state)
                return 0;

        memcpy(dev->saved_config_space, state->config_space,
               sizeof(state->config_space));

        /* Walk the packed cap list; a zero-size entry terminates it. */
        cap = state->cap;
        while (cap->size) {
                struct pci_cap_saved_state *tmp;

                tmp = pci_find_saved_cap(dev, cap->cap_nr);
                if (!tmp || tmp->cap.size != cap->size)
                        return -EINVAL;

                memcpy(tmp->cap.data, cap->data, tmp->cap.size);
                cap = (struct pci_cap_saved_data *)((u8 *)cap +
                       sizeof(struct pci_cap_saved_data) + cap->size);
        }

        dev->state_saved = true;
        return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1065
1066/**
1067 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1068 *                                 and free the memory allocated for it.
1069 * @dev: PCI device that we're dealing with
1070 * @state: Pointer to saved state returned from pci_store_saved_state()
1071 */
1072int pci_load_and_free_saved_state(struct pci_dev *dev,
1073                                  struct pci_saved_state **state)
1074{
1075        int ret = pci_load_saved_state(dev, *state);
1076        kfree(*state);
1077        *state = NULL;
1078        return ret;
1079}
1080EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1081
1082static int do_pci_enable_device(struct pci_dev *dev, int bars)
1083{
1084        int err;
1085
1086        err = pci_set_power_state(dev, PCI_D0);
1087        if (err < 0 && err != -EIO)
1088                return err;
1089        err = pcibios_enable_device(dev, bars);
1090        if (err < 0)
1091                return err;
1092        pci_fixup_device(pci_fixup_enable, dev);
1093
1094        return 0;
1095}
1096
1097/**
1098 * pci_reenable_device - Resume abandoned device
1099 * @dev: PCI device to be resumed
1100 *
1101 *  Note this function is a backend of pci_default_resume and is not supposed
1102 *  to be called by normal code, write proper resume handler and use it instead.
1103 */
1104int pci_reenable_device(struct pci_dev *dev)
1105{
1106        if (pci_is_enabled(dev))
1107                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1108        return 0;
1109}
1110
/*
 * Common worker for pci_enable_device{,_io,_mem}(): reference-counted
 * enable of all BARs whose resource flags intersect @flags.
 * Returns 0 on success or if already enabled, negative error otherwise.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
                                     resource_size_t flags)
{
        int err;
        int i, bars = 0;

        /*
         * Power state could be unknown at this point, either due to a fresh
         * boot or a device removal call.  So get the current power state
         * so that things like MSI message writing will behave as expected
         * (e.g. if the device really is in D0 at enable time).
         */
        if (dev->pm_cap) {
                u16 pmcsr;
                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }

        /* Reference-counted: only the first caller actually enables. */
        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */

        /* only skip sriov related */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
        for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);

        err = do_pci_enable_device(dev, bars);
        if (err < 0)
                /* Undo the refcount bump taken above on failure. */
                atomic_dec(&dev->enable_cnt);
        return err;
}
1145
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.  Enables are reference counted; see
 *  pci_disable_device().
 */
int pci_enable_device_io(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
1158
/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.  Enables are reference counted; see
 *  pci_disable_device().
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
1171
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
        return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1187
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
        unsigned int enabled:1;         /* pci_enable_device() succeeded */
        unsigned int pinned:1;          /* don't disable on driver detach */
        unsigned int orig_intx:1;       /* INTx state to restore on release */
        unsigned int restore_intx:1;    /* restore orig_intx on release */
        u32 region_mask;                /* regions to pci_release_region() */
};
1201
/*
 * devres release callback: tears down everything acquired through the
 * managed PCI interface — MSI/MSI-X, claimed regions, INTx state and,
 * unless the device was pinned, the enable itself.
 */
static void pcim_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pci_devres *this = res;
        int i;

        if (dev->msi_enabled)
                pci_disable_msi(dev);
        if (dev->msix_enabled)
                pci_disable_msix(dev);

        /* Release every region recorded in the mask. */
        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
                if (this->region_mask & (1 << i))
                        pci_release_region(dev, i);

        if (this->restore_intx)
                pci_intx(dev, this->orig_intx);

        /* Pinned devices stay enabled across driver detach. */
        if (this->enabled && !this->pinned)
                pci_disable_device(dev);
}
1223
1224static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1225{
1226        struct pci_devres *dr, *new_dr;
1227
1228        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1229        if (dr)
1230                return dr;
1231
1232        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1233        if (!new_dr)
1234                return NULL;
1235        return devres_get(&pdev->dev, new_dr, NULL, NULL);
1236}
1237
1238static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1239{
1240        if (pci_is_managed(pdev))
1241                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1242        return NULL;
1243}
1244
1245/**
1246 * pcim_enable_device - Managed pci_enable_device()
1247 * @pdev: PCI device to be initialized
1248 *
1249 * Managed pci_enable_device().
1250 */
1251int pcim_enable_device(struct pci_dev *pdev)
1252{
1253        struct pci_devres *dr;
1254        int rc;
1255
1256        dr = get_pci_dr(pdev);
1257        if (unlikely(!dr))
1258                return -ENOMEM;
1259        if (dr->enabled)
1260                return 0;
1261
1262        rc = pci_enable_device(pdev);
1263        if (!rc) {
1264                pdev->is_managed = 1;
1265                dr->enabled = 1;
1266        }
1267        return rc;
1268}
1269
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
        struct pci_devres *dr;

        /* Calling this on an unmanaged/disabled device is a caller bug. */
        dr = find_pci_dr(pdev);
        WARN_ON(!dr || !dr->enabled);
        if (dr)
                dr->pinned = 1;
}
1287
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
/* Weak no-op default; arches provide a strong definition when needed. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1297
1298static void do_pci_disable_device(struct pci_dev *dev)
1299{
1300        u16 pci_command;
1301
1302        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1303        if (pci_command & PCI_COMMAND_MASTER) {
1304                pci_command &= ~PCI_COMMAND_MASTER;
1305                pci_write_config_word(dev, PCI_COMMAND, pci_command);
1306        }
1307
1308        pcibios_disable_device(dev);
1309}
1310
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
        /* Skip devices that were never enabled; enable_cnt is untouched. */
        if (pci_is_enabled(dev))
                do_pci_disable_device(dev);
}
1323
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
        struct pci_devres *dr;

        /* Keep the managed-device bookkeeping in sync. */
        dr = find_pci_dr(dev);
        if (dr)
                dr->enabled = 0;

        /* Reference-counted: only the last disable takes effect. */
        if (atomic_sub_return(1, &dev->enable_cnt) != 0)
                return;

        do_pci_disable_device(dev);

        dev->is_busmaster = 0;
}
1350
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
/* Weak default: report "not supported" unless the arch overrides it. */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
                                                        enum pcie_reset_state state)
{
        return -EINVAL;
}
1365
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.  Delegates entirely to the
 * (possibly arch-overridden) pcibios hook.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
        return pcibios_set_pcie_reset_state(dev, state);
}
1378
1379/**
1380 * pci_check_pme_status - Check if given device has generated PME.
1381 * @dev: Device to check.
1382 *
1383 * Check the PME status of the device and if set, clear it and clear PME enable
1384 * (if set).  Return 'true' if PME status and PME enable were both set or
1385 * 'false' otherwise.
1386 */
1387bool pci_check_pme_status(struct pci_dev *dev)
1388{
1389        int pmcsr_pos;
1390        u16 pmcsr;
1391        bool ret = false;
1392
1393        if (!dev->pm_cap)
1394                return false;
1395
1396        pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1397        pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1398        if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1399                return false;
1400
1401        /* Clear PME status. */
1402        pmcsr |= PCI_PM_CTRL_PME_STATUS;
1403        if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1404                /* Disable PME to avoid interrupt flood. */
1405                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1406                ret = true;
1407        }
1408
1409        pci_write_config_word(dev, pmcsr_pos, pmcsr);
1410
1411        return ret;
1412}
1413
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag
 *      (passed as a void * so this can serve as a pci_walk_bus() callback).
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.  Always returns 0 so a bus walk is never aborted.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
        if (pme_poll_reset && dev->pme_poll)
                dev->pme_poll = false;

        if (pci_check_pme_status(dev)) {
                /* Report the wakeup and schedule a runtime resume. */
                pci_wakeup_event(dev);
                pm_request_resume(&dev->dev);
        }
        return 0;
}
1433
1434/**
1435 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1436 * @bus: Top bus of the subtree to walk.
1437 */
1438void pci_pme_wakeup_bus(struct pci_bus *bus)
1439{
1440        if (bus)
1441                pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1442}
1443
1444/**
1445 * pci_pme_capable - check the capability of PCI device to generate PME#
1446 * @dev: PCI device to handle.
1447 * @state: PCI state from which device will issue PME#.
1448 */
1449bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1450{
1451        if (!dev->pm_cap)
1452                return false;
1453
1454        return !!(dev->pme_support & (1 << state));
1455}
1456
1457static void pci_pme_list_scan(struct work_struct *work)
1458{
1459        struct pci_pme_device *pme_dev, *n;
1460
1461        mutex_lock(&pci_pme_list_mutex);
1462        if (!list_empty(&pci_pme_list)) {
1463                list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1464                        if (pme_dev->dev->pme_poll) {
1465                                pci_pme_wakeup(pme_dev->dev, NULL);
1466                        } else {
1467                                list_del(&pme_dev->list);
1468                                kfree(pme_dev);
1469                        }
1470                }
1471                if (!list_empty(&pci_pme_list))
1472                        schedule_delayed_work(&pci_pme_work,
1473                                              msecs_to_jiffies(PME_TIMEOUT));
1474        }
1475        mutex_unlock(&pci_pme_list_mutex);
1476}
1477
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
        u16 pmcsr;

        if (!dev->pm_cap)
                return;

        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        /* Clear PME_Status by writing 1 to it and enable PME# */
        pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
        if (!enable)
                pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

        /* PCI (as opposed to PCIe) PME requires that the device have
           its PME# line hooked up correctly. Not all hardware vendors
           do this, so the PME never gets delivered and the device
           remains asleep. The easiest way around this is to
           periodically walk the list of suspended devices and check
           whether any have their PME flag set. The assumption is that
           we'll wake up often enough anyway that this won't be a huge
           hit, and the power savings from the devices will still be a
           win. */

        if (dev->pme_poll) {
                struct pci_pme_device *pme_dev;
                if (enable) {
                        pme_dev = kmalloc(sizeof(struct pci_pme_device),
                                          GFP_KERNEL);
                        if (!pme_dev)
                                /* Can't poll; PME# itself is already set up. */
                                goto out;
                        pme_dev->dev = dev;
                        mutex_lock(&pci_pme_list_mutex);
                        list_add(&pme_dev->list, &pci_pme_list);
                        /* First entry kicks off the polling work. */
                        if (list_is_singular(&pci_pme_list))
                                schedule_delayed_work(&pci_pme_work,
                                                      msecs_to_jiffies(PME_TIMEOUT));
                        mutex_unlock(&pci_pme_list_mutex);
                } else {
                        /* Remove this device's entry from the poll list. */
                        mutex_lock(&pci_pme_list_mutex);
                        list_for_each_entry(pme_dev, &pci_pme_list, list) {
                                if (pme_dev->dev == dev) {
                                        list_del(&pme_dev->list);
                                        kfree(pme_dev);
                                        break;
                                }
                        }
                        mutex_unlock(&pci_pme_list_mutex);
                }
        }

out:
        dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
                        enable ? "enabled" : "disabled");
}
1542
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
                      bool runtime, bool enable)
{
        int ret = 0;

        /* For system sleep, the device must be allowed to wake at all. */
        if (enable && !runtime && !device_may_wakeup(&dev->dev))
                return -EINVAL;

        /* Don't do the same thing twice in a row for one device. */
        if (!!enable == !!dev->wakeup_prepared)
                return 0;

        /*
         * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
         * Anderson we should be doing PME# wake enable followed by ACPI wake
         * enable.  To disable wake-up we call the platform first, for symmetry.
         */

        if (enable) {
                int error;

                if (pci_pme_capable(dev, state))
                        pci_pme_active(dev, true);
                else
                        /* No native PME#: success depends on the platform. */
                        ret = 1;
                error = runtime ? platform_pci_run_wake(dev, true) :
                                        platform_pci_sleep_wake(dev, true);
                /* If PME# couldn't be used, report the platform's result. */
                if (ret)
                        ret = error;
                if (!ret)
                        dev->wakeup_prepared = true;
        } else {
                if (runtime)
                        platform_pci_run_wake(dev, false);
                else
                        platform_pci_sleep_wake(dev, false);
                pci_pme_active(dev, false);
                dev->wakeup_prepared = false;
        }

        return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
1606
1607/**
1608 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1609 * @dev: PCI device to prepare
1610 * @enable: True to enable wake-up event generation; false to disable
1611 *
1612 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1613 * and this function allows them to set that up cleanly - pci_enable_wake()
1614 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1615 * ordering constraints.
1616 *
1617 * This function only returns error code if the device is not capable of
1618 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1619 * enable wake-up power for it.
1620 */
1621int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1622{
1623        return pci_pme_capable(dev, PCI_D3cold) ?
1624                        pci_enable_wake(dev, PCI_D3cold, enable) :
1625                        pci_enable_wake(dev, PCI_D3hot, enable);
1626}
1627
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
        pci_power_t target_state = PCI_D3hot;

        if (platform_pci_power_manageable(dev)) {
                /*
                 * Call the platform to choose the target state of the device
                 * and enable wake-up from this state if supported.
                 */
                pci_power_t state = platform_pci_choose_state(dev);

                switch (state) {
                case PCI_POWER_ERROR:
                case PCI_UNKNOWN:
                        /* Keep the PCI_D3hot default. */
                        break;
                case PCI_D1:
                case PCI_D2:
                        if (pci_no_d1d2(dev))
                                break;
                        /* fallthrough */
                default:
                        target_state = state;
                }
        } else if (!dev->pm_cap) {
                /* No PM capability: the device can only stay in D0. */
                target_state = PCI_D0;
        } else if (device_may_wakeup(&dev->dev)) {
                /*
                 * Find the deepest state from which the device can generate
                 * wake-up events, make it the target state and enable device
                 * to generate PME#.
                 */
                if (dev->pme_support) {
                        while (target_state
                              && !(dev->pme_support & (1 << target_state)))
                                target_state--;
                }
        }

        return target_state;
}
1675
1676/**
1677 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1678 * @dev: Device to handle.
1679 *
1680 * Choose the power state appropriate for the device depending on whether
1681 * it can wake up the system and/or is power manageable by the platform
1682 * (PCI_D3hot is the default) and put the device into that state.
1683 */
1684int pci_prepare_to_sleep(struct pci_dev *dev)
1685{
1686        pci_power_t target_state = pci_target_state(dev);
1687        int error;
1688
1689        if (target_state == PCI_POWER_ERROR)
1690                return -EIO;
1691
1692        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1693
1694        error = pci_set_power_state(dev, target_state);
1695
1696        if (error)
1697                pci_enable_wake(dev, target_state, false);
1698
1699        return error;
1700}
1701
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
        /* Tear down wake-up before returning to full power. */
        pci_enable_wake(dev, PCI_D0, false);
        return pci_set_power_state(dev, PCI_D0);
}
1713
1714/**
1715 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1716 * @dev: PCI device being suspended.
1717 *
1718 * Prepare @dev to generate wake-up events at run time and put it into a low
1719 * power state.
1720 */
1721int pci_finish_runtime_suspend(struct pci_dev *dev)
1722{
1723        pci_power_t target_state = pci_target_state(dev);
1724        int error;
1725
1726        if (target_state == PCI_POWER_ERROR)
1727                return -EIO;
1728
1729        __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1730
1731        error = pci_set_power_state(dev, target_state);
1732
1733        if (error)
1734                __pci_enable_wake(dev, target_state, true, false);
1735
1736        return error;
1737}
1738
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
        struct pci_bus *bus = dev->bus;

        if (device_run_wake(&dev->dev))
                return true;

        /* Upstream wake-up only helps if the device can signal PME. */
        if (!dev->pme_support)
                return false;

        /* Walk up the bridge chain looking for a run-wake-capable bridge. */
        while (bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (device_run_wake(&bridge->dev))
                        return true;

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge)
                return device_run_wake(bus->bridge);

        return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1773
1774/**
1775 * pci_pm_init - Initialize PM functions of given PCI device
1776 * @dev: PCI device to handle.
1777 */
1778void pci_pm_init(struct pci_dev *dev)
1779{
1780        int pm;
1781        u16 pmc;
1782
1783        pm_runtime_forbid(&dev->dev);
1784        device_enable_async_suspend(&dev->dev);
1785        dev->wakeup_prepared = false;
1786
1787        dev->pm_cap = 0;
1788
1789        /* find PCI PM capability in list */
1790        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1791        if (!pm)
1792                return;
1793        /* Check device's ability to generate PME# */
1794        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1795
1796        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1797                dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1798                        pmc & PCI_PM_CAP_VER_MASK);
1799                return;
1800        }
1801
1802        dev->pm_cap = pm;
1803        dev->d3_delay = PCI_PM_D3_WAIT;
1804
1805        dev->d1_support = false;
1806        dev->d2_support = false;
1807        if (!pci_no_d1d2(dev)) {
1808                if (pmc & PCI_PM_CAP_D1)
1809                        dev->d1_support = true;
1810                if (pmc & PCI_PM_CAP_D2)
1811                        dev->d2_support = true;
1812
1813                if (dev->d1_support || dev->d2_support)
1814                        dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1815                                   dev->d1_support ? " D1" : "",
1816                                   dev->d2_support ? " D2" : "");
1817        }
1818
1819        pmc &= PCI_PM_CAP_PME_MASK;
1820        if (pmc) {
1821                dev_printk(KERN_DEBUG, &dev->dev,
1822                         "PME# supported from%s%s%s%s%s\n",
1823                         (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1824                         (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1825                         (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1826                         (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1827                         (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1828                dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1829                dev->pme_poll = true;
1830                /*
1831                 * Make device's PM flags reflect the wake-up capability, but
1832                 * let the user space enable it to wake up the system as needed.
1833                 */
1834                device_set_wakeup_capable(&dev->dev, true);
1835                /* Disable the PME# generation functionality */
1836                pci_pme_active(dev, false);
1837        } else {
1838                dev->pme_support = 0;
1839        }
1840}
1841
1842/**
1843 * platform_pci_wakeup_init - init platform wakeup if present
1844 * @dev: PCI device
1845 *
1846 * Some devices don't have PCI PM caps but can still generate wakeup
1847 * events through platform methods (like ACPI events).  If @dev supports
1848 * platform wakeup events, set the device flag to indicate as much.  This
1849 * may be redundant if the device also supports PCI PM caps, but double
1850 * initialization should be safe in that case.
1851 */
1852void platform_pci_wakeup_init(struct pci_dev *dev)
1853{
1854        if (!platform_pci_can_wakeup(dev))
1855                return;
1856
1857        device_set_wakeup_capable(&dev->dev, true);
1858        platform_pci_sleep_wake(dev, false);
1859}
1860
1861/**
1862 * pci_add_save_buffer - allocate buffer for saving given capability registers
1863 * @dev: the PCI device
1864 * @cap: the capability to allocate the buffer for
1865 * @size: requested size of the buffer
1866 */
1867static int pci_add_cap_save_buffer(
1868        struct pci_dev *dev, char cap, unsigned int size)
1869{
1870        int pos;
1871        struct pci_cap_saved_state *save_state;
1872
1873        pos = pci_find_capability(dev, cap);
1874        if (pos <= 0)
1875                return 0;
1876
1877        save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1878        if (!save_state)
1879                return -ENOMEM;
1880
1881        save_state->cap.cap_nr = cap;
1882        save_state->cap.size = size;
1883        pci_add_saved_cap(dev, save_state);
1884
1885        return 0;
1886}
1887
1888/**
1889 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1890 * @dev: the PCI device
1891 */
1892void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1893{
1894        int error;
1895
1896        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1897                                        PCI_EXP_SAVE_REGS * sizeof(u16));
1898        if (error)
1899                dev_err(&dev->dev,
1900                        "unable to preallocate PCI Express save buffer\n");
1901
1902        error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1903        if (error)
1904                dev_err(&dev->dev,
1905                        "unable to preallocate PCI-X save buffer\n");
1906}
1907
1908/**
1909 * pci_enable_ari - enable ARI forwarding if hardware support it
1910 * @dev: the PCI device
1911 */
1912void pci_enable_ari(struct pci_dev *dev)
1913{
1914        int pos;
1915        u32 cap;
1916        u16 flags, ctrl;
1917        struct pci_dev *bridge;
1918
1919        if (!pci_is_pcie(dev) || dev->devfn)
1920                return;
1921
1922        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1923        if (!pos)
1924                return;
1925
1926        bridge = dev->bus->self;
1927        if (!bridge || !pci_is_pcie(bridge))
1928                return;
1929
1930        pos = pci_pcie_cap(bridge);
1931        if (!pos)
1932                return;
1933
1934        /* ARI is a PCIe v2 feature */
1935        pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1936        if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1937                return;
1938
1939        pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1940        if (!(cap & PCI_EXP_DEVCAP2_ARI))
1941                return;
1942
1943        pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1944        ctrl |= PCI_EXP_DEVCTL2_ARI;
1945        pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1946
1947        bridge->ari_enabled = 1;
1948}
1949
1950/**
1951 * pci_enable_ido - enable ID-based ordering on a device
1952 * @dev: the PCI device
1953 * @type: which types of IDO to enable
1954 *
1955 * Enable ID-based ordering on @dev.  @type can contain the bits
1956 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1957 * which types of transactions are allowed to be re-ordered.
1958 */
1959void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1960{
1961        int pos;
1962        u16 ctrl;
1963
1964        pos = pci_pcie_cap(dev);
1965        if (!pos)
1966                return;
1967
1968        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1969        if (type & PCI_EXP_IDO_REQUEST)
1970                ctrl |= PCI_EXP_IDO_REQ_EN;
1971        if (type & PCI_EXP_IDO_COMPLETION)
1972                ctrl |= PCI_EXP_IDO_CMP_EN;
1973        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1974}
1975EXPORT_SYMBOL(pci_enable_ido);
1976
1977/**
1978 * pci_disable_ido - disable ID-based ordering on a device
1979 * @dev: the PCI device
1980 * @type: which types of IDO to disable
1981 */
1982void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1983{
1984        int pos;
1985        u16 ctrl;
1986
1987        if (!pci_is_pcie(dev))
1988                return;
1989
1990        pos = pci_pcie_cap(dev);
1991        if (!pos)
1992                return;
1993
1994        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1995        if (type & PCI_EXP_IDO_REQUEST)
1996                ctrl &= ~PCI_EXP_IDO_REQ_EN;
1997        if (type & PCI_EXP_IDO_COMPLETION)
1998                ctrl &= ~PCI_EXP_IDO_CMP_EN;
1999        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2000}
2001EXPORT_SYMBOL(pci_disable_ido);
2002
2003/**
2004 * pci_enable_obff - enable optimized buffer flush/fill
2005 * @dev: PCI device
2006 * @type: type of signaling to use
2007 *
2008 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2009 * signaling if possible, falling back to message signaling only if
2010 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2011 * be brought out of L0s or L1 to send the message.  It should be either
2012 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2013 *
2014 * If your device can benefit from receiving all messages, even at the
2015 * power cost of bringing the link back up from a low power state, use
2016 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
2017 * preferred type).
2018 *
2019 * RETURNS:
2020 * Zero on success, appropriate error number on failure.
2021 */
2022int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2023{
2024        int pos;
2025        u32 cap;
2026        u16 ctrl;
2027        int ret;
2028
2029        if (!pci_is_pcie(dev))
2030                return -ENOTSUPP;
2031
2032        pos = pci_pcie_cap(dev);
2033        if (!pos)
2034                return -ENOTSUPP;
2035
2036        pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2037        if (!(cap & PCI_EXP_OBFF_MASK))
2038                return -ENOTSUPP; /* no OBFF support at all */
2039
2040        /* Make sure the topology supports OBFF as well */
2041        if (dev->bus) {
2042                ret = pci_enable_obff(dev->bus->self, type);
2043                if (ret)
2044                        return ret;
2045        }
2046
2047        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2048        if (cap & PCI_EXP_OBFF_WAKE)
2049                ctrl |= PCI_EXP_OBFF_WAKE_EN;
2050        else {
2051                switch (type) {
2052                case PCI_EXP_OBFF_SIGNAL_L0:
2053                        if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2054                                ctrl |= PCI_EXP_OBFF_MSGA_EN;
2055                        break;
2056                case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2057                        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2058                        ctrl |= PCI_EXP_OBFF_MSGB_EN;
2059                        break;
2060                default:
2061                        WARN(1, "bad OBFF signal type\n");
2062                        return -ENOTSUPP;
2063                }
2064        }
2065        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2066
2067        return 0;
2068}
2069EXPORT_SYMBOL(pci_enable_obff);
2070
2071/**
2072 * pci_disable_obff - disable optimized buffer flush/fill
2073 * @dev: PCI device
2074 *
2075 * Disable OBFF on @dev.
2076 */
2077void pci_disable_obff(struct pci_dev *dev)
2078{
2079        int pos;
2080        u16 ctrl;
2081
2082        if (!pci_is_pcie(dev))
2083                return;
2084
2085        pos = pci_pcie_cap(dev);
2086        if (!pos)
2087                return;
2088
2089        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2090        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2091        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2092}
2093EXPORT_SYMBOL(pci_disable_obff);
2094
2095/**
2096 * pci_ltr_supported - check whether a device supports LTR
2097 * @dev: PCI device
2098 *
2099 * RETURNS:
2100 * True if @dev supports latency tolerance reporting, false otherwise.
2101 */
2102bool pci_ltr_supported(struct pci_dev *dev)
2103{
2104        int pos;
2105        u32 cap;
2106
2107        if (!pci_is_pcie(dev))
2108                return false;
2109
2110        pos = pci_pcie_cap(dev);
2111        if (!pos)
2112                return false;
2113
2114        pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2115
2116        return cap & PCI_EXP_DEVCAP2_LTR;
2117}
2118EXPORT_SYMBOL(pci_ltr_supported);
2119
2120/**
2121 * pci_enable_ltr - enable latency tolerance reporting
2122 * @dev: PCI device
2123 *
2124 * Enable LTR on @dev if possible, which means enabling it first on
2125 * upstream ports.
2126 *
2127 * RETURNS:
2128 * Zero on success, errno on failure.
2129 */
2130int pci_enable_ltr(struct pci_dev *dev)
2131{
2132        int pos;
2133        u16 ctrl;
2134        int ret;
2135
2136        if (!pci_ltr_supported(dev))
2137                return -ENOTSUPP;
2138
2139        pos = pci_pcie_cap(dev);
2140        if (!pos)
2141                return -ENOTSUPP;
2142
2143        /* Only primary function can enable/disable LTR */
2144        if (PCI_FUNC(dev->devfn) != 0)
2145                return -EINVAL;
2146
2147        /* Enable upstream ports first */
2148        if (dev->bus) {
2149                ret = pci_enable_ltr(dev->bus->self);
2150                if (ret)
2151                        return ret;
2152        }
2153
2154        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2155        ctrl |= PCI_EXP_LTR_EN;
2156        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2157
2158        return 0;
2159}
2160EXPORT_SYMBOL(pci_enable_ltr);
2161
2162/**
2163 * pci_disable_ltr - disable latency tolerance reporting
2164 * @dev: PCI device
2165 */
2166void pci_disable_ltr(struct pci_dev *dev)
2167{
2168        int pos;
2169        u16 ctrl;
2170
2171        if (!pci_ltr_supported(dev))
2172                return;
2173
2174        pos = pci_pcie_cap(dev);
2175        if (!pos)
2176                return;
2177
2178        /* Only primary function can enable/disable LTR */
2179        if (PCI_FUNC(dev->devfn) != 0)
2180                return;
2181
2182        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2183        ctrl &= ~PCI_EXP_LTR_EN;
2184        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2185}
2186EXPORT_SYMBOL(pci_disable_ltr);
2187
/*
 * Reduce *val to fit in the 10-bit LTR value field by repeatedly dividing
 * by 32 (rounding up); return the number of divisions, i.e. the LTR scale.
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2198
2199/**
2200 * pci_set_ltr - set LTR latency values
2201 * @dev: PCI device
2202 * @snoop_lat_ns: snoop latency in nanoseconds
2203 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2204 *
2205 * Figure out the scale and set the LTR values accordingly.
2206 */
2207int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2208{
2209        int pos, ret, snoop_scale, nosnoop_scale;
2210        u16 val;
2211
2212        if (!pci_ltr_supported(dev))
2213                return -ENOTSUPP;
2214
2215        snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2216        nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2217
2218        if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2219            nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2220                return -EINVAL;
2221
2222        if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2223            (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2224                return -EINVAL;
2225
2226        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2227        if (!pos)
2228                return -ENOTSUPP;
2229
2230        val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2231        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2232        if (ret != 4)
2233                return -EIO;
2234
2235        val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2236        ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2237        if (ret != 4)
2238                return -EIO;
2239
2240        return 0;
2241}
2242EXPORT_SYMBOL(pci_set_ltr);
2243
/* Set by pci_request_acs(); read by pci_enable_acs() during enumeration. */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 *
 * Sets a global flag; devices enumerated afterwards will have ACS
 * enabled by pci_enable_acs() if their hardware supports it.
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2253
2254/**
2255 * pci_enable_acs - enable ACS if hardware support it
2256 * @dev: the PCI device
2257 */
2258void pci_enable_acs(struct pci_dev *dev)
2259{
2260        int pos;
2261        u16 cap;
2262        u16 ctrl;
2263
2264        if (!pci_acs_enable)
2265                return;
2266
2267        if (!pci_is_pcie(dev))
2268                return;
2269
2270        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2271        if (!pos)
2272                return;
2273
2274        pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2275        pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2276
2277        /* Source Validation */
2278        ctrl |= (cap & PCI_ACS_SV);
2279
2280        /* P2P Request Redirect */
2281        ctrl |= (cap & PCI_ACS_RR);
2282
2283        /* P2P Completion Redirect */
2284        ctrl |= (cap & PCI_ACS_CR);
2285
2286        /* Upstream Forwarding */
2287        ctrl |= (cap & PCI_ACS_UF);
2288
2289        pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2290}
2291
2292/**
2293 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2294 * @dev: the PCI device
2295 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD)
2296 *
2297 * Perform INTx swizzling for a device behind one level of bridge.  This is
2298 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2299 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2300 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2301 * the PCI Express Base Specification, Revision 2.1)
2302 */
2303u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2304{
2305        int slot;
2306
2307        if (pci_ari_enabled(dev->bus))
2308                slot = 0;
2309        else
2310                slot = PCI_SLOT(dev->devfn);
2311
2312        return (((pin - 1) + slot) % 4) + 1;
2313}
2314
2315int
2316pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2317{
2318        u8 pin;
2319
2320        pin = dev->pin;
2321        if (!pin)
2322                return -1;
2323
2324        while (!pci_is_root_bus(dev->bus)) {
2325                pin = pci_swizzle_interrupt_pin(dev, pin);
2326                dev = dev->bus->self;
2327        }
2328        *bridge = dev;
2329        return pin;
2330}
2331
2332/**
2333 * pci_common_swizzle - swizzle INTx all the way to root bridge
2334 * @dev: the PCI device
2335 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
2336 *
2337 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2338 * bridges all the way up to a PCI root bus.
2339 */
2340u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2341{
2342        u8 pin = *pinp;
2343
2344        while (!pci_is_root_bus(dev->bus)) {
2345                pin = pci_swizzle_interrupt_pin(dev, pin);
2346                dev = dev->bus->self;
2347        }
2348        *pinp = pin;
2349        return PCI_SLOT(dev->devfn);
2350}
2351
2352/**
2353 *      pci_release_region - Release a PCI bar
2354 *      @pdev: PCI device whose resources were previously reserved by pci_request_region
2355 *      @bar: BAR to release
2356 *
2357 *      Releases the PCI I/O and memory resources previously reserved by a
2358 *      successful call to pci_request_region.  Call this function only
2359 *      after all use of the PCI regions has ceased.
2360 */
2361void pci_release_region(struct pci_dev *pdev, int bar)
2362{
2363        struct pci_devres *dr;
2364
2365        if (pci_resource_len(pdev, bar) == 0)
2366                return;
2367        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2368                release_region(pci_resource_start(pdev, bar),
2369                                pci_resource_len(pdev, bar));
2370        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2371                release_mem_region(pci_resource_start(pdev, bar),
2372                                pci_resource_len(pdev, bar));
2373
2374        dr = find_pci_dr(pdev);
2375        if (dr)
2376                dr->region_mask &= ~(1 << bar);
2377}
2378
2379/**
2380 *      __pci_request_region - Reserved PCI I/O and memory resource
2381 *      @pdev: PCI device whose resources are to be reserved
2382 *      @bar: BAR to be reserved
2383 *      @res_name: Name to be associated with resource.
2384 *      @exclusive: whether the region access is exclusive or not
2385 *
2386 *      Mark the PCI region associated with PCI device @pdev BR @bar as
2387 *      being reserved by owner @res_name.  Do not access any
2388 *      address inside the PCI regions unless this call returns
2389 *      successfully.
2390 *
2391 *      If @exclusive is set, then the region is marked so that userspace
2392 *      is explicitly not allowed to map the resource via /dev/mem or
2393 *      sysfs MMIO access.
2394 *
2395 *      Returns 0 on success, or %EBUSY on error.  A warning
2396 *      message is also printed on failure.
2397 */
2398static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2399                                                                        int exclusive)
2400{
2401        struct pci_devres *dr;
2402
2403        if (pci_resource_len(pdev, bar) == 0)
2404                return 0;
2405                
2406        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2407                if (!request_region(pci_resource_start(pdev, bar),
2408                            pci_resource_len(pdev, bar), res_name))
2409                        goto err_out;
2410        }
2411        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2412                if (!__request_mem_region(pci_resource_start(pdev, bar),
2413                                        pci_resource_len(pdev, bar), res_name,
2414                                        exclusive))
2415                        goto err_out;
2416        }
2417
2418        dr = find_pci_dr(pdev);
2419        if (dr)
2420                dr->region_mask |= 1 << bar;
2421
2422        return 0;
2423
2424err_out:
2425        dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2426                 &pdev->resource[bar]);
2427        return -EBUSY;
2428}
2429
2430/**
2431 *      pci_request_region - Reserve PCI I/O and memory resource
2432 *      @pdev: PCI device whose resources are to be reserved
2433 *      @bar: BAR to be reserved
2434 *      @res_name: Name to be associated with resource
2435 *
2436 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
2437 *      being reserved by owner @res_name.  Do not access any
2438 *      address inside the PCI regions unless this call returns
2439 *      successfully.
2440 *
2441 *      Returns 0 on success, or %EBUSY on error.  A warning
2442 *      message is also printed on failure.
2443 */
2444int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2445{
2446        return __pci_request_region(pdev, bar, res_name, 0);
2447}
2448
2449/**
2450 *      pci_request_region_exclusive - Reserved PCI I/O and memory resource
2451 *      @pdev: PCI device whose resources are to be reserved
2452 *      @bar: BAR to be reserved
2453 *      @res_name: Name to be associated with resource.
2454 *
2455 *      Mark the PCI region associated with PCI device @pdev BR @bar as
2456 *      being reserved by owner @res_name.  Do not access any
2457 *      address inside the PCI regions unless this call returns
2458 *      successfully.
2459 *
2460 *      Returns 0 on success, or %EBUSY on error.  A warning
2461 *      message is also printed on failure.
2462 *
2463 *      The key difference that _exclusive makes it that userspace is
2464 *      explicitly not allowed to map the resource via /dev/mem or
2465 *      sysfs.
2466 */
2467int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2468{
2469        return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2470}
2471/**
2472 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2473 * @pdev: PCI device whose resources were previously reserved
2474 * @bars: Bitmask of BARs to be released
2475 *
2476 * Release selected PCI I/O and memory resources previously reserved.
2477 * Call this function only after all use of the PCI regions has ceased.
2478 */
2479void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2480{
2481        int i;
2482
2483        for (i = 0; i < 6; i++)
2484                if (bars & (1 << i))
2485                        pci_release_region(pdev, i);
2486}
2487
2488int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2489                                 const char *res_name, int excl)
2490{
2491        int i;
2492
2493        for (i = 0; i < 6; i++)
2494                if (bars & (1 << i))
2495                        if (__pci_request_region(pdev, i, res_name, excl))
2496                                goto err_out;
2497        return 0;
2498
2499err_out:
2500        while(--i >= 0)
2501                if (bars & (1 << i))
2502                        pci_release_region(pdev, i);
2503
2504        return -EBUSY;
2505}
2506
2507
2508/**
2509 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2510 * @pdev: PCI device whose resources are to be reserved
2511 * @bars: Bitmask of BARs to be requested
2512 * @res_name: Name to be associated with resource
2513 */
2514int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2515                                 const char *res_name)
2516{
2517        return __pci_request_selected_regions(pdev, bars, res_name, 0);
2518}
2519
/*
 * Like pci_request_selected_regions(), but the regions are additionally
 * marked exclusive so userspace cannot map them via /dev/mem or sysfs.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
2526
2527/**
2528 *      pci_release_regions - Release reserved PCI I/O and memory resources
2529 *      @pdev: PCI device whose resources were previously reserved by pci_request_regions
2530 *
2531 *      Releases all PCI I/O and memory resources previously reserved by a
2532 *      successful call to pci_request_regions.  Call this function only
2533 *      after all use of the PCI regions has ceased.
2534 */
2535
2536void pci_release_regions(struct pci_dev *pdev)
2537{
2538        pci_release_selected_regions(pdev, (1 << 6) - 1);
2539}
2540
2541/**
2542 *      pci_request_regions - Reserved PCI I/O and memory resources
2543 *      @pdev: PCI device whose resources are to be reserved
2544 *      @res_name: Name to be associated with resource.
2545 *
2546 *      Mark all PCI regions associated with PCI device @pdev as
2547 *      being reserved by owner @res_name.  Do not access any
2548 *      address inside the PCI regions unless this call returns
2549 *      successfully.
2550 *
2551 *      Returns 0 on success, or %EBUSY on error.  A warning
2552 *      message is also printed on failure.
2553 */
2554int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2555{
2556        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2557}
2558
2559/**
2560 *      pci_request_regions_exclusive - Reserved PCI I/O and memory resources
2561 *      @pdev: PCI device whose resources are to be reserved
2562 *      @res_name: Name to be associated with resource.
2563 *
2564 *      Mark all PCI regions associated with PCI device @pdev as
2565 *      being reserved by owner @res_name.  Do not access any
2566 *      address inside the PCI regions unless this call returns
2567 *      successfully.
2568 *
2569 *      pci_request_regions_exclusive() will mark the region so that
2570 *      /dev/mem and the sysfs MMIO access will not be allowed.
2571 *
2572 *      Returns 0 on success, or %EBUSY on error.  A warning
2573 *      message is also printed on failure.
2574 */
2575int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2576{
2577        return pci_request_selected_regions_exclusive(pdev,
2578                                        ((1 << 6) - 1), res_name);
2579}
2580
2581static void __pci_set_master(struct pci_dev *dev, bool enable)
2582{
2583        u16 old_cmd, cmd;
2584
2585        pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2586        if (enable)
2587                cmd = old_cmd | PCI_COMMAND_MASTER;
2588        else
2589                cmd = old_cmd & ~PCI_COMMAND_MASTER;
2590        if (cmd != old_cmd) {
2591                dev_dbg(&dev->dev, "%s bus mastering\n",
2592                        enable ? "enabling" : "disabling");
2593                pci_write_config_word(dev, PCI_COMMAND, cmd);
2594        }
2595        dev->is_busmaster = enable;
2596}
2597
2598/**
2599 * pci_set_master - enables bus-mastering for device dev
2600 * @dev: the PCI device to enable
2601 *
2602 * Enables bus-mastering on the device and calls pcibios_set_master()
2603 * to do the needed arch specific settings.
2604 */
2605void pci_set_master(struct pci_dev *dev)
2606{
2607        __pci_set_master(dev, true);
2608        pcibios_set_master(dev);
2609}
2610
2611/**
2612 * pci_clear_master - disables bus-mastering for device dev
2613 * @dev: the PCI device to disable
2614 */
2615void pci_clear_master(struct pci_dev *dev)
2616{
2617        __pci_set_master(dev, false);
2618}
2619
2620/**
2621 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2622 * @dev: the PCI device for which MWI is to be enabled
2623 *
2624 * Helper function for pci_set_mwi.
2625 * Originally copied from drivers/net/acenic.c.
2626 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2627 *
2628 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2629 */
2630int pci_set_cacheline_size(struct pci_dev *dev)
2631{
2632        u8 cacheline_size;
2633
2634        if (!pci_cache_line_size)
2635                return -EINVAL;
2636
2637        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2638           equal to or multiple of the right value. */
2639        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2640        if (cacheline_size >= pci_cache_line_size &&
2641            (cacheline_size % pci_cache_line_size) == 0)
2642                return 0;
2643
2644        /* Write the correct value. */
2645        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2646        /* Read it back. */
2647        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2648        if (cacheline_size == pci_cache_line_size)
2649                return 0;
2650
2651        dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2652                   "supported\n", pci_cache_line_size << 2);
2653
2654        return -EINVAL;
2655}
2656EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2657
2658#ifdef PCI_DISABLE_MWI
/* MWI is disabled on this platform; report success so callers carry on. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

/* MWI is disabled on this platform; report success so callers carry on. */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

/* MWI is disabled on this platform; nothing to clear. */
void pci_clear_mwi(struct pci_dev *dev)
{
}
2672
2673#else
2674
2675/**
2676 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2677 * @dev: the PCI device for which MWI is enabled
2678 *
2679 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2680 *
2681 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2682 */
2683int
2684pci_set_mwi(struct pci_dev *dev)
2685{
2686        int rc;
2687        u16 cmd;
2688
2689        rc = pci_set_cacheline_size(dev);
2690        if (rc)
2691                return rc;
2692
2693        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2694        if (! (cmd & PCI_COMMAND_INVALIDATE)) {
2695                dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2696                cmd |= PCI_COMMAND_INVALIDATE;
2697                pci_write_config_word(dev, PCI_COMMAND, cmd);
2698        }
2699        
2700        return 0;
2701}
2702
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
        return pci_set_mwi(dev);
}
2717
2718/**
2719 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2720 * @dev: the PCI device to disable
2721 *
2722 * Disables PCI Memory-Write-Invalidate transaction on the device
2723 */
2724void
2725pci_clear_mwi(struct pci_dev *dev)
2726{
2727        u16 cmd;
2728
2729        pci_read_config_word(dev, PCI_COMMAND, &cmd);
2730        if (cmd & PCI_COMMAND_INVALIDATE) {
2731                cmd &= ~PCI_COMMAND_INVALIDATE;
2732                pci_write_config_word(dev, PCI_COMMAND, cmd);
2733        }
2734}
2735#endif /* ! PCI_DISABLE_MWI */
2736
2737/**
2738 * pci_intx - enables/disables PCI INTx for device dev
2739 * @pdev: the PCI device to operate on
2740 * @enable: boolean: whether to enable or disable PCI INTx
2741 *
2742 * Enables/disables PCI INTx for device dev
2743 */
2744void
2745pci_intx(struct pci_dev *pdev, int enable)
2746{
2747        u16 pci_command, new;
2748
2749        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2750
2751        if (enable) {
2752                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2753        } else {
2754                new = pci_command | PCI_COMMAND_INTX_DISABLE;
2755        }
2756
2757        if (new != pci_command) {
2758                struct pci_devres *dr;
2759
2760                pci_write_config_word(pdev, PCI_COMMAND, new);
2761
2762                dr = find_pci_dr(pdev);
2763                if (dr && !dr->restore_intx) {
2764                        dr->restore_intx = 1;
2765                        dr->orig_intx = !enable;
2766                }
2767        }
2768}
2769
2770/**
2771 * pci_msi_off - disables any msi or msix capabilities
2772 * @dev: the PCI device to operate on
2773 *
2774 * If you want to use msi see pci_enable_msi and friends.
2775 * This is a lower level primitive that allows us to disable
2776 * msi operation at the device level.
2777 */
2778void pci_msi_off(struct pci_dev *dev)
2779{
2780        int pos;
2781        u16 control;
2782
2783        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2784        if (pos) {
2785                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2786                control &= ~PCI_MSI_FLAGS_ENABLE;
2787                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2788        }
2789        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2790        if (pos) {
2791                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2792                control &= ~PCI_MSIX_FLAGS_ENABLE;
2793                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2794        }
2795}
2796EXPORT_SYMBOL_GPL(pci_msi_off);
2797
/*
 * pci_set_dma_max_seg_size - set the maximum DMA segment size for @dev
 * Thin wrapper delegating to the generic DMA layer for &dev->dev.
 */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
        return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2803
/*
 * pci_set_dma_seg_boundary - set the DMA segment boundary mask for @dev
 * Thin wrapper delegating to the generic DMA layer for &dev->dev.
 */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
        return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2809
/*
 * pcie_flr - reset a function with PCIe Function Level Reset
 * @dev: device to reset
 * @probe: if set, only check whether FLR is available; don't reset
 *
 * Returns -ENOTTY when the device has no PCIe capability or does not
 * advertise FLR support, 0 otherwise.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
        int i;
        int pos;
        u32 cap;
        u16 status, control;

        pos = pci_pcie_cap(dev);
        if (!pos)
                return -ENOTTY;

        /* FLR support is advertised in the Device Capabilities register. */
        pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
        if (!(cap & PCI_EXP_DEVCAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /* Wait for Transaction Pending bit clean */
        /* Exponential backoff: poll immediately, then 100/200/400 ms. */
        for (i = 0; i < 4; i++) {
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
                if (!(status & PCI_EXP_DEVSTA_TRPND))
                        goto clear;
        }

        dev_err(&dev->dev, "transaction is not cleared; "
                        "proceeding with reset anyway\n");

clear:
        /* Trigger the FLR, then allow 100 ms for the function to recover. */
        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
        control |= PCI_EXP_DEVCTL_BCR_FLR;
        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

        msleep(100);

        return 0;
}
2850
/*
 * pci_af_flr - reset a function with the Advanced Features FLR mechanism
 * @dev: device to reset
 * @probe: if set, only check whether AF FLR is available; don't reset
 *
 * Returns -ENOTTY when the AF capability is absent or does not advertise
 * both transaction-pending reporting and FLR, 0 otherwise.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
        int i;
        int pos;
        u8 cap;
        u8 status;

        pos = pci_find_capability(dev, PCI_CAP_ID_AF);
        if (!pos)
                return -ENOTTY;

        /* Both TP (status reporting) and FLR must be supported. */
        pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
        if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
                return -ENOTTY;

        if (probe)
                return 0;

        /* Wait for Transaction Pending bit clean */
        /* Exponential backoff: poll immediately, then 100/200/400 ms. */
        for (i = 0; i < 4; i++) {
                if (i)
                        msleep((1 << (i - 1)) * 100);

                pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
                if (!(status & PCI_AF_STATUS_TP))
                        goto clear;
        }

        dev_err(&dev->dev, "transaction is not cleared; "
                        "proceeding with reset anyway\n");

clear:
        /* Trigger the reset, then allow 100 ms for recovery. */
        pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
        msleep(100);

        return 0;
}
2888
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
        u16 csr;

        if (!dev->pm_cap)
                return -ENOTTY;

        /* NO_SOFT_RESET means D3hot->D0 will NOT reset internal state. */
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
        if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
                return -ENOTTY;

        if (probe)
                return 0;

        if (dev->current_state != PCI_D0)
                return -EINVAL;

        /* D0 -> D3hot, honoring the per-device transition delay. */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D3hot;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        /* D3hot -> D0; this transition performs the actual soft reset. */
        csr &= ~PCI_PM_CTRL_STATE_MASK;
        csr |= PCI_D0;
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);

        return 0;
}
2933
/*
 * pci_parent_bus_reset - reset a function via a secondary bus reset on its
 * parent bridge
 * @dev: device to reset
 * @probe: if set, only check whether this method is applicable
 *
 * Only usable when @dev is the sole device on its bus (a bus reset would
 * otherwise disturb siblings) and the bus has a parent bridge.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
        u16 ctrl;
        struct pci_dev *pdev;

        if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
                return -ENOTTY;

        /* Refuse if any other device shares the bus. */
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
                if (pdev != dev)
                        return -ENOTTY;

        if (probe)
                return 0;

        /* Assert, then deassert, the bridge's secondary bus reset line,
           allowing 100 ms settle time after each transition. */
        pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
        msleep(100);

        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
        msleep(100);

        return 0;
}
2960
/*
 * pci_dev_reset - try each available reset method in order of preference
 * @dev: device to reset
 * @probe: if set, only report whether some method is available
 *
 * Tries device-specific quirk reset, then PCIe FLR, AF FLR, PM reset and
 * finally a parent-bus reset; the first method that does not return
 * -ENOTTY decides the result.  For a real reset (!probe) the device is
 * protected from concurrent config access and driver-core activity.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
        int rc;

        might_sleep();

        if (!probe) {
                pci_block_user_cfg_access(dev);
                /* block PM suspend, driver probe, etc. */
                device_lock(&dev->dev);
        }

        rc = pci_dev_specific_reset(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pcie_flr(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_af_flr(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_pm_reset(dev, probe);
        if (rc != -ENOTTY)
                goto done;

        rc = pci_parent_bus_reset(dev, probe);
done:
        /* Unlock in reverse order of acquisition. */
        if (!probe) {
                device_unlock(&dev->dev);
                pci_unblock_user_cfg_access(dev);
        }

        return rc;
}
2998
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
        /* Unlike pci_reset_function(), no state is saved/restored here. */
        return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
3021
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
        /* probe=1: only report availability, never touch the device. */
        return pci_dev_reset(dev, 1);
}
3037
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
        int rc;

        /* Probe first; bail out before touching the device if no reset
           method is available. */
        rc = pci_dev_reset(dev, 1);
        if (rc)
                return rc;

        pci_save_state(dev);

        /*
         * both INTx and MSI are disabled after the Interrupt Disable bit
         * is set and the Bus Master bit is cleared.
         */
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

        rc = pci_dev_reset(dev, 0);

        pci_restore_state(dev);

        return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
3077
3078/**
3079 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3080 * @dev: PCI device to query
3081 *
3082 * Returns mmrbc: maximum designed memory read count in bytes
3083 *    or appropriate error value.
3084 */
3085int pcix_get_max_mmrbc(struct pci_dev *dev)
3086{
3087        int cap;
3088        u32 stat;
3089
3090        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3091        if (!cap)
3092                return -EINVAL;
3093
3094        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3095                return -EINVAL;
3096
3097        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3098}
3099EXPORT_SYMBOL(pcix_get_max_mmrbc);
3100
3101/**
3102 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3103 * @dev: PCI device to query
3104 *
3105 * Returns mmrbc: maximum memory read count in bytes
3106 *    or appropriate error value.
3107 */
3108int pcix_get_mmrbc(struct pci_dev *dev)
3109{
3110        int cap;
3111        u16 cmd;
3112
3113        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3114        if (!cap)
3115                return -EINVAL;
3116
3117        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3118                return -EINVAL;
3119
3120        return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3121}
3122EXPORT_SYMBOL(pcix_get_mmrbc);
3123
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *    valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have erratas
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
        int cap;
        u32 stat, v, o;
        u16 cmd;

        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
                return -EINVAL;

        /* Encode: 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3. */
        v = ffs(mmrbc) - 10;

        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!cap)
                return -EINVAL;

        if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
                return -EINVAL;

        /* Cannot exceed the device's designed maximum (status bits 22:21). */
        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
                return -E2BIG;

        if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
                return -EINVAL;

        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
        if (o != v) {
                /* Some bridges (PCI_BUS_FLAGS_NO_MMRBC) forbid raising it. */
                if (v > o && dev->bus &&
                   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
                        return -EIO;

                cmd &= ~PCI_X_CMD_MAX_READ;
                cmd |= v << 2;
                if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
3171
3172/**
3173 * pcie_get_readrq - get PCI Express read request size
3174 * @dev: PCI device to query
3175 *
3176 * Returns maximum memory read request in bytes
3177 *    or appropriate error value.
3178 */
3179int pcie_get_readrq(struct pci_dev *dev)
3180{
3181        int ret, cap;
3182        u16 ctl;
3183
3184        cap = pci_pcie_cap(dev);
3185        if (!cap)
3186                return -EINVAL;
3187
3188        ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3189        if (!ret)
3190                ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3191
3192        return ret;
3193}
3194EXPORT_SYMBOL(pcie_get_readrq);
3195
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
        int cap, err = -EINVAL;
        u16 ctl, v;

        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
                goto out;

        cap = pci_pcie_cap(dev);
        if (!cap)
                goto out;

        err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
        if (err)
                goto out;
        /*
         * If using the "performance" PCIe config, we clamp the
         * read rq size to the max packet size to prevent the
         * host bridge generating requests larger than we can
         * cope with
         */
        if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
                int mps = pcie_get_mps(dev);

                if (mps < 0)
                        return mps;
                if (mps < rq)
                        rq = mps;
        }

        /* Encode: 128 -> 0, ..., 4096 -> 5, into DEVCTL bits 14:12. */
        v = (ffs(rq) - 8) << 12;

        /* Only write config space if the value actually changes. */
        if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
                ctl &= ~PCI_EXP_DEVCTL_READRQ;
                ctl |= v;
                err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
        }

out:
        return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
3246
3247/**
3248 * pcie_get_mps - get PCI Express maximum payload size
3249 * @dev: PCI device to query
3250 *
3251 * Returns maximum payload size in bytes
3252 *    or appropriate error value.
3253 */
3254int pcie_get_mps(struct pci_dev *dev)
3255{
3256        int ret, cap;
3257        u16 ctl;
3258
3259        cap = pci_pcie_cap(dev);
3260        if (!cap)
3261                return -EINVAL;
3262
3263        ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3264        if (!ret)
3265                ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3266
3267        return ret;
3268}
3269
3270/**
3271 * pcie_set_mps - set PCI Express maximum payload size
3272 * @dev: PCI device to query
3273 * @mps: maximum payload size in bytes
3274 *    valid values are 128, 256, 512, 1024, 2048, 4096
3275 *
3276 * If possible sets maximum payload size
3277 */
3278int pcie_set_mps(struct pci_dev *dev, int mps)
3279{
3280        int cap, err = -EINVAL;
3281        u16 ctl, v;
3282
3283        if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3284                goto out;
3285
3286        v = ffs(mps) - 8;
3287        if (v > dev->pcie_mpss) 
3288                goto out;
3289        v <<= 5;
3290
3291        cap = pci_pcie_cap(dev);
3292        if (!cap)
3293                goto out;
3294
3295        err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3296        if (err)
3297                goto out;
3298
3299        if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3300                ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3301                ctl |= v;
3302                err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3303        }
3304out:
3305        return err;
3306}
3307
3308/**
3309 * pci_select_bars - Make BAR mask from the type of resource
3310 * @dev: the PCI device for which BAR mask is made
3311 * @flags: resource type mask to be selected
3312 *
3313 * This helper routine makes bar mask from the type of resource.
3314 */
3315int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3316{
3317        int i, bars = 0;
3318        for (i = 0; i < PCI_NUM_RESOURCES; i++)
3319                if (pci_resource_flags(dev, i) & flags)
3320                        bars |= (1 << i);
3321        return bars;
3322}
3323
3324/**
3325 * pci_resource_bar - get position of the BAR associated with a resource
3326 * @dev: the PCI device
3327 * @resno: the resource number
3328 * @type: the BAR type to be filled in
3329 *
3330 * Returns BAR position in config space, or 0 if the BAR is invalid.
3331 */
3332int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3333{
3334        int reg;
3335
3336        if (resno < PCI_ROM_RESOURCE) {
3337                *type = pci_bar_unknown;
3338                return PCI_BASE_ADDRESS_0 + 4 * resno;
3339        } else if (resno == PCI_ROM_RESOURCE) {
3340                *type = pci_bar_mem32;
3341                return dev->rom_base_reg;
3342        } else if (resno < PCI_BRIDGE_RESOURCES) {
3343                /* device specific resource */
3344                reg = pci_iov_resource_bar(dev, resno, type);
3345                if (reg)
3346                        return reg;
3347        }
3348
3349        dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3350        return 0;
3351}
3352
3353/* Some architectures require additional programming to enable VGA */
3354static arch_set_vga_state_t arch_set_vga_state;
3355
/* Install an arch-specific hook called from pci_set_vga_state(); __init
   only, so it must run during boot.  Passing NULL removes the hook. */
void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
        arch_set_vga_state = func;      /* NULL disables */
}
3360
3361static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3362                      unsigned int command_bits, u32 flags)
3363{
3364        if (arch_set_vga_state)
3365                return arch_set_vga_state(dev, decode, command_bits,
3366                                                flags);
3367        return 0;
3368}
3369
3370/**
3371 * pci_set_vga_state - set VGA decode state on device and parents if requested
3372 * @dev: the PCI device
3373 * @decode: true = enable decoding, false = disable decoding
3374 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3375 * @flags: traverse ancestors and change bridges
3376 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3377 */
3378int pci_set_vga_state(struct pci_dev *dev, bool decode,
3379                      unsigned int command_bits, u32 flags)
3380{
3381        struct pci_bus *bus;
3382        struct pci_dev *bridge;
3383        u16 cmd;
3384        int rc;
3385
3386        WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3387
3388        /* ARCH specific VGA enables */
3389        rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3390        if (rc)
3391                return rc;
3392
3393        if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3394                pci_read_config_word(dev, PCI_COMMAND, &cmd);
3395                if (decode == true)
3396                        cmd |= command_bits;
3397                else
3398                        cmd &= ~command_bits;
3399                pci_write_config_word(dev, PCI_COMMAND, cmd);
3400        }
3401
3402        if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3403                return 0;
3404
3405        bus = dev->bus;
3406        while (bus) {
3407                bridge = bus->self;
3408                if (bridge) {
3409                        pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3410                                             &cmd);
3411                        if (decode == true)
3412                                cmd |= PCI_BRIDGE_CTL_VGA;
3413                        else
3414                                cmd &= ~PCI_BRIDGE_CTL_VGA;
3415                        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3416                                              cmd);
3417                }
3418                bus = bus->parent;
3419        }
3420        return 0;
3421}
3422
3423#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3424static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3425static DEFINE_SPINLOCK(resource_alignment_lock);
3426
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
        int seg, bus, slot, func, align_order, count;
        resource_size_t align = 0;
        char *p;

        /* Parameter format, ';'- or ','-separated entries:
         *   [<order>@][<domain>:]<bus>:<slot>.<func>
         * A missing <order> means "align to PAGE_SIZE". */
        spin_lock(&resource_alignment_lock);
        p = resource_alignment_param;
        while (*p) {
                count = 0;
                if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
                                                        p[count] == '@') {
                        p += count + 1;
                } else {
                        align_order = -1;
                }
                /* Try the four-part form first, fall back to domain 0. */
                if (sscanf(p, "%x:%x:%x.%x%n",
                        &seg, &bus, &slot, &func, &count) != 4) {
                        seg = 0;
                        if (sscanf(p, "%x:%x.%x%n",
                                        &bus, &slot, &func, &count) != 3) {
                                /* Invalid format */
                                printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
                                        p);
                                break;
                        }
                }
                p += count;
                if (seg == pci_domain_nr(dev->bus) &&
                        bus == dev->bus->number &&
                        slot == PCI_SLOT(dev->devfn) &&
                        func == PCI_FUNC(dev->devfn)) {
                        if (align_order == -1) {
                                align = PAGE_SIZE;
                        } else {
                                align = 1 << align_order;
                        }
                        /* Found */
                        break;
                }
                if (*p != ';' && *p != ',') {
                        /* End of param or invalid format */
                        break;
                }
                p++;
        }
        spin_unlock(&resource_alignment_lock);
        return align;
}
3483
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *          or zero is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
        /* A device is a reassignment target iff the user specified an
           alignment for it on the command line / via sysfs. */
        return pci_specified_resource_alignment(dev) != 0;
}
3495
/*
 * pci_set_resource_alignment_param - store a new alignment parameter string
 * @buf: source string (need not be NUL-terminated; @count bytes are used)
 * @count: number of bytes to copy, silently truncated to the buffer size
 *
 * Returns the number of bytes actually stored.  The strncpy plus explicit
 * terminator guarantees the stored string is always NUL-terminated.
 */
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
        if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
                count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
        spin_lock(&resource_alignment_lock);
        strncpy(resource_alignment_param, buf, count);
        resource_alignment_param[count] = '\0';
        spin_unlock(&resource_alignment_lock);
        return count;
}
3506
/*
 * pci_get_resource_alignment_param - copy the current parameter into @buf
 * @buf: destination buffer
 * @size: size of @buf
 *
 * Returns what snprintf() returns: the length the full string would have
 * had, which may exceed @size if truncated.
 */
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
        size_t count;
        spin_lock(&resource_alignment_lock);
        count = snprintf(buf, size, "%s", resource_alignment_param);
        spin_unlock(&resource_alignment_lock);
        return count;
}
3515
/* sysfs "show" for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
        return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}
3520
/* sysfs "store" for /sys/bus/pci/resource_alignment. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
                                        const char *buf, size_t count)
{
        return pci_set_resource_alignment_param(buf, count);
}

/* Bus attribute: world-readable, root-writable. */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
                                        pci_resource_alignment_store);
3529
/* Create the resource_alignment attribute once the PCI bus type exists;
   late_initcall so it runs after bus registration. */
static int __init pci_resource_alignment_sysfs_init(void)
{
        return bus_create_file(&pci_bus_type,
                                        &bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3537
/* Disable PCI domain support; invoked for the "pci=nodomains" option. */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
        pci_domains_supported = 0;
#endif
}
3544
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
        return 1;
}
3557
/* Weak default: architectures with CardBus quirks override this fixup. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3562
/*
 * pci_setup - parse the "pci=" kernel boot parameter
 *
 * Walks the comma-separated option list; pcibios_setup() gets first
 * pick of each token for arch-specific options, and anything it hands
 * back unconsumed is matched against the generic options below.
 * Unknown options are reported but never fatal; always returns 0.
 */
static int __init pci_setup(char *str)
{
        while (str) {
                char *k = strchr(str, ',');
                if (k)
                        *k++ = 0;
                if (*str && (str = pcibios_setup(str)) && *str) {
                        if (!strcmp(str, "nomsi")) {
                                pci_no_msi();
                        } else if (!strcmp(str, "noaer")) {
                                pci_no_aer();
                        } else if (!strncmp(str, "realloc", 7)) {
                                pci_realloc();
                        } else if (!strcmp(str, "nodomains")) {
                                pci_no_domains();
                        } else if (!strncmp(str, "cbiosize=", 9)) {
                                pci_cardbus_io_size = memparse(str + 9, &str);
                        } else if (!strncmp(str, "cbmemsize=", 10)) {
                                pci_cardbus_mem_size = memparse(str + 10, &str);
                        } else if (!strncmp(str, "resource_alignment=", 19)) {
                                pci_set_resource_alignment_param(str + 19,
                                                        strlen(str + 19));
                        } else if (!strncmp(str, "ecrc=", 5)) {
                                pcie_ecrc_get_policy(str + 5);
                        } else if (!strncmp(str, "hpiosize=", 9)) {
                                pci_hotplug_io_size = memparse(str + 9, &str);
                        } else if (!strncmp(str, "hpmemsize=", 10)) {
                                pci_hotplug_mem_size = memparse(str + 10, &str);
                        } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
                                pcie_bus_config = PCIE_BUS_TUNE_OFF;
                        } else if (!strncmp(str, "pcie_bus_safe", 13)) {
                                pcie_bus_config = PCIE_BUS_SAFE;
                        } else if (!strncmp(str, "pcie_bus_perf", 13)) {
                                pcie_bus_config = PCIE_BUS_PERFORMANCE;
                        } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
                                pcie_bus_config = PCIE_BUS_PEER2PEER;
                        } else {
                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
                                                str);
                        }
                }
                str = k;
        }
        return 0;
}
early_param("pci", pci_setup);
3609
/* Exported entry points: device enable/disable, capability lookup,
   region management, MWI/INTx control and resource helpers. */
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

/* Power-management related exports. */
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
3648