linux/drivers/pci/pci.c
   1/*
   2 *      $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
   3 *
   4 *      PCI Bus Services, see include/linux/pci.h for further explanation.
   5 *
   6 *      Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   7 *      David Mosberger-Tang
   8 *
   9 *      Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/delay.h>
  14#include <linux/init.h>
  15#include <linux/pci.h>
  16#include <linux/pm.h>
  17#include <linux/module.h>
  18#include <linux/spinlock.h>
  19#include <linux/string.h>
  20#include <linux/log2.h>
  21#include <asm/dma.h>    /* isa_dma_bridge_buggy */
  22#include "pci.h"
  23
  24unsigned int pci_pm_d3_delay = 10;
  25
  26#ifdef CONFIG_PCI_DOMAINS
  27int pci_domains_supported = 1;
  28#endif
  29
  30#define DEFAULT_CARDBUS_IO_SIZE         (256)
  31#define DEFAULT_CARDBUS_MEM_SIZE        (64*1024*1024)
  32/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  33unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  34unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  35
  36/**
  37 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  38 * @bus: pointer to PCI bus structure to search
  39 *
  40 * Given a PCI bus, returns the highest PCI bus number present in the set
  41 * including the given PCI bus and its list of child PCI buses.
  42 */
  43unsigned char pci_bus_max_busnr(struct pci_bus* bus)
  44{
  45        struct list_head *tmp;
  46        unsigned char max, n;
  47
  48        max = bus->subordinate;
  49        list_for_each(tmp, &bus->children) {
  50                n = pci_bus_max_busnr(pci_bus_b(tmp));
  51                if(n > max)
  52                        max = n;
  53        }
  54        return max;
  55}
  56EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
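
/*
 * Usage sketch (illustrative only; "pdev" stands for a hypothetical,
 * already-probed struct pci_dev *): find the highest bus number that is
 * reachable below the device's bus.
 *
 *        u8 last_bus = pci_bus_max_busnr(pdev->bus);
 */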
  57
  58#if 0
  59/**
  60 * pci_max_busnr - returns maximum PCI bus number
  61 *
  62 * Returns the highest PCI bus number present in the system global list of
  63 * PCI buses.
  64 */
  65unsigned char __devinit
  66pci_max_busnr(void)
  67{
  68        struct pci_bus *bus = NULL;
  69        unsigned char max, n;
  70
  71        max = 0;
  72        while ((bus = pci_find_next_bus(bus)) != NULL) {
  73                n = pci_bus_max_busnr(bus);
  74                if(n > max)
  75                        max = n;
  76        }
  77        return max;
  78}
  79
  80#endif  /*  0  */
  81
  82#define PCI_FIND_CAP_TTL        48
  83
  84static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
  85                                   u8 pos, int cap, int *ttl)
  86{
  87        u8 id;
  88
  89        while ((*ttl)--) {
  90                pci_bus_read_config_byte(bus, devfn, pos, &pos);
  91                if (pos < 0x40)
  92                        break;
  93                pos &= ~3;
  94                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
  95                                         &id);
  96                if (id == 0xff)
  97                        break;
  98                if (id == cap)
  99                        return pos;
 100                pos += PCI_CAP_LIST_NEXT;
 101        }
 102        return 0;
 103}
 104
 105static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 106                               u8 pos, int cap)
 107{
 108        int ttl = PCI_FIND_CAP_TTL;
 109
 110        return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 111}
 112
 113int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 114{
 115        return __pci_find_next_cap(dev->bus, dev->devfn,
 116                                   pos + PCI_CAP_LIST_NEXT, cap);
 117}
 118EXPORT_SYMBOL_GPL(pci_find_next_capability);
 119
 120static int __pci_bus_find_cap_start(struct pci_bus *bus,
 121                                    unsigned int devfn, u8 hdr_type)
 122{
 123        u16 status;
 124
 125        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 126        if (!(status & PCI_STATUS_CAP_LIST))
 127                return 0;
 128
 129        switch (hdr_type) {
 130        case PCI_HEADER_TYPE_NORMAL:
 131        case PCI_HEADER_TYPE_BRIDGE:
 132                return PCI_CAPABILITY_LIST;
 133        case PCI_HEADER_TYPE_CARDBUS:
 134                return PCI_CB_CAPABILITY_LIST;
 135        default:
 136                return 0;
 137        }
 138
 139        return 0;
 140}
 141
 142/**
 143 * pci_find_capability - query for devices' capabilities 
 144 * @dev: PCI device to query
 145 * @cap: capability code
 146 *
 147 * Tell if a device supports a given PCI capability.
 148 * Returns the address of the requested capability structure within the
 149 * device's PCI configuration space or 0 in case the device does not
 150 * support it.  Possible values for @cap:
 151 *
 152 *  %PCI_CAP_ID_PM           Power Management 
 153 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 154 *  %PCI_CAP_ID_VPD          Vital Product Data 
 155 *  %PCI_CAP_ID_SLOTID       Slot Identification 
 156 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 157 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 158 *  %PCI_CAP_ID_PCIX         PCI-X
 159 *  %PCI_CAP_ID_EXP          PCI Express
 160 */
 161int pci_find_capability(struct pci_dev *dev, int cap)
 162{
 163        int pos;
 164
 165        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 166        if (pos)
 167                pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 168
 169        return pos;
 170}
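
/*
 * Usage sketch (illustrative only; "pdev" is a hypothetical device): locate
 * the Power Management capability and read its control/status register.
 *
 *        int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *        u16 pmcsr;
 *
 *        if (pm)
 *                pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */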
 171
 172/**
 173 * pci_bus_find_capability - query for devices' capabilities 
 174 * @bus:   the PCI bus to query
 175 * @devfn: PCI device to query
 176 * @cap:   capability code
 177 *
 178 * Like pci_find_capability() but works for pci devices that do not have a
 179 * pci_dev structure set up yet. 
 180 *
 181 * Returns the address of the requested capability structure within the
 182 * device's PCI configuration space or 0 in case the device does not
 183 * support it.
 184 */
 185int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 186{
 187        int pos;
 188        u8 hdr_type;
 189
 190        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 191
 192        pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 193        if (pos)
 194                pos = __pci_find_next_cap(bus, devfn, pos, cap);
 195
 196        return pos;
 197}
 198
 199/**
 200 * pci_find_ext_capability - Find an extended capability
 201 * @dev: PCI device to query
 202 * @cap: capability code
 203 *
 204 * Returns the address of the requested extended capability structure
 205 * within the device's PCI configuration space or 0 if the device does
 206 * not support it.  Possible values for @cap:
 207 *
 208 *  %PCI_EXT_CAP_ID_ERR         Advanced Error Reporting
 209 *  %PCI_EXT_CAP_ID_VC          Virtual Channel
 210 *  %PCI_EXT_CAP_ID_DSN         Device Serial Number
 211 *  %PCI_EXT_CAP_ID_PWR         Power Budgeting
 212 */
 213int pci_find_ext_capability(struct pci_dev *dev, int cap)
 214{
 215        u32 header;
 216        int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
 217        int pos = 0x100;
 218
 219        if (dev->cfg_size <= 256)
 220                return 0;
 221
 222        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 223                return 0;
 224
 225        /*
 226         * If we have no capabilities, this is indicated by cap ID,
 227         * cap version and next pointer all being 0.
 228         */
 229        if (header == 0)
 230                return 0;
 231
 232        while (ttl-- > 0) {
 233                if (PCI_EXT_CAP_ID(header) == cap)
 234                        return pos;
 235
 236                pos = PCI_EXT_CAP_NEXT(header);
 237                if (pos < 0x100)
 238                        break;
 239
 240                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 241                        break;
 242        }
 243
 244        return 0;
 245}
 246EXPORT_SYMBOL_GPL(pci_find_ext_capability);
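
/*
 * Usage sketch (illustrative only): probe a hypothetical "pdev" for the
 * Advanced Error Reporting extended capability and read its uncorrectable
 * error status register.
 *
 *        u32 status;
 *        int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *
 *        if (aer)
 *                pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &status);
 */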
 247
 248static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 249{
 250        int rc, ttl = PCI_FIND_CAP_TTL;
 251        u8 cap, mask;
 252
 253        if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 254                mask = HT_3BIT_CAP_MASK;
 255        else
 256                mask = HT_5BIT_CAP_MASK;
 257
 258        pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 259                                      PCI_CAP_ID_HT, &ttl);
 260        while (pos) {
 261                rc = pci_read_config_byte(dev, pos + 3, &cap);
 262                if (rc != PCIBIOS_SUCCESSFUL)
 263                        return 0;
 264
 265                if ((cap & mask) == ht_cap)
 266                        return pos;
 267
 268                pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 269                                              pos + PCI_CAP_LIST_NEXT,
 270                                              PCI_CAP_ID_HT, &ttl);
 271        }
 272
 273        return 0;
 274}
 275/**
 276 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 277 * @dev: PCI device to query
 278 * @pos: Position from which to continue searching
 279 * @ht_cap: Hypertransport capability code
 280 *
 281 * To be used in conjunction with pci_find_ht_capability() to search for
 282 * all capabilities matching @ht_cap. @pos should always be a value returned
 283 * from pci_find_ht_capability().
 284 *
 285 * NB. To be 100% safe against broken PCI devices, the caller should take
 286 * steps to avoid an infinite loop.
 287 */
 288int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 289{
 290        return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 291}
 292EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 293
 294/**
 295 * pci_find_ht_capability - query a device's Hypertransport capabilities
 296 * @dev: PCI device to query
 297 * @ht_cap: Hypertransport capability code
 298 *
 299 * Tell if a device supports a given Hypertransport capability.
 300 * Returns an address within the device's PCI configuration space
  301 * or 0 in case the device does not support the requested capability.
 302 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 303 * which has a Hypertransport capability matching @ht_cap.
 304 */
 305int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 306{
 307        int pos;
 308
 309        pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 310        if (pos)
 311                pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 312
 313        return pos;
 314}
 315EXPORT_SYMBOL_GPL(pci_find_ht_capability);
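
/*
 * Usage sketch (illustrative only): walk all HyperTransport MSI mapping
 * capabilities of a hypothetical "pdev", bounding the loop as the kernel-doc
 * for pci_find_next_ht_capability() recommends for broken devices.
 *
 *        int guard = 64;
 *        int pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);
 *
 *        while (pos && guard--) {
 *                ... inspect the capability at "pos" ...
 *                pos = pci_find_next_ht_capability(pdev, pos,
 *                                                  HT_CAPTYPE_MSI_MAPPING);
 *        }
 */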
 316
 317/**
 318 * pci_find_parent_resource - return resource region of parent bus of given region
 319 * @dev: PCI device structure contains resources to be searched
 320 * @res: child resource record for which parent is sought
 321 *
 322 *  For given resource region of given device, return the resource
 323 *  region of parent bus the given region is contained in or where
 324 *  it should be allocated from.
 325 */
 326struct resource *
 327pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 328{
 329        const struct pci_bus *bus = dev->bus;
 330        int i;
 331        struct resource *best = NULL;
 332
 333        for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
 334                struct resource *r = bus->resource[i];
 335                if (!r)
 336                        continue;
 337                if (res->start && !(res->start >= r->start && res->end <= r->end))
 338                        continue;       /* Not contained */
 339                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
 340                        continue;       /* Wrong type */
 341                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 342                        return r;       /* Exact match */
 343                if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
 344                        best = r;       /* Approximating prefetchable by non-prefetchable */
 345        }
 346        return best;
 347}
 348
 349/**
  350 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 351 * @dev: PCI device to have its BARs restored
 352 *
 353 * Restore the BAR values for a given device, so as to make it
 354 * accessible by its driver.
 355 */
 356void
 357pci_restore_bars(struct pci_dev *dev)
 358{
 359        int i, numres;
 360
 361        switch (dev->hdr_type) {
 362        case PCI_HEADER_TYPE_NORMAL:
 363                numres = 6;
 364                break;
 365        case PCI_HEADER_TYPE_BRIDGE:
 366                numres = 2;
 367                break;
 368        case PCI_HEADER_TYPE_CARDBUS:
 369                numres = 1;
 370                break;
 371        default:
 372                /* Should never get here, but just in case... */
 373                return;
 374        }
 375
 376        for (i = 0; i < numres; i ++)
 377                pci_update_resource(dev, &dev->resource[i], i);
 378}
 379
 380int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
 381
 382/**
 383 * pci_set_power_state - Set the power state of a PCI device
 384 * @dev: PCI device to be suspended
 385 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 386 *
 387 * Transition a device to a new power state, using the Power Management 
 388 * Capabilities in the device's config space.
 389 *
 390 * RETURN VALUE: 
 391 * -EINVAL if trying to enter a lower state than we're already in.
 392 * 0 if we're already in the requested state.
 393 * -EIO if device does not support PCI PM.
 394 * 0 if we can successfully change the power state.
 395 */
 396int
 397pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 398{
 399        int pm, need_restore = 0;
 400        u16 pmcsr, pmc;
 401
 402        /* bound the state we're entering */
 403        if (state > PCI_D3hot)
 404                state = PCI_D3hot;
 405
 406        /*
 407         * If the device or the parent bridge can't support PCI PM, ignore
 408         * the request if we're doing anything besides putting it into D0
 409         * (which would only happen on boot).
 410         */
 411        if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 412                return 0;
 413
 414        /* find PCI PM capability in list */
 415        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 416
 417        /* abort if the device doesn't support PM capabilities */
 418        if (!pm)
 419                return -EIO;
 420
 421        /* Validate current state:
  422         * we can enter D0 from any state, but we can only go deeper
  423         * into sleep states if we're already in a low power state
 424         */
 425        if (state != PCI_D0 && dev->current_state > state) {
 426                printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
 427                        __FUNCTION__, pci_name(dev), state, dev->current_state);
 428                return -EINVAL;
 429        } else if (dev->current_state == state)
 430                return 0;        /* we're already there */
 431
 432
  433        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 434        if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
 435                printk(KERN_DEBUG
 436                       "PCI: %s has unsupported PM cap regs version (%u)\n",
 437                       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
 438                return -EIO;
 439        }
 440
 441        /* check if this device supports the desired state */
 442        if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
 443                return -EIO;
 444        else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
 445                return -EIO;
 446
 447        pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 448
 449        /* If we're (effectively) in D3, force entire word to 0.
 450         * This doesn't affect PME_Status, disables PME_En, and
 451         * sets PowerState to 0.
 452         */
 453        switch (dev->current_state) {
 454        case PCI_D0:
 455        case PCI_D1:
 456        case PCI_D2:
 457                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 458                pmcsr |= state;
 459                break;
 460        case PCI_UNKNOWN: /* Boot-up */
 461                if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 462                 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 463                        need_restore = 1;
 464                /* Fall-through: force to D0 */
 465        default:
 466                pmcsr = 0;
 467                break;
 468        }
 469
 470        /* enter specified state */
 471        pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
 472
 473        /* Mandatory power management transition delays */
 474        /* see PCI PM 1.1 5.6.1 table 18 */
 475        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 476                msleep(pci_pm_d3_delay);
 477        else if (state == PCI_D2 || dev->current_state == PCI_D2)
 478                udelay(200);
 479
 480        /*
  481         * Give the platform firmware a chance to act as well, e.g. via the
  482         * ACPI _PRx and _PSx methods, after the native transition above.
 483         */
 484        if (platform_pci_set_power_state)
 485                platform_pci_set_power_state(dev, state);
 486
 487        dev->current_state = state;
 488
 489        /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 490         * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 491         * from D3hot to D0 _may_ perform an internal reset, thereby
 492         * going to "D0 Uninitialized" rather than "D0 Initialized".
 493         * For example, at least some versions of the 3c905B and the
 494         * 3c556B exhibit this behaviour.
 495         *
 496         * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 497         * devices in a D3hot state at boot.  Consequently, we need to
 498         * restore at least the BARs so that the device will be
 499         * accessible to its driver.
 500         */
 501        if (need_restore)
 502                pci_restore_bars(dev);
 503
 504        return 0;
 505}
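
/*
 * Usage sketch (illustrative only): a hypothetical driver's suspend hook
 * usually saves config space before dropping into a low power state.
 *
 *        pci_save_state(pdev);
 *        pci_disable_device(pdev);
 *        pci_set_power_state(pdev, PCI_D3hot);
 */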
 506
 507pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
 508 
 509/**
 510 * pci_choose_state - Choose the power state of a PCI device
 511 * @dev: PCI device to be suspended
 512 * @state: target sleep state for the whole system. This is the value
 513 *      that is passed to suspend() function.
 514 *
 515 * Returns PCI power state suitable for given device and given system
 516 * message.
 517 */
 518
 519pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 520{
 521        pci_power_t ret;
 522
 523        if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 524                return PCI_D0;
 525
 526        if (platform_pci_choose_state) {
 527                ret = platform_pci_choose_state(dev, state);
 528                if (ret != PCI_POWER_ERROR)
 529                        return ret;
 530        }
 531
 532        switch (state.event) {
 533        case PM_EVENT_ON:
 534                return PCI_D0;
 535        case PM_EVENT_FREEZE:
 536        case PM_EVENT_PRETHAW:
 537                /* REVISIT both freeze and pre-thaw "should" use D0 */
 538        case PM_EVENT_SUSPEND:
 539                return PCI_D3hot;
 540        default:
  541                printk(KERN_ERR "Unrecognized suspend event %d\n", state.event);
 542                BUG();
 543        }
 544        return PCI_D0;
 545}
 546
 547EXPORT_SYMBOL(pci_choose_state);
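
/*
 * Usage sketch (illustrative only; "mesg" is the pm_message_t handed to a
 * hypothetical suspend handler): let the platform pick the target state.
 *
 *        pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 */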
 548
 549static int pci_save_pcie_state(struct pci_dev *dev)
 550{
 551        int pos, i = 0;
 552        struct pci_cap_saved_state *save_state;
 553        u16 *cap;
 554
 555        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 556        if (pos <= 0)
 557                return 0;
 558
 559        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 560        if (!save_state)
 561                save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
 562        if (!save_state) {
 563                dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
 564                return -ENOMEM;
 565        }
 566        cap = (u16 *)&save_state->data[0];
 567
 568        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
 569        pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 570        pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 571        pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
 572        pci_add_saved_cap(dev, save_state);
 573        return 0;
 574}
 575
 576static void pci_restore_pcie_state(struct pci_dev *dev)
 577{
 578        int i = 0, pos;
 579        struct pci_cap_saved_state *save_state;
 580        u16 *cap;
 581
 582        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 583        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 584        if (!save_state || pos <= 0)
 585                return;
 586        cap = (u16 *)&save_state->data[0];
 587
 588        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
 589        pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
 590        pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 591        pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
 592}
 593
 594
 595static int pci_save_pcix_state(struct pci_dev *dev)
 596{
 597        int pos, i = 0;
 598        struct pci_cap_saved_state *save_state;
 599        u16 *cap;
 600
 601        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 602        if (pos <= 0)
 603                return 0;
 604
  605        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 606        if (!save_state)
 607                save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
 608        if (!save_state) {
  609                dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");
 610                return -ENOMEM;
 611        }
 612        cap = (u16 *)&save_state->data[0];
 613
 614        pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
 615        pci_add_saved_cap(dev, save_state);
 616        return 0;
 617}
 618
 619static void pci_restore_pcix_state(struct pci_dev *dev)
 620{
 621        int i = 0, pos;
 622        struct pci_cap_saved_state *save_state;
 623        u16 *cap;
 624
 625        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 626        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 627        if (!save_state || pos <= 0)
 628                return;
 629        cap = (u16 *)&save_state->data[0];
 630
 631        pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 632}
 633
 634
 635/**
 636 * pci_save_state - save the PCI configuration space of a device before suspending
 637 * @dev: - PCI device that we're dealing with
 638 */
 639int
 640pci_save_state(struct pci_dev *dev)
 641{
 642        int i;
 643        /* XXX: 100% dword access ok here? */
 644        for (i = 0; i < 16; i++)
  645                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 646        if ((i = pci_save_pcie_state(dev)) != 0)
 647                return i;
 648        if ((i = pci_save_pcix_state(dev)) != 0)
 649                return i;
 650        return 0;
 651}
 652
 653/** 
 654 * pci_restore_state - Restore the saved state of a PCI device
 655 * @dev: - PCI device that we're dealing with
 656 */
 657int 
 658pci_restore_state(struct pci_dev *dev)
 659{
 660        int i;
 661        u32 val;
 662
 663        /* PCI Express register must be restored first */
 664        pci_restore_pcie_state(dev);
 665
 666        /*
 667         * The Base Address register should be programmed before the command
 668         * register(s)
 669         */
 670        for (i = 15; i >= 0; i--) {
 671                pci_read_config_dword(dev, i * 4, &val);
 672                if (val != dev->saved_config_space[i]) {
 673                        printk(KERN_DEBUG "PM: Writing back config space on "
 674                                "device %s at offset %x (was %x, writing %x)\n",
 675                                pci_name(dev), i,
 676                                val, (int)dev->saved_config_space[i]);
  677                        pci_write_config_dword(dev, i * 4,
 678                                dev->saved_config_space[i]);
 679                }
 680        }
 681        pci_restore_pcix_state(dev);
 682        pci_restore_msi_state(dev);
 683
 684        return 0;
 685}
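
/*
 * Usage sketch (illustrative only): the matching resume path brings the
 * device back to D0 and then restores the state saved at suspend time.
 *
 *        pci_set_power_state(pdev, PCI_D0);
 *        pci_restore_state(pdev);
 *        pci_enable_device(pdev);
 */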
 686
 687static int do_pci_enable_device(struct pci_dev *dev, int bars)
 688{
 689        int err;
 690
 691        err = pci_set_power_state(dev, PCI_D0);
 692        if (err < 0 && err != -EIO)
 693                return err;
 694        err = pcibios_enable_device(dev, bars);
 695        if (err < 0)
 696                return err;
 697        pci_fixup_device(pci_fixup_enable, dev);
 698
 699        return 0;
 700}
 701
 702/**
 703 * pci_reenable_device - Resume abandoned device
 704 * @dev: PCI device to be resumed
 705 *
 706 *  Note this function is a backend of pci_default_resume and is not supposed
  707 *  to be called by normal code; write a proper resume handler and use that instead.
 708 */
 709int pci_reenable_device(struct pci_dev *dev)
 710{
 711        if (atomic_read(&dev->enable_cnt))
 712                return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
 713        return 0;
 714}
 715
 716/**
 717 * pci_enable_device_bars - Initialize some of a device for use
 718 * @dev: PCI device to be initialized
 719 * @bars: bitmask of BAR's that must be configured
 720 *
 721 *  Initialize device before it's used by a driver. Ask low-level code
 722 *  to enable selected I/O and memory resources. Wake up the device if it
 723 *  was suspended. Beware, this function can fail.
 724 */
 725int
 726pci_enable_device_bars(struct pci_dev *dev, int bars)
 727{
 728        int err;
 729
 730        if (atomic_add_return(1, &dev->enable_cnt) > 1)
 731                return 0;               /* already enabled */
 732
 733        err = do_pci_enable_device(dev, bars);
 734        if (err < 0)
 735                atomic_dec(&dev->enable_cnt);
 736        return err;
 737}
 738
 739/**
 740 * pci_enable_device - Initialize device before it's used by a driver.
 741 * @dev: PCI device to be initialized
 742 *
 743 *  Initialize device before it's used by a driver. Ask low-level code
 744 *  to enable I/O and memory. Wake up the device if it was suspended.
 745 *  Beware, this function can fail.
 746 *
 747 *  Note we don't actually enable the device many times if we call
 748 *  this function repeatedly (we just increment the count).
 749 */
 750int pci_enable_device(struct pci_dev *dev)
 751{
 752        return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
 753}
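
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical): the
 * usual probe-time sequence built on the helpers above.
 *
 *        static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *        {
 *                int err = pci_enable_device(pdev);
 *
 *                if (err)
 *                        return err;
 *                pci_set_master(pdev);
 *                return 0;
 *        }
 */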
 754
 755/*
 756 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 757 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 758 * there's no need to track it separately.  pci_devres is initialized
 759 * when a device is enabled using managed PCI device enable interface.
 760 */
 761struct pci_devres {
 762        unsigned int enabled:1;
 763        unsigned int pinned:1;
 764        unsigned int orig_intx:1;
 765        unsigned int restore_intx:1;
 766        u32 region_mask;
 767};
 768
 769static void pcim_release(struct device *gendev, void *res)
 770{
 771        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
 772        struct pci_devres *this = res;
 773        int i;
 774
 775        if (dev->msi_enabled)
 776                pci_disable_msi(dev);
 777        if (dev->msix_enabled)
 778                pci_disable_msix(dev);
 779
 780        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
 781                if (this->region_mask & (1 << i))
 782                        pci_release_region(dev, i);
 783
 784        if (this->restore_intx)
 785                pci_intx(dev, this->orig_intx);
 786
 787        if (this->enabled && !this->pinned)
 788                pci_disable_device(dev);
 789}
 790
 791static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
 792{
 793        struct pci_devres *dr, *new_dr;
 794
 795        dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
 796        if (dr)
 797                return dr;
 798
 799        new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
 800        if (!new_dr)
 801                return NULL;
 802        return devres_get(&pdev->dev, new_dr, NULL, NULL);
 803}
 804
 805static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
 806{
 807        if (pci_is_managed(pdev))
 808                return devres_find(&pdev->dev, pcim_release, NULL, NULL);
 809        return NULL;
 810}
 811
 812/**
 813 * pcim_enable_device - Managed pci_enable_device()
 814 * @pdev: PCI device to be initialized
 815 *
 816 * Managed pci_enable_device().
 817 */
 818int pcim_enable_device(struct pci_dev *pdev)
 819{
 820        struct pci_devres *dr;
 821        int rc;
 822
 823        dr = get_pci_dr(pdev);
 824        if (unlikely(!dr))
 825                return -ENOMEM;
 826        WARN_ON(!!dr->enabled);
 827
 828        rc = pci_enable_device(pdev);
 829        if (!rc) {
 830                pdev->is_managed = 1;
 831                dr->enabled = 1;
 832        }
 833        return rc;
 834}
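
/*
 * Usage sketch (illustrative only): with the managed variant a hypothetical
 * driver can drop the explicit pci_disable_device() from its error and
 * remove paths; devres undoes the enable on driver detach.
 *
 *        err = pcim_enable_device(pdev);
 *        if (err)
 *                return err;
 */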
 835
 836/**
 837 * pcim_pin_device - Pin managed PCI device
 838 * @pdev: PCI device to pin
 839 *
 840 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 841 * driver detach.  @pdev must have been enabled with
 842 * pcim_enable_device().
 843 */
 844void pcim_pin_device(struct pci_dev *pdev)
 845{
 846        struct pci_devres *dr;
 847
 848        dr = find_pci_dr(pdev);
 849        WARN_ON(!dr || !dr->enabled);
 850        if (dr)
 851                dr->pinned = 1;
 852}
 853
 854/**
 855 * pcibios_disable_device - disable arch specific PCI resources for device dev
 856 * @dev: the PCI device to disable
 857 *
 858 * Disables architecture specific PCI resources for the device. This
 859 * is the default implementation. Architecture implementations can
 860 * override this.
 861 */
 862void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
 863
 864/**
 865 * pci_disable_device - Disable PCI device after use
 866 * @dev: PCI device to be disabled
 867 *
 868 * Signal to the system that the PCI device is not in use by the system
 869 * anymore.  This only involves disabling PCI bus-mastering, if active.
 870 *
 871 * Note we don't actually disable the device until all callers of
 872 * pci_device_enable() have called pci_device_disable().
 873 */
 874void
 875pci_disable_device(struct pci_dev *dev)
 876{
 877        struct pci_devres *dr;
 878        u16 pci_command;
 879
 880        dr = find_pci_dr(dev);
 881        if (dr)
 882                dr->enabled = 0;
 883
 884        if (atomic_sub_return(1, &dev->enable_cnt) != 0)
 885                return;
 886
 887        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
 888        if (pci_command & PCI_COMMAND_MASTER) {
 889                pci_command &= ~PCI_COMMAND_MASTER;
 890                pci_write_config_word(dev, PCI_COMMAND, pci_command);
 891        }
 892        dev->is_busmaster = 0;
 893
 894        pcibios_disable_device(dev);
 895}
 896
 897/**
 898 * pcibios_set_pcie_reset_state - set reset state for device dev
  899 * @dev: the PCI-E device whose reset state is to be set
 900 * @state: Reset state to enter into
 901 *
 902 *
 903 * Sets the PCI-E reset state for the device. This is the default
 904 * implementation. Architecture implementations can override this.
 905 */
 906int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
 907                                                        enum pcie_reset_state state)
 908{
 909        return -EINVAL;
 910}
 911
 912/**
 913 * pci_set_pcie_reset_state - set reset state for device dev
  914 * @dev: the PCI-E device whose reset state is to be set
 915 * @state: Reset state to enter into
 916 *
 917 *
 918 * Sets the PCI reset state for the device.
 919 */
 920int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
 921{
 922        return pcibios_set_pcie_reset_state(dev, state);
 923}
 924
 925/**
 926 * pci_enable_wake - enable PCI device as wakeup event source
 927 * @dev: PCI device affected
 928 * @state: PCI state from which device will issue wakeup events
 929 * @enable: True to enable event generation; false to disable
 930 *
 931 * This enables the device as a wakeup event source, or disables it.
  932 * When such events involve platform-specific hooks, those hooks are
 933 * called automatically by this routine.
 934 *
 935 * Devices with legacy power management (no standard PCI PM capabilities)
 936 * always require such platform hooks.  Depending on the platform, devices
 937 * supporting the standard PCI PME# signal may require such platform hooks;
 938 * they always update bits in config space to allow PME# generation.
 939 *
 940 * -EIO is returned if the device can't ever be a wakeup event source.
 941 * -EINVAL is returned if the device can't generate wakeup events from
 942 * the specified PCI state.  Returns zero if the operation is successful.
 943 */
 944int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
 945{
 946        int pm;
 947        int status;
 948        u16 value;
 949
 950        /* Note that drivers should verify device_may_wakeup(&dev->dev)
 951         * before calling this function.  Platform code should report
 952         * errors when drivers try to enable wakeup on devices that
 953         * can't issue wakeups, or on which wakeups were disabled by
 954         * userspace updating the /sys/devices.../power/wakeup file.
 955         */
 956
 957        status = call_platform_enable_wakeup(&dev->dev, enable);
 958
 959        /* find PCI PM capability in list */
 960        pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 961
 962        /* If device doesn't support PM Capabilities, but caller wants to
 963         * disable wake events, it's a NOP.  Otherwise fail unless the
 964         * platform hooks handled this legacy device already.
 965         */
 966        if (!pm)
 967                return enable ? status : 0;
 968
 969        /* Check device's ability to generate PME# */
  970        pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
 971
 972        value &= PCI_PM_CAP_PME_MASK;
 973        value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;   /* First bit of mask */
 974
 975        /* Check if it can generate PME# from requested state. */
 976        if (!value || !(value & (1 << state))) {
 977                /* if it can't, revert what the platform hook changed,
 978                 * always reporting the base "EINVAL, can't PME#" error
 979                 */
 980                if (enable)
 981                        call_platform_enable_wakeup(&dev->dev, 0);
 982                return enable ? -EINVAL : 0;
 983        }
 984
 985        pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);
 986
 987        /* Clear PME_Status by writing 1 to it and enable PME# */
 988        value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
 989
 990        if (!enable)
 991                value &= ~PCI_PM_CTRL_PME_ENABLE;
 992
 993        pci_write_config_word(dev, pm + PCI_PM_CTRL, value);
 994
 995        return 0;
 996}
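
/*
 * Usage sketch (illustrative only): arm PME# for D3hot in a hypothetical
 * suspend handler, but only if userspace left wakeup enabled.
 *
 *        if (device_may_wakeup(&pdev->dev))
 *                pci_enable_wake(pdev, PCI_D3hot, 1);
 */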
 997
 998int
 999pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1000{
1001        u8 pin;
1002
1003        pin = dev->pin;
1004        if (!pin)
1005                return -1;
1006        pin--;
1007        while (dev->bus->self) {
1008                pin = (pin + PCI_SLOT(dev->devfn)) % 4;
1009                dev = dev->bus->self;
1010        }
1011        *bridge = dev;
1012        return pin;
1013}
1014
1015/**
 1016 *      pci_release_region - Release a PCI BAR
1017 *      @pdev: PCI device whose resources were previously reserved by pci_request_region
1018 *      @bar: BAR to release
1019 *
1020 *      Releases the PCI I/O and memory resources previously reserved by a
1021 *      successful call to pci_request_region.  Call this function only
1022 *      after all use of the PCI regions has ceased.
1023 */
1024void pci_release_region(struct pci_dev *pdev, int bar)
1025{
1026        struct pci_devres *dr;
1027
1028        if (pci_resource_len(pdev, bar) == 0)
1029                return;
1030        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
1031                release_region(pci_resource_start(pdev, bar),
1032                                pci_resource_len(pdev, bar));
1033        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
1034                release_mem_region(pci_resource_start(pdev, bar),
1035                                pci_resource_len(pdev, bar));
1036
1037        dr = find_pci_dr(pdev);
1038        if (dr)
1039                dr->region_mask &= ~(1 << bar);
1040}
1041
1042/**
 1043 *      pci_request_region - Reserve PCI I/O and memory resource
1044 *      @pdev: PCI device whose resources are to be reserved
1045 *      @bar: BAR to be reserved
1046 *      @res_name: Name to be associated with resource.
1047 *
 1048 *      Mark the PCI region associated with PCI device @pdev BAR @bar as
1049 *      being reserved by owner @res_name.  Do not access any
1050 *      address inside the PCI regions unless this call returns
1051 *      successfully.
1052 *
1053 *      Returns 0 on success, or %EBUSY on error.  A warning
1054 *      message is also printed on failure.
1055 */
1056int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
1057{
1058        struct pci_devres *dr;
1059
1060        if (pci_resource_len(pdev, bar) == 0)
1061                return 0;
1062                
1063        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
1064                if (!request_region(pci_resource_start(pdev, bar),
1065                            pci_resource_len(pdev, bar), res_name))
1066                        goto err_out;
1067        }
1068        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
1069                if (!request_mem_region(pci_resource_start(pdev, bar),
1070                                        pci_resource_len(pdev, bar), res_name))
1071                        goto err_out;
1072        }
1073
1074        dr = find_pci_dr(pdev);
1075        if (dr)
1076                dr->region_mask |= 1 << bar;
1077
1078        return 0;
1079
1080err_out:
1081        printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
1082                "for device %s\n",
1083                pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1084                bar + 1, /* PCI BAR # */
1085                (unsigned long long)pci_resource_len(pdev, bar),
1086                (unsigned long long)pci_resource_start(pdev, bar),
1087                pci_name(pdev));
1088        return -EBUSY;
1089}
1090
1091/**
1092 * pci_release_selected_regions - Release selected PCI I/O and memory resources
1093 * @pdev: PCI device whose resources were previously reserved
1094 * @bars: Bitmask of BARs to be released
1095 *
1096 * Release selected PCI I/O and memory resources previously reserved.
1097 * Call this function only after all use of the PCI regions has ceased.
1098 */
1099void pci_release_selected_regions(struct pci_dev *pdev, int bars)
1100{
1101        int i;
1102
1103        for (i = 0; i < 6; i++)
1104                if (bars & (1 << i))
1105                        pci_release_region(pdev, i);
1106}
1107
1108/**
1109 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
1110 * @pdev: PCI device whose resources are to be reserved
1111 * @bars: Bitmask of BARs to be requested
1112 * @res_name: Name to be associated with resource
1113 */
1114int pci_request_selected_regions(struct pci_dev *pdev, int bars,
1115                                 const char *res_name)
1116{
1117        int i;
1118
1119        for (i = 0; i < 6; i++)
1120                if (bars & (1 << i))
1121                        if(pci_request_region(pdev, i, res_name))
1122                                goto err_out;
1123        return 0;
1124
1125err_out:
1126        while(--i >= 0)
1127                if (bars & (1 << i))
1128                        pci_release_region(pdev, i);
1129
1130        return -EBUSY;
1131}
1132
1133/**
1134 *      pci_release_regions - Release reserved PCI I/O and memory resources
1135 *      @pdev: PCI device whose resources were previously reserved by pci_request_regions
1136 *
1137 *      Releases all PCI I/O and memory resources previously reserved by a
1138 *      successful call to pci_request_regions.  Call this function only
1139 *      after all use of the PCI regions has ceased.
1140 */
1141
1142void pci_release_regions(struct pci_dev *pdev)
1143{
1144        pci_release_selected_regions(pdev, (1 << 6) - 1);
1145}
1146
1147/**
 1148 *      pci_request_regions - Reserve PCI I/O and memory resources
1149 *      @pdev: PCI device whose resources are to be reserved
1150 *      @res_name: Name to be associated with resource.
1151 *
1152 *      Mark all PCI regions associated with PCI device @pdev as
1153 *      being reserved by owner @res_name.  Do not access any
1154 *      address inside the PCI regions unless this call returns
1155 *      successfully.
1156 *
1157 *      Returns 0 on success, or %EBUSY on error.  A warning
1158 *      message is also printed on failure.
1159 */
1160int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1161{
1162        return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1163}
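
/*
 * Usage sketch (illustrative only; "foo" and "base" are hypothetical): claim
 * every BAR under the driver's name and map the first memory BAR.
 *
 *        err = pci_request_regions(pdev, "foo");
 *        if (err)
 *                return err;
 *        base = pci_iomap(pdev, 0, 0);
 */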
1164
1165/**
1166 * pci_set_master - enables bus-mastering for device dev
1167 * @dev: the PCI device to enable
1168 *
1169 * Enables bus-mastering on the device and calls pcibios_set_master()
1170 * to do the needed arch specific settings.
1171 */
1172void
1173pci_set_master(struct pci_dev *dev)
1174{
1175        u16 cmd;
1176
1177        pci_read_config_word(dev, PCI_COMMAND, &cmd);
1178        if (! (cmd & PCI_COMMAND_MASTER)) {
1179                pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
1180                cmd |= PCI_COMMAND_MASTER;
1181                pci_write_config_word(dev, PCI_COMMAND, cmd);
1182        }
1183        dev->is_busmaster = 1;
1184        pcibios_set_master(dev);
1185}
1186
1187#ifdef PCI_DISABLE_MWI
1188int pci_set_mwi(struct pci_dev *dev)
1189{
1190        return 0;
1191}
1192
1193int pci_try_set_mwi(struct pci_dev *dev)
1194{
1195        return 0;
1196}
1197
1198void pci_clear_mwi(struct pci_dev *dev)
1199{
1200}
1201
1202#else
1203
1204#ifndef PCI_CACHE_LINE_BYTES
1205#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
1206#endif
1207
1208/* This can be overridden by arch code. */
1209/* Don't forget this is measured in 32-bit words, not bytes */
1210u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
1211
1212/**
1213 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
1214 * @dev: the PCI device for which MWI is to be enabled
1215 *
1216 * Helper function for pci_set_mwi.
1217 * Originally copied from drivers/net/acenic.c.
1218 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
1219 *
1220 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1221 */
1222static int
1223pci_set_cacheline_size(struct pci_dev *dev)
1224{
1225        u8 cacheline_size;
1226
1227        if (!pci_cache_line_size)
1228                return -EINVAL;         /* The system doesn't support MWI. */
1229
1230        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
1231           equal to or multiple of the right value. */
1232        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
1233        if (cacheline_size >= pci_cache_line_size &&
1234            (cacheline_size % pci_cache_line_size) == 0)
1235                return 0;
1236
1237        /* Write the correct value. */
1238        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
1239        /* Read it back. */
1240        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
1241        if (cacheline_size == pci_cache_line_size)
1242                return 0;
1243
1244        printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
1245               "by device %s\n", pci_cache_line_size << 2, pci_name(dev));
1246
1247        return -EINVAL;
1248}
1249
1250/**
1251 * pci_set_mwi - enables memory-write-invalidate PCI transaction
1252 * @dev: the PCI device for which MWI is enabled
1253 *
1254 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1255 *
1256 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1257 */
1258int
1259pci_set_mwi(struct pci_dev *dev)
1260{
1261        int rc;
1262        u16 cmd;
1263
1264        rc = pci_set_cacheline_size(dev);
1265        if (rc)
1266                return rc;
1267
1268        pci_read_config_word(dev, PCI_COMMAND, &cmd);
1269        if (! (cmd & PCI_COMMAND_INVALIDATE)) {
1270                pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n",
1271                        pci_name(dev));
1272                cmd |= PCI_COMMAND_INVALIDATE;
1273                pci_write_config_word(dev, PCI_COMMAND, cmd);
1274        }
1275        
1276        return 0;
1277}
1278
1279/**
1280 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
1281 * @dev: the PCI device for which MWI is enabled
1282 *
1283 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1284 * Callers are not required to check the return value.
1285 *
1286 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1287 */
1288int pci_try_set_mwi(struct pci_dev *dev)
1289{
1290        int rc = pci_set_mwi(dev);
1291        return rc;
1292}
1293
1294/**
1295 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
1296 * @dev: the PCI device to disable
1297 *
1298 * Disables PCI Memory-Write-Invalidate transaction on the device
1299 */
1300void
1301pci_clear_mwi(struct pci_dev *dev)
1302{
1303        u16 cmd;
1304
1305        pci_read_config_word(dev, PCI_COMMAND, &cmd);
1306        if (cmd & PCI_COMMAND_INVALIDATE) {
1307                cmd &= ~PCI_COMMAND_INVALIDATE;
1308                pci_write_config_word(dev, PCI_COMMAND, cmd);
1309        }
1310}
1311#endif /* ! PCI_DISABLE_MWI */
1312
1313/**
1314 * pci_intx - enables/disables PCI INTx for device dev
1315 * @pdev: the PCI device to operate on
1316 * @enable: boolean: whether to enable or disable PCI INTx
1317 *
1318 * Enables/disables PCI INTx for device dev
1319 */
1320void
1321pci_intx(struct pci_dev *pdev, int enable)
1322{
1323        u16 pci_command, new;
1324
1325        pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1326
1327        if (enable) {
1328                new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
1329        } else {
1330                new = pci_command | PCI_COMMAND_INTX_DISABLE;
1331        }
1332
1333        if (new != pci_command) {
1334                struct pci_devres *dr;
1335
1336                pci_write_config_word(pdev, PCI_COMMAND, new);
1337
1338                dr = find_pci_dr(pdev);
1339                if (dr && !dr->restore_intx) {
1340                        dr->restore_intx = 1;
1341                        dr->orig_intx = !enable;
1342                }
1343        }
1344}
1345
1346/**
1347 * pci_msi_off - disables any msi or msix capabilities
1348 * @dev: the PCI device to operate on
1349 *
1350 * If you want to use msi see pci_enable_msi and friends.
1351 * This is a lower level primitive that allows us to disable
1352 * msi operation at the device level.
1353 */
1354void pci_msi_off(struct pci_dev *dev)
1355{
1356        int pos;
1357        u16 control;
1358
1359        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1360        if (pos) {
1361                pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
1362                control &= ~PCI_MSI_FLAGS_ENABLE;
1363                pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
1364        }
1365        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1366        if (pos) {
1367                pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
1368                control &= ~PCI_MSIX_FLAGS_ENABLE;
1369                pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
1370        }
1371}
1372
1373#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
1374/*
1375 * These can be overridden by arch-specific implementations
1376 */
1377int
1378pci_set_dma_mask(struct pci_dev *dev, u64 mask)
1379{
1380        if (!pci_dma_supported(dev, mask))
1381                return -EIO;
1382
1383        dev->dma_mask = mask;
1384
1385        return 0;
1386}
1387    
1388int
1389pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
1390{
1391        if (!pci_dma_supported(dev, mask))
1392                return -EIO;
1393
1394        dev->dev.coherent_dma_mask = mask;
1395
1396        return 0;
1397}
1398#endif
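
/*
 * Usage sketch (illustrative only): the common fallback pattern, trying a
 * 64-bit DMA mask first and dropping back to 32 bits if that fails.
 *
 *        if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) ||
 *            pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
 *                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 *                if (!err)
 *                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 *        }
 */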
1399
1400/**
1401 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
1402 * @dev: PCI device to query
1403 *
1404 * Returns mmrbc: maximum designed memory read count in bytes
1405 *    or appropriate error value.
1406 */
1407int pcix_get_max_mmrbc(struct pci_dev *dev)
1408{
1409        int err, cap;
1410        u32 stat;
1411
1412        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1413        if (!cap)
1414                return -EINVAL;
1415
1416        err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
1417        if (err)
1418                return -EINVAL;
1419
 1420        return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
1421}
1422EXPORT_SYMBOL(pcix_get_max_mmrbc);
1423
1424/**
1425 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
1426 * @dev: PCI device to query
1427 *
1428 * Returns mmrbc: maximum memory read count in bytes
1429 *    or appropriate error value.
1430 */
1431int pcix_get_mmrbc(struct pci_dev *dev)
1432{
1433        int ret, cap;
1434        u32 cmd;
1435
1436        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1437        if (!cap)
1438                return -EINVAL;
1439
1440        ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
1441        if (!ret)
1442                ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
1443
1444        return ret;
1445}
1446EXPORT_SYMBOL(pcix_get_mmrbc);
1447
1448/**
1449 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
1450 * @dev: PCI device to query
1451 * @mmrbc: maximum memory read count in bytes
1452 *    valid values are 512, 1024, 2048, 4096
1453 *
 1454 * If possible sets maximum memory read byte count; some bridges have errata
1455 * that prevent this.
1456 */
1457int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
1458{
1459        int cap, err = -EINVAL;
1460        u32 stat, cmd, v, o;
1461
1462        if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
1463                goto out;
1464
1465        v = ffs(mmrbc) - 10;
1466
1467        cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1468        if (!cap)
1469                goto out;
1470
1471        err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
1472        if (err)
1473                goto out;
1474
1475        if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
1476                return -E2BIG;
1477
1478        err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
1479        if (err)
1480                goto out;
1481
1482        o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
1483        if (o != v) {
1484                if (v > o && dev->bus &&
1485                   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
1486                        return -EIO;
1487
1488                cmd &= ~PCI_X_CMD_MAX_READ;
1489                cmd |= v << 2;
1490                err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
1491        }
1492out:
1493        return err;
1494}
1495EXPORT_SYMBOL(pcix_set_mmrbc);
1496
1497/**
1498 * pcie_get_readrq - get PCI Express read request size
1499 * @dev: PCI device to query
1500 *
1501 * Returns maximum memory read request in bytes
1502 *    or appropriate error value.
1503 */
1504int pcie_get_readrq(struct pci_dev *dev)
1505{
1506        int ret, cap;
1507        u16 ctl;
1508
1509        cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
1510        if (!cap)
1511                return -EINVAL;
1512
1513        ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
1514        if (!ret)
 1515                ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
1516
1517        return ret;
1518}
1519EXPORT_SYMBOL(pcie_get_readrq);
1520
1521/**
1522 * pcie_set_readrq - set PCI Express maximum memory read request
1523 * @dev: PCI device to query
1524 * @rq: maximum memory read count in bytes
1525 *    valid values are 128, 256, 512, 1024, 2048, 4096
1526 *
1527 * If possible sets maximum read byte count
1528 */
1529int pcie_set_readrq(struct pci_dev *dev, int rq)
1530{
1531        int cap, err = -EINVAL;
1532        u16 ctl, v;
1533
1534        if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
1535                goto out;
1536
1537        v = (ffs(rq) - 8) << 12;
1538
1539        cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
1540        if (!cap)
1541                goto out;
1542
1543        err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
1544        if (err)
1545                goto out;
1546
1547        if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
1548                ctl &= ~PCI_EXP_DEVCTL_READRQ;
1549                ctl |= v;
 1550                err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
1551        }
1552
1553out:
1554        return err;
1555}
1556EXPORT_SYMBOL(pcie_set_readrq);
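
/*
 * Usage sketch (illustrative only): clamp a hypothetical device's maximum
 * read request size to 512 bytes if it is currently configured larger.
 *
 *        if (pcie_get_readrq(pdev) > 512)
 *                pcie_set_readrq(pdev, 512);
 */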
1557
1558/**
1559 * pci_select_bars - Make BAR mask from the type of resource
1560 * @dev: the PCI device for which BAR mask is made
1561 * @flags: resource type mask to be selected
1562 *
 1563 * This helper routine makes a BAR mask from the type of resource.
1564 */
1565int pci_select_bars(struct pci_dev *dev, unsigned long flags)
1566{
1567        int i, bars = 0;
1568        for (i = 0; i < PCI_NUM_RESOURCES; i++)
1569                if (pci_resource_flags(dev, i) & flags)
1570                        bars |= (1 << i);
1571        return bars;
1572}
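
/*
 * Usage sketch (illustrative only; "foo" is a hypothetical driver name):
 * combine pci_select_bars() with pci_request_selected_regions() to claim
 * only the memory BARs of a device.
 *
 *        int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *        err = pci_request_selected_regions(pdev, bars, "foo");
 */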
1573
1574static void __devinit pci_no_domains(void)
1575{
1576#ifdef CONFIG_PCI_DOMAINS
1577        pci_domains_supported = 0;
1578#endif
1579}
1580
1581static int __devinit pci_init(void)
1582{
1583        struct pci_dev *dev = NULL;
1584
1585        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1586                pci_fixup_device(pci_fixup_final, dev);
1587        }
1588        return 0;
1589}
1590
1591static int __devinit pci_setup(char *str)
1592{
1593        while (str) {
1594                char *k = strchr(str, ',');
1595                if (k)
1596                        *k++ = 0;
1597                if (*str && (str = pcibios_setup(str)) && *str) {
1598                        if (!strcmp(str, "nomsi")) {
1599                                pci_no_msi();
1600                        } else if (!strcmp(str, "noaer")) {
1601                                pci_no_aer();
1602                        } else if (!strcmp(str, "nodomains")) {
1603                                pci_no_domains();
1604                        } else if (!strncmp(str, "cbiosize=", 9)) {
1605                                pci_cardbus_io_size = memparse(str + 9, &str);
1606                        } else if (!strncmp(str, "cbmemsize=", 10)) {
1607                                pci_cardbus_mem_size = memparse(str + 10, &str);
1608                        } else {
1609                                printk(KERN_ERR "PCI: Unknown option `%s'\n",
1610                                                str);
1611                        }
1612                }
1613                str = k;
1614        }
1615        return 0;
1616}
1617early_param("pci", pci_setup);
1618
1619device_initcall(pci_init);
1620
1621EXPORT_SYMBOL_GPL(pci_restore_bars);
1622EXPORT_SYMBOL(pci_reenable_device);
1623EXPORT_SYMBOL(pci_enable_device_bars);
1624EXPORT_SYMBOL(pci_enable_device);
1625EXPORT_SYMBOL(pcim_enable_device);
1626EXPORT_SYMBOL(pcim_pin_device);
1627EXPORT_SYMBOL(pci_disable_device);
1628EXPORT_SYMBOL(pci_find_capability);
1629EXPORT_SYMBOL(pci_bus_find_capability);
1630EXPORT_SYMBOL(pci_release_regions);
1631EXPORT_SYMBOL(pci_request_regions);
1632EXPORT_SYMBOL(pci_release_region);
1633EXPORT_SYMBOL(pci_request_region);
1634EXPORT_SYMBOL(pci_release_selected_regions);
1635EXPORT_SYMBOL(pci_request_selected_regions);
1636EXPORT_SYMBOL(pci_set_master);
1637EXPORT_SYMBOL(pci_set_mwi);
1638EXPORT_SYMBOL(pci_try_set_mwi);
1639EXPORT_SYMBOL(pci_clear_mwi);
1640EXPORT_SYMBOL_GPL(pci_intx);
1641EXPORT_SYMBOL(pci_set_dma_mask);
1642EXPORT_SYMBOL(pci_set_consistent_dma_mask);
1643EXPORT_SYMBOL(pci_assign_resource);
1644EXPORT_SYMBOL(pci_find_parent_resource);
1645EXPORT_SYMBOL(pci_select_bars);
1646
1647EXPORT_SYMBOL(pci_set_power_state);
1648EXPORT_SYMBOL(pci_save_state);
1649EXPORT_SYMBOL(pci_restore_state);
1650EXPORT_SYMBOL(pci_enable_wake);
1651EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1652
1653