linux/drivers/pci/probe.c
   1/*
   2 * probe.c - PCI detection and setup code
   3 */
   4
   5#include <linux/kernel.h>
   6#include <linux/delay.h>
   7#include <linux/init.h>
   8#include <linux/pci.h>
   9#include <linux/of_device.h>
  10#include <linux/of_pci.h>
  11#include <linux/pci_hotplug.h>
  12#include <linux/slab.h>
  13#include <linux/module.h>
  14#include <linux/cpumask.h>
  15#include <linux/pci-aspm.h>
  16#include <linux/aer.h>
  17#include <linux/acpi.h>
  18#include <linux/irqdomain.h>
  19#include <linux/pm_runtime.h>
  20#include "pci.h"
  21
  22#define CARDBUS_LATENCY_TIMER   176     /* secondary latency timer */
  23#define CARDBUS_RESERVE_BUSNR   3
  24
  25static struct resource busn_resource = {
  26        .name   = "PCI busn",
  27        .start  = 0,
  28        .end    = 255,
  29        .flags  = IORESOURCE_BUS,
  30};
  31
  32/* Ugh.  Need to stop exporting this to modules. */
  33LIST_HEAD(pci_root_buses);
  34EXPORT_SYMBOL(pci_root_buses);
  35
  36static LIST_HEAD(pci_domain_busn_res_list);
  37
  38struct pci_domain_busn_res {
  39        struct list_head list;
  40        struct resource res;
  41        int domain_nr;
  42};
  43
  44static struct resource *get_pci_domain_busn_res(int domain_nr)
  45{
  46        struct pci_domain_busn_res *r;
  47
  48        list_for_each_entry(r, &pci_domain_busn_res_list, list)
  49                if (r->domain_nr == domain_nr)
  50                        return &r->res;
  51
  52        r = kzalloc(sizeof(*r), GFP_KERNEL);
  53        if (!r)
  54                return NULL;
  55
  56        r->domain_nr = domain_nr;
  57        r->res.start = 0;
  58        r->res.end = 0xff;
  59        r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
  60
  61        list_add_tail(&r->list, &pci_domain_busn_res_list);
  62
  63        return &r->res;
  64}
  65
  66static int find_anything(struct device *dev, void *data)
  67{
  68        return 1;
  69}
  70
  71/*
   72 * Some device drivers need to know if PCI is initialized.
   73 * Basically, we consider PCI uninitialized when there is
   74 * no device to be found on the pci_bus_type.
  75 */
  76int no_pci_devices(void)
  77{
  78        struct device *dev;
  79        int no_devices;
  80
  81        dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
  82        no_devices = (dev == NULL);
  83        put_device(dev);
  84        return no_devices;
  85}
  86EXPORT_SYMBOL(no_pci_devices);
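    /*
     * Illustrative use, not code from this file: a caller that must wait
     * until PCI enumeration has found at least one device could check:
     *
     *         if (no_pci_devices())
     *                 return -ENODEV;
     */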
  87
  88/*
  89 * PCI Bus Class
  90 */
  91static void release_pcibus_dev(struct device *dev)
  92{
  93        struct pci_bus *pci_bus = to_pci_bus(dev);
  94
  95        put_device(pci_bus->bridge);
  96        pci_bus_remove_resources(pci_bus);
  97        pci_release_bus_of_node(pci_bus);
  98        kfree(pci_bus);
  99}
 100
 101static struct class pcibus_class = {
 102        .name           = "pci_bus",
 103        .dev_release    = &release_pcibus_dev,
 104        .dev_groups     = pcibus_groups,
 105};
 106
 107static int __init pcibus_class_init(void)
 108{
 109        return class_register(&pcibus_class);
 110}
 111postcore_initcall(pcibus_class_init);
 112
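    /*
     * Worked example for the sizing math below (values are illustrative):
     * a 1 MiB 32-bit memory BAR reads back 0xfff00000 after all 1s are
     * written, so size = mask & maxbase = 0xfff00000; its lowest set bit
     * is 0x00100000, and the function returns 0x000fffff, i.e. the BAR
     * decodes 1 MiB.
     */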
 113static u64 pci_size(u64 base, u64 maxbase, u64 mask)
 114{
 115        u64 size = mask & maxbase;      /* Find the significant bits */
 116        if (!size)
 117                return 0;
 118
 119        /* Get the lowest of them to find the decode size, and
 120           from that the extent.  */
 121        size = (size & ~(size-1)) - 1;
 122
 123        /* base == maxbase can be valid only if the BAR has
 124           already been programmed with all 1s.  */
 125        if (base == maxbase && ((base | size) & mask) != mask)
 126                return 0;
 127
 128        return size;
 129}
 130
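    /*
     * The low bits of a BAR encode its type: bit 0 selects I/O vs. memory,
     * bits 2:1 give the memory type (32-bit, below 1MB, or 64-bit), and
     * bit 3 marks a prefetchable memory BAR.  decode_bar() translates that
     * encoding into IORESOURCE_* flags.
     */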
 131static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
 132{
 133        u32 mem_type;
 134        unsigned long flags;
 135
 136        if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
 137                flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
 138                flags |= IORESOURCE_IO;
 139                return flags;
 140        }
 141
 142        flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
 143        flags |= IORESOURCE_MEM;
 144        if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
 145                flags |= IORESOURCE_PREFETCH;
 146
 147        mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
 148        switch (mem_type) {
 149        case PCI_BASE_ADDRESS_MEM_TYPE_32:
 150                break;
 151        case PCI_BASE_ADDRESS_MEM_TYPE_1M:
 152                /* 1M mem BAR treated as 32-bit BAR */
 153                break;
 154        case PCI_BASE_ADDRESS_MEM_TYPE_64:
 155                flags |= IORESOURCE_MEM_64;
 156                break;
 157        default:
 158                /* mem unknown type treated as 32-bit BAR */
 159                break;
 160        }
 161        return flags;
 162}
 163
 164#define PCI_COMMAND_DECODE_ENABLE       (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
 165
 166/**
 167 * pci_read_base - read a PCI BAR
 168 * @dev: the PCI device
 169 * @type: type of the BAR
 170 * @res: resource buffer to be filled in
 171 * @pos: BAR position in the config space
 172 *
 173 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 174 */
 175int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 176                    struct resource *res, unsigned int pos)
 177{
 178        u32 l, sz, mask;
 179        u64 l64, sz64, mask64;
 180        u16 orig_cmd;
 181        struct pci_bus_region region, inverted_region;
 182
 183        mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 184
 185        /* No printks while decoding is disabled! */
 186        if (!dev->mmio_always_on) {
 187                pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
 188                if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
 189                        pci_write_config_word(dev, PCI_COMMAND,
 190                                orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
 191                }
 192        }
 193
 194        res->name = pci_name(dev);
 195
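            /*
             * Classic BAR sizing: save the current value, write all 1s,
             * read back the size mask, then restore the original value.
             */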
 196        pci_read_config_dword(dev, pos, &l);
 197        pci_write_config_dword(dev, pos, l | mask);
 198        pci_read_config_dword(dev, pos, &sz);
 199        pci_write_config_dword(dev, pos, l);
 200
 201        /*
 202         * All bits set in sz means the device isn't working properly.
 203         * If the BAR isn't implemented, all bits must be 0.  If it's a
 204         * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
 205         * 1 must be clear.
 206         */
 207        if (sz == 0xffffffff)
 208                sz = 0;
 209
 210        /*
 211         * I don't know how l can have all bits set.  Copied from old code.
 212         * Maybe it fixes a bug on some ancient platform.
 213         */
 214        if (l == 0xffffffff)
 215                l = 0;
 216
 217        if (type == pci_bar_unknown) {
 218                res->flags = decode_bar(dev, l);
 219                res->flags |= IORESOURCE_SIZEALIGN;
 220                if (res->flags & IORESOURCE_IO) {
 221                        l64 = l & PCI_BASE_ADDRESS_IO_MASK;
 222                        sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
 223                        mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
 224                } else {
 225                        l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
 226                        sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
 227                        mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 228                }
 229        } else {
 230                res->flags |= (l & IORESOURCE_ROM_ENABLE);
 231                l64 = l & PCI_ROM_ADDRESS_MASK;
 232                sz64 = sz & PCI_ROM_ADDRESS_MASK;
 233                mask64 = (u32)PCI_ROM_ADDRESS_MASK;
 234        }
 235
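            /*
             * For a 64-bit BAR, size the upper dword the same way and fold
             * it into the 64-bit base, size and mask values.
             */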
 236        if (res->flags & IORESOURCE_MEM_64) {
 237                pci_read_config_dword(dev, pos + 4, &l);
 238                pci_write_config_dword(dev, pos + 4, ~0);
 239                pci_read_config_dword(dev, pos + 4, &sz);
 240                pci_write_config_dword(dev, pos + 4, l);
 241
 242                l64 |= ((u64)l << 32);
 243                sz64 |= ((u64)sz << 32);
 244                mask64 |= ((u64)~0 << 32);
 245        }
 246
 247        if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
 248                pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
 249
 250        if (!sz64)
 251                goto fail;
 252
 253        sz64 = pci_size(l64, sz64, mask64);
 254        if (!sz64) {
 255                dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
 256                         pos);
 257                goto fail;
 258        }
 259
 260        if (res->flags & IORESOURCE_MEM_64) {
 261                if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
 262                    && sz64 > 0x100000000ULL) {
 263                        res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
 264                        res->start = 0;
 265                        res->end = 0;
 266                        dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
 267                                pos, (unsigned long long)sz64);
 268                        goto out;
 269                }
 270
 271                if ((sizeof(pci_bus_addr_t) < 8) && l) {
 272                        /* Above 32-bit boundary; try to reallocate */
 273                        res->flags |= IORESOURCE_UNSET;
 274                        res->start = 0;
 275                        res->end = sz64;
 276                        dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
 277                                 pos, (unsigned long long)l64);
 278                        goto out;
 279                }
 280        }
 281
 282        region.start = l64;
 283        region.end = l64 + sz64;
 284
 285        pcibios_bus_to_resource(dev->bus, res, &region);
 286        pcibios_resource_to_bus(dev->bus, &inverted_region, res);
 287
 288        /*
 289         * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
 290         * the corresponding resource address (the physical address used by
  291         * the CPU).  Converting that resource address back to a bus address
 292         * should yield the original BAR value:
 293         *
 294         *     resource_to_bus(bus_to_resource(A)) == A
 295         *
 296         * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
 297         * be claimed by the device.
 298         */
 299        if (inverted_region.start != region.start) {
 300                res->flags |= IORESOURCE_UNSET;
 301                res->start = 0;
 302                res->end = region.end - region.start;
 303                dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
 304                         pos, (unsigned long long)region.start);
 305        }
 306
 307        goto out;
 308
 309
 310fail:
 311        res->flags = 0;
 312out:
 313        if (res->flags)
 314                dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
 315
 316        return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 317}
 318
 319static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
 320{
 321        unsigned int pos, reg;
 322
 323        if (dev->non_compliant_bars)
 324                return;
 325
 326        for (pos = 0; pos < howmany; pos++) {
 327                struct resource *res = &dev->resource[pos];
 328                reg = PCI_BASE_ADDRESS_0 + (pos << 2);
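                    /* A 64-bit BAR occupies two dwords; __pci_read_base()
                       returns 1 in that case so the upper half is skipped. */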
 329                pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
 330        }
 331
 332        if (rom) {
 333                struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
 334                dev->rom_base_reg = rom;
 335                res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
 336                                IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 337                __pci_read_base(dev, pci_bar_mem32, res, rom);
 338        }
 339}
 340
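    /*
     * Illustrative decode (invented values): with 4K granularity,
     * io_base_lo = 0x21 and io_limit_lo = 0x31 give base = 0x2000 and
     * limit = 0x3000, so the window read below spans [0x2000, 0x3fff].
     */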
 341static void pci_read_bridge_io(struct pci_bus *child)
 342{
 343        struct pci_dev *dev = child->self;
 344        u8 io_base_lo, io_limit_lo;
 345        unsigned long io_mask, io_granularity, base, limit;
 346        struct pci_bus_region region;
 347        struct resource *res;
 348
 349        io_mask = PCI_IO_RANGE_MASK;
 350        io_granularity = 0x1000;
 351        if (dev->io_window_1k) {
 352                /* Support 1K I/O space granularity */
 353                io_mask = PCI_IO_1K_RANGE_MASK;
 354                io_granularity = 0x400;
 355        }
 356
 357        res = child->resource[0];
 358        pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
 359        pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
 360        base = (io_base_lo & io_mask) << 8;
 361        limit = (io_limit_lo & io_mask) << 8;
 362
 363        if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
 364                u16 io_base_hi, io_limit_hi;
 365
 366                pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
 367                pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
 368                base |= ((unsigned long) io_base_hi << 16);
 369                limit |= ((unsigned long) io_limit_hi << 16);
 370        }
 371
 372        if (base <= limit) {
 373                res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
 374                region.start = base;
 375                region.end = limit + io_granularity - 1;
 376                pcibios_bus_to_resource(dev->bus, res, &region);
 377                dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 378        }
 379}
 380
 381static void pci_read_bridge_mmio(struct pci_bus *child)
 382{
 383        struct pci_dev *dev = child->self;
 384        u16 mem_base_lo, mem_limit_lo;
 385        unsigned long base, limit;
 386        struct pci_bus_region region;
 387        struct resource *res;
 388
 389        res = child->resource[1];
 390        pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
 391        pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
 392        base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
 393        limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
 394        if (base <= limit) {
 395                res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
 396                region.start = base;
 397                region.end = limit + 0xfffff;
 398                pcibios_bus_to_resource(dev->bus, res, &region);
 399                dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 400        }
 401}
 402
 403static void pci_read_bridge_mmio_pref(struct pci_bus *child)
 404{
 405        struct pci_dev *dev = child->self;
 406        u16 mem_base_lo, mem_limit_lo;
 407        u64 base64, limit64;
 408        pci_bus_addr_t base, limit;
 409        struct pci_bus_region region;
 410        struct resource *res;
 411
 412        res = child->resource[2];
 413        pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
 414        pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
 415        base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
 416        limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
 417
 418        if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
 419                u32 mem_base_hi, mem_limit_hi;
 420
 421                pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
 422                pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
 423
 424                /*
 425                 * Some bridges set the base > limit by default, and some
 426                 * (broken) BIOSes do not initialize them.  If we find
 427                 * this, just assume they are not being used.
 428                 */
 429                if (mem_base_hi <= mem_limit_hi) {
 430                        base64 |= (u64) mem_base_hi << 32;
 431                        limit64 |= (u64) mem_limit_hi << 32;
 432                }
 433        }
 434
 435        base = (pci_bus_addr_t) base64;
 436        limit = (pci_bus_addr_t) limit64;
 437
 438        if (base != base64) {
 439                dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
 440                        (unsigned long long) base64);
 441                return;
 442        }
 443
 444        if (base <= limit) {
 445                res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
 446                                         IORESOURCE_MEM | IORESOURCE_PREFETCH;
 447                if (res->flags & PCI_PREF_RANGE_TYPE_64)
 448                        res->flags |= IORESOURCE_MEM_64;
 449                region.start = base;
 450                region.end = limit + 0xfffff;
 451                pcibios_bus_to_resource(dev->bus, res, &region);
 452                dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
 453        }
 454}
 455
 456void pci_read_bridge_bases(struct pci_bus *child)
 457{
 458        struct pci_dev *dev = child->self;
 459        struct resource *res;
 460        int i;
 461
 462        if (pci_is_root_bus(child))     /* It's a host bus, nothing to read */
 463                return;
 464
 465        dev_info(&dev->dev, "PCI bridge to %pR%s\n",
 466                 &child->busn_res,
 467                 dev->transparent ? " (subtractive decode)" : "");
 468
 469        pci_bus_remove_resources(child);
 470        for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
 471                child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
 472
 473        pci_read_bridge_io(child);
 474        pci_read_bridge_mmio(child);
 475        pci_read_bridge_mmio_pref(child);
 476
 477        if (dev->transparent) {
 478                pci_bus_for_each_resource(child->parent, res, i) {
 479                        if (res && res->flags) {
 480                                pci_bus_add_resource(child, res,
 481                                                     PCI_SUBTRACTIVE_DECODE);
 482                                dev_printk(KERN_DEBUG, &dev->dev,
 483                                           "  bridge window %pR (subtractive decode)\n",
 484                                           res);
 485                        }
 486                }
 487        }
 488}
 489
 490static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
 491{
 492        struct pci_bus *b;
 493
 494        b = kzalloc(sizeof(*b), GFP_KERNEL);
 495        if (!b)
 496                return NULL;
 497
 498        INIT_LIST_HEAD(&b->node);
 499        INIT_LIST_HEAD(&b->children);
 500        INIT_LIST_HEAD(&b->devices);
 501        INIT_LIST_HEAD(&b->slots);
 502        INIT_LIST_HEAD(&b->resources);
 503        b->max_bus_speed = PCI_SPEED_UNKNOWN;
 504        b->cur_bus_speed = PCI_SPEED_UNKNOWN;
 505#ifdef CONFIG_PCI_DOMAINS_GENERIC
 506        if (parent)
 507                b->domain_nr = parent->domain_nr;
 508#endif
 509        return b;
 510}
 511
 512static void pci_release_host_bridge_dev(struct device *dev)
 513{
 514        struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
 515
 516        if (bridge->release_fn)
 517                bridge->release_fn(bridge);
 518
 519        pci_free_resource_list(&bridge->windows);
 520
 521        kfree(bridge);
 522}
 523
 524static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
 525{
 526        struct pci_host_bridge *bridge;
 527
 528        bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
 529        if (!bridge)
 530                return NULL;
 531
 532        INIT_LIST_HEAD(&bridge->windows);
 533        bridge->bus = b;
 534        return bridge;
 535}
 536
 537static const unsigned char pcix_bus_speed[] = {
 538        PCI_SPEED_UNKNOWN,              /* 0 */
 539        PCI_SPEED_66MHz_PCIX,           /* 1 */
 540        PCI_SPEED_100MHz_PCIX,          /* 2 */
 541        PCI_SPEED_133MHz_PCIX,          /* 3 */
 542        PCI_SPEED_UNKNOWN,              /* 4 */
 543        PCI_SPEED_66MHz_PCIX_ECC,       /* 5 */
 544        PCI_SPEED_100MHz_PCIX_ECC,      /* 6 */
 545        PCI_SPEED_133MHz_PCIX_ECC,      /* 7 */
 546        PCI_SPEED_UNKNOWN,              /* 8 */
 547        PCI_SPEED_66MHz_PCIX_266,       /* 9 */
 548        PCI_SPEED_100MHz_PCIX_266,      /* A */
 549        PCI_SPEED_133MHz_PCIX_266,      /* B */
 550        PCI_SPEED_UNKNOWN,              /* C */
 551        PCI_SPEED_66MHz_PCIX_533,       /* D */
 552        PCI_SPEED_100MHz_PCIX_533,      /* E */
 553        PCI_SPEED_133MHz_PCIX_533       /* F */
 554};
 555
 556const unsigned char pcie_link_speed[] = {
 557        PCI_SPEED_UNKNOWN,              /* 0 */
 558        PCIE_SPEED_2_5GT,               /* 1 */
 559        PCIE_SPEED_5_0GT,               /* 2 */
 560        PCIE_SPEED_8_0GT,               /* 3 */
 561        PCI_SPEED_UNKNOWN,              /* 4 */
 562        PCI_SPEED_UNKNOWN,              /* 5 */
 563        PCI_SPEED_UNKNOWN,              /* 6 */
 564        PCI_SPEED_UNKNOWN,              /* 7 */
 565        PCI_SPEED_UNKNOWN,              /* 8 */
 566        PCI_SPEED_UNKNOWN,              /* 9 */
 567        PCI_SPEED_UNKNOWN,              /* A */
 568        PCI_SPEED_UNKNOWN,              /* B */
 569        PCI_SPEED_UNKNOWN,              /* C */
 570        PCI_SPEED_UNKNOWN,              /* D */
 571        PCI_SPEED_UNKNOWN,              /* E */
 572        PCI_SPEED_UNKNOWN               /* F */
 573};
 574
 575void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
 576{
 577        bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
 578}
 579EXPORT_SYMBOL_GPL(pcie_update_link_speed);
 580
 581static unsigned char agp_speeds[] = {
 582        AGP_UNKNOWN,
 583        AGP_1X,
 584        AGP_2X,
 585        AGP_4X,
 586        AGP_8X
 587};
 588
 589static enum pci_bus_speed agp_speed(int agp3, int agpstat)
 590{
 591        int index = 0;
 592
 593        if (agpstat & 4)
 594                index = 3;
 595        else if (agpstat & 2)
 596                index = 2;
 597        else if (agpstat & 1)
 598                index = 1;
 599        else
 600                goto out;
 601
 602        if (agp3) {
 603                index += 2;
 604                if (index == 5)
 605                        index = 0;
 606        }
 607
 608 out:
 609        return agp_speeds[index];
 610}
 611
 612static void pci_set_bus_speed(struct pci_bus *bus)
 613{
 614        struct pci_dev *bridge = bus->self;
 615        int pos;
 616
 617        pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
 618        if (!pos)
 619                pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
 620        if (pos) {
 621                u32 agpstat, agpcmd;
 622
 623                pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
 624                bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
 625
 626                pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
 627                bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
 628        }
 629
 630        pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
 631        if (pos) {
 632                u16 status;
 633                enum pci_bus_speed max;
 634
 635                pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
 636                                     &status);
 637
 638                if (status & PCI_X_SSTATUS_533MHZ) {
 639                        max = PCI_SPEED_133MHz_PCIX_533;
 640                } else if (status & PCI_X_SSTATUS_266MHZ) {
 641                        max = PCI_SPEED_133MHz_PCIX_266;
 642                } else if (status & PCI_X_SSTATUS_133MHZ) {
 643                        if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
 644                                max = PCI_SPEED_133MHz_PCIX_ECC;
 645                        else
 646                                max = PCI_SPEED_133MHz_PCIX;
 647                } else {
 648                        max = PCI_SPEED_66MHz_PCIX;
 649                }
 650
 651                bus->max_bus_speed = max;
 652                bus->cur_bus_speed = pcix_bus_speed[
 653                        (status & PCI_X_SSTATUS_FREQ) >> 6];
 654
 655                return;
 656        }
 657
 658        if (pci_is_pcie(bridge)) {
 659                u32 linkcap;
 660                u16 linksta;
 661
 662                pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
 663                bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
 664
 665                pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
 666                pcie_update_link_speed(bus, linksta);
 667        }
 668}
 669
 670static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
 671{
 672        struct irq_domain *d;
 673
 674        /*
 675         * Any firmware interface that can resolve the msi_domain
 676         * should be called from here.
 677         */
 678        d = pci_host_bridge_of_msi_domain(bus);
 679        if (!d)
 680                d = pci_host_bridge_acpi_msi_domain(bus);
 681
 682#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 683        /*
 684         * If no IRQ domain was found via the OF tree, try looking it up
 685         * directly through the fwnode_handle.
 686         */
 687        if (!d) {
 688                struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);
 689
 690                if (fwnode)
 691                        d = irq_find_matching_fwnode(fwnode,
 692                                                     DOMAIN_BUS_PCI_MSI);
 693        }
 694#endif
 695
 696        return d;
 697}
 698
 699static void pci_set_bus_msi_domain(struct pci_bus *bus)
 700{
 701        struct irq_domain *d;
 702        struct pci_bus *b;
 703
 704        /*
 705         * The bus can be a root bus, a subordinate bus, or a virtual bus
 706         * created by an SR-IOV device.  Walk up to the first bridge device
 707         * found or derive the domain from the host bridge.
 708         */
 709        for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
 710                if (b->self)
 711                        d = dev_get_msi_domain(&b->self->dev);
 712        }
 713
 714        if (!d)
 715                d = pci_host_bridge_msi_domain(b);
 716
 717        dev_set_msi_domain(&bus->dev, d);
 718}
 719
 720static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 721                                           struct pci_dev *bridge, int busnr)
 722{
 723        struct pci_bus *child;
 724        int i;
 725        int ret;
 726
 727        /*
  728         * Allocate a new bus and inherit settings from the parent.
 729         */
 730        child = pci_alloc_bus(parent);
 731        if (!child)
 732                return NULL;
 733
 734        child->parent = parent;
 735        child->ops = parent->ops;
 736        child->msi = parent->msi;
 737        child->sysdata = parent->sysdata;
 738        child->bus_flags = parent->bus_flags;
 739
 740        /* initialize some portions of the bus device, but don't register it
 741         * now as the parent is not properly set up yet.
 742         */
 743        child->dev.class = &pcibus_class;
 744        dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
 745
 746        /*
 747         * Set up the primary, secondary and subordinate
 748         * bus numbers.
 749         */
 750        child->number = child->busn_res.start = busnr;
 751        child->primary = parent->busn_res.start;
 752        child->busn_res.end = 0xff;
 753
 754        if (!bridge) {
 755                child->dev.parent = parent->bridge;
 756                goto add_dev;
 757        }
 758
 759        child->self = bridge;
 760        child->bridge = get_device(&bridge->dev);
 761        child->dev.parent = child->bridge;
 762        pci_set_bus_of_node(child);
 763        pci_set_bus_speed(child);
 764
  765        /* Set up default resource pointers and names. */
 766        for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
 767                child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
 768                child->resource[i]->name = child->name;
 769        }
 770        bridge->subordinate = child;
 771
 772add_dev:
 773        pci_set_bus_msi_domain(child);
 774        ret = device_register(&child->dev);
 775        WARN_ON(ret < 0);
 776
 777        pcibios_add_bus(child);
 778
 779        if (child->ops->add_bus) {
 780                ret = child->ops->add_bus(child);
 781                if (WARN_ON(ret < 0))
 782                        dev_err(&child->dev, "failed to add bus: %d\n", ret);
 783        }
 784
 785        /* Create legacy_io and legacy_mem files for this bus */
 786        pci_create_legacy_files(child);
 787
 788        return child;
 789}
 790
 791struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 792                                int busnr)
 793{
 794        struct pci_bus *child;
 795
 796        child = pci_alloc_child_bus(parent, dev, busnr);
 797        if (child) {
 798                down_write(&pci_bus_sem);
 799                list_add_tail(&child->node, &parent->children);
 800                up_write(&pci_bus_sem);
 801        }
 802        return child;
 803}
 804EXPORT_SYMBOL(pci_add_new_bus);
 805
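    /*
     * With CRS Software Visibility enabled, a config read of the Vendor ID
     * that completes with Configuration Request Retry Status returns the
     * special value 0x0001 rather than being retried indefinitely, so
     * software can poll a device that is still initializing.
     */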
 806static void pci_enable_crs(struct pci_dev *pdev)
 807{
 808        u16 root_cap = 0;
 809
 810        /* Enable CRS Software Visibility if supported */
 811        pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
 812        if (root_cap & PCI_EXP_RTCAP_CRSVIS)
 813                pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
 814                                         PCI_EXP_RTCTL_CRSSVE);
 815}
 816
 817/*
 818 * If it's a bridge, configure it and scan the bus behind it.
 819 * For CardBus bridges, we don't scan behind as the devices will
 820 * be handled by the bridge driver itself.
 821 *
 822 * We need to process bridges in two passes -- first we scan those
 823 * already configured by the BIOS and after we are done with all of
 824 * them, we proceed to assigning numbers to the remaining buses in
 825 * order to avoid overlaps between old and new bus numbers.
 826 */
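    /*
     * Typical caller pattern (a sketch of how pci_scan_child_bus(), not
     * shown in this excerpt, drives the two passes):
     *
     *         for (pass = 0; pass < 2; pass++)
     *                 list_for_each_entry(dev, &bus->devices, bus_list)
     *                         if (pci_is_bridge(dev))
     *                                 max = pci_scan_bridge(bus, dev, max, pass);
     */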
 827int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
 828{
 829        struct pci_bus *child;
 830        int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
 831        u32 buses, i, j = 0;
 832        u16 bctl;
 833        u8 primary, secondary, subordinate;
 834        int broken = 0;
 835
 836        /*
 837         * Make sure the bridge is powered on to be able to access config
 838         * space of devices below it.
 839         */
 840        pm_runtime_get_sync(&dev->dev);
 841
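            /*
             * PCI_PRIMARY_BUS layout: primary bus number in bits 7:0,
             * secondary in 15:8, subordinate in 23:16, secondary latency
             * timer in 31:24.
             */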
 842        pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
 843        primary = buses & 0xFF;
 844        secondary = (buses >> 8) & 0xFF;
 845        subordinate = (buses >> 16) & 0xFF;
 846
 847        dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
 848                secondary, subordinate, pass);
 849
 850        if (!primary && (primary != bus->number) && secondary && subordinate) {
 851                dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
 852                primary = bus->number;
 853        }
 854
 855        /* Check if setup is sensible at all */
 856        if (!pass &&
 857            (primary != bus->number || secondary <= bus->number ||
 858             secondary > subordinate)) {
 859                dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
 860                         secondary, subordinate);
 861                broken = 1;
 862        }
 863
  864        /* Disable Master-Abort mode during probing to avoid reporting
  865           bus errors (on some architectures) */
 866        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
 867        pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
 868                              bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
 869
 870        pci_enable_crs(dev);
 871
 872        if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
 873            !is_cardbus && !broken) {
 874                unsigned int cmax;
 875                /*
 876                 * Bus already configured by firmware, process it in the first
 877                 * pass and just note the configuration.
 878                 */
 879                if (pass)
 880                        goto out;
 881
 882                /*
 883                 * The bus might already exist for two reasons: Either we are
 884                 * rescanning the bus or the bus is reachable through more than
 885                 * one bridge. The second case can happen with the i450NX
 886                 * chipset.
 887                 */
 888                child = pci_find_bus(pci_domain_nr(bus), secondary);
 889                if (!child) {
 890                        child = pci_add_new_bus(bus, dev, secondary);
 891                        if (!child)
 892                                goto out;
 893                        child->primary = primary;
 894                        pci_bus_insert_busn_res(child, secondary, subordinate);
 895                        child->bridge_ctl = bctl;
 896                }
 897
 898                cmax = pci_scan_child_bus(child);
 899                if (cmax > subordinate)
 900                        dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
 901                                 subordinate, cmax);
 902                /* subordinate should equal child->busn_res.end */
 903                if (subordinate > max)
 904                        max = subordinate;
 905        } else {
 906                /*
 907                 * We need to assign a number to this bus which we always
 908                 * do in the second pass.
 909                 */
 910                if (!pass) {
 911                        if (pcibios_assign_all_busses() || broken || is_cardbus)
 912                                /* Temporarily disable forwarding of the
 913                                   configuration cycles on all bridges in
 914                                   this bus segment to avoid possible
 915                                   conflicts in the second pass between two
 916                                   bridges programmed with overlapping
 917                                   bus ranges. */
 918                                pci_write_config_dword(dev, PCI_PRIMARY_BUS,
 919                                                       buses & ~0xffffff);
 920                        goto out;
 921                }
 922
 923                /* Clear errors */
 924                pci_write_config_word(dev, PCI_STATUS, 0xffff);
 925
 926                /* Prevent assigning a bus number that already exists.
 927                 * This can happen when a bridge is hot-plugged, so in
 928                 * this case we only re-scan this bus. */
 929                child = pci_find_bus(pci_domain_nr(bus), max+1);
 930                if (!child) {
 931                        child = pci_add_new_bus(bus, dev, max+1);
 932                        if (!child)
 933                                goto out;
 934                        pci_bus_insert_busn_res(child, max+1, 0xff);
 935                }
 936                max++;
 937                buses = (buses & 0xff000000)
 938                      | ((unsigned int)(child->primary)     <<  0)
 939                      | ((unsigned int)(child->busn_res.start)   <<  8)
 940                      | ((unsigned int)(child->busn_res.end) << 16);
 941
 942                /*
 943                 * yenta.c forces a secondary latency timer of 176.
 944                 * Copy that behaviour here.
 945                 */
 946                if (is_cardbus) {
 947                        buses &= ~0xff000000;
 948                        buses |= CARDBUS_LATENCY_TIMER << 24;
 949                }
 950
 951                /*
 952                 * We need to blast all three values with a single write.
 953                 */
 954                pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
 955
 956                if (!is_cardbus) {
 957                        child->bridge_ctl = bctl;
 958                        max = pci_scan_child_bus(child);
 959                } else {
 960                        /*
 961                         * For CardBus bridges, we leave 4 bus numbers
 962                         * as cards with a PCI-to-PCI bridge can be
 963                         * inserted later.
 964                         */
 965                        for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
 966                                struct pci_bus *parent = bus;
 967                                if (pci_find_bus(pci_domain_nr(bus),
 968                                                        max+i+1))
 969                                        break;
 970                                while (parent->parent) {
 971                                        if ((!pcibios_assign_all_busses()) &&
 972                                            (parent->busn_res.end > max) &&
 973                                            (parent->busn_res.end <= max+i)) {
 974                                                j = 1;
 975                                        }
 976                                        parent = parent->parent;
 977                                }
 978                                if (j) {
 979                                        /*
 980                                         * Often, there are two cardbus bridges
 981                                         * -- try to leave one valid bus number
 982                                         * for each one.
 983                                         */
 984                                        i /= 2;
 985                                        break;
 986                                }
 987                        }
 988                        max += i;
 989                }
 990                /*
 991                 * Set the subordinate bus number to its real value.
 992                 */
 993                pci_bus_update_busn_res_end(child, max);
 994                pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
 995        }
 996
 997        sprintf(child->name,
 998                (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
 999                pci_domain_nr(bus), child->number);
1000
1001        /* Has only triggered on CardBus, fixup is in yenta_socket */
1002        while (bus->parent) {
1003                if ((child->busn_res.end > bus->busn_res.end) ||
1004                    (child->number > bus->busn_res.end) ||
1005                    (child->number < bus->number) ||
1006                    (child->busn_res.end < bus->number)) {
1007                        dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1008                                &child->busn_res,
1009                                (bus->number > child->busn_res.end &&
1010                                 bus->busn_res.end < child->number) ?
1011                                        "wholly" : "partially",
1012                                bus->self->transparent ? " transparent" : "",
1013                                dev_name(&bus->dev),
1014                                &bus->busn_res);
1015                }
1016                bus = bus->parent;
1017        }
1018
1019out:
1020        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1021
1022        pm_runtime_put(&dev->dev);
1023
1024        return max;
1025}
1026EXPORT_SYMBOL(pci_scan_bridge);
1027
1028/*
1029 * Read interrupt line and base address registers.
1030 * The architecture-dependent code can tweak these, of course.
1031 */
1032static void pci_read_irq(struct pci_dev *dev)
1033{
1034        unsigned char irq;
1035
1036        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1037        dev->pin = irq;
1038        if (irq)
1039                pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1040        dev->irq = irq;
1041}
1042
1043void set_pcie_port_type(struct pci_dev *pdev)
1044{
1045        int pos;
1046        u16 reg16;
1047        int type;
1048        struct pci_dev *parent;
1049
1050        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1051        if (!pos)
1052                return;
1053        pdev->pcie_cap = pos;
1054        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1055        pdev->pcie_flags_reg = reg16;
1056        pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1057        pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1058
1059        /*
1060         * A Root Port is always the upstream end of a Link.  No PCIe
1061         * component has two Links.  Two Links are connected by a Switch
1062         * that has a Port on each Link and internal logic to connect the
1063         * two Ports.
1064         */
1065        type = pci_pcie_type(pdev);
1066        if (type == PCI_EXP_TYPE_ROOT_PORT)
1067                pdev->has_secondary_link = 1;
1068        else if (type == PCI_EXP_TYPE_UPSTREAM ||
1069                 type == PCI_EXP_TYPE_DOWNSTREAM) {
1070                parent = pci_upstream_bridge(pdev);
1071
1072                /*
1073                 * Usually there's an upstream device (Root Port or Switch
1074                 * Downstream Port), but we can't assume one exists.
1075                 */
1076                if (parent && !parent->has_secondary_link)
1077                        pdev->has_secondary_link = 1;
1078        }
1079}
1080
1081void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1082{
1083        u32 reg32;
1084
1085        pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1086        if (reg32 & PCI_EXP_SLTCAP_HPC)
1087                pdev->is_hotplug_bridge = 1;
1088}
1089
1090/**
1091 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1092 * @dev: PCI device
1093 *
1094 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1095 * when forwarding a type1 configuration request the bridge must check that
1096 * the extended register address field is zero.  The bridge is not permitted
 1097 * to forward such a transaction and must handle it as an Unsupported Request.
1098 * Some bridges do not follow this rule and simply drop the extended register
1099 * bits, resulting in the standard config space being aliased, every 256
1100 * bytes across the entire configuration space.  Test for this condition by
1101 * comparing the first dword of each potential alias to the vendor/device ID.
1102 * Known offenders:
1103 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1104 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1105 */
1106static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1107{
1108#ifdef CONFIG_PCI_QUIRKS
1109        int pos;
1110        u32 header, tmp;
1111
1112        pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1113
1114        for (pos = PCI_CFG_SPACE_SIZE;
1115             pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1116                if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1117                    || header != tmp)
1118                        return false;
1119        }
1120
1121        return true;
1122#else
1123        return false;
1124#endif
1125}
1126
1127/**
1128 * pci_cfg_space_size - get the configuration space size of the PCI device.
1129 * @dev: PCI device
1130 *
1131 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1132 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
1133 * access it.  Maybe we don't have a way to generate extended config space
1134 * accesses, or the device is behind a reverse Express bridge.  So we try
1135 * reading the dword at 0x100 which must either be 0 or a valid extended
1136 * capability header.
1137 */
1138static int pci_cfg_space_size_ext(struct pci_dev *dev)
1139{
1140        u32 status;
1141        int pos = PCI_CFG_SPACE_SIZE;
1142
1143        if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1144                return PCI_CFG_SPACE_SIZE;
1145        if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1146                return PCI_CFG_SPACE_SIZE;
1147
1148        return PCI_CFG_SPACE_EXP_SIZE;
1149}
1150
1151int pci_cfg_space_size(struct pci_dev *dev)
1152{
1153        int pos;
1154        u32 status;
1155        u16 class;
1156
1157        class = dev->class >> 8;
1158        if (class == PCI_CLASS_BRIDGE_HOST)
1159                return pci_cfg_space_size_ext(dev);
1160
1161        if (pci_is_pcie(dev))
1162                return pci_cfg_space_size_ext(dev);
1163
1164        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1165        if (!pos)
1166                return PCI_CFG_SPACE_SIZE;
1167
1168        pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1169        if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1170                return pci_cfg_space_size_ext(dev);
1171
1172        return PCI_CFG_SPACE_SIZE;
1173}
1174
1175#define LEGACY_IO_RESOURCE      (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1176
1177static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1178{
1179        /*
1180         * Disable the MSI hardware to avoid screaming interrupts
1181         * during boot.  This is the power on reset default so
1182         * usually this should be a noop.
1183         */
1184        dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1185        if (dev->msi_cap)
1186                pci_msi_set_enable(dev, 0);
1187
1188        dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1189        if (dev->msix_cap)
1190                pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1191}
1192
1193/**
1194 * pci_setup_device - fill in class and map information of a device
1195 * @dev: the device structure to fill
1196 *
1197 * Initialize the device structure with information about the device's
 1198 * vendor, class, memory and IO-space addresses, IRQ lines, etc.
1199 * Called at initialisation of the PCI subsystem and by CardBus services.
1200 * Returns 0 on success and negative if unknown type of device (not normal,
1201 * bridge or CardBus).
1202 */
1203int pci_setup_device(struct pci_dev *dev)
1204{
1205        u32 class;
1206        u16 cmd;
1207        u8 hdr_type;
1208        int pos = 0;
1209        struct pci_bus_region region;
1210        struct resource *res;
1211
1212        if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1213                return -EIO;
1214
1215        dev->sysdata = dev->bus->sysdata;
1216        dev->dev.parent = dev->bus->bridge;
1217        dev->dev.bus = &pci_bus_type;
1218        dev->hdr_type = hdr_type & 0x7f;
1219        dev->multifunction = !!(hdr_type & 0x80);
1220        dev->error_state = pci_channel_io_normal;
1221        set_pcie_port_type(dev);
1222
1223        pci_dev_assign_slot(dev);
1224        /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1225           set this higher, assuming the system even supports it.  */
1226        dev->dma_mask = 0xffffffff;
1227
1228        dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1229                     dev->bus->number, PCI_SLOT(dev->devfn),
1230                     PCI_FUNC(dev->devfn));
1231
1232        pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1233        dev->revision = class & 0xff;
1234        dev->class = class >> 8;                    /* upper 3 bytes */
1235
1236        dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1237                   dev->vendor, dev->device, dev->hdr_type, dev->class);
1238
1239        /* need to have dev->class ready */
1240        dev->cfg_size = pci_cfg_space_size(dev);
1241
1242        /* "Unknown power state" */
1243        dev->current_state = PCI_UNKNOWN;
1244
1245        /* Early fixups, before probing the BARs */
1246        pci_fixup_device(pci_fixup_early, dev);
1247        /* device class may be changed after fixup */
1248        class = dev->class >> 8;
1249
1250        if (dev->non_compliant_bars) {
1251                pci_read_config_word(dev, PCI_COMMAND, &cmd);
1252                if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1253                        dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1254                        cmd &= ~PCI_COMMAND_IO;
1255                        cmd &= ~PCI_COMMAND_MEMORY;
1256                        pci_write_config_word(dev, PCI_COMMAND, cmd);
1257                }
1258        }
1259
1260        switch (dev->hdr_type) {                    /* header type */
1261        case PCI_HEADER_TYPE_NORMAL:                /* standard header */
1262                if (class == PCI_CLASS_BRIDGE_PCI)
1263                        goto bad;
1264                pci_read_irq(dev);
1265                pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1266                pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1267                pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1268
1269                /*
1270                 * Do the ugly legacy mode stuff here rather than broken chip
1271                 * quirk code. Legacy mode ATA controllers have fixed
1272                 * addresses. These are not always echoed in BAR0-3, and
1273                 * BAR0-3 in a few cases contain junk!
1274                 */
1275                if (class == PCI_CLASS_STORAGE_IDE) {
1276                        u8 progif;
1277                        pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1278                        if ((progif & 1) == 0) {
1279                                region.start = 0x1F0;
1280                                region.end = 0x1F7;
1281                                res = &dev->resource[0];
1282                                res->flags = LEGACY_IO_RESOURCE;
1283                                pcibios_bus_to_resource(dev->bus, res, &region);
1284                                dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1285                                         res);
1286                                region.start = 0x3F6;
1287                                region.end = 0x3F6;
1288                                res = &dev->resource[1];
1289                                res->flags = LEGACY_IO_RESOURCE;
1290                                pcibios_bus_to_resource(dev->bus, res, &region);
1291                                dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1292                                         res);
1293                        }
1294                        if ((progif & 4) == 0) {
1295                                region.start = 0x170;
1296                                region.end = 0x177;
1297                                res = &dev->resource[2];
1298                                res->flags = LEGACY_IO_RESOURCE;
1299                                pcibios_bus_to_resource(dev->bus, res, &region);
1300                                dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1301                                         res);
1302                                region.start = 0x376;
1303                                region.end = 0x376;
1304                                res = &dev->resource[3];
1305                                res->flags = LEGACY_IO_RESOURCE;
1306                                pcibios_bus_to_resource(dev->bus, res, &region);
1307                                dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1308                                         res);
1309                        }
1310                }
1311                break;
1312
1313        case PCI_HEADER_TYPE_BRIDGE:                /* bridge header */
1314                if (class != PCI_CLASS_BRIDGE_PCI)
1315                        goto bad;
 1316                /* The PCI-to-PCI bridge spec requires that a subtractive
 1317                   decode (i.e. transparent) bridge have a programming
 1318                   interface code of 0x01. */
1319                pci_read_irq(dev);
1320                dev->transparent = ((dev->class & 0xff) == 1);
1321                pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1322                set_pcie_hotplug_bridge(dev);
1323                pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1324                if (pos) {
1325                        pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1326                        pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1327                }
1328                break;
1329
1330        case PCI_HEADER_TYPE_CARDBUS:               /* CardBus bridge header */
1331                if (class != PCI_CLASS_BRIDGE_CARDBUS)
1332                        goto bad;
1333                pci_read_irq(dev);
1334                pci_read_bases(dev, 1, 0);
1335                pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1336                pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1337                break;
1338
1339        default:                                    /* unknown header */
1340                dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1341                        dev->hdr_type);
1342                return -EIO;
1343
1344        bad:
1345                dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1346                        dev->class, dev->hdr_type);
1347                dev->class = PCI_CLASS_NOT_DEFINED << 8;
1348        }
1349
1350        /* We found a fine healthy device, go go go... */
1351        return 0;
1352}
1353
1354static void pci_configure_mps(struct pci_dev *dev)
1355{
1356        struct pci_dev *bridge = pci_upstream_bridge(dev);
1357        int mps, p_mps, rc;
1358
1359        if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1360                return;
1361
1362        mps = pcie_get_mps(dev);
1363        p_mps = pcie_get_mps(bridge);
1364
1365        if (mps == p_mps)
1366                return;
1367
1368        if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1369                dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1370                         mps, pci_name(bridge), p_mps);
1371                return;
1372        }
1373
1374        /*
1375         * Fancier MPS configuration is done later by
1376         * pcie_bus_configure_settings()
1377         */
1378        if (pcie_bus_config != PCIE_BUS_DEFAULT)
1379                return;
1380
1381        rc = pcie_set_mps(dev, p_mps);
1382        if (rc) {
1383                dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1384                         p_mps);
1385                return;
1386        }
1387
1388        dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1389                 p_mps, mps, 128 << dev->pcie_mpss);
1390}
1391
1392static struct hpp_type0 pci_default_type0 = {
1393        .revision = 1,
1394        .cache_line_size = 8,
1395        .latency_timer = 0x40,
1396        .enable_serr = 0,
1397        .enable_perr = 0,
1398};
1399
1400static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1401{
1402        u16 pci_cmd, pci_bctl;
1403
1404        if (!hpp)
1405                hpp = &pci_default_type0;
1406
1407        if (hpp->revision > 1) {
1408                dev_warn(&dev->dev,
1409                         "PCI settings rev %d not supported; using defaults\n",
1410                         hpp->revision);
1411                hpp = &pci_default_type0;
1412        }
1413
1414        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1415        pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1416        pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1417        if (hpp->enable_serr)
1418                pci_cmd |= PCI_COMMAND_SERR;
1419        if (hpp->enable_perr)
1420                pci_cmd |= PCI_COMMAND_PARITY;
1421        pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1422
1423        /* Program bridge control value */
1424        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1425                pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1426                                      hpp->latency_timer);
1427                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1428                if (hpp->enable_serr)
1429                        pci_bctl |= PCI_BRIDGE_CTL_SERR;
1430                if (hpp->enable_perr)
1431                        pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1432                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1433        }
1434}
1435
1436static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1437{
1438        if (hpp)
1439                dev_warn(&dev->dev, "PCI-X settings not supported\n");
1440}
1441
1442static bool pcie_root_rcb_set(struct pci_dev *dev)
1443{
1444        struct pci_dev *rp = pcie_find_root_port(dev);
1445        u16 lnkctl;
1446
1447        if (!rp)
1448                return false;
1449
1450        pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1451        if (lnkctl & PCI_EXP_LNKCTL_RCB)
1452                return true;
1453
1454        return false;
1455}
1456
1457static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1458{
1459        int pos;
1460        u32 reg32;
1461
1462        if (!hpp)
1463                return;
1464
1465        if (hpp->revision > 1) {
1466                dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1467                         hpp->revision);
1468                return;
1469        }
1470
1471        /*
1472         * Don't allow _HPX to change MPS or MRRS settings.  We manage
1473         * those to make sure they're consistent with the rest of the
1474         * platform.
1475         */
1476        hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1477                                    PCI_EXP_DEVCTL_READRQ;
1478        hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1479                                    PCI_EXP_DEVCTL_READRQ);
1480
1481        /* Initialize Device Control Register */
1482        pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1483                        ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1484
1485        /* Initialize Link Control Register */
1486        if (pcie_cap_has_lnkctl(dev)) {
1487
1488                /*
1489                 * If the Root Port supports Read Completion Boundary of
1490                 * 128, set RCB to 128.  Otherwise, clear it.
1491                 */
1492                hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1493                hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1494                if (pcie_root_rcb_set(dev))
1495                        hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1496
1497                pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1498                        ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1499        }
1500
1501        /* Find Advanced Error Reporting Enhanced Capability */
1502        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1503        if (!pos)
1504                return;
1505
1506        /* Initialize Uncorrectable Error Mask Register */
1507        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1508        reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1509        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1510
1511        /* Initialize Uncorrectable Error Severity Register */
1512        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1513        reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1514        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1515
1516        /* Initialize Correctable Error Mask Register */
1517        pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1518        reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1519        pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1520
1521        /* Initialize Advanced Error Capabilities and Control Register */
1522        pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1523        reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1524        pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1525
1526        /*
1527         * FIXME: The following two registers are not supported yet.
1528         *
1529         *   o Secondary Uncorrectable Error Severity Register
1530         *   o Secondary Uncorrectable Error Mask Register
1531         */
1532}
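
    /*
     * Example (illustrative): every _HPX Type 2 field pair above is applied
     * as an AND mask followed by an OR mask, e.g. for the Uncorrectable
     * Error Mask register:
     *
     *        reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
     *
     * which lets the platform force bits on, force bits off, or leave them
     * untouched.
     */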
1533
1534static void pci_configure_device(struct pci_dev *dev)
1535{
1536        struct hotplug_params hpp;
1537        int ret;
1538
1539        pci_configure_mps(dev);
1540
1541        memset(&hpp, 0, sizeof(hpp));
1542        ret = pci_get_hp_params(dev, &hpp);
1543        if (ret)
1544                return;
1545
1546        program_hpp_type2(dev, hpp.t2);
1547        program_hpp_type1(dev, hpp.t1);
1548        program_hpp_type0(dev, hpp.t0);
1549}
1550
1551static void pci_release_capabilities(struct pci_dev *dev)
1552{
1553        pci_vpd_release(dev);
1554        pci_iov_release(dev);
1555        pci_free_cap_save_buffers(dev);
1556}
1557
1558/**
1559 * pci_release_dev - free a pci device structure when all users of it are finished.
1560 * @dev: device that's been disconnected
1561 *
1562 * Will be called only by the device core when all users of this pci device are
1563 * done.
1564 */
1565static void pci_release_dev(struct device *dev)
1566{
1567        struct pci_dev *pci_dev;
1568
1569        pci_dev = to_pci_dev(dev);
1570        pci_release_capabilities(pci_dev);
1571        pci_release_of_node(pci_dev);
1572        pcibios_release_device(pci_dev);
1573        pci_bus_put(pci_dev->bus);
1574        kfree(pci_dev->driver_override);
1575        kfree(pci_dev->dma_alias_mask);
1576        kfree(pci_dev);
1577}
1578
1579struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1580{
1581        struct pci_dev *dev;
1582
1583        dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1584        if (!dev)
1585                return NULL;
1586
1587        INIT_LIST_HEAD(&dev->bus_list);
1588        dev->dev.type = &pci_dev_type;
1589        dev->bus = pci_bus_get(bus);
1590
1591        return dev;
1592}
1593EXPORT_SYMBOL(pci_alloc_dev);
1594
1595bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1596                                int crs_timeout)
1597{
1598        int delay = 1;
1599
1600        if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1601                return false;
1602
1603        /* some broken boards return 0 or ~0 if a slot is empty: */
1604        if (*l == 0xffffffff || *l == 0x00000000 ||
1605            *l == 0x0000ffff || *l == 0xffff0000)
1606                return false;
1607
1608        /*
1609         * Configuration Request Retry Status.  Some root ports return the
1610         * actual device ID instead of the synthetic ID (0xFFFF) required
1611         * by the PCIe spec.  Ignore the device ID and only check for
1612         * (vendor id == 1).
1613         */
1614        while ((*l & 0xffff) == 0x0001) {
1615                if (!crs_timeout)
1616                        return false;
1617
1618                msleep(delay);
1619                delay *= 2;
1620                if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1621                        return false;
1622                /* Device hasn't responded within the CRS timeout?  Must be stuck. */
1623                if (delay > crs_timeout) {
1624                        printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1625                               pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1626                               PCI_FUNC(devfn));
1627                        return false;
1628                }
1629        }
1630
1631        return true;
1632}
1633EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
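
    /*
     * Example (illustrative): a caller probing a possibly-empty slot passes
     * its CRS timeout in milliseconds; pci_scan_device() below allows 60 s:
     *
     *        u32 l;
     *
     *        if (pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
     *                pr_info("found device %04x:%04x\n", l & 0xffff, l >> 16);
     */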
1634
1635/*
1636 * Read the config data for a PCI device, sanity-check it
1637 * and fill in the dev structure...
1638 */
1639static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1640{
1641        struct pci_dev *dev;
1642        u32 l;
1643
1644        if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1645                return NULL;
1646
1647        dev = pci_alloc_dev(bus);
1648        if (!dev)
1649                return NULL;
1650
1651        dev->devfn = devfn;
1652        dev->vendor = l & 0xffff;
1653        dev->device = (l >> 16) & 0xffff;
1654
1655        pci_set_of_node(dev);
1656
1657        if (pci_setup_device(dev)) {
1658                pci_bus_put(dev->bus);
1659                kfree(dev);
1660                return NULL;
1661        }
1662
1663        return dev;
1664}
1665
1666static void pci_init_capabilities(struct pci_dev *dev)
1667{
1668        /* Enhanced Allocation */
1669        pci_ea_init(dev);
1670
1671        /* Setup MSI caps & disable MSI/MSI-X interrupts */
1672        pci_msi_setup_pci_dev(dev);
1673
1674        /* Buffers for saving PCIe and PCI-X capabilities */
1675        pci_allocate_cap_save_buffers(dev);
1676
1677        /* Power Management */
1678        pci_pm_init(dev);
1679
1680        /* Vital Product Data */
1681        pci_vpd_init(dev);
1682
1683        /* Alternative Routing-ID Forwarding */
1684        pci_configure_ari(dev);
1685
1686        /* Single Root I/O Virtualization */
1687        pci_iov_init(dev);
1688
1689        /* Address Translation Services */
1690        pci_ats_init(dev);
1691
1692        /* Enable ACS P2P upstream forwarding */
1693        pci_enable_acs(dev);
1694
1695        /* Precision Time Measurement */
1696        pci_ptm_init(dev);
1697
1698        /* Advanced Error Reporting */
1699        pci_aer_init(dev);
1700}
1701
1702/*
1703 * This is the equivalent of pci_host_bridge_msi_domain that acts on
1704 * devices. Firmware interfaces that can select the MSI domain on a
1705 * per-device basis should be called from here.
1706 */
1707static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1708{
1709        struct irq_domain *d;
1710
1711        /*
1712         * If a domain has been set through the pcibios_add_device
1713         * callback, then this is the one (platform code knows best).
1714         */
1715        d = dev_get_msi_domain(&dev->dev);
1716        if (d)
1717                return d;
1718
1719        /*
1720         * Let's see if we have a firmware interface able to provide
1721         * the domain.
1722         */
1723        d = pci_msi_get_device_domain(dev);
1724        if (d)
1725                return d;
1726
1727        return NULL;
1728}
1729
1730static void pci_set_msi_domain(struct pci_dev *dev)
1731{
1732        struct irq_domain *d;
1733
1734        /*
1735         * If the platform or firmware interfaces cannot supply a
1736         * device-specific MSI domain, then inherit the default domain
1737         * from the host bridge itself.
1738         */
1739        d = pci_dev_msi_domain(dev);
1740        if (!d)
1741                d = dev_get_msi_domain(&dev->bus->dev);
1742
1743        dev_set_msi_domain(&dev->dev, d);
1744}
1745
1746/**
1747 * pci_dma_configure - Setup DMA configuration
1748 * @dev: ptr to pci_dev struct of the PCI device
1749 *
1750 * Update the PCI device's DMA configuration using the same info from the
1751 * OF node or ACPI node of the host bridge's parent (if any).
1752 */
1753static void pci_dma_configure(struct pci_dev *dev)
1754{
1755        struct device *bridge = pci_get_host_bridge_device(dev);
1756
1757        if (IS_ENABLED(CONFIG_OF) && bridge->parent &&
1758            bridge->parent->of_node) {
1759                of_dma_configure(&dev->dev, bridge->parent->of_node);
1760        } else if (has_acpi_companion(bridge)) {
1761                struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
1762                enum dev_dma_attr attr = acpi_get_dma_attr(adev);
1763
1764                if (attr == DEV_DMA_NOT_SUPPORTED)
1765                        dev_warn(&dev->dev, "DMA not supported.\n");
1766                else
1767                        arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
1768                                           attr == DEV_DMA_COHERENT);
1769        }
1770
1771        pci_put_host_bridge_device(bridge);
1772}
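
    /*
     * Example (illustrative): on a DT system the host bridge's parent node
     * supplies the "dma-ranges" and "dma-coherent" properties consumed by
     * of_dma_configure(); on ACPI the coherency attribute comes from _CCA
     * via acpi_get_dma_attr().  Endpoint drivers still set their own mask
     * later, typically:
     *
     *        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
     */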
1773
1774void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1775{
1776        int ret;
1777
1778        pci_configure_device(dev);
1779
1780        device_initialize(&dev->dev);
1781        dev->dev.release = pci_release_dev;
1782
1783        set_dev_node(&dev->dev, pcibus_to_node(bus));
1784        dev->dev.dma_mask = &dev->dma_mask;
1785        dev->dev.dma_parms = &dev->dma_parms;
1786        dev->dev.coherent_dma_mask = 0xffffffffull;
1787        pci_dma_configure(dev);
1788
1789        pci_set_dma_max_seg_size(dev, 65536);
1790        pci_set_dma_seg_boundary(dev, 0xffffffff);
1791
1792        /* Fix up broken headers */
1793        pci_fixup_device(pci_fixup_header, dev);
1794
1795        /* Moved out of the quirk header fixup code */
1796        pci_reassigndev_resource_alignment(dev);
1797
1798        /* Clear the state_saved flag. */
1799        dev->state_saved = false;
1800
1801        /* Initialize various capabilities */
1802        pci_init_capabilities(dev);
1803
1804        /*
1805         * Add the device to our list of discovered devices
1806         * and the bus list for fixup functions, etc.
1807         */
1808        down_write(&pci_bus_sem);
1809        list_add_tail(&dev->bus_list, &bus->devices);
1810        up_write(&pci_bus_sem);
1811
1812        ret = pcibios_add_device(dev);
1813        WARN_ON(ret < 0);
1814
1815        /* Setup MSI irq domain */
1816        pci_set_msi_domain(dev);
1817
1818        /* Notifier could use PCI capabilities */
1819        dev->match_driver = false;
1820        ret = device_add(&dev->dev);
1821        WARN_ON(ret < 0);
1822}
1823
1824struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1825{
1826        struct pci_dev *dev;
1827
1828        dev = pci_get_slot(bus, devfn);
1829        if (dev) {
1830                pci_dev_put(dev);
1831                return dev;
1832        }
1833
1834        dev = pci_scan_device(bus, devfn);
1835        if (!dev)
1836                return NULL;
1837
1838        pci_device_add(dev, bus);
1839
1840        return dev;
1841}
1842EXPORT_SYMBOL(pci_scan_single_device);
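
    /*
     * Example (illustrative, with bus/slot_nr as placeholders): a hotplug
     * controller that has just powered up a slot can discover and register
     * function 0 like this:
     *
     *        struct pci_dev *dev;
     *
     *        pci_lock_rescan_remove();
     *        dev = pci_scan_single_device(bus, PCI_DEVFN(slot_nr, 0));
     *        if (dev)
     *                pci_bus_add_device(dev);
     *        pci_unlock_rescan_remove();
     */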
1843
1844static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1845{
1846        int pos;
1847        u16 cap = 0;
1848        unsigned next_fn;
1849
1850        if (pci_ari_enabled(bus)) {
1851                if (!dev)
1852                        return 0;
1853                pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1854                if (!pos)
1855                        return 0;
1856
1857                pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1858                next_fn = PCI_ARI_CAP_NFN(cap);
1859                if (next_fn <= fn)
1860                        return 0;       /* protect against malformed list */
1861
1862                return next_fn;
1863        }
1864
1865        /* dev may be NULL for non-contiguous multifunction devices */
1866        if (!dev || dev->multifunction)
1867                return (fn + 1) % 8;
1868
1869        return 0;
1870}
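
    /*
     * Example (illustrative): with ARI enabled a device may implement, say,
     * only functions 0, 8 and 64; each function's ARI "Next Function Number"
     * field points at the next implemented function and 0 terminates the
     * chain.  Without ARI we simply step through functions 0-7 of the slot.
     */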
1871
1872static int only_one_child(struct pci_bus *bus)
1873{
1874        struct pci_dev *parent = bus->self;
1875
1876        if (!parent || !pci_is_pcie(parent))
1877                return 0;
1878        if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1879                return 1;
1880
1881        /*
1882         * PCIe downstream ports are bridges that normally lead to only a
1883         * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
1884         * possible devices, not just device 0.  See PCIe spec r3.0,
1885         * sec 7.3.1.
1886         */
1887        if (parent->has_secondary_link &&
1888            !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1889                return 1;
1890        return 0;
1891}
1892
1893/**
1894 * pci_scan_slot - scan a PCI slot on a bus for devices.
1895 * @bus: PCI bus to scan
1896 * @devfn: slot number to scan (must have function number zero)
1897 *
1898 * Scan a PCI slot on the specified PCI bus for devices, adding
1899 * discovered devices to the @bus->devices list.  New devices
1900 * will not have is_added set.
1901 *
1902 * Returns the number of new devices found.
1903 */
1904int pci_scan_slot(struct pci_bus *bus, int devfn)
1905{
1906        unsigned fn, nr = 0;
1907        struct pci_dev *dev;
1908
1909        if (only_one_child(bus) && (devfn > 0))
1910                return 0; /* Already scanned the entire slot */
1911
1912        dev = pci_scan_single_device(bus, devfn);
1913        if (!dev)
1914                return 0;
1915        if (!dev->is_added)
1916                nr++;
1917
1918        for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1919                dev = pci_scan_single_device(bus, devfn + fn);
1920                if (dev) {
1921                        if (!dev->is_added)
1922                                nr++;
1923                        dev->multifunction = 1;
1924                }
1925        }
1926
1927        /* Only one slot has a PCIe device; initialize ASPM for the link */
1928        if (bus->self && nr)
1929                pcie_aspm_init_link_state(bus->self);
1930
1931        return nr;
1932}
1933EXPORT_SYMBOL(pci_scan_slot);
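
    /*
     * Example (illustrative): scanning every slot on a bus is just a loop
     * over the 32 device numbers, exactly as pci_scan_child_bus() does below:
     *
     *        for (devfn = 0; devfn < 0x100; devfn += 8)
     *                pci_scan_slot(bus, devfn);
     */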
1934
1935static int pcie_find_smpss(struct pci_dev *dev, void *data)
1936{
1937        u8 *smpss = data;
1938
1939        if (!pci_is_pcie(dev))
1940                return 0;
1941
1942        /*
1943         * We don't have a way to change MPS settings on devices that have
1944         * drivers attached.  A hot-added device might support only the minimum
1945         * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
1946         * where devices may be hot-added, we limit the fabric MPS to 128 so
1947         * hot-added devices will work correctly.
1948         *
1949         * However, if we hot-add a device to a slot directly below a Root
1950         * Port, it's impossible for there to be other existing devices below
1951         * the port.  We don't limit the MPS in this case because we can
1952         * reconfigure MPS on both the Root Port and the hot-added device,
1953         * and there are no other devices involved.
1954         *
1955         * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1956         */
1957        if (dev->is_hotplug_bridge &&
1958            pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
1959                *smpss = 0;
1960
1961        if (*smpss > dev->pcie_mpss)
1962                *smpss = dev->pcie_mpss;
1963
1964        return 0;
1965}
1966
1967static void pcie_write_mps(struct pci_dev *dev, int mps)
1968{
1969        int rc;
1970
1971        if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
1972                mps = 128 << dev->pcie_mpss;
1973
1974                if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1975                    dev->bus->self)
1976                        /* For "Performance", the assumption is made that
1977                         * downstream communication will never be larger than
1978                         * the MRRS.  So, the MPS only needs to be configured
1979                         * for the upstream communication.  This being the case,
1980                         * walk from the top down and set the MPS of the child
1981                         * to that of the parent bus.
1982                         *
1983                         * Configure the device MPS with the smaller of the
1984                         * device MPSS or the bridge MPS (which is assumed to be
1985                         * properly configured at this point to the largest
1986                         * allowable MPS based on its parent bus).
1987                         */
1988                        mps = min(mps, pcie_get_mps(dev->bus->self));
1989        }
1990
1991        rc = pcie_set_mps(dev, mps);
1992        if (rc)
1993                dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1994}
1995
1996static void pcie_write_mrrs(struct pci_dev *dev)
1997{
1998        int rc, mrrs;
1999
2000        /* In the "safe" case, do not configure the MRRS.  There appear to be
2001         * issues with setting MRRS to 0 on a number of devices.
2002         */
2003        if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2004                return;
2005
2006        /* For Max performance, the MRRS must be set to the largest supported
2007         * value.  However, it cannot be configured larger than the MPS the
2008         * device or the bus can support.  This should already be properly
2009         * configured by a prior call to pcie_write_mps.
2010         */
2011        mrrs = pcie_get_mps(dev);
2012
2013        /* MRRS is an R/W register.  Invalid values can be written, but a
2014         * subsequent read will verify if the value is acceptable or not.
2015         * If the MRRS value provided is not acceptable (e.g., too large),
2016         * shrink the value until it is acceptable to the HW.
2017         */
2018        while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2019                rc = pcie_set_readrq(dev, mrrs);
2020                if (!rc)
2021                        break;
2022
2023                dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2024                mrrs /= 2;
2025        }
2026
2027        if (mrrs < 128)
2028                dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
2029}
2030
2031static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2032{
2033        int mps, orig_mps;
2034
2035        if (!pci_is_pcie(dev))
2036                return 0;
2037
2038        if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2039            pcie_bus_config == PCIE_BUS_DEFAULT)
2040                return 0;
2041
2042        mps = 128 << *(u8 *)data;
2043        orig_mps = pcie_get_mps(dev);
2044
2045        pcie_write_mps(dev, mps);
2046        pcie_write_mrrs(dev);
2047
2048        dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2049                 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2050                 orig_mps, pcie_get_readrq(dev));
2051
2052        return 0;
2053}
2054
2055/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2056 * parents then children fashion.  If this changes, then this code will not
2057 * work as designed.
2058 */
2059void pcie_bus_configure_settings(struct pci_bus *bus)
2060{
2061        u8 smpss = 0;
2062
2063        if (!bus->self)
2064                return;
2065
2066        if (!pci_is_pcie(bus->self))
2067                return;
2068
2069        /* FIXME - Peer to peer DMA is possible, though the endpoint would need
2070         * to be aware of the MPS of the destination.  To work around this,
2071         * simply force the MPS of the entire system to the smallest possible.
2072         */
2073        if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2074                smpss = 0;
2075
2076        if (pcie_bus_config == PCIE_BUS_SAFE) {
2077                smpss = bus->self->pcie_mpss;
2078
2079                pcie_find_smpss(bus->self, &smpss);
2080                pci_walk_bus(bus, pcie_find_smpss, &smpss);
2081        }
2082
2083        pcie_bus_configure_set(bus->self, &smpss);
2084        pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2085}
2086EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
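
    /*
     * Example (illustrative): host bridge drivers usually call this once per
     * child bus after the initial scan:
     *
     *        struct pci_bus *child;
     *
     *        list_for_each_entry(child, &bus->children, node)
     *                pcie_bus_configure_settings(child);
     */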
2087
2088unsigned int pci_scan_child_bus(struct pci_bus *bus)
2089{
2090        unsigned int devfn, pass, max = bus->busn_res.start;
2091        struct pci_dev *dev;
2092
2093        dev_dbg(&bus->dev, "scanning bus\n");
2094
2095        /* Go find them, Rover! */
2096        for (devfn = 0; devfn < 0x100; devfn += 8)
2097                pci_scan_slot(bus, devfn);
2098
2099        /* Reserve buses for SR-IOV capability. */
2100        max += pci_iov_bus_range(bus);
2101
2102        /*
2103         * After performing arch-dependent fixup of the bus, look behind
2104         * all PCI-to-PCI bridges on this bus.
2105         */
2106        if (!bus->is_added) {
2107                dev_dbg(&bus->dev, "fixups for bus\n");
2108                pcibios_fixup_bus(bus);
2109                bus->is_added = 1;
2110        }
2111
2112        for (pass = 0; pass < 2; pass++)
2113                list_for_each_entry(dev, &bus->devices, bus_list) {
2114                        if (pci_is_bridge(dev))
2115                                max = pci_scan_bridge(bus, dev, max, pass);
2116                }
2117
2118        /*
2119         * Make sure a hotplug bridge has at least the minimum requested
2120         * number of buses.
2121         */
2122        if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
2123                if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
2124                        max = bus->busn_res.start + pci_hotplug_bus_size - 1;
2125        }
2126
2127        /*
2128         * We've scanned the bus and so we know all about what's on
2129         * the other side of any bridges that may be on this bus plus
2130         * any devices.
2131         *
2132         * Return how far we've got finding sub-buses.
2133         */
2134        dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2135        return max;
2136}
2137EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2138
2139/**
2140 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2141 * @bridge: Host bridge to set up.
2142 *
2143 * Default empty implementation.  Replace with an architecture-specific setup
2144 * routine, if necessary.
2145 */
2146int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2147{
2148        return 0;
2149}
2150
2151void __weak pcibios_add_bus(struct pci_bus *bus)
2152{
2153}
2154
2155void __weak pcibios_remove_bus(struct pci_bus *bus)
2156{
2157}
2158
2159struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2160                struct pci_ops *ops, void *sysdata, struct list_head *resources)
2161{
2162        int error;
2163        struct pci_host_bridge *bridge;
2164        struct pci_bus *b, *b2;
2165        struct resource_entry *window, *n;
2166        struct resource *res;
2167        resource_size_t offset;
2168        char bus_addr[64];
2169        char *fmt;
2170
2171        b = pci_alloc_bus(NULL);
2172        if (!b)
2173                return NULL;
2174
2175        b->sysdata = sysdata;
2176        b->ops = ops;
2177        b->number = b->busn_res.start = bus;
2178#ifdef CONFIG_PCI_DOMAINS_GENERIC
2179        b->domain_nr = pci_bus_find_domain_nr(b, parent);
2180#endif
2181        b2 = pci_find_bus(pci_domain_nr(b), bus);
2182        if (b2) {
2183                /* If we already got to this bus through a different bridge, ignore it */
2184                dev_dbg(&b2->dev, "bus already known\n");
2185                goto err_out;
2186        }
2187
2188        bridge = pci_alloc_host_bridge(b);
2189        if (!bridge)
2190                goto err_out;
2191
2192        bridge->dev.parent = parent;
2193        bridge->dev.release = pci_release_host_bridge_dev;
2194        dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
2195        error = pcibios_root_bridge_prepare(bridge);
2196        if (error) {
2197                kfree(bridge);
2198                goto err_out;
2199        }
2200
2201        error = device_register(&bridge->dev);
2202        if (error) {
2203                put_device(&bridge->dev);
2204                goto err_out;
2205        }
2206        b->bridge = get_device(&bridge->dev);
2207        device_enable_async_suspend(b->bridge);
2208        pci_set_bus_of_node(b);
2209        pci_set_bus_msi_domain(b);
2210
2211        if (!parent)
2212                set_dev_node(b->bridge, pcibus_to_node(b));
2213
2214        b->dev.class = &pcibus_class;
2215        b->dev.parent = b->bridge;
2216        dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
2217        error = device_register(&b->dev);
2218        if (error)
2219                goto class_dev_reg_err;
2220
2221        pcibios_add_bus(b);
2222
2223        /* Create legacy_io and legacy_mem files for this bus */
2224        pci_create_legacy_files(b);
2225
2226        if (parent)
2227                dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
2228        else
2229                printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
2230
2231        /* Add initial resources to the bus */
2232        resource_list_for_each_entry_safe(window, n, resources) {
2233                list_move_tail(&window->node, &bridge->windows);
2234                res = window->res;
2235                offset = window->offset;
2236                if (res->flags & IORESOURCE_BUS)
2237                        pci_bus_insert_busn_res(b, bus, res->end);
2238                else
2239                        pci_bus_add_resource(b, res, 0);
2240                if (offset) {
2241                        if (resource_type(res) == IORESOURCE_IO)
2242                                fmt = " (bus address [%#06llx-%#06llx])";
2243                        else
2244                                fmt = " (bus address [%#010llx-%#010llx])";
2245                        snprintf(bus_addr, sizeof(bus_addr), fmt,
2246                                 (unsigned long long) (res->start - offset),
2247                                 (unsigned long long) (res->end - offset));
2248                } else
2249                        bus_addr[0] = '\0';
2250                dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
2251        }
2252
2253        down_write(&pci_bus_sem);
2254        list_add_tail(&b->node, &pci_root_buses);
2255        up_write(&pci_bus_sem);
2256
2257        return b;
2258
2259class_dev_reg_err:
2260        put_device(&bridge->dev);
2261        device_unregister(&bridge->dev);
2262err_out:
2263        kfree(b);
2264        return NULL;
2265}
2266EXPORT_SYMBOL_GPL(pci_create_root_bus);
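
    /*
     * Example (illustrative): pci_scan_bus() at the end of this file is the
     * minimal caller; it builds a resource list (I/O, memory and bus-number
     * windows), creates the root bus and then scans it:
     *
     *        pci_add_resource(&resources, &ioport_resource);
     *        pci_add_resource(&resources, &iomem_resource);
     *        pci_add_resource(&resources, &busn_resource);
     *        b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
     *        if (b)
     *                pci_scan_child_bus(b);
     *        else
     *                pci_free_resource_list(&resources);
     */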
2267
2268int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2269{
2270        struct resource *res = &b->busn_res;
2271        struct resource *parent_res, *conflict;
2272
2273        res->start = bus;
2274        res->end = bus_max;
2275        res->flags = IORESOURCE_BUS;
2276
2277        if (!pci_is_root_bus(b))
2278                parent_res = &b->parent->busn_res;
2279        else {
2280                parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2281                res->flags |= IORESOURCE_PCI_FIXED;
2282        }
2283
2284        conflict = request_resource_conflict(parent_res, res);
2285
2286        if (conflict)
2287                dev_printk(KERN_DEBUG, &b->dev,
2288                           "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2289                            res, pci_is_root_bus(b) ? "domain " : "",
2290                            parent_res, conflict->name, conflict);
2291
2292        return conflict == NULL;
2293}
2294
2295int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2296{
2297        struct resource *res = &b->busn_res;
2298        struct resource old_res = *res;
2299        resource_size_t size;
2300        int ret;
2301
2302        if (res->start > bus_max)
2303                return -EINVAL;
2304
2305        size = bus_max - res->start + 1;
2306        ret = adjust_resource(res, res->start, size);
2307        dev_printk(KERN_DEBUG, &b->dev,
2308                        "busn_res: %pR end %s updated to %02x\n",
2309                        &old_res, ret ? "can not be" : "is", bus_max);
2310
2311        if (!ret && !res->parent)
2312                pci_bus_insert_busn_res(b, res->start, res->end);
2313
2314        return ret;
2315}
2316
2317void pci_bus_release_busn_res(struct pci_bus *b)
2318{
2319        struct resource *res = &b->busn_res;
2320        int ret;
2321
2322        if (!res->flags || !res->parent)
2323                return;
2324
2325        ret = release_resource(res);
2326        dev_printk(KERN_DEBUG, &b->dev,
2327                        "busn_res: %pR %s released\n",
2328                        res, ret ? "can not be" : "is");
2329}
2330
2331struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2332                struct pci_ops *ops, void *sysdata,
2333                struct list_head *resources, struct msi_controller *msi)
2334{
2335        struct resource_entry *window;
2336        bool found = false;
2337        struct pci_bus *b;
2338        int max;
2339
2340        resource_list_for_each_entry(window, resources)
2341                if (window->res->flags & IORESOURCE_BUS) {
2342                        found = true;
2343                        break;
2344                }
2345
2346        b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2347        if (!b)
2348                return NULL;
2349
2350        b->msi = msi;
2351
2352        if (!found) {
2353                dev_info(&b->dev,
2354                 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2355                        bus);
2356                pci_bus_insert_busn_res(b, bus, 255);
2357        }
2358
2359        max = pci_scan_child_bus(b);
2360
2361        if (!found)
2362                pci_bus_update_busn_res_end(b, max);
2363
2364        return b;
2365}
2366
2367struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2368                struct pci_ops *ops, void *sysdata, struct list_head *resources)
2369{
2370        return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
2371                                     NULL);
2372}
2373EXPORT_SYMBOL(pci_scan_root_bus);
2374
2375struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2376                                        void *sysdata)
2377{
2378        LIST_HEAD(resources);
2379        struct pci_bus *b;
2380
2381        pci_add_resource(&resources, &ioport_resource);
2382        pci_add_resource(&resources, &iomem_resource);
2383        pci_add_resource(&resources, &busn_resource);
2384        b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2385        if (b) {
2386                pci_scan_child_bus(b);
2387        } else {
2388                pci_free_resource_list(&resources);
2389        }
2390        return b;
2391}
2392EXPORT_SYMBOL(pci_scan_bus);
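
    /*
     * Example (illustrative, my_pci_ops being a placeholder for an arch's
     * config accessors): pci_scan_bus() only discovers devices; callers
     * still register them with the driver core afterwards:
     *
     *        bus = pci_scan_bus(0, &my_pci_ops, NULL);
     *        if (bus)
     *                pci_bus_add_devices(bus);
     */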
2393
2394/**
2395 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2396 * @bridge: PCI bridge for the bus to scan
2397 *
2398 * Scan a PCI bus and child buses for new devices, add them,
2399 * and enable them, resizing bridge mmio/io resource if necessary
2400 * and possible.  The caller must ensure the child devices are already
2401 * removed for resizing to occur.
2402 *
2403 * Returns the highest subordinate bus number discovered.
2404 */
2405unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2406{
2407        unsigned int max;
2408        struct pci_bus *bus = bridge->subordinate;
2409
2410        max = pci_scan_child_bus(bus);
2411
2412        pci_assign_unassigned_bridge_resources(bridge);
2413
2414        pci_bus_add_devices(bus);
2415
2416        return max;
2417}
2418
2419/**
2420 * pci_rescan_bus - scan a PCI bus for devices.
2421 * @bus: PCI bus to scan
2422 *
2423 * Scan a PCI bus and child buses for new devices, add them,
2424 * and enable them.
2425 *
2426 * Returns the highest subordinate bus number discovered.
2427 */
2428unsigned int pci_rescan_bus(struct pci_bus *bus)
2429{
2430        unsigned int max;
2431
2432        max = pci_scan_child_bus(bus);
2433        pci_assign_unassigned_bus_resources(bus);
2434        pci_bus_add_devices(bus);
2435
2436        return max;
2437}
2438EXPORT_SYMBOL_GPL(pci_rescan_bus);
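
    /*
     * Example (illustrative): rescans triggered from user space via the
     * sysfs "rescan" attributes wrap this in the rescan/remove lock below:
     *
     *        pci_lock_rescan_remove();
     *        pci_rescan_bus(bus);
     *        pci_unlock_rescan_remove();
     */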
2439
2440/*
2441 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2442 * routines should always be executed under this mutex.
2443 */
2444static DEFINE_MUTEX(pci_rescan_remove_lock);
2445
2446void pci_lock_rescan_remove(void)
2447{
2448        mutex_lock(&pci_rescan_remove_lock);
2449}
2450EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2451
2452void pci_unlock_rescan_remove(void)
2453{
2454        mutex_unlock(&pci_rescan_remove_lock);
2455}
2456EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
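
    /*
     * Example (illustrative): device removal follows the same locking rule:
     *
     *        pci_lock_rescan_remove();
     *        pci_stop_and_remove_bus_device(dev);
     *        pci_unlock_rescan_remove();
     *
     * which is the pattern pci_stop_and_remove_bus_device_locked() wraps
     * for its callers.
     */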
2457
2458static int __init pci_sort_bf_cmp(const struct device *d_a,
2459                                  const struct device *d_b)
2460{
2461        const struct pci_dev *a = to_pci_dev(d_a);
2462        const struct pci_dev *b = to_pci_dev(d_b);
2463
2464        if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2465        else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;
2466
2467        if      (a->bus->number < b->bus->number) return -1;
2468        else if (a->bus->number > b->bus->number) return  1;
2469
2470        if      (a->devfn < b->devfn) return -1;
2471        else if (a->devfn > b->devfn) return  1;
2472
2473        return 0;
2474}
2475
2476void __init pci_sort_breadthfirst(void)
2477{
2478        bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2479}
2480