linux/arch/x86/pci/mmconfig-shared.c
// SPDX-License-Identifier: GPL-2.0
/*
 * mmconfig-shared.c - Low-level direct PCI config space access via
 *                     MMCONFIG - common code between i386 and x86-64.
 *
 * This code does:
 * - known chipset handling
 * - ACPI decoding and validation
 *
 * Per-architecture code takes care of the mappings and accesses
 * themselves.
 */

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>

#define PREFIX "PCI: "

/* Indicate if the mmcfg resources have been placed into the resource table. */
static bool pci_mmcfg_running_state;
static bool pci_mmcfg_arch_init_failed;
static DEFINE_MUTEX(pci_mmcfg_lock);
#define pci_mmcfg_lock_held() lock_is_held(&(pci_mmcfg_lock).dep_map)

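/*
 * pci_mmcfg_list holds one pci_mmcfg_region per MMCONFIG aperture, sorted by
 * segment and starting bus number.  Readers walk it under RCU (see
 * pci_mmconfig_lookup()); updaters serialize on pci_mmcfg_lock.
 */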
LIST_HEAD(pci_mmcfg_list);

static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
        if (cfg->res.parent)
                release_resource(&cfg->res);
        list_del(&cfg->list);
        kfree(cfg);
}

static void __init free_all_mmcfg(void)
{
        struct pci_mmcfg_region *cfg, *tmp;

        pci_mmcfg_arch_free();
        list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
                pci_mmconfig_remove(cfg);
}

static void list_add_sorted(struct pci_mmcfg_region *new)
{
        struct pci_mmcfg_region *cfg;

        /* keep list sorted by segment and starting bus number */
        list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held()) {
                if (cfg->segment > new->segment ||
                    (cfg->segment == new->segment &&
                     cfg->start_bus >= new->start_bus)) {
                        list_add_tail_rcu(&new->list, &cfg->list);
                        return;
                }
        }
        list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}

static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start,
                                                   int end, u64 addr)
{
        struct pci_mmcfg_region *new;
        struct resource *res;

        if (addr == 0)
                return NULL;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->address = addr;
        new->segment = segment;
        new->start_bus = start;
        new->end_bus = end;

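        /*
         * Each bus decodes 1MB of ECAM space (32 devices * 8 functions *
         * 4KB of config space), so buses [start, end] need
         * PCI_MMCFG_BUS_OFFSET(end + 1) - PCI_MMCFG_BUS_OFFSET(start) bytes.
         */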
        res = &new->res;
        res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
        res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
                 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
        res->name = new->name;

        return new;
}

struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
                                                 int end, u64 addr)
{
        struct pci_mmcfg_region *new;

        new = pci_mmconfig_alloc(segment, start, end, addr);
        if (new) {
                mutex_lock(&pci_mmcfg_lock);
                list_add_sorted(new);
                mutex_unlock(&pci_mmcfg_lock);

                pr_info(PREFIX
                       "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
                       "(base %#lx)\n",
                       segment, start, end, &new->res, (unsigned long)addr);
        }

        return new;
}

struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
        struct pci_mmcfg_region *cfg;

        list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held())
                if (cfg->segment == segment &&
                    cfg->start_bus <= bus && bus <= cfg->end_bus)
                        return cfg;

        return NULL;
}

static const char *__init pci_mmcfg_e7520(void)
{
        u32 win;
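        /*
         * Bits 15:12 of config register 0xCE on the E7520 MCH supply address
         * bits 31:28 of a 256MB MMCONFIG window covering buses 0-255; the
         * values 0x0 and 0xF are treated as "window not usable" below.
         */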
        raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);

        win = win & 0xf000;
        if (win == 0x0000 || win == 0xf000)
                return NULL;

        if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
                return NULL;

        return "Intel Corporation E7520 Memory Controller Hub";
}

static const char *__init pci_mmcfg_intel_945(void)
{
        u32 pciexbar, mask = 0, len = 0;

        raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);

        /* Enable bit */
        if (!(pciexbar & 1))
                return NULL;

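        /*
         * PCIEXBAR: bit 0 enables the window, bits 2:1 select its size
         * (256MB, 128MB or 64MB) and the bits under 'mask' hold the base.
         * One bus decodes 1MB, so the bus count below is len >> 20.
         */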
        /* Size bits */
        switch ((pciexbar >> 1) & 3) {
        case 0:
                mask = 0xf0000000U;
                len  = 0x10000000U;
                break;
        case 1:
                mask = 0xf8000000U;
                len  = 0x08000000U;
                break;
        case 2:
                mask = 0xfc000000U;
                len  = 0x04000000U;
                break;
        default:
                return NULL;
        }

        /*
         * Errata #2: things break when the window is not aligned on a
         * 256MB boundary.  This can only happen in 64MB/128MB mode.
         */
        if ((pciexbar & mask) & 0x0fffffffU)
                return NULL;

        /* Don't hit the APIC registers and their friends */
        if ((pciexbar & mask) >= 0xf0000000U)
                return NULL;

        if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
                return NULL;

        return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}

 188
 189static const char *__init pci_mmcfg_amd_fam10h(void)
 190{
 191        u32 low, high, address;
 192        u64 base, msr;
 193        int i;
 194        unsigned segnbits = 0, busnbits, end_bus;
 195
 196        if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
 197                return NULL;
 198
 199        address = MSR_FAM10H_MMIO_CONF_BASE;
 200        if (rdmsr_safe(address, &low, &high))
 201                return NULL;
 202
 203        msr = high;
 204        msr <<= 32;
 205        msr |= low;
 206
 207        /* mmconfig is not enable */
 208        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
 209                return NULL;
 210
 211        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
 212
 213        busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
 214                         FAM10H_MMIO_CONF_BUSRANGE_MASK;
 215
 216        /*
 217         * only handle bus 0 ?
 218         * need to skip it
 219         */
 220        if (!busnbits)
 221                return NULL;
 222
 223        if (busnbits > 8) {
 224                segnbits = busnbits - 8;
 225                busnbits = 8;
 226        }
 227
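        /*
         * A BusRange wider than 8 bits means the window spans multiple PCI
         * segments: each segment gets up to 256 buses and therefore a
         * 256MB (1 << 28) slice of the window.
         */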
        end_bus = (1 << busnbits) - 1;
        for (i = 0; i < (1 << segnbits); i++)
                if (pci_mmconfig_add(i, 0, end_bus,
                                     base + (1<<28) * i) == NULL) {
                        free_all_mmcfg();
                        return NULL;
                }

        return "AMD Family 10h NB";
}

static bool __initdata mcp55_checked;
static const char *__init pci_mmcfg_nvidia_mcp55(void)
{
        int bus;
        int mcp55_mmconf_found = 0;

        static const u32 extcfg_regnum __initconst      = 0x90;
        static const u32 extcfg_regsize __initconst     = 4;
        static const u32 extcfg_enable_mask __initconst = 1 << 31;
        static const u32 extcfg_start_mask __initconst  = 0xff << 16;
        static const int extcfg_start_shift __initconst = 16;
        static const u32 extcfg_size_mask __initconst   = 0x3 << 28;
        static const int extcfg_size_shift __initconst  = 28;
        static const int extcfg_sizebus[] __initconst   = {
                0x100, 0x80, 0x40, 0x20
        };
        static const u32 extcfg_base_mask[] __initconst = {
                0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
        };
        static const int extcfg_base_lshift __initconst = 25;

        /*
         * Skip the scan if ACPI is enabled, if the AMD Fam10h probe already
         * set up MMCONFIG regions, or if the MCP55 was checked before.
         */
        if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
                return NULL;

        mcp55_checked = true;
        for (bus = 0; bus < 256; bus++) {
                u64 base;
                u32 l, extcfg;
                u16 vendor, device;
                int start, size_index, end;

                raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
                vendor = l & 0xffff;
                device = (l >> 16) & 0xffff;

                if (vendor != PCI_VENDOR_ID_NVIDIA || device != 0x0369)
                        continue;

                raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
                                  extcfg_regsize, &extcfg);

                if (!(extcfg & extcfg_enable_mask))
                        continue;

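                /*
                 * Decode the extended config space register: bits 29:28 give
                 * the window size (256/128/64/32 buses), bits 23:16 the first
                 * bus covered, and the remaining low bits (scaled by 1 << 25)
                 * the window base.
                 */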
                size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
                base = extcfg & extcfg_base_mask[size_index];
                /* The base can be above 4GB */
                base <<= extcfg_base_lshift;
                start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
                end = start + extcfg_sizebus[size_index] - 1;
                if (pci_mmconfig_add(0, start, end, base) == NULL)
                        continue;
                mcp55_mmconf_found++;
        }

        if (!mcp55_mmconf_found)
                return NULL;

        return "nVidia MCP55";
}

struct pci_mmcfg_hostbridge_probe {
        u32 bus;
        u32 devfn;
        u32 vendor;
        u32 device;
        const char *(*probe)(void);
};

static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
        { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
          PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
        { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
          PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
        { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
          0x1200, pci_mmcfg_amd_fam10h },
        { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
          0x1200, pci_mmcfg_amd_fam10h },
        { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
          0x0369, pci_mmcfg_nvidia_mcp55 },
};

static void __init pci_mmcfg_check_end_bus_number(void)
{
        struct pci_mmcfg_region *cfg, *cfgx;

        /*
         * Fix up overlapping regions.  The list is sorted by segment and
         * starting bus, so only neighbouring entries can overlap.
         */
        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
                if (cfg->end_bus < cfg->start_bus)
                        cfg->end_bus = 255;

                /* Don't access the list head! */
                if (cfg->list.next == &pci_mmcfg_list)
                        break;

                cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
                if (cfg->end_bus >= cfgx->start_bus)
                        cfg->end_bus = cfgx->start_bus - 1;
        }
}

static int __init pci_mmcfg_check_hostbridge(void)
{
        u32 l;
        u32 bus, devfn;
        u16 vendor, device;
        int i;
        const char *name;

        if (!raw_pci_ops)
                return 0;

        free_all_mmcfg();

        for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
                bus = pci_mmcfg_probes[i].bus;
                devfn = pci_mmcfg_probes[i].devfn;
                raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
                vendor = l & 0xffff;
                device = (l >> 16) & 0xffff;

                name = NULL;
                if (pci_mmcfg_probes[i].vendor == vendor &&
                    pci_mmcfg_probes[i].device == device)
                        name = pci_mmcfg_probes[i].probe();

                if (name)
                        pr_info(PREFIX "%s with MMCONFIG support\n", name);
        }

        /* Some chipsets report a bogus end bus number, fix it up */
        pci_mmcfg_check_end_bus_number();

        return !list_empty(&pci_mmcfg_list);
}

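/*
 * Check whether an MMCONFIG range is covered by the ACPI motherboard
 * resources: walk the _CRS of every PNP0C01/PNP0C02 device and look for a
 * memory resource that fully contains it.
 */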
static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
{
        struct resource *mcfg_res = data;
        struct acpi_resource_address64 address;
        acpi_status status;

        if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
                struct acpi_resource_fixed_memory32 *fixmem32 =
                        &res->data.fixed_memory32;
                if (!fixmem32)
                        return AE_OK;
                if ((mcfg_res->start >= fixmem32->address) &&
                    (mcfg_res->end < (fixmem32->address +
                                      fixmem32->address_length))) {
                        mcfg_res->flags = 1;
                        return AE_CTRL_TERMINATE;
                }
        }
        if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
            (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
                return AE_OK;

        status = acpi_resource_to_address64(res, &address);
        if (ACPI_FAILURE(status) ||
           (address.address.address_length <= 0) ||
           (address.resource_type != ACPI_MEMORY_RANGE))
                return AE_OK;

        if ((mcfg_res->start >= address.address.minimum) &&
            (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
                mcfg_res->flags = 1;
                return AE_CTRL_TERMINATE;
        }
        return AE_OK;
}

static acpi_status find_mboard_resource(acpi_handle handle, u32 lvl,
                                        void *context, void **rv)
{
        struct resource *mcfg_res = context;

        acpi_walk_resources(handle, METHOD_NAME__CRS,
                            check_mcfg_resource, context);

        if (mcfg_res->flags)
                return AE_CTRL_TERMINATE;

        return AE_OK;
}

static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
{
        struct resource mcfg_res;

        mcfg_res.start = start;
        mcfg_res.end = end - 1;
        mcfg_res.flags = 0;

        acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);

        if (!mcfg_res.flags)
                acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
                                 NULL);

        return mcfg_res.flags;
}

typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);

static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                                     struct pci_mmcfg_region *cfg,
                                     struct device *dev, int with_e820)
{
        u64 addr = cfg->res.start;
        u64 size = resource_size(&cfg->res);
        u64 old_size = size;
        int num_buses;
        const char *method = with_e820 ? "E820" : "ACPI motherboard resources";

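        /*
         * If the full aperture is not reserved, retry with successively
         * halved sizes; give up once less than 16MB (16 buses) would remain.
         */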
        while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
                size >>= 1;
                if (size < (16UL<<20))
                        break;
        }

        if (size < (16UL<<20) && size != old_size)
                return false;

        if (dev)
                dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
                         &cfg->res, method);
        else
                pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
                       &cfg->res, method);

        if (old_size != size) {
                /* update end_bus */
                cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
                num_buses = cfg->end_bus - cfg->start_bus + 1;
                cfg->res.end = cfg->res.start +
                    PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
                snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
                         "PCI MMCONFIG %04x [bus %02x-%02x]",
                         cfg->segment, cfg->start_bus, cfg->end_bus);

                if (dev)
                        dev_info(dev,
                                "MMCONFIG at %pR (base %#lx) (size reduced!)\n",
                                &cfg->res, (unsigned long) cfg->address);
                else
                        pr_info(PREFIX
                                "MMCONFIG for %04x [bus %02x-%02x] "
                                "at %pR (base %#lx) (size reduced!)\n",
                                cfg->segment, cfg->start_bus, cfg->end_bus,
                                &cfg->res, (unsigned long) cfg->address);
        }

        return true;
}

static bool __ref
pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
{
        if (!early && !acpi_disabled) {
                if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
                        return true;

                if (dev)
                        dev_info(dev, FW_INFO
                                 "MMCONFIG at %pR not reserved in "
                                 "ACPI motherboard resources\n",
                                 &cfg->res);
                else
                        pr_info(FW_INFO PREFIX
                               "MMCONFIG at %pR not reserved in "
                               "ACPI motherboard resources\n",
                               &cfg->res);
        }

        /*
         * e820__mapped_all() is marked as __init.
         * All entries from the ACPI MCFG table have been checked at boot time.
         * For MCFG information constructed from a hotpluggable host bridge's
         * _CBA method, just assume it's reserved.
         */
        if (pci_mmcfg_running_state)
                return true;

        /*
         * Don't try to do this check unless configuration type 1 is
         * available.  How about type 2?
         */
        if (raw_pci_ops)
                return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);

        return false;
}

static void __init pci_mmcfg_reject_broken(int early)
{
        struct pci_mmcfg_region *cfg;

        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
                if (!pci_mmcfg_check_reserved(NULL, cfg, early)) {
                        pr_info(PREFIX "not using MMCONFIG\n");
                        free_all_mmcfg();
                        return;
                }
        }
}

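/*
 * MCFG entries above 4GB are trusted only on SGI boxes or when the table
 * revision is at least 1 and the BIOS is dated 2010 or newer; older firmware
 * was presumably too often wrong about such high addresses.
 */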
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
                                        struct acpi_mcfg_allocation *cfg)
{
        if (cfg->address < 0xFFFFFFFF)
                return 0;

        if (!strncmp(mcfg->header.oem_id, "SGI", 3))
                return 0;

        if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010))
                return 0;

        pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
               "is above 4GB, ignored\n", cfg->pci_segment,
               cfg->start_bus_number, cfg->end_bus_number, cfg->address);
        return -EINVAL;
}

static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
        struct acpi_table_mcfg *mcfg;
        struct acpi_mcfg_allocation *cfg_table, *cfg;
        unsigned long i;
        int entries;

        if (!header)
                return -EINVAL;

        mcfg = (struct acpi_table_mcfg *)header;

        /*
         * The MCFG table is a fixed header followed by an array of
         * acpi_mcfg_allocation entries; work out how many entries it holds.
         */
        free_all_mmcfg();
        entries = 0;
        i = header->length - sizeof(struct acpi_table_mcfg);
        while (i >= sizeof(struct acpi_mcfg_allocation)) {
                entries++;
                i -= sizeof(struct acpi_mcfg_allocation);
        }
        if (entries == 0) {
                pr_err(PREFIX "MMCONFIG has no entries\n");
                return -ENODEV;
        }

        cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
        for (i = 0; i < entries; i++) {
                cfg = &cfg_table[i];
                if (acpi_mcfg_check_entry(mcfg, cfg)) {
                        free_all_mmcfg();
                        return -ENODEV;
                }

                if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
                                   cfg->end_bus_number, cfg->address) == NULL) {
                        pr_warn(PREFIX "no memory for MCFG entries\n");
                        free_all_mmcfg();
                        return -ENOMEM;
                }
        }

        return 0;
}

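/*
 * APEI uses arch_apei_filter_addr (presumably to keep the MMCONFIG apertures
 * out of the address ranges it claims); pci_mmcfg_for_each_region() walks
 * every registered region on its behalf.
 */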
#ifdef CONFIG_ACPI_APEI
extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
                                     void *data), void *data);

static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
                                     void *data), void *data)
{
        struct pci_mmcfg_region *cfg;
        int rc;

        if (list_empty(&pci_mmcfg_list))
                return 0;

        list_for_each_entry(cfg, &pci_mmcfg_list, list) {
                rc = func(cfg->res.start, resource_size(&cfg->res), data);
                if (rc)
                        return rc;
        }

        return 0;
}
#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
#else
#define set_apei_filter()
#endif

static void __init __pci_mmcfg_init(int early)
{
        pci_mmcfg_reject_broken(early);
        if (list_empty(&pci_mmcfg_list))
                return;

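        /*
         * If pcibios_last_bus is still unset, default it to the end bus of
         * the last segment 0 MMCONFIG region.
         */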
        if (pcibios_last_bus < 0) {
                const struct pci_mmcfg_region *cfg;

                list_for_each_entry(cfg, &pci_mmcfg_list, list) {
                        if (cfg->segment)
                                break;
                        pcibios_last_bus = cfg->end_bus;
                }
        }

        if (pci_mmcfg_arch_init()) {
                pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
        } else {
                free_all_mmcfg();
                pci_mmcfg_arch_init_failed = true;
        }
}

static int __initdata known_bridge;

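/*
 * Early init runs before the ACPI namespace can be consulted for resource
 * reservations, so __pci_mmcfg_init(1) validates the regions against E820
 * only; pci_mmcfg_late_init() redoes the work with the full ACPI checks if
 * MMCONFIG could not be enabled early.
 */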
void __init pci_mmcfg_early_init(void)
{
        if (pci_probe & PCI_PROBE_MMCONF) {
                if (pci_mmcfg_check_hostbridge())
                        known_bridge = 1;
                else
                        acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
                __pci_mmcfg_init(1);

                set_apei_filter();
        }
}

void __init pci_mmcfg_late_init(void)
{
        /* MMCONFIG disabled */
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return;

        if (known_bridge)
                return;

        /* MMCONFIG hasn't been enabled yet, try again */
        if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
                acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
                __pci_mmcfg_init(0);
        }
}

static int __init pci_mmcfg_late_insert_resources(void)
{
        struct pci_mmcfg_region *cfg;

        pci_mmcfg_running_state = true;

        /* If we are not using MMCONFIG, don't insert the resources. */
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return 1;

        /*
         * Attempt to insert the mmcfg resources but not with the busy flag
         * marked so it won't cause request errors when __request_region is
         * called.
         */
        list_for_each_entry(cfg, &pci_mmcfg_list, list)
                if (!cfg->res.parent)
                        insert_resource(&iomem_resource, &cfg->res);

        return 0;
}

/*
 * Perform MMCONFIG resource insertion after PCI initialization to allow for
 * misprogrammed MCFG tables that state larger sizes but actually conflict
 * with other system resources.
 */
late_initcall(pci_mmcfg_late_insert_resources);

/*
 * Add MMCFG information for a host bridge (e.g. from its ACPI _CBA method).
 * Returns -EEXIST if an existing region already covers the starting bus.
 */
int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
                        phys_addr_t addr)
{
        int rc;
        struct resource *tmp = NULL;
        struct pci_mmcfg_region *cfg;

        if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
                return -ENODEV;

        if (start > end)
                return -EINVAL;

        mutex_lock(&pci_mmcfg_lock);
        cfg = pci_mmconfig_lookup(seg, start);
        if (cfg) {
                if (cfg->end_bus < end)
                        dev_info(dev, FW_INFO
                                 "MMCONFIG for domain %04x [bus %02x-%02x] "
                                 "only partially covers this bridge\n",
                                 cfg->segment, cfg->start_bus, cfg->end_bus);
                mutex_unlock(&pci_mmcfg_lock);
                return -EEXIST;
        }

        if (!addr) {
                mutex_unlock(&pci_mmcfg_lock);
                return -EINVAL;
        }

        rc = -EBUSY;
        cfg = pci_mmconfig_alloc(seg, start, end, addr);
        if (cfg == NULL) {
                dev_warn(dev, "failed to add MMCONFIG (out of memory)\n");
                rc = -ENOMEM;
        } else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
                dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
                         &cfg->res);
        } else {
                /*
                 * Insert the resource now only if we are past the boot
                 * stage; otherwise pci_mmcfg_late_insert_resources() will
                 * do it.
                 */
                if (pci_mmcfg_running_state)
                        tmp = insert_resource_conflict(&iomem_resource,
                                                       &cfg->res);

                if (tmp) {
                        dev_warn(dev,
                                 "MMCONFIG %pR conflicts with %s %pR\n",
                                 &cfg->res, tmp->name, tmp);
                } else if (pci_mmcfg_arch_map(cfg)) {
                        dev_warn(dev, "failed to map MMCONFIG %pR\n",
                                 &cfg->res);
                } else {
                        list_add_sorted(cfg);
                        dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
                                 &cfg->res, (unsigned long)addr);
                        cfg = NULL;
                        rc = 0;
                }
        }

        if (cfg) {
                if (cfg->res.parent)
                        release_resource(&cfg->res);
                kfree(cfg);
        }

        mutex_unlock(&pci_mmcfg_lock);

        return rc;
}

/* Delete MMCFG information for host bridges */
int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
{
        struct pci_mmcfg_region *cfg;

        mutex_lock(&pci_mmcfg_lock);
        list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
                if (cfg->segment == seg && cfg->start_bus == start &&
                    cfg->end_bus == end) {
                        list_del_rcu(&cfg->list);
                        synchronize_rcu();
                        pci_mmcfg_arch_unmap(cfg);
                        if (cfg->res.parent)
                                release_resource(&cfg->res);
                        mutex_unlock(&pci_mmcfg_lock);
                        kfree(cfg);
                        return 0;
                }
        mutex_unlock(&pci_mmcfg_lock);

        return -ENOENT;
}