linux/drivers/pci/pci-acpi.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI support in ACPI
   4 *
   5 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
   6 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
   7 * Copyright (C) 2004 Intel Corp.
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/init.h>
  12#include <linux/irqdomain.h>
  13#include <linux/pci.h>
  14#include <linux/msi.h>
  15#include <linux/pci_hotplug.h>
  16#include <linux/module.h>
  17#include <linux/pci-aspm.h>
  18#include <linux/pci-acpi.h>
  19#include <linux/pm_runtime.h>
  20#include <linux/pm_qos.h>
  21#include "pci.h"
  22
  23/*
  24 * The GUID is defined in the PCI Firmware Specification available here:
  25 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
  26 */
  27const guid_t pci_acpi_dsm_guid =
  28        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
  29                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
  30
  31#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
  32static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
  33{
  34        struct device *dev = &adev->dev;
  35        struct resource_entry *entry;
  36        struct list_head list;
  37        unsigned long flags;
  38        int ret;
  39
  40        INIT_LIST_HEAD(&list);
  41        flags = IORESOURCE_MEM;
  42        ret = acpi_dev_get_resources(adev, &list,
  43                                     acpi_dev_filter_resource_type_cb,
  44                                     (void *) flags);
  45        if (ret < 0) {
  46                dev_err(dev, "failed to parse _CRS method, error code %d\n",
  47                        ret);
  48                return ret;
  49        }
  50
  51        if (ret == 0) {
  52                dev_err(dev, "no IO and memory resources present in _CRS\n");
  53                return -EINVAL;
  54        }
  55
  56        entry = list_first_entry(&list, struct resource_entry, node);
  57        *res = *entry->res;
  58        acpi_dev_free_resource_list(&list);
  59        return 0;
  60}
  61
  62static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
  63                                 void **retval)
  64{
  65        u16 *segment = context;
  66        unsigned long long uid;
  67        acpi_status status;
  68
  69        status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
  70        if (ACPI_FAILURE(status) || uid != *segment)
  71                return AE_CTRL_DEPTH;
  72
  73        *(acpi_handle *)retval = handle;
  74        return AE_CTRL_TERMINATE;
  75}
  76
  77int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
  78                          struct resource *res)
  79{
  80        struct acpi_device *adev;
  81        acpi_status status;
  82        acpi_handle handle;
  83        int ret;
  84
  85        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
  86        if (ACPI_FAILURE(status)) {
  87                dev_err(dev, "can't find _HID %s device to locate resources\n",
  88                        hid);
  89                return -ENODEV;
  90        }
  91
  92        ret = acpi_bus_get_device(handle, &adev);
  93        if (ret)
  94                return ret;
  95
  96        ret = acpi_get_rc_addr(adev, res);
  97        if (ret) {
  98                dev_err(dev, "can't get resource from %s\n",
  99                        dev_name(&adev->dev));
 100                return ret;
 101        }
 102
 103        return 0;
 104}
 105#endif
 106
 107phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
 108{
 109        acpi_status status = AE_NOT_EXIST;
 110        unsigned long long mcfg_addr;
 111
 112        if (handle)
 113                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
 114                                               NULL, &mcfg_addr);
 115        if (ACPI_FAILURE(status))
 116                return 0;
 117
 118        return (phys_addr_t)mcfg_addr;
 119}
 120
 121static acpi_status decode_type0_hpx_record(union acpi_object *record,
 122                                           struct hotplug_params *hpx)
 123{
 124        int i;
 125        union acpi_object *fields = record->package.elements;
 126        u32 revision = fields[1].integer.value;
 127
 128        switch (revision) {
 129        case 1:
 130                if (record->package.count != 6)
 131                        return AE_ERROR;
 132                for (i = 2; i < 6; i++)
 133                        if (fields[i].type != ACPI_TYPE_INTEGER)
 134                                return AE_ERROR;
 135                hpx->t0 = &hpx->type0_data;
 136                hpx->t0->revision        = revision;
 137                hpx->t0->cache_line_size = fields[2].integer.value;
 138                hpx->t0->latency_timer   = fields[3].integer.value;
 139                hpx->t0->enable_serr     = fields[4].integer.value;
 140                hpx->t0->enable_perr     = fields[5].integer.value;
 141                break;
 142        default:
 143                printk(KERN_WARNING
 144                       "%s: Type 0 Revision %d record not supported\n",
 145                       __func__, revision);
 146                return AE_ERROR;
 147        }
 148        return AE_OK;
 149}
 150
 151static acpi_status decode_type1_hpx_record(union acpi_object *record,
 152                                           struct hotplug_params *hpx)
 153{
 154        int i;
 155        union acpi_object *fields = record->package.elements;
 156        u32 revision = fields[1].integer.value;
 157
 158        switch (revision) {
 159        case 1:
 160                if (record->package.count != 5)
 161                        return AE_ERROR;
 162                for (i = 2; i < 5; i++)
 163                        if (fields[i].type != ACPI_TYPE_INTEGER)
 164                                return AE_ERROR;
 165                hpx->t1 = &hpx->type1_data;
 166                hpx->t1->revision      = revision;
 167                hpx->t1->max_mem_read  = fields[2].integer.value;
 168                hpx->t1->avg_max_split = fields[3].integer.value;
 169                hpx->t1->tot_max_split = fields[4].integer.value;
 170                break;
 171        default:
 172                printk(KERN_WARNING
 173                       "%s: Type 1 Revision %d record not supported\n",
 174                       __func__, revision);
 175                return AE_ERROR;
 176        }
 177        return AE_OK;
 178}
 179
 180static acpi_status decode_type2_hpx_record(union acpi_object *record,
 181                                           struct hotplug_params *hpx)
 182{
 183        int i;
 184        union acpi_object *fields = record->package.elements;
 185        u32 revision = fields[1].integer.value;
 186
 187        switch (revision) {
 188        case 1:
 189                if (record->package.count != 18)
 190                        return AE_ERROR;
 191                for (i = 2; i < 18; i++)
 192                        if (fields[i].type != ACPI_TYPE_INTEGER)
 193                                return AE_ERROR;
 194                hpx->t2 = &hpx->type2_data;
 195                hpx->t2->revision      = revision;
 196                hpx->t2->unc_err_mask_and      = fields[2].integer.value;
 197                hpx->t2->unc_err_mask_or       = fields[3].integer.value;
 198                hpx->t2->unc_err_sever_and     = fields[4].integer.value;
 199                hpx->t2->unc_err_sever_or      = fields[5].integer.value;
 200                hpx->t2->cor_err_mask_and      = fields[6].integer.value;
 201                hpx->t2->cor_err_mask_or       = fields[7].integer.value;
 202                hpx->t2->adv_err_cap_and       = fields[8].integer.value;
 203                hpx->t2->adv_err_cap_or        = fields[9].integer.value;
 204                hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
 205                hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
 206                hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
 207                hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
 208                hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
 209                hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
 210                hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
 211                hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
 212                break;
 213        default:
 214                printk(KERN_WARNING
 215                       "%s: Type 2 Revision %d record not supported\n",
 216                       __func__, revision);
 217                return AE_ERROR;
 218        }
 219        return AE_OK;
 220}
 221
/*
 * acpi_run_hpx - evaluate _HPX and decode its records into @hpx
 * @handle: ACPI handle on which to evaluate _HPX
 * @hpx: output; zeroed first, then the t0/t1/t2 pointers are set by the
 *       decode helpers above for each record type present
 *
 * _HPX returns a package of packages; each inner record starts with two
 * integers: the setting type and the revision.  Types 0-2 are handled by
 * decode_type{0,1,2}_hpx_record(); any other type aborts with AE_ERROR.
 */
static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        u32 type;
        int i;

        /* Clear the return buffer with zeros */
        memset(hpx, 0, sizeof(struct hotplug_params));

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                /* Every record begins with (type, revision) integers. */
                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        status = decode_type0_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 1:
                        status = decode_type1_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 2:
                        status = decode_type2_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        printk(KERN_ERR "%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
 exit:
        /* Buffer was ACPI_ALLOCATE_BUFFER-allocated; we own and free it. */
        kfree(buffer.pointer);
        return status;
}
 285
 286static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
 287{
 288        acpi_status status;
 289        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 290        union acpi_object *package, *fields;
 291        int i;
 292
 293        memset(hpp, 0, sizeof(struct hotplug_params));
 294
 295        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
 296        if (ACPI_FAILURE(status))
 297                return status;
 298
 299        package = (union acpi_object *) buffer.pointer;
 300        if (package->type != ACPI_TYPE_PACKAGE ||
 301            package->package.count != 4) {
 302                status = AE_ERROR;
 303                goto exit;
 304        }
 305
 306        fields = package->package.elements;
 307        for (i = 0; i < 4; i++) {
 308                if (fields[i].type != ACPI_TYPE_INTEGER) {
 309                        status = AE_ERROR;
 310                        goto exit;
 311                }
 312        }
 313
 314        hpp->t0 = &hpp->type0_data;
 315        hpp->t0->revision        = 1;
 316        hpp->t0->cache_line_size = fields[0].integer.value;
 317        hpp->t0->latency_timer   = fields[1].integer.value;
 318        hpp->t0->enable_serr     = fields[2].integer.value;
 319        hpp->t0->enable_perr     = fields[3].integer.value;
 320
 321exit:
 322        kfree(buffer.pointer);
 323        return status;
 324}
 325
/* pci_get_hp_params - get hotplug parameters (_HPX/_HPP) for a PCI device
 *
 * @dev - the pci_dev for which we want parameters
 * @hpp - allocated by the caller
 *
 * Finds the nearest bridge above @dev with an ACPI handle and evaluates
 * _HPX (preferred) or _HPP there, walking toward the root bridge until
 * one succeeds.  Returns 0 on success, -ENODEV otherwise.
 */
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        /* Nearest enclosing bridge that has an ACPI companion. */
        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered. If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                /* _HPX takes precedence over the older _HPP. */
                status = acpi_run_hpx(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_get_hp_params);
 370
 371/**
 372 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 373 * @bridge: Hotplug port to check
 374 *
 375 * Returns true if the given @bridge is handled by the native PCIe hotplug
 376 * driver.
 377 */
 378bool pciehp_is_native(struct pci_dev *bridge)
 379{
 380        const struct pci_host_bridge *host;
 381        u32 slot_cap;
 382
 383        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
 384                return false;
 385
 386        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
 387        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
 388                return false;
 389
 390        if (pcie_ports_native)
 391                return true;
 392
 393        host = pci_find_host_bridge(bridge->bus);
 394        return host->native_pcie_hotplug;
 395}
 396
/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        /* shpc_managed is maintained outside this file; just report it. */
        return bridge->shpc_managed;
}
 408
 409/**
 410 * pci_acpi_wake_bus - Root bus wakeup notification fork function.
 411 * @context: Device wakeup context.
 412 */
 413static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
 414{
 415        struct acpi_device *adev;
 416        struct acpi_pci_root *root;
 417
 418        adev = container_of(context, struct acpi_device, wakeup.context);
 419        root = acpi_driver_data(adev);
 420        pci_pme_wakeup_bus(root->bus);
 421}
 422
 423/**
 424 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 425 * @context: Device wakeup context.
 426 */
 427static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
 428{
 429        struct pci_dev *pci_dev;
 430
 431        pci_dev = to_pci_dev(context->dev);
 432
 433        if (pci_dev->pme_poll)
 434                pci_dev->pme_poll = false;
 435
 436        if (pci_dev->current_state == PCI_D3cold) {
 437                pci_wakeup_event(pci_dev);
 438                pm_request_resume(&pci_dev->dev);
 439                return;
 440        }
 441
 442        /* Clear PME Status if set. */
 443        if (pci_dev->pme_support)
 444                pci_check_pme_status(pci_dev);
 445
 446        pci_wakeup_event(pci_dev);
 447        pm_request_resume(&pci_dev->dev);
 448
 449        pci_pme_wakeup_bus(pci_dev->subordinate);
 450}
 451
/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 *
 * Wakeup signals for @dev are routed to pci_acpi_wake_bus(), which calls
 * pci_pme_wakeup_bus() on the bridge's root bus.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}
 460
/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 *
 * Wakeup signals for @dev are routed to pci_acpi_wake_dev() with the
 * wakeup context carrying @pci_dev's struct device.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}
 471
 472/*
 473 * _SxD returns the D-state with the highest power
 474 * (lowest D-state number) supported in the S-state "x".
 475 *
   476 * If the device does not have a _PRW
 477 * (Power Resources for Wake) supporting system wakeup from "x"
 478 * then the OS is free to choose a lower power (higher number
 479 * D-state) than the return value from _SxD.
 480 *
 481 * But if _PRW is enabled at S-state "x", the OS
 482 * must not choose a power lower than _SxD --
 483 * unless the device has an _SxW method specifying
 484 * the lowest power (highest D-state number) the device
 485 * may enter while still able to wake the system.
 486 *
 487 * ie. depending on global OS policy:
 488 *
 489 * if (_PRW at S-state x)
 490 *      choose from highest power _SxD to lowest power _SxW
 491 * else // no _PRW at S-state x
 492 *      choose highest power _SxD or any lower power
 493 */
 494
 495static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
 496{
 497        int acpi_state, d_max;
 498
 499        if (pdev->no_d3cold)
 500                d_max = ACPI_STATE_D3_HOT;
 501        else
 502                d_max = ACPI_STATE_D3_COLD;
 503        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
 504        if (acpi_state < 0)
 505                return PCI_POWER_ERROR;
 506
 507        switch (acpi_state) {
 508        case ACPI_STATE_D0:
 509                return PCI_D0;
 510        case ACPI_STATE_D1:
 511                return PCI_D1;
 512        case ACPI_STATE_D2:
 513                return PCI_D2;
 514        case ACPI_STATE_D3_HOT:
 515                return PCI_D3hot;
 516        case ACPI_STATE_D3_COLD:
 517                return PCI_D3cold;
 518        }
 519        return PCI_POWER_ERROR;
 520}
 521
 522static bool acpi_pci_power_manageable(struct pci_dev *dev)
 523{
 524        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 525        return adev ? acpi_device_power_manageable(adev) : false;
 526}
 527
/*
 * acpi_pci_set_power_state - put @dev into @state via its ACPI companion.
 * Returns 0 on success, -ENODEV without a usable companion, -EBUSY when
 * PM QoS forbids powering off, or -EINVAL for an unmapped state.
 */
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        /* PCI D-state -> corresponding ACPI D-state. */
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
                [PCI_D2] = ACPI_STATE_D2,
                [PCI_D3hot] = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error = -EINVAL;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D3cold:
                /* D3cold may be vetoed by a PM QoS "no power off" request. */
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                                PM_QOS_FLAGS_ALL) {
                        error = -EBUSY;
                        break;
                }
                /* Fall through - otherwise D3cold is set like other states. */
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                error = acpi_device_set_power(adev, state_conv[state]);
        }

        if (!error)
                pci_dbg(dev, "power state changed by ACPI to %s\n",
                         acpi_power_state_string(state_conv[state]));

        return error;
}
 564
 565static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
 566{
 567        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 568        static const pci_power_t state_conv[] = {
 569                [ACPI_STATE_D0]      = PCI_D0,
 570                [ACPI_STATE_D1]      = PCI_D1,
 571                [ACPI_STATE_D2]      = PCI_D2,
 572                [ACPI_STATE_D3_HOT]  = PCI_D3hot,
 573                [ACPI_STATE_D3_COLD] = PCI_D3cold,
 574        };
 575        int state;
 576
 577        if (!adev || !acpi_device_power_manageable(adev))
 578                return PCI_UNKNOWN;
 579
 580        if (acpi_device_get_power(adev, &state) || state == ACPI_STATE_UNKNOWN)
 581                return PCI_UNKNOWN;
 582
 583        return state_conv[state];
 584}
 585
 586static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
 587{
 588        while (bus->parent) {
 589                if (acpi_pm_device_can_wakeup(&bus->self->dev))
 590                        return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
 591
 592                bus = bus->parent;
 593        }
 594
 595        /* We have reached the root bus. */
 596        if (bus->bridge) {
 597                if (acpi_pm_device_can_wakeup(bus->bridge))
 598                        return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
 599        }
 600        return 0;
 601}
 602
 603static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
 604{
 605        if (acpi_pm_device_can_wakeup(&dev->dev))
 606                return acpi_pm_set_device_wakeup(&dev->dev, enable);
 607
 608        return acpi_pci_propagate_wakeup(dev->bus, enable);
 609}
 610
/* need_resume() hook of the pci_platform_pm_ops registered below. */
static bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        /*
         * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
         * system-wide suspend/resume confuses the platform firmware, so avoid
         * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
         * devices are expected to be in D3 before invoking the S3 entry path
         * from the firmware, so they should not be affected by this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        /*
         * Resume when the kernel's wakeup setting disagrees with the ACPI
         * wakeup prepare count, so the two can be brought back in sync.
         */
        if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        /* Target state S0 — presumably suspend-to-idle; nothing more to do. */
        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        /* Otherwise follow the _DSW (Device Sleep Wake) presence flag. */
        return !!adev->power.flags.dsw_present;
}
 636
/*
 * ACPI implementation of the PCI platform PM hooks, registered with the
 * PCI core via pci_set_platform_pm() in acpi_pci_init().
 */
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .get_state = acpi_pci_get_power_state,
        .choose_state = acpi_pci_choose_state,
        .set_wakeup = acpi_pci_wakeup,
        .need_resume = acpi_pci_need_resume,
};
 645
 646void acpi_pci_add_bus(struct pci_bus *bus)
 647{
 648        union acpi_object *obj;
 649        struct pci_host_bridge *bridge;
 650
 651        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 652                return;
 653
 654        acpi_pci_slot_enumerate(bus);
 655        acpiphp_enumerate_slots(bus);
 656
 657        /*
 658         * For a host bridge, check its _DSM for function 8 and if
 659         * that is available, mark it in pci_host_bridge.
 660         */
 661        if (!pci_is_root_bus(bus))
 662                return;
 663
 664        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
 665                                RESET_DELAY_DSM, NULL);
 666        if (!obj)
 667                return;
 668
 669        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
 670                bridge = pci_find_host_bridge(bus);
 671                bridge->ignore_reset_delay = 1;
 672        }
 673        ACPI_FREE(obj);
 674}
 675
 676void acpi_pci_remove_bus(struct pci_bus *bus)
 677{
 678        if (acpi_pci_disabled || !bus->bridge)
 679                return;
 680
 681        acpiphp_remove_slots(bus);
 682        acpi_pci_slot_remove(bus);
 683}
 684
 685/* ACPI bus type */
 686static struct acpi_device *acpi_pci_find_companion(struct device *dev)
 687{
 688        struct pci_dev *pci_dev = to_pci_dev(dev);
 689        bool check_children;
 690        u64 addr;
 691
 692        check_children = pci_is_bridge(pci_dev);
 693        /* Please ref to ACPI spec for the syntax of _ADR */
 694        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
 695        return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
 696                                      check_children);
 697}
 698
/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        /* Function 8 already granted for this hierarchy: skip the delay. */
        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
                /* Element 0: D3cold delay; /1000 assumes us -> ms — per ECN. */
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        /* Only shorten the delay, never lengthen it. */
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                /* Element 3: D3hot delay, same unit conversion. */
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3_WAIT)
                                pdev->d3_delay = value;
                }
        }
        ACPI_FREE(obj);
}
 749
 750static void pci_acpi_setup(struct device *dev)
 751{
 752        struct pci_dev *pci_dev = to_pci_dev(dev);
 753        struct acpi_device *adev = ACPI_COMPANION(dev);
 754
 755        if (!adev)
 756                return;
 757
 758        pci_acpi_optimize_delay(pci_dev, adev->handle);
 759
 760        pci_acpi_add_pm_notifier(adev, pci_dev);
 761        if (!adev->wakeup.flags.valid)
 762                return;
 763
 764        device_set_wakeup_capable(dev, true);
 765        acpi_pci_wakeup(pci_dev, false);
 766}
 767
 768static void pci_acpi_cleanup(struct device *dev)
 769{
 770        struct acpi_device *adev = ACPI_COMPANION(dev);
 771
 772        if (!adev)
 773                return;
 774
 775        pci_acpi_remove_pm_notifier(adev);
 776        if (adev->wakeup.flags.valid)
 777                device_set_wakeup_capable(dev, false);
 778}
 779
/* Tell the ACPI glue layer which devices this bus type covers: PCI ones. */
static bool pci_acpi_bus_match(struct device *dev)
{
        return dev_is_pci(dev);
}
 784
/* Glue registered with the ACPI core so PCI devices get ACPI companions. */
static struct acpi_bus_type acpi_pci_bus = {
        .name = "PCI",
        .match = pci_acpi_bus_match,
        .find_companion = acpi_pci_find_companion,
        .setup = pci_acpi_setup,
        .cleanup = pci_acpi_cleanup,
};
 792
 793
/* Provider callback mapping a device to the fwnode of its MSI domain. */
static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn:       Callback matching a device to a fwnode that identifies a PCI
 *            MSI domain.
 *
 * This should be called by irqchip driver, which is the parent of
 * the MSI domain to provide callback interface to query fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}
 809
 810/**
 811 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 812 * @bus:      The PCI host bridge bus.
 813 *
 814 * This function uses the callback function registered by
 815 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 816 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 817 * This returns NULL on error or when the domain is not found.
 818 */
 819struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
 820{
 821        struct fwnode_handle *fwnode;
 822
 823        if (!pci_msi_get_fwnode_cb)
 824                return NULL;
 825
 826        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
 827        if (!fwnode)
 828                return NULL;
 829
 830        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
 831}
 832
/*
 * Wire ACPI into the PCI core: honor FADT opt-outs for MSI and ASPM,
 * register the PCI ACPI bus type, and install the ACPI platform PM ops.
 */
static int __init acpi_pci_init(void)
{
        int ret;

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

        ret = register_acpi_bus_type(&acpi_pci_bus);
        if (ret)
                return 0;       /* no bus type: skip the rest, still succeed */

        pci_set_platform_pm(&acpi_pci_platform_pm);
        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);
 858