linux/drivers/pci/pci-acpi.c
/*
 * File:        pci-acpi.c
 * Purpose:     Provide PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The UUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const u8 pci_acpi_dsm_uuid[] = {
        0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
        0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
};

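/**
 * acpi_pci_root_get_mcfg_addr - Fetch the MMCONFIG base address from _CBA.
 * @handle: ACPI handle of the host bridge device (may be NULL).
 *
 * Returns 0 if the handle is NULL or if _CBA is absent or fails.
 */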
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t0 = &hpx->type0_data;
                hpx->t0->revision        = revision;
                hpx->t0->cache_line_size = fields[2].integer.value;
                hpx->t0->latency_timer   = fields[3].integer.value;
                hpx->t0->enable_serr     = fields[4].integer.value;
                hpx->t0->enable_perr     = fields[5].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 0 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t1 = &hpx->type1_data;
                hpx->t1->revision      = revision;
                hpx->t1->max_mem_read  = fields[2].integer.value;
                hpx->t1->avg_max_split = fields[3].integer.value;
                hpx->t1->tot_max_split = fields[4].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 1 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hotplug_params *hpx)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx->t2 = &hpx->type2_data;
                hpx->t2->revision      = revision;
                hpx->t2->unc_err_mask_and      = fields[2].integer.value;
                hpx->t2->unc_err_mask_or       = fields[3].integer.value;
                hpx->t2->unc_err_sever_and     = fields[4].integer.value;
                hpx->t2->unc_err_sever_or      = fields[5].integer.value;
                hpx->t2->cor_err_mask_and      = fields[6].integer.value;
                hpx->t2->cor_err_mask_or       = fields[7].integer.value;
                hpx->t2->adv_err_cap_and       = fields[8].integer.value;
                hpx->t2->adv_err_cap_or        = fields[9].integer.value;
                hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
                hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
                hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
                hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
                hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
                hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
                hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
                break;
        default:
                printk(KERN_WARNING
                       "%s: Type 2 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

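/*
 * _HPX returns a Package of setting-record Packages, each starting with a
 * type and a revision.  A purely illustrative Type 0 record, as decoded by
 * decode_type0_hpx_record() above (the values are made up), would be:
 *
 *        Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 }
 *        --> type 0, rev 1, cache line size, latency timer, SERR, PERR
 */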
static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        u32 type;
        int i;

        /* Clear the return buffer with zeros */
        memset(hpx, 0, sizeof(struct hotplug_params));

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        status = decode_type0_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 1:
                        status = decode_type1_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                case 2:
                        status = decode_type2_hpx_record(record, hpx);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        printk(KERN_ERR "%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
 exit:
        kfree(buffer.pointer);
        return status;
}

static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        int i;

        memset(hpp, 0, sizeof(struct hotplug_params));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpp->t0 = &hpp->type0_data;
        hpp->t0->revision        = 1;
        hpp->t0->cache_line_size = fields[0].integer.value;
        hpp->t0->latency_timer   = fields[1].integer.value;
        hpp->t0->enable_serr     = fields[2].integer.value;
        hpp->t0->enable_perr     = fields[3].integer.value;

exit:
        kfree(buffer.pointer);
        return status;
}

/**
 * pci_get_hp_params - look up hotplug parameters (_HPX/_HPP) for a device
 * @dev: the pci_dev for which we want parameters
 * @hpp: allocated by the caller
 */
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered. If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(handle, hpp);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_get_hp_params);
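/*
 * Illustrative usage sketch (not taken from an in-tree caller): a hotplug
 * driver that has just enumerated "dev" could apply the Type 0 settings
 * roughly like this:
 *
 *        struct hotplug_params hpp;
 *
 *        if (!pci_get_hp_params(dev, &hpp) && hpp.t0)
 *                pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
 *                                      hpp.t0->cache_line_size);
 */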

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @work: Work item to handle.
 */
static void pci_acpi_wake_bus(struct work_struct *work)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(work, struct acpi_device, wakeup.context.work);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @work: Work item to handle.
 */
static void pci_acpi_wake_dev(struct work_struct *work)
{
        struct acpi_device_wakeup_context *context;
        struct pci_dev *pci_dev;

        context = container_of(work, struct acpi_device_wakeup_context, work);
        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_runtime_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_runtime_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *      choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *      choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
                [PCI_D2] = ACPI_STATE_D2,
                [PCI_D3hot] = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error = -EINVAL;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D3cold:
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                                PM_QOS_FLAGS_ALL) {
                        error = -EBUSY;
                        break;
                }
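                /* Fall through - D3cold was not vetoed by PM QoS. */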
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                error = acpi_device_set_power(adev, state_conv[state]);
        }

        if (!error)
                dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
                         acpi_power_state_string(state_conv[state]));

        return error;
}

static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        return adev ? acpi_device_can_wakeup(adev) : false;
}

static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
                        return;
                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge)
                acpi_pm_device_sleep_wake(bus->bridge, enable);
}

static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
        if (acpi_pci_can_wakeup(dev))
                return acpi_pm_device_sleep_wake(&dev->dev, enable);

        acpi_pci_propagate_wakeup_enable(dev->bus, enable);
        return 0;
}

static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                struct pci_dev *bridge = bus->self;

                if (bridge->pme_interrupt)
                        return;
                if (!acpi_pm_device_run_wake(&bridge->dev, enable))
                        return;
                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge)
                acpi_pm_device_run_wake(bus->bridge, enable);
}

static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
{
        /*
         * Per PCI Express Base Specification Revision 2.0 section
         * 5.3.3.2 Link Wakeup, platform support is needed to power up
         * the main link when waking from D3cold, even if the device
         * supports PME from D3cold.
         */
        if (dev->pme_interrupt && !dev->runtime_d3cold)
                return 0;

        if (!acpi_pm_device_run_wake(&dev->dev, enable))
                return 0;

        acpi_pci_propagate_run_wake(dev->bus, enable);
        return 0;
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

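/* Handed to the PCI core via pci_set_platform_pm() in acpi_pci_init(). */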
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .choose_state = acpi_pci_choose_state,
        .sleep_wake = acpi_pci_sleep_wake,
        .run_wake = acpi_pci_run_wake,
        .need_resume = acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
                                RESET_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        bool check_children;
        u64 addr;

        check_children = pci_is_bridge(pci_dev);
        /* Refer to the ACPI spec for the syntax of _ADR. */
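        /* e.g. (illustrative) device 0x1c, function 3 -> _ADR 0x001c0003 */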
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
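                /* Convert the reported durations to milliseconds. */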
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3_WAIT)
                                pdev->d3_delay = value;
                }
        }
        ACPI_FREE(obj);
}

static void pci_acpi_setup(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);

        if (!adev)
                return;

        pci_acpi_optimize_delay(pci_dev, adev->handle);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        acpi_pci_sleep_wake(pci_dev, false);
        if (adev->wakeup.flags.run_wake)
                device_set_run_wake(dev, true);
}

static void pci_acpi_cleanup(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);

        if (!adev)
                return;

        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                device_set_wakeup_capable(dev, false);
                device_set_run_wake(dev, false);
        }
}

static bool pci_acpi_bus_match(struct device *dev)
{
        return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
        .name = "PCI",
        .match = pci_acpi_bus_match,
        .find_companion = acpi_pci_find_companion,
        .setup = pci_acpi_setup,
        .cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn:       Callback matching a device to a fwnode that identifies a PCI
 *            MSI domain.
 *
 * This should be called by the irqchip driver that parents the MSI domain,
 * to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}
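/*
 * Illustrative registration sketch (the callback name is hypothetical): an
 * irqchip driver that owns the PCI MSI domain would register its lookup
 * routine once during initialization, e.g.
 *
 *        pci_msi_register_fwnode_provider(my_irqchip_pci_msi_fwnode);
 *
 * where the callback maps a device to the fwnode identifying its MSI domain.
 */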

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus:      The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        int ret;

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

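        /* Without the ACPI bus type, skip the ACPI PM and hotplug setup. */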
        ret = register_acpi_bus_type(&acpi_pci_bus);
        if (ret)
                return 0;

        pci_set_platform_pm(&acpi_pci_platform_pm);
        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);