linux/drivers/pci/pci-acpi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	ret = acpi_bus_get_device(handle, &adev);
	if (ret)
		return ret;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif
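
/*
 * Usage sketch (hypothetical caller): a host controller quirk can look
 * up its root-complex address window by _HID and segment:
 *
 *	struct resource res;
 *
 *	if (acpi_get_rc_resources(dev, "VNDR0001", seg, &res))
 *		return -ENODEV;
 *
 * "VNDR0001" and "seg" are illustrative, not taken from this file.
 */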

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}
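
/*
 * Sketch: a host bridge carrying "Name (_CBA, 0xE0000000)" (a
 * hypothetical address) would make acpi_pci_root_get_mcfg_addr()
 * return 0xE0000000 as the MCFG base for that bridge.
 */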

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision        = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer   = fields[3].integer.value;
		hpx0->enable_serr     = fields[4].integer.value;
		hpx0->enable_perr     = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}
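
/*
 * The six integers of a Type 0 record are, in order: type (0x00),
 * revision, cache line size, latency timer, enable SERR, enable PERR;
 * only indexes 2-5 are type-checked above.
 */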

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision      = revision;
		hpx1->max_mem_read  = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

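	/*
	 * pcie_capability_clear_and_set_word(dev, reg, clear, set)
	 * computes new = (old & ~clear) | set, so passing ~and_mask as
	 * "clear" below yields the _HPX semantics:
	 * new = (old & and_mask) | or_mask.
	 */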
	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision              = revision;
		hpx2->unc_err_mask_and      = fields[2].integer.value;
		hpx2->unc_err_mask_or       = fields[3].integer.value;
		hpx2->unc_err_sever_and     = fields[4].integer.value;
		hpx2->unc_err_sever_or      = fields[5].integer.value;
		hpx2->cor_err_mask_and      = fields[6].integer.value;
		hpx2->cor_err_mask_or       = fields[7].integer.value;
		hpx2->adv_err_cap_and       = fields[8].integer.value;
		hpx2->adv_err_cap_or        = fields[9].integer.value;
		hpx2->pci_exp_devctl_and    = fields[10].integer.value;
		hpx2->pci_exp_devctl_or     = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

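/*
 * In a Type 3 record's capability-version field, the low nibble holds
 * the version to compare against; bit 4 appears to widen the match to
 * any capability version up to and including that value, instead of
 * requiring an exact match (a reading of the code below, not a quote
 * from the spec).
 */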
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type            = reg_fields[0].integer.value;
	hpx3_reg->function_type          = reg_fields[1].integer.value;
	hpx3_reg->config_space_location  = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
	hpx3_reg->match_offset           = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
	hpx3_reg->match_value            = reg_fields[10].integer.value;
	hpx3_reg->reg_offset             = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
}

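/*
 * A Type 3 record is a three-integer header (type, revision,
 * descriptor count) followed by desc_count register descriptors of
 * 14 integers each, hence the expected_length = 3 + desc_count * 14
 * check below.
 */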
static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		pr_warn("%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
 exit:
	kfree(buffer.pointer);
	return status;
}
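
/*
 * Sketch of the overall _HPX shape: a package of record packages, each
 * record starting with its type and revision (hypothetical values):
 *
 *	Name (_HPX, Package () {
 *		Package () { 0x00, 0x01, 0x08, 0x40, 0x01, 0x00 },	// Type 0
 *		Package () { 0x02, 0x01, ... },				// Type 2
 *	})
 */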

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision        = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer   = fields[1].integer.value;
	hpx0.enable_serr     = fields[2].integer.value;
	hpx0.enable_perr     = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}
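
/*
 * _HPP is the older, flat form: four integers with no type/revision
 * header, e.g. (sketch):
 *
 *	Name (_HPP, Package () { 0x08, 0x40, 0x01, 0x00 })
 *
 * i.e. cache line size, latency timer, enable SERR, enable PERR, which
 * acpi_run_hpp() repackages as a Type 0 record.
 */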

/**
 * pci_acpi_program_hp_params - program hot-plug parameters for a device
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered. If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher D-state
 * number) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	const struct fwnode_handle *fwnode;
	struct acpi_device *adev;
	struct pci_dev *root;
	u8 val;

	if (!dev->is_hotplug_bridge)
		return false;

	/*
	 * Look for a special _DSD property for the root port and if it
	 * is set we know the hierarchy behind it supports D3 just fine.
	 */
	root = pcie_find_root_port(dev);
	if (!root)
		return false;

	adev = ACPI_COMPANION(&root->dev);
	if (root == dev) {
		/*
		 * It is possible that the ACPI companion is not yet bound
		 * for the root port so look it up manually here.
		 */
		if (!adev && !pci_dev_is_added(root))
			adev = acpi_pci_find_companion(&root->dev);
	}

	if (!adev)
		return false;

	fwnode = acpi_fwnode_handle(adev);
	if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
		return false;

	return val == 1;
}
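
/*
 * Sketch of the _DSD under a root port that would make the check above
 * succeed (the UUID is the standard device-properties UUID; the value
 * is hypothetical firmware-provided data):
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () {
 *			Package () { "HotPlugSupportInD3", 1 }
 *		}
 *	})
 */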

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		fallthrough;
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		pci_dbg(dev, "power state changed by ACPI to %s\n",
			acpi_power_state_string(state_conv[state]));

	return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0]      = PCI_D0,
		[ACPI_STATE_D1]      = PCI_D1,
		[ACPI_STATE_D2]      = PCI_D2,
		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

static void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
	}
	return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
	 * system-wide suspend/resume confuses the platform firmware, so avoid
	 * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
	 * devices are expected to be in D3 before invoking the S3 entry path
	 * from the firmware, so they should not be affected by this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.bridge_d3 = acpi_pci_bridge_d3,
	.is_manageable = acpi_pci_power_manageable,
	.set_state = acpi_pci_set_power_state,
	.get_state = acpi_pci_get_power_state,
	.refresh_state = acpi_pci_refresh_power_state,
	.choose_state = acpi_pci_choose_state,
	.set_wakeup = acpi_pci_wakeup,
	.need_resume = acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	bool check_children;
	u64 addr;

	check_children = pci_is_bridge(pci_dev);
	/* See the ACPI spec for the encoding of _ADR */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);
}
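
/*
 * Example: for a device at slot 0x1c, function 3, the companion is the
 * ACPI object whose _ADR evaluates to 0x001C0003 (device number in the
 * high word, function number in the low word).
 */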

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
	if (!obj)
		return;

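	/*
	 * The "Device Readiness Durations" package carries times in
	 * microseconds; dividing by 1000 converts them to the
	 * milliseconds that d3cold_delay and d3_delay expect.
	 */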
	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3_WAIT)
				pdev->d3_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system.  Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}

static void pci_acpi_setup(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);
}

static void pci_acpi_cleanup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!adev)
		return;

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static bool pci_acpi_bus_match(struct device *dev)
{
	return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
	.name = "PCI",
	.match = pci_acpi_bus_match,
	.find_companion = acpi_pci_find_companion,
	.setup = pci_acpi_setup,
	.cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn:       Callback matching a device to a fwnode that identifies a PCI
 *            MSI domain.
 *
 * This should be called by an irqchip driver, which is the parent of
 * the MSI domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus:      The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	int ret;

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disabling it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disabling it\n");
		pcie_no_aspm();
	}

	ret = register_acpi_bus_type(&acpi_pci_bus);
	if (ret)
		return 0;

	pci_set_platform_pm(&acpi_pci_platform_pm);
	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);