linux/drivers/pci/pci-acpi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
        GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
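
/*
 * Illustrative note: _DSM functions under this GUID are evaluated via
 * acpi_evaluate_dsm(), e.g.
 *
 *      obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3, func, NULL);
 *
 * as done by pci_acpi_optimize_delay() and acpi_pci_add_bus() below.
 */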

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
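/*
 * Fetch the first IORESOURCE_MEM entry from @adev's _CRS into @res.
 * Helper for acpi_get_rc_resources().
 */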
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
        struct device *dev = &adev->dev;
        struct resource_entry *entry;
        struct list_head list;
        unsigned long flags;
        int ret;

        INIT_LIST_HEAD(&list);
        flags = IORESOURCE_MEM;
        ret = acpi_dev_get_resources(adev, &list,
                                     acpi_dev_filter_resource_type_cb,
                                     (void *) flags);
        if (ret < 0) {
                dev_err(dev, "failed to parse _CRS method, error code %d\n",
                        ret);
                return ret;
        }

        if (ret == 0) {
                dev_err(dev, "no IO and memory resources present in _CRS\n");
                return -EINVAL;
        }

        entry = list_first_entry(&list, struct resource_entry, node);
        *res = *entry->res;
        acpi_dev_free_resource_list(&list);
        return 0;
}

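/*
 * acpi_get_devices() callback: terminate the namespace walk when a device
 * whose _UID matches the PCI segment number in @context is found, and
 * return its handle through @retval.
 */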
static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
                                 void **retval)
{
        u16 *segment = context;
        unsigned long long uid;
        acpi_status status;

        status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
        if (ACPI_FAILURE(status) || uid != *segment)
                return AE_CTRL_DEPTH;

        *(acpi_handle *)retval = handle;
        return AE_CTRL_TERMINATE;
}

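/*
 * Look up the ACPI device with _HID @hid whose _UID equals @segment and
 * copy its first memory resource into @res.  Returns 0 on success or a
 * negative errno.
 */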
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
                          struct resource *res)
{
        struct acpi_device *adev;
        acpi_status status;
        acpi_handle handle;
        int ret;

        status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "can't find _HID %s device to locate resources\n",
                        hid);
                return -ENODEV;
        }

        ret = acpi_bus_get_device(handle, &adev);
        if (ret)
                return ret;

        ret = acpi_get_rc_addr(adev, res);
        if (ret) {
                dev_err(dev, "can't get resource from %s\n",
                        dev_name(&adev->dev));
                return ret;
        }

        return 0;
}
#endif

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
        acpi_status status = AE_NOT_EXIST;
        unsigned long long mcfg_addr;

        if (handle)
                status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
                                               NULL, &mcfg_addr);
        if (ACPI_FAILURE(status))
                return 0;

        return (phys_addr_t)mcfg_addr;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
        u32 revision;           /* Not present in _HPP */
        u8  cache_line_size;    /* Not applicable to PCIe */
        u8  latency_timer;      /* Not applicable to PCIe */
        u8  enable_serr;
        u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
        .revision = 1,
        .cache_line_size = 8,
        .latency_timer = 0x40,
        .enable_serr = 0,
        .enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
        u16 pci_cmd, pci_bctl;

        if (!hpx)
                hpx = &pci_default_type0;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
                         hpx->revision);
                hpx = &pci_default_type0;
        }

        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
        pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
        if (hpx->enable_serr)
                pci_cmd |= PCI_COMMAND_SERR;
        if (hpx->enable_perr)
                pci_cmd |= PCI_COMMAND_PARITY;
        pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

        /* Program bridge control value */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
                pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
                                      hpx->latency_timer);
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
                if (hpx->enable_perr)
                        pci_bctl |= PCI_BRIDGE_CTL_PARITY;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
        }
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
                                           struct hpx_type0 *hpx0)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 6)
                        return AE_ERROR;
                for (i = 2; i < 6; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx0->revision        = revision;
                hpx0->cache_line_size = fields[2].integer.value;
                hpx0->latency_timer   = fields[3].integer.value;
                hpx0->enable_serr     = fields[4].integer.value;
                hpx0->enable_perr     = fields[5].integer.value;
                break;
        default:
                pr_warn("%s: Type 0 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
        u32 revision;
        u8  max_mem_read;
        u8  avg_max_split;
        u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
        int pos;

        if (!hpx)
                return;

        pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
        if (!pos)
                return;

        pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
                                           struct hpx_type1 *hpx1)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 5)
                        return AE_ERROR;
                for (i = 2; i < 5; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx1->revision      = revision;
                hpx1->max_mem_read  = fields[2].integer.value;
                hpx1->avg_max_split = fields[3].integer.value;
                hpx1->tot_max_split = fields[4].integer.value;
                break;
        default:
                pr_warn("%s: Type 1 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

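/*
 * Return true if the Root Port above @dev has the Read Completion
 * Boundary (RCB) bit set in its Link Control register, i.e. it uses a
 * 128-byte RCB.
 */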
static bool pcie_root_rcb_set(struct pci_dev *dev)
{
        struct pci_dev *rp = pcie_find_root_port(dev);
        u16 lnkctl;

        if (!rp)
                return false;

        pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
        if (lnkctl & PCI_EXP_LNKCTL_RCB)
                return true;

        return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
        u32 revision;
        u32 unc_err_mask_and;
        u32 unc_err_mask_or;
        u32 unc_err_sever_and;
        u32 unc_err_sever_or;
        u32 cor_err_mask_and;
        u32 cor_err_mask_or;
        u32 adv_err_cap_and;
        u32 adv_err_cap_or;
        u16 pci_exp_devctl_and;
        u16 pci_exp_devctl_or;
        u16 pci_exp_lnkctl_and;
        u16 pci_exp_lnkctl_or;
        u32 sec_unc_err_sever_and;
        u32 sec_unc_err_sever_or;
        u32 sec_unc_err_mask_and;
        u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
        int pos;
        u32 reg32;

        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        if (hpx->revision > 1) {
                pci_warn(dev, "PCIe settings rev %d not supported\n",
                         hpx->revision);
                return;
        }

        /*
         * Don't allow _HPX to change MPS or MRRS settings.  We manage
         * those to make sure they're consistent with the rest of the
         * platform.
         */
        hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
                                    PCI_EXP_DEVCTL_READRQ;
        hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
                                    PCI_EXP_DEVCTL_READRQ);

        /* Initialize Device Control Register */
        pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                        ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

        /* Initialize Link Control Register */
        if (pcie_cap_has_lnkctl(dev)) {

                /*
                 * If the Root Port supports Read Completion Boundary of
                 * 128, set RCB to 128.  Otherwise, clear it.
                 */
                hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
                hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
                if (pcie_root_rcb_set(dev))
                        hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

                pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                        ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
        }

        /* Find Advanced Error Reporting Enhanced Capability */
        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return;

        /* Initialize Uncorrectable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
        reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

        /* Initialize Uncorrectable Error Severity Register */
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
        reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

        /* Initialize Correctable Error Mask Register */
        pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
        reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
        pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

        /* Initialize Advanced Error Capabilities and Control Register */
        pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
        reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

        /* Don't enable ECRC generation or checking if unsupported */
        if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
                reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
        if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
                reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
        pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

        /*
         * FIXME: The following two registers are not supported yet.
         *
         *   o Secondary Uncorrectable Error Severity Register
         *   o Secondary Uncorrectable Error Mask Register
         */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
                                           struct hpx_type2 *hpx2)
{
        int i;
        union acpi_object *fields = record->package.elements;
        u32 revision = fields[1].integer.value;

        switch (revision) {
        case 1:
                if (record->package.count != 18)
                        return AE_ERROR;
                for (i = 2; i < 18; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;
                hpx2->revision      = revision;
                hpx2->unc_err_mask_and      = fields[2].integer.value;
                hpx2->unc_err_mask_or       = fields[3].integer.value;
                hpx2->unc_err_sever_and     = fields[4].integer.value;
                hpx2->unc_err_sever_or      = fields[5].integer.value;
                hpx2->cor_err_mask_and      = fields[6].integer.value;
                hpx2->cor_err_mask_or       = fields[7].integer.value;
                hpx2->adv_err_cap_and       = fields[8].integer.value;
                hpx2->adv_err_cap_or        = fields[9].integer.value;
                hpx2->pci_exp_devctl_and    = fields[10].integer.value;
                hpx2->pci_exp_devctl_or     = fields[11].integer.value;
                hpx2->pci_exp_lnkctl_and    = fields[12].integer.value;
                hpx2->pci_exp_lnkctl_or     = fields[13].integer.value;
                hpx2->sec_unc_err_sever_and = fields[14].integer.value;
                hpx2->sec_unc_err_sever_or  = fields[15].integer.value;
                hpx2->sec_unc_err_mask_and  = fields[16].integer.value;
                hpx2->sec_unc_err_mask_or   = fields[17].integer.value;
                break;
        default:
                pr_warn("%s: Type 2 Revision %d record not supported\n",
                       __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
        u16 device_type;
        u16 function_type;
        u16 config_space_location;
        u16 pci_exp_cap_id;
        u16 pci_exp_cap_ver;
        u16 pci_exp_vendor_id;
        u16 dvsec_id;
        u16 dvsec_rev;
        u16 match_offset;
        u32 match_mask_and;
        u32 match_value;
        u16 reg_offset;
        u32 reg_mask_and;
        u32 reg_mask_or;
};

enum hpx_type3_dev_type {
        HPX_TYPE_ENDPOINT       = BIT(0),
        HPX_TYPE_LEG_END        = BIT(1),
        HPX_TYPE_RC_END         = BIT(2),
        HPX_TYPE_RC_EC          = BIT(3),
        HPX_TYPE_ROOT_PORT      = BIT(4),
        HPX_TYPE_UPSTREAM       = BIT(5),
        HPX_TYPE_DOWNSTREAM     = BIT(6),
        HPX_TYPE_PCI_BRIDGE     = BIT(7),
        HPX_TYPE_PCIE_BRIDGE    = BIT(8),
};

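/* Map the PCIe port type of @dev to its _HPX type 3 device-type bit. */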
static u16 hpx3_device_type(struct pci_dev *dev)
{
        u16 pcie_type = pci_pcie_type(dev);
        static const int pcie_to_hpx3_type[] = {
                [PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
                [PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
                [PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
                [PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
                [PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
                [PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
                [PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
                [PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
                [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
        };

        if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
                return 0;

        return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
        HPX_FN_NORMAL           = BIT(0),
        HPX_FN_SRIOV_PHYS       = BIT(1),
        HPX_FN_SRIOV_VIRT       = BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
        if (dev->is_virtfn)
                return HPX_FN_SRIOV_VIRT;
        else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
                return HPX_FN_SRIOV_PHYS;
        else
                return HPX_FN_NORMAL;
}

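/*
 * _HPX type 3 capability version match: bits 3:0 of @hpx3_cap_id hold the
 * version to match; bit 4, when set, makes the record apply to any
 * capability version up to that value rather than requiring an exact
 * match.
 */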
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
        u8 cap_ver = hpx3_cap_id & 0xf;

        if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
                return true;
        else if (cap_ver == pcie_cap_id)
                return true;

        return false;
}

enum hpx_type3_cfg_loc {
        HPX_CFG_PCICFG          = 0,
        HPX_CFG_PCIE_CAP        = 1,
        HPX_CFG_PCIE_CAP_EXT    = 2,
        HPX_CFG_VEND_CAP        = 3,
        HPX_CFG_DVSEC           = 4,
        HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
                                       const struct hpx_type3 *reg)
{
        u32 match_reg, write_reg, header, orig_value;
        u16 pos;

        if (!(hpx3_device_type(dev) & reg->device_type))
                return;

        if (!(hpx3_function_type(dev) & reg->function_type))
                return;

        switch (reg->config_space_location) {
        case HPX_CFG_PCICFG:
                pos = 0;
                break;
        case HPX_CFG_PCIE_CAP:
                pos = pci_find_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;

                break;
        case HPX_CFG_PCIE_CAP_EXT:
                pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
                if (pos == 0)
                        return;

                pci_read_config_dword(dev, pos, &header);
                if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
                                          reg->pci_exp_cap_ver))
                        return;

                break;
        case HPX_CFG_VEND_CAP:
        case HPX_CFG_DVSEC:
        default:
                pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location\n");
                return;
        }

        pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

        if ((match_reg & reg->match_mask_and) != reg->match_value)
                return;

        pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
        orig_value = write_reg;
        write_reg &= reg->reg_mask_and;
        write_reg |= reg->reg_mask_or;

        if (orig_value == write_reg)
                return;

        pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

        pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x\n",
                pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
        if (!hpx)
                return;

        if (!pci_is_pcie(dev))
                return;

        program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
                                union acpi_object *reg_fields)
{
        hpx3_reg->device_type            = reg_fields[0].integer.value;
        hpx3_reg->function_type          = reg_fields[1].integer.value;
        hpx3_reg->config_space_location  = reg_fields[2].integer.value;
        hpx3_reg->pci_exp_cap_id         = reg_fields[3].integer.value;
        hpx3_reg->pci_exp_cap_ver        = reg_fields[4].integer.value;
        hpx3_reg->pci_exp_vendor_id      = reg_fields[5].integer.value;
        hpx3_reg->dvsec_id               = reg_fields[6].integer.value;
        hpx3_reg->dvsec_rev              = reg_fields[7].integer.value;
        hpx3_reg->match_offset           = reg_fields[8].integer.value;
        hpx3_reg->match_mask_and         = reg_fields[9].integer.value;
        hpx3_reg->match_value            = reg_fields[10].integer.value;
        hpx3_reg->reg_offset             = reg_fields[11].integer.value;
        hpx3_reg->reg_mask_and           = reg_fields[12].integer.value;
        hpx3_reg->reg_mask_or            = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
                                           union acpi_object *record)
{
        union acpi_object *fields = record->package.elements;
        u32 desc_count, expected_length, revision;
        union acpi_object *reg_fields;
        struct hpx_type3 hpx3;
        int i;

        revision = fields[1].integer.value;
        switch (revision) {
        case 1:
                desc_count = fields[2].integer.value;
                expected_length = 3 + desc_count * 14;

                if (record->package.count != expected_length)
                        return AE_ERROR;

                for (i = 2; i < expected_length; i++)
                        if (fields[i].type != ACPI_TYPE_INTEGER)
                                return AE_ERROR;

                for (i = 0; i < desc_count; i++) {
                        reg_fields = fields + 3 + i * 14;
                        parse_hpx3_register(&hpx3, reg_fields);
                        program_hpx_type3(dev, &hpx3);
                }

                break;
        default:
                pr_warn("%s: Type 3 Revision %d record not supported\n",
                        __func__, revision);
                return AE_ERROR;
        }
        return AE_OK;
}

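/*
 * Evaluate _HPX below @handle and program @dev accordingly.  _HPX returns
 * a package of setting records, each itself a package whose first two
 * integers are the record type (0-3) and revision.
 */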
static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *package, *record, *fields;
        struct hpx_type0 hpx0;
        struct hpx_type1 hpx1;
        struct hpx_type2 hpx2;
        u32 type;
        int i;

        status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *)buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE) {
                status = AE_ERROR;
                goto exit;
        }

        for (i = 0; i < package->package.count; i++) {
                record = &package->package.elements[i];
                if (record->type != ACPI_TYPE_PACKAGE) {
                        status = AE_ERROR;
                        goto exit;
                }

                fields = record->package.elements;
                if (fields[0].type != ACPI_TYPE_INTEGER ||
                    fields[1].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }

                type = fields[0].integer.value;
                switch (type) {
                case 0:
                        memset(&hpx0, 0, sizeof(hpx0));
                        status = decode_type0_hpx_record(record, &hpx0);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type0(dev, &hpx0);
                        break;
                case 1:
                        memset(&hpx1, 0, sizeof(hpx1));
                        status = decode_type1_hpx_record(record, &hpx1);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type1(dev, &hpx1);
                        break;
                case 2:
                        memset(&hpx2, 0, sizeof(hpx2));
                        status = decode_type2_hpx_record(record, &hpx2);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        program_hpx_type2(dev, &hpx2);
                        break;
                case 3:
                        status = program_type3_hpx_record(dev, record);
                        if (ACPI_FAILURE(status))
                                goto exit;
                        break;
                default:
                        pr_err("%s: Type %d record not supported\n",
                               __func__, type);
                        status = AE_ERROR;
                        goto exit;
                }
        }
 exit:
        kfree(buffer.pointer);
        return status;
}

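/*
 * Evaluate the legacy _HPP method, which returns a package of exactly
 * four integers, and apply it to @dev as a Type 0 setting record.
 */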
static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
        acpi_status status;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package, *fields;
        struct hpx_type0 hpx0;
        int i;

        memset(&hpx0, 0, sizeof(hpx0));

        status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        package = (union acpi_object *) buffer.pointer;
        if (package->type != ACPI_TYPE_PACKAGE ||
            package->package.count != 4) {
                status = AE_ERROR;
                goto exit;
        }

        fields = package->package.elements;
        for (i = 0; i < 4; i++) {
                if (fields[i].type != ACPI_TYPE_INTEGER) {
                        status = AE_ERROR;
                        goto exit;
                }
        }

        hpx0.revision        = 1;
        hpx0.cache_line_size = fields[0].integer.value;
        hpx0.latency_timer   = fields[1].integer.value;
        hpx0.enable_serr     = fields[2].integer.value;
        hpx0.enable_perr     = fields[3].integer.value;

        program_hpx_type0(dev, &hpx0);

exit:
        kfree(buffer.pointer);
        return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from ACPI
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
        acpi_status status;
        acpi_handle handle, phandle;
        struct pci_bus *pbus;

        if (acpi_pci_disabled)
                return -ENODEV;

        handle = NULL;
        for (pbus = dev->bus; pbus; pbus = pbus->parent) {
                handle = acpi_pci_get_bridge_handle(pbus);
                if (handle)
                        break;
        }

        /*
         * _HPP settings apply to all child buses, until another _HPP is
         * encountered. If we don't find an _HPP for the input pci dev,
         * look for it in the parent device scope since that would apply to
         * this pci dev.
         */
        while (handle) {
                status = acpi_run_hpx(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                status = acpi_run_hpp(dev, handle);
                if (ACPI_SUCCESS(status))
                        return 0;
                if (acpi_is_root_bridge(handle))
                        break;
                status = acpi_get_parent(handle, &phandle);
                if (ACPI_FAILURE(status))
                        break;
                handle = phandle;
        }
        return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
        const struct pci_host_bridge *host;
        u32 slot_cap;

        if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
                return false;

        pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
        if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
                return false;

        if (pcie_ports_native)
                return true;

        host = pci_find_host_bridge(bridge->bus);
        return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
        return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
        struct acpi_device *adev;
        struct acpi_pci_root *root;

        adev = container_of(context, struct acpi_device, wakeup.context);
        root = acpi_driver_data(adev);
        pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
        struct pci_dev *pci_dev;

        pci_dev = to_pci_dev(context->dev);

        if (pci_dev->pme_poll)
                pci_dev->pme_poll = false;

        if (pci_dev->current_state == PCI_D3cold) {
                pci_wakeup_event(pci_dev);
                pm_request_resume(&pci_dev->dev);
                return;
        }

        /* Clear PME Status if set. */
        if (pci_dev->pme_support)
                pci_check_pme_status(pci_dev);

        pci_wakeup_event(pci_dev);
        pm_request_resume(&pci_dev->dev);

        pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
        return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
                                     struct pci_dev *pci_dev)
{
        return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x"
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *      choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *      choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
        int acpi_state, d_max;

        if (pdev->no_d3cold)
                d_max = ACPI_STATE_D3_HOT;
        else
                d_max = ACPI_STATE_D3_COLD;
        acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
        if (acpi_state < 0)
                return PCI_POWER_ERROR;

        switch (acpi_state) {
        case ACPI_STATE_D0:
                return PCI_D0;
        case ACPI_STATE_D1:
                return PCI_D1;
        case ACPI_STATE_D2:
                return PCI_D2;
        case ACPI_STATE_D3_HOT:
                return PCI_D3hot;
        case ACPI_STATE_D3_COLD:
                return PCI_D3cold;
        }
        return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
        const struct fwnode_handle *fwnode;
        struct acpi_device *adev;
        struct pci_dev *root;
        u8 val;

        if (!dev->is_hotplug_bridge)
                return false;

        /* Assume D3 support if the bridge is power-manageable by ACPI. */
        adev = ACPI_COMPANION(&dev->dev);
        if (!adev && !pci_dev_is_added(dev)) {
                adev = acpi_pci_find_companion(&dev->dev);
                ACPI_COMPANION_SET(&dev->dev, adev);
        }

        if (adev && acpi_device_power_manageable(adev))
                return true;

        /*
         * Look for a special _DSD property for the root port and if it
         * is set we know the hierarchy behind it supports D3 just fine.
         */
        root = pcie_find_root_port(dev);
        if (!root)
                return false;

        adev = ACPI_COMPANION(&root->dev);
        if (root == dev) {
                /*
                 * It is possible that the ACPI companion is not yet bound
                 * for the root port so look it up manually here.
                 */
                if (!adev && !pci_dev_is_added(root))
                        adev = acpi_pci_find_companion(&root->dev);
        }

        if (!adev)
                return false;

        fwnode = acpi_fwnode_handle(adev);
        if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
                return false;

        return val == 1;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
                [PCI_D2] = ACPI_STATE_D2,
                [PCI_D3hot] = ACPI_STATE_D3_HOT,
                [PCI_D3cold] = ACPI_STATE_D3_COLD,
        };
        int error = -EINVAL;

        /* If the ACPI device has _EJ0, ignore the device */
        if (!adev || acpi_has_method(adev->handle, "_EJ0"))
                return -ENODEV;

        switch (state) {
        case PCI_D3cold:
                if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
                                PM_QOS_FLAGS_ALL) {
                        error = -EBUSY;
                        break;
                }
                fallthrough;
        case PCI_D0:
        case PCI_D1:
        case PCI_D2:
        case PCI_D3hot:
                error = acpi_device_set_power(adev, state_conv[state]);
        }

        if (!error)
                pci_dbg(dev, "power state changed by ACPI to %s\n",
                         acpi_power_state_string(state_conv[state]));

        return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        static const pci_power_t state_conv[] = {
                [ACPI_STATE_D0]      = PCI_D0,
                [ACPI_STATE_D1]      = PCI_D1,
                [ACPI_STATE_D2]      = PCI_D2,
                [ACPI_STATE_D3_HOT]  = PCI_D3hot,
                [ACPI_STATE_D3_COLD] = PCI_D3cold,
        };
        int state;

        if (!adev || !acpi_device_power_manageable(adev))
                return PCI_UNKNOWN;

        state = adev->power.state;
        if (state == ACPI_STATE_UNKNOWN)
                return PCI_UNKNOWN;

        return state_conv[state];
}

static void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        if (adev && acpi_device_power_manageable(adev))
                acpi_device_update_power(adev, NULL);
}

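/*
 * Walk up from @bus and configure wakeup on the first bridge that ACPI
 * can wake; at the root bus, fall back to the host bridge itself.
 */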
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
        while (bus->parent) {
                if (acpi_pm_device_can_wakeup(&bus->self->dev))
                        return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

                bus = bus->parent;
        }

        /* We have reached the root bus. */
        if (bus->bridge) {
                if (acpi_pm_device_can_wakeup(bus->bridge))
                        return acpi_pm_set_device_wakeup(bus->bridge, enable);
        }
        return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
        if (acpi_pm_device_can_wakeup(&dev->dev))
                return acpi_pm_set_device_wakeup(&dev->dev, enable);

        return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

        /*
         * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
         * system-wide suspend/resume confuses the platform firmware, so avoid
         * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
         * devices are expected to be in D3 before invoking the S3 entry path
         * from the firmware, so they should not be affected by this issue.
         */
        if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;

        if (!adev || !acpi_device_power_manageable(adev))
                return false;

        if (adev->wakeup.flags.valid &&
            device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
                return true;

        if (acpi_target_system_state() == ACPI_STATE_S0)
                return false;

        return !!adev->power.flags.dsw_present;
}

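/*
 * Platform power management callbacks handed to the PCI core via
 * pci_set_platform_pm() in acpi_pci_init() below.
 */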
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .bridge_d3 = acpi_pci_bridge_d3,
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .get_state = acpi_pci_get_power_state,
        .refresh_state = acpi_pci_refresh_power_state,
        .choose_state = acpi_pci_choose_state,
        .set_wakeup = acpi_pci_wakeup,
        .need_resume = acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
        union acpi_object *obj;
        struct pci_host_bridge *bridge;

        if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
                return;

        acpi_pci_slot_enumerate(bus);
        acpiphp_enumerate_slots(bus);

        /*
         * For a host bridge, check its _DSM for function 8 and if
         * that is available, mark it in pci_host_bridge.
         */
        if (!pci_is_root_bus(bus))
                return;

        obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                DSM_PCI_POWER_ON_RESET_DELAY, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
                bridge = pci_find_host_bridge(bus);
                bridge->ignore_reset_delay = 1;
        }
        ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
        if (acpi_pci_disabled || !bus->bridge)
                return;

        acpiphp_remove_slots(bus);
        acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev;
        bool check_children;
        u64 addr;

        check_children = pci_is_bridge(pci_dev);
        /* Refer to the ACPI spec for the syntax of _ADR */
        addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
        adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
                                      check_children);

        /*
         * There may be ACPI device objects in the ACPI namespace that are
         * children of the device object representing the host bridge, but don't
         * represent PCI devices.  Both _HID and _ADR may be present for them,
         * even though that is against the specification (for example, see
         * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which
         * appears to indicate that they should not be taken into consideration
         * as potential companions of PCI devices on the root bus.
         *
         * To catch this special case, disregard the returned device object if
         * it has a valid _HID, addr is 0 and the PCI device at hand is on the
         * root bus.
         */
        if (adev && adev->pnp.type.platform_id && !addr &&
            pci_is_root_bus(pci_dev->bus))
                return NULL;

        return adev;
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
                                    acpi_handle handle)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
        int value;
        union acpi_object *obj, *elements;

        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;

        obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
        if (!obj)
                return;

        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
                elements = obj->package.elements;
                if (elements[0].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[0].integer.value / 1000;
                        if (value < PCI_PM_D3COLD_WAIT)
                                pdev->d3cold_delay = value;
                }
                if (elements[3].type == ACPI_TYPE_INTEGER) {
                        value = (int)elements[3].integer.value / 1000;
                        if (value < PCI_PM_D3HOT_WAIT)
                                pdev->d3hot_delay = value;
                }
        }
        ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
        u8 val;

        if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
                return;
        if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
                return;

        /*
         * These root ports expose PCIe (including DMA) outside of the
         * system.  Everything downstream from them is external.
         */
        if (val)
                dev->external_facing = 1;
}

static void pci_acpi_setup(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);

        if (!adev)
                return;

        pci_acpi_optimize_delay(pci_dev, adev->handle);
        pci_acpi_set_external_facing(pci_dev);
        pci_acpi_add_edr_notifier(pci_dev);

        pci_acpi_add_pm_notifier(adev, pci_dev);
        if (!adev->wakeup.flags.valid)
                return;

        device_set_wakeup_capable(dev, true);
        /*
         * For bridges that can do D3 we enable wake automatically (as
         * we do for the power management itself in that case). The
         * reason is that the bridge may have additional methods such as
         * _DSW that need to be called.
         */
        if (pci_dev->bridge_d3)
                device_wakeup_enable(dev);

        acpi_pci_wakeup(pci_dev, false);
        acpi_device_power_add_dependent(adev, dev);
}

static void pci_acpi_cleanup(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        struct pci_dev *pci_dev = to_pci_dev(dev);

        if (!adev)
                return;

        pci_acpi_remove_edr_notifier(pci_dev);
        pci_acpi_remove_pm_notifier(adev);
        if (adev->wakeup.flags.valid) {
                acpi_device_power_remove_dependent(adev, dev);
                if (pci_dev->bridge_d3)
                        device_wakeup_disable(dev);

                device_set_wakeup_capable(dev, false);
        }
}

static bool pci_acpi_bus_match(struct device *dev)
{
        return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
        .name = "PCI",
        .match = pci_acpi_bus_match,
        .find_companion = acpi_pci_find_companion,
        .setup = pci_acpi_setup,
        .cleanup = pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn:       Callback matching a device to a fwnode that identifies a PCI
 *            MSI domain.
 *
 * This should be called by an irqchip driver, which is the parent of
 * the MSI domain, to provide a callback interface to query the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
        pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus:      The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
        struct fwnode_handle *fwnode;

        if (!pci_msi_get_fwnode_cb)
                return NULL;

        fwnode = pci_msi_get_fwnode_cb(&bus->dev);
        if (!fwnode)
                return NULL;

        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
        int ret;

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
                pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
                pci_no_msi();
        }

        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
                pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
                pcie_no_aspm();
        }

        ret = register_acpi_bus_type(&acpi_pci_bus);
        if (ret)
                return 0;

        pci_set_platform_pm(&acpi_pci_platform_pm);
        acpi_pci_slot_init();
        acpiphp_init();

        return 0;
}
arch_initcall(acpi_pci_init);