// linux/drivers/pci/controller/dwc/pcie-qcom.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Qualcomm PCIe root complex driver
   4 *
   5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
   6 * Copyright 2015 Linaro Limited.
   7 *
   8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
   9 */
  10
  11#include <linux/clk.h>
  12#include <linux/delay.h>
  13#include <linux/gpio/consumer.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/iopoll.h>
  17#include <linux/kernel.h>
  18#include <linux/init.h>
  19#include <linux/of_device.h>
  20#include <linux/of_gpio.h>
  21#include <linux/pci.h>
  22#include <linux/pm_runtime.h>
  23#include <linux/platform_device.h>
  24#include <linux/phy/phy.h>
  25#include <linux/regulator/consumer.h>
  26#include <linux/reset.h>
  27#include <linux/slab.h>
  28#include <linux/types.h>
  29
  30#include "pcie-designware.h"
  31
/* PARF (PCIe wrapper) register offsets and field bits */
#define PCIE20_PARF_SYS_CTRL			0x00
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* DBI (config-space view) registers */
#define PCIE20_COMMAND_STATUS			0x04
#define CMD_BME_VAL				0x4
#define PCIE20_DEVICE_CONTROL2_STATUS2		0x98
#define PCIE_CAP_CPL_TIMEOUT_DISABLE		0x10

/* More PARF offsets (vary in use per IP revision) */
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
#define PCIE20_PARF_DEVICE_TYPE			0x1000

/* ELBI registers (used to kick off link training on 2.1.0) */
#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

/* AXI bridge response/completion sizing */
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0		0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI Express capability block in DBI space */
#define PCIE20_CAP				0x70
#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + 0xC)
#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT	(BIT(10) | BIT(11))
#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)
#define PCIE_CAP_LINK1_VAL			0x2FD7F

#define PCIE20_PARF_Q2A_FLUSH			0x1AC

#define PCIE20_MISC_CONTROL_1_REG		0x8BC
#define DBI_RO_WR_EN				1

/* PERST# assert/deassert settle time, in microseconds */
#define PERST_DELAY_US				1000

#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE	0x358
#define SLV_ADDR_SPACE_SZ			0x10000000

#define DEVICE_TYPE_RC				0x4
#define QCOM_PCIE_2_1_0_MAX_SUPPLY	3
/* Clocks, resets and supplies for the 2.1.0 IP revision */
struct qcom_pcie_resources_2_1_0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

/* Clocks, core reset and vdda supply for the 1.0.0 IP revision */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
/* Clocks and supplies for the 2.3.2 IP revision (pipe clock handled in post_init) */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
/*
 * Clocks and the large reset set for the 2.4.0 IP revision; several of
 * the resets are only present on IPQ4019 (see get_resources_2_4_0).
 */
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;		/* 3 on IPQ4019 (no "iface"), else 4 */
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};

/* Clocks and resets for the 2.3.3 IP revision */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	struct reset_control *rst[7];
};

/* Clocks, supplies and pci reset for the 2.7.0 IP revision */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[6];
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;
};

/* Only one variant is active per probed device, so overlay the storage. */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};
 160
struct qcom_pcie;

/*
 * Per-IP-revision hooks; init/deinit sequence the clocks and resets,
 * post_init/post_deinit handle resources that must follow PHY power state
 * (e.g. the 2.3.2 pipe clock), and ltssm_enable starts link training.
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

/* Driver state for one root complex instance */
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;		/* variant-specific resources */
	struct phy *phy;
	struct gpio_desc *reset;		/* PERST# GPIO */
	const struct qcom_pcie_ops *ops;	/* variant hooks */
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
 183
 184static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
 185{
 186        gpiod_set_value_cansleep(pcie->reset, 1);
 187        usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 188}
 189
 190static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
 191{
 192        /* Ensure that PERST has been asserted for at least 100 ms */
 193        msleep(100);
 194        gpiod_set_value_cansleep(pcie->reset, 0);
 195        usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 196}
 197
 198static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
 199{
 200        struct dw_pcie *pci = pcie->pci;
 201
 202        if (dw_pcie_link_up(pci))
 203                return 0;
 204
 205        /* Enable Link Training state machine */
 206        if (pcie->ops->ltssm_enable)
 207                pcie->ops->ltssm_enable(pcie);
 208
 209        return dw_pcie_wait_for_link(pci);
 210}
 211
 212static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
 213{
 214        u32 val;
 215
 216        /* enable link training */
 217        val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
 218        val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
 219        writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
 220}
 221
/*
 * Look up the regulators, clocks and resets used by the 2.1.0 core.
 * Everything is devm-managed, so no cleanup is needed on failure.
 * Returns 0 on success or a negative errno.
 */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
 268
 269static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
 270{
 271        struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
 272
 273        reset_control_assert(res->pci_reset);
 274        reset_control_assert(res->axi_reset);
 275        reset_control_assert(res->ahb_reset);
 276        reset_control_assert(res->por_reset);
 277        reset_control_assert(res->pci_reset);
 278        clk_disable_unprepare(res->iface_clk);
 279        clk_disable_unprepare(res->core_clk);
 280        clk_disable_unprepare(res->phy_clk);
 281        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
 282}
 283
 284static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
 285{
 286        struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
 287        struct dw_pcie *pci = pcie->pci;
 288        struct device *dev = pci->dev;
 289        u32 val;
 290        int ret;
 291
 292        ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
 293        if (ret < 0) {
 294                dev_err(dev, "cannot enable regulators\n");
 295                return ret;
 296        }
 297
 298        ret = reset_control_assert(res->ahb_reset);
 299        if (ret) {
 300                dev_err(dev, "cannot assert ahb reset\n");
 301                goto err_assert_ahb;
 302        }
 303
 304        ret = clk_prepare_enable(res->iface_clk);
 305        if (ret) {
 306                dev_err(dev, "cannot prepare/enable iface clock\n");
 307                goto err_assert_ahb;
 308        }
 309
 310        ret = clk_prepare_enable(res->phy_clk);
 311        if (ret) {
 312                dev_err(dev, "cannot prepare/enable phy clock\n");
 313                goto err_clk_phy;
 314        }
 315
 316        ret = clk_prepare_enable(res->core_clk);
 317        if (ret) {
 318                dev_err(dev, "cannot prepare/enable core clock\n");
 319                goto err_clk_core;
 320        }
 321
 322        ret = reset_control_deassert(res->ahb_reset);
 323        if (ret) {
 324                dev_err(dev, "cannot deassert ahb reset\n");
 325                goto err_deassert_ahb;
 326        }
 327
 328        /* enable PCIe clocks and resets */
 329        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
 330        val &= ~BIT(0);
 331        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
 332
 333        /* enable external reference clock */
 334        val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
 335        val |= BIT(16);
 336        writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
 337
 338        ret = reset_control_deassert(res->phy_reset);
 339        if (ret) {
 340                dev_err(dev, "cannot deassert phy reset\n");
 341                return ret;
 342        }
 343
 344        ret = reset_control_deassert(res->pci_reset);
 345        if (ret) {
 346                dev_err(dev, "cannot deassert pci reset\n");
 347                return ret;
 348        }
 349
 350        ret = reset_control_deassert(res->por_reset);
 351        if (ret) {
 352                dev_err(dev, "cannot deassert por reset\n");
 353                return ret;
 354        }
 355
 356        ret = reset_control_deassert(res->axi_reset);
 357        if (ret) {
 358                dev_err(dev, "cannot deassert axi reset\n");
 359                return ret;
 360        }
 361
 362        /* wait for clock acquisition */
 363        usleep_range(1000, 1500);
 364
 365
 366        /* Set the Max TLP size to 2K, instead of using default of 4K */
 367        writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
 368               pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
 369        writel(CFG_BRIDGE_SB_INIT,
 370               pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
 371
 372        return 0;
 373
 374err_deassert_ahb:
 375        clk_disable_unprepare(res->core_clk);
 376err_clk_core:
 377        clk_disable_unprepare(res->phy_clk);
 378err_clk_phy:
 379        clk_disable_unprepare(res->iface_clk);
 380err_assert_ahb:
 381        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
 382
 383        return ret;
 384}
 385
/*
 * Look up the vdda regulator, clocks and core reset used by the 1.0.0
 * core. All devm-managed; returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
 415
/*
 * Power down the 1.0.0 core: re-assert the core reset, then release the
 * clocks and the vdda supply in reverse order of qcom_pcie_init_1_0_0().
 */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
 427
/*
 * Power up and initialize the 1.0.0 core: release the core reset, enable
 * the clocks and vdda supply, then point the PARF DBI base at offset 0.
 * Returns 0 on success or a negative errno; failures unwind through the
 * goto chain in reverse acquisition order.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* NOTE(review): BIT(31) of WR_ADDR_HALT appears to gate MSI
		 * write handling — confirm against PARF documentation. */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
 495
 496static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
 497{
 498        u32 val;
 499
 500        /* enable link training */
 501        val = readl(pcie->parf + PCIE20_PARF_LTSSM);
 502        val |= BIT(8);
 503        writel(val, pcie->parf + PCIE20_PARF_LTSSM);
 504}
 505
/*
 * Look up the regulators and clocks used by the 2.3.2 core. The pipe
 * clock is fetched here but only enabled in post_init, after the PHY is
 * powered. All devm-managed; returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
 539
/*
 * Power down the 2.3.2 core: stop the bus/cfg/aux clocks and supplies in
 * reverse order of init. The pipe clock is handled in post_deinit.
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
 551
/* Stop the pipe clock enabled by qcom_pcie_post_init_2_3_2(). */
static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->pipe_clk);
}
 558
/*
 * Power up and initialize the 2.3.2 core: enable supplies and clocks,
 * then configure the PARF block (release PHY from software power-down,
 * relocate DBI, enable MHI clock/reset control and write-address-halt).
 * Returns 0 on success or a negative errno; failures unwind through the
 * goto chain in reverse acquisition order.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
 632
 633static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
 634{
 635        struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
 636        struct dw_pcie *pci = pcie->pci;
 637        struct device *dev = pci->dev;
 638        int ret;
 639
 640        ret = clk_prepare_enable(res->pipe_clk);
 641        if (ret) {
 642                dev_err(dev, "cannot prepare/enable pipe clock\n");
 643                return ret;
 644        }
 645
 646        return 0;
 647}
 648
/*
 * Look up the clocks and resets used by the 2.4.0 core. IPQ4019 differs
 * from the other users of this core: it has no "iface" clock but does
 * own several extra PHY-related/secure resets that are controlled here.
 * All devm-managed; returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relates to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
 731
/*
 * Power down the 2.4.0 core: assert all resets (the IPQ4019-only reset
 * controls are NULL on other platforms and assert is then a no-op) and
 * stop the bulk clocks.
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
 747
/*
 * Power up and initialize the 2.4.0 core.
 *
 * The sequence first asserts every reset (with settle delays between
 * groups), then deasserts them in the required order, enables the bulk
 * clocks and finally configures the PARF block (PHY out of software
 * power-down, DBI relocation, MHI clock/reset control, write-address
 * halt). Returns 0 on success or a negative errno; on failure the error
 * labels re-assert the resets in reverse deassert order.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	/* Assert everything first so the block starts from a known state. */
	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	/* Release the resets: PHY side first, then AXI, power and AHB. */
	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}
 921
 922static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
 923{
 924        struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
 925        struct dw_pcie *pci = pcie->pci;
 926        struct device *dev = pci->dev;
 927        int i;
 928        const char *rst_names[] = { "axi_m", "axi_s", "pipe",
 929                                    "axi_m_sticky", "sticky",
 930                                    "ahb", "sleep", };
 931
 932        res->iface = devm_clk_get(dev, "iface");
 933        if (IS_ERR(res->iface))
 934                return PTR_ERR(res->iface);
 935
 936        res->axi_m_clk = devm_clk_get(dev, "axi_m");
 937        if (IS_ERR(res->axi_m_clk))
 938                return PTR_ERR(res->axi_m_clk);
 939
 940        res->axi_s_clk = devm_clk_get(dev, "axi_s");
 941        if (IS_ERR(res->axi_s_clk))
 942                return PTR_ERR(res->axi_s_clk);
 943
 944        res->ahb_clk = devm_clk_get(dev, "ahb");
 945        if (IS_ERR(res->ahb_clk))
 946                return PTR_ERR(res->ahb_clk);
 947
 948        res->aux_clk = devm_clk_get(dev, "aux");
 949        if (IS_ERR(res->aux_clk))
 950                return PTR_ERR(res->aux_clk);
 951
 952        for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
 953                res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
 954                if (IS_ERR(res->rst[i]))
 955                        return PTR_ERR(res->rst[i]);
 956        }
 957
 958        return 0;
 959}
 960
/* Undo qcom_pcie_init_2_3_3(): gate and unprepare every bus/aux clock. */
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->axi_m_clk);
	clk_disable_unprepare(res->axi_s_clk);
	clk_disable_unprepare(res->ahb_clk);
	clk_disable_unprepare(res->aux_clk);
}
 971
 972static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
 973{
 974        struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
 975        struct dw_pcie *pci = pcie->pci;
 976        struct device *dev = pci->dev;
 977        int i, ret;
 978        u32 val;
 979
 980        for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
 981                ret = reset_control_assert(res->rst[i]);
 982                if (ret) {
 983                        dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
 984                        return ret;
 985                }
 986        }
 987
 988        usleep_range(2000, 2500);
 989
 990        for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
 991                ret = reset_control_deassert(res->rst[i]);
 992                if (ret) {
 993                        dev_err(dev, "reset #%d deassert failed (%d)\n", i,
 994                                ret);
 995                        return ret;
 996                }
 997        }
 998
 999        /*
1000         * Don't have a way to see if the reset has completed.
1001         * Wait for some time.
1002         */
1003        usleep_range(2000, 2500);
1004
1005        ret = clk_prepare_enable(res->iface);
1006        if (ret) {
1007                dev_err(dev, "cannot prepare/enable core clock\n");
1008                goto err_clk_iface;
1009        }
1010
1011        ret = clk_prepare_enable(res->axi_m_clk);
1012        if (ret) {
1013                dev_err(dev, "cannot prepare/enable core clock\n");
1014                goto err_clk_axi_m;
1015        }
1016
1017        ret = clk_prepare_enable(res->axi_s_clk);
1018        if (ret) {
1019                dev_err(dev, "cannot prepare/enable axi slave clock\n");
1020                goto err_clk_axi_s;
1021        }
1022
1023        ret = clk_prepare_enable(res->ahb_clk);
1024        if (ret) {
1025                dev_err(dev, "cannot prepare/enable ahb clock\n");
1026                goto err_clk_ahb;
1027        }
1028
1029        ret = clk_prepare_enable(res->aux_clk);
1030        if (ret) {
1031                dev_err(dev, "cannot prepare/enable aux clock\n");
1032                goto err_clk_aux;
1033        }
1034
1035        writel(SLV_ADDR_SPACE_SZ,
1036                pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
1037
1038        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1039        val &= ~BIT(0);
1040        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1041
1042        writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1043
1044        writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
1045                | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
1046                AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
1047                pcie->parf + PCIE20_PARF_SYS_CTRL);
1048        writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
1049
1050        writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
1051        writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
1052        writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
1053
1054        val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
1055        val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
1056        writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
1057
1058        writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
1059                PCIE20_DEVICE_CONTROL2_STATUS2);
1060
1061        return 0;
1062
1063err_clk_aux:
1064        clk_disable_unprepare(res->ahb_clk);
1065err_clk_ahb:
1066        clk_disable_unprepare(res->axi_s_clk);
1067err_clk_axi_s:
1068        clk_disable_unprepare(res->axi_m_clk);
1069err_clk_axi_m:
1070        clk_disable_unprepare(res->iface);
1071err_clk_iface:
1072        /*
1073         * Not checking for failure, will anyway return
1074         * the original failure in 'ret'.
1075         */
1076        for (i = 0; i < ARRAY_SIZE(res->rst); i++)
1077                reset_control_assert(res->rst[i]);
1078
1079        return ret;
1080}
1081
1082static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
1083{
1084        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1085        struct dw_pcie *pci = pcie->pci;
1086        struct device *dev = pci->dev;
1087        int ret;
1088
1089        res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
1090        if (IS_ERR(res->pci_reset))
1091                return PTR_ERR(res->pci_reset);
1092
1093        res->supplies[0].supply = "vdda";
1094        res->supplies[1].supply = "vddpe-3v3";
1095        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
1096                                      res->supplies);
1097        if (ret)
1098                return ret;
1099
1100        res->clks[0].id = "aux";
1101        res->clks[1].id = "cfg";
1102        res->clks[2].id = "bus_master";
1103        res->clks[3].id = "bus_slave";
1104        res->clks[4].id = "slave_q2a";
1105        res->clks[5].id = "tbu";
1106
1107        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
1108        if (ret < 0)
1109                return ret;
1110
1111        res->pipe_clk = devm_clk_get(dev, "pipe");
1112        return PTR_ERR_OR_ZERO(res->pipe_clk);
1113}
1114
1115static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
1116{
1117        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1118        struct dw_pcie *pci = pcie->pci;
1119        struct device *dev = pci->dev;
1120        u32 val;
1121        int ret;
1122
1123        ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
1124        if (ret < 0) {
1125                dev_err(dev, "cannot enable regulators\n");
1126                return ret;
1127        }
1128
1129        ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
1130        if (ret < 0)
1131                goto err_disable_regulators;
1132
1133        ret = reset_control_assert(res->pci_reset);
1134        if (ret < 0) {
1135                dev_err(dev, "cannot deassert pci reset\n");
1136                goto err_disable_clocks;
1137        }
1138
1139        usleep_range(1000, 1500);
1140
1141        ret = reset_control_deassert(res->pci_reset);
1142        if (ret < 0) {
1143                dev_err(dev, "cannot deassert pci reset\n");
1144                goto err_disable_clocks;
1145        }
1146
1147        ret = clk_prepare_enable(res->pipe_clk);
1148        if (ret) {
1149                dev_err(dev, "cannot prepare/enable pipe clock\n");
1150                goto err_disable_clocks;
1151        }
1152
1153        /* configure PCIe to RC mode */
1154        writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
1155
1156        /* enable PCIe clocks and resets */
1157        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1158        val &= ~BIT(0);
1159        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1160
1161        /* change DBI base address */
1162        writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1163
1164        /* MAC PHY_POWERDOWN MUX DISABLE  */
1165        val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
1166        val &= ~BIT(29);
1167        writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
1168
1169        val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1170        val |= BIT(4);
1171        writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1172
1173        if (IS_ENABLED(CONFIG_PCI_MSI)) {
1174                val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1175                val |= BIT(31);
1176                writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1177        }
1178
1179        return 0;
1180err_disable_clocks:
1181        clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
1182err_disable_regulators:
1183        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1184
1185        return ret;
1186}
1187
/* Undo qcom_pcie_init_2_7_0(): drop the bulk clocks, then the regulators. */
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
1195
1196static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
1197{
1198        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1199
1200        return clk_prepare_enable(res->pipe_clk);
1201}
1202
1203static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
1204{
1205        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1206
1207        clk_disable_unprepare(res->pipe_clk);
1208}
1209
1210static int qcom_pcie_link_up(struct dw_pcie *pci)
1211{
1212        u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
1213
1214        return !!(val & PCI_EXP_LNKSTA_DLLLA);
1215}
1216
/*
 * DWC host-init callback: bring up the controller in strict order —
 * hold the endpoint in PERST, run the SoC-specific init, power the PHY,
 * run the optional post-init hook, program the RC, release PERST, and
 * start link training.  The goto chain unwinds in exact reverse order.
 */
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Keep the endpoint in reset while the RC is being configured. */
	qcom_ep_reset_assert(pcie);

	/* IP-revision-specific clocks/resets/PARF setup. */
	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	/* Optional hook; e.g. 2.7.0 enables the pipe clock here. */
	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	/* Release the endpoint and train the link. */
	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return 0;
err:
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}
1262
/* DWC host callbacks shared by every supported IP revision. */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};
1266
/* Qcom IP rev.: 2.1.0  Synopsys IP rev.: 4.01a (ipq8064, apq8064) */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
1274
/* Qcom IP rev.: 1.0.0  Synopsys IP rev.: 4.11a (apq8084) */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
1282
/* Qcom IP rev.: 2.3.2  Synopsys IP rev.: 4.21a (msm8996) */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1292
/* Qcom IP rev.: 2.4.0  Synopsys IP rev.: 4.20a (ipq4019, qcs404) */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1300
/* Qcom IP rev.: 2.3.3  Synopsys IP rev.: 4.30a (ipq8074) */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1308
1309/* Qcom IP rev.: 2.7.0  Synopsys IP rev.: 4.30a */
1310static const struct qcom_pcie_ops ops_2_7_0 = {
1311        .get_resources = qcom_pcie_get_resources_2_7_0,
1312        .init = qcom_pcie_init_2_7_0,
1313        .deinit = qcom_pcie_deinit_2_7_0,
1314        .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
1315        .post_init = qcom_pcie_post_init_2_7_0,
1316        .post_deinit = qcom_pcie_post_deinit_2_7_0,
1317};
1318
/* Core DWC callbacks; only link-state query is controller-specific here. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};
1322
1323static int qcom_pcie_probe(struct platform_device *pdev)
1324{
1325        struct device *dev = &pdev->dev;
1326        struct resource *res;
1327        struct pcie_port *pp;
1328        struct dw_pcie *pci;
1329        struct qcom_pcie *pcie;
1330        int ret;
1331
1332        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1333        if (!pcie)
1334                return -ENOMEM;
1335
1336        pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1337        if (!pci)
1338                return -ENOMEM;
1339
1340        pm_runtime_enable(dev);
1341        ret = pm_runtime_get_sync(dev);
1342        if (ret < 0) {
1343                pm_runtime_disable(dev);
1344                return ret;
1345        }
1346
1347        pci->dev = dev;
1348        pci->ops = &dw_pcie_ops;
1349        pp = &pci->pp;
1350
1351        pcie->pci = pci;
1352
1353        pcie->ops = of_device_get_match_data(dev);
1354
1355        pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
1356        if (IS_ERR(pcie->reset)) {
1357                ret = PTR_ERR(pcie->reset);
1358                goto err_pm_runtime_put;
1359        }
1360
1361        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
1362        pcie->parf = devm_ioremap_resource(dev, res);
1363        if (IS_ERR(pcie->parf)) {
1364                ret = PTR_ERR(pcie->parf);
1365                goto err_pm_runtime_put;
1366        }
1367
1368        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1369        pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
1370        if (IS_ERR(pci->dbi_base)) {
1371                ret = PTR_ERR(pci->dbi_base);
1372                goto err_pm_runtime_put;
1373        }
1374
1375        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
1376        pcie->elbi = devm_ioremap_resource(dev, res);
1377        if (IS_ERR(pcie->elbi)) {
1378                ret = PTR_ERR(pcie->elbi);
1379                goto err_pm_runtime_put;
1380        }
1381
1382        pcie->phy = devm_phy_optional_get(dev, "pciephy");
1383        if (IS_ERR(pcie->phy)) {
1384                ret = PTR_ERR(pcie->phy);
1385                goto err_pm_runtime_put;
1386        }
1387
1388        ret = pcie->ops->get_resources(pcie);
1389        if (ret)
1390                goto err_pm_runtime_put;
1391
1392        pp->ops = &qcom_pcie_dw_ops;
1393
1394        if (IS_ENABLED(CONFIG_PCI_MSI)) {
1395                pp->msi_irq = platform_get_irq_byname(pdev, "msi");
1396                if (pp->msi_irq < 0) {
1397                        ret = pp->msi_irq;
1398                        goto err_pm_runtime_put;
1399                }
1400        }
1401
1402        ret = phy_init(pcie->phy);
1403        if (ret) {
1404                pm_runtime_disable(&pdev->dev);
1405                goto err_pm_runtime_put;
1406        }
1407
1408        platform_set_drvdata(pdev, pcie);
1409
1410        ret = dw_pcie_host_init(pp);
1411        if (ret) {
1412                dev_err(dev, "cannot initialize host\n");
1413                pm_runtime_disable(&pdev->dev);
1414                goto err_pm_runtime_put;
1415        }
1416
1417        return 0;
1418
1419err_pm_runtime_put:
1420        pm_runtime_put(dev);
1421        pm_runtime_disable(dev);
1422
1423        return ret;
1424}
1425
/* DT compatibles mapped to the per-IP-revision ops tables above. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ }
};
1437
/*
 * Force the class code of the listed Qualcomm root-port device IDs to
 * PCI-to-PCI bridge, presumably because the hardware reports a different
 * class — NOTE(review): confirm against the affected silicon.
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1449
/* Built-in only (no module unload path); unbind via sysfs is suppressed. */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1459