/* linux/drivers/pci/controller/dwc/pcie-qcom.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Qualcomm PCIe root complex driver
   4 *
   5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
   6 * Copyright 2015 Linaro Limited.
   7 *
   8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
   9 */
  10
  11#include <linux/clk.h>
  12#include <linux/crc8.h>
  13#include <linux/delay.h>
  14#include <linux/gpio/consumer.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/iopoll.h>
  18#include <linux/kernel.h>
  19#include <linux/init.h>
  20#include <linux/of_device.h>
  21#include <linux/of_gpio.h>
  22#include <linux/pci.h>
  23#include <linux/pm_runtime.h>
  24#include <linux/platform_device.h>
  25#include <linux/phy/phy.h>
  26#include <linux/regulator/consumer.h>
  27#include <linux/reset.h>
  28#include <linux/slab.h>
  29#include <linux/types.h>
  30
  31#include "../../pci.h"
  32#include "pcie-designware.h"
  33
  34#define PCIE20_PARF_SYS_CTRL                    0x00
  35#define MST_WAKEUP_EN                           BIT(13)
  36#define SLV_WAKEUP_EN                           BIT(12)
  37#define MSTR_ACLK_CGC_DIS                       BIT(10)
  38#define SLV_ACLK_CGC_DIS                        BIT(9)
  39#define CORE_CLK_CGC_DIS                        BIT(6)
  40#define AUX_PWR_DET                             BIT(4)
  41#define L23_CLK_RMV_DIS                         BIT(2)
  42#define L1_CLK_RMV_DIS                          BIT(1)
  43
  44#define PCIE20_PARF_PHY_CTRL                    0x40
  45#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK       GENMASK(20, 16)
  46#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)         ((x) << 16)
  47
  48#define PCIE20_PARF_PHY_REFCLK                  0x4C
  49#define PHY_REFCLK_SSP_EN                       BIT(16)
  50#define PHY_REFCLK_USE_PAD                      BIT(12)
  51
  52#define PCIE20_PARF_DBI_BASE_ADDR               0x168
  53#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE         0x16C
  54#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL        0x174
  55#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT       0x178
  56#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2    0x1A8
  57#define PCIE20_PARF_LTSSM                       0x1B0
  58#define PCIE20_PARF_SID_OFFSET                  0x234
  59#define PCIE20_PARF_BDF_TRANSLATE_CFG           0x24C
  60#define PCIE20_PARF_DEVICE_TYPE                 0x1000
  61#define PCIE20_PARF_BDF_TO_SID_TABLE_N          0x2000
  62
  63#define PCIE20_ELBI_SYS_CTRL                    0x04
  64#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE          BIT(0)
  65
  66#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0         0x818
  67#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K        0x4
  68#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K        0x5
  69#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1         0x81c
  70#define CFG_BRIDGE_SB_INIT                      BIT(0)
  71
  72#define PCIE_CAP_LINK1_VAL                      0x2FD7F
  73
  74#define PCIE20_PARF_Q2A_FLUSH                   0x1AC
  75
  76#define PCIE20_MISC_CONTROL_1_REG               0x8BC
  77#define DBI_RO_WR_EN                            1
  78
  79#define PERST_DELAY_US                          1000
  80/* PARF registers */
  81#define PCIE20_PARF_PCS_DEEMPH                  0x34
  82#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)            ((x) << 16)
  83#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)      ((x) << 8)
  84#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)        ((x) << 0)
  85
  86#define PCIE20_PARF_PCS_SWING                   0x38
  87#define PCS_SWING_TX_SWING_FULL(x)              ((x) << 8)
  88#define PCS_SWING_TX_SWING_LOW(x)               ((x) << 0)
  89
  90#define PCIE20_PARF_CONFIG_BITS         0x50
  91#define PHY_RX0_EQ(x)                           ((x) << 24)
  92
  93#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE      0x358
  94#define SLV_ADDR_SPACE_SZ                       0x10000000
  95
  96#define PCIE20_LNK_CONTROL2_LINK_STATUS2        0xa0
  97
  98#define DEVICE_TYPE_RC                          0x4
  99
 100#define QCOM_PCIE_2_1_0_MAX_SUPPLY      3
 101#define QCOM_PCIE_2_1_0_MAX_CLOCKS      5
 102
 103#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
 104
/*
 * Resources for IP rev 2.1.0 controllers (used by the ipq8064/apq8064
 * family — see the compatible checks in qcom_pcie_init_2_1_0()).
 */
struct qcom_pcie_resources_2_1_0 {
	/* iface/core/phy are required, aux/ref optional (see get_resources) */
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct reset_control *ext_reset;	/* optional external reset */
	/* "vdda", "vdda_phy", "vdda_refclk" supplies */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
 115
/* Resources for IP rev 1.0.0 controllers: four named clocks, one reset, vdda */
struct qcom_pcie_resources_1_0_0 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
 124
#define QCOM_PCIE_2_3_2_MAX_SUPPLY	2
/*
 * Resources for IP rev 2.3.2 controllers. The pipe clock is handled
 * separately in post_init/post_deinit, after the PHY is powered.
 */
struct qcom_pcie_resources_2_3_2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
	/* "vdda" and "vddpe-3v3" supplies */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
 134
#define QCOM_PCIE_2_4_0_MAX_CLOCKS	4
/*
 * Resources for IP rev 2.4.0 controllers. ipq4019 omits the "iface"
 * clock (num_clks == 3) but additionally controls several PHY-side
 * resets (pipe, axi_m_vmid, axi_s_xpu, parf, phy, phy_ahb) here.
 */
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;			/* 3 on ipq4019, 4 otherwise */
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};
 152
/* Resources for IP rev 2.3.3 controllers */
struct qcom_pcie_resources_2_3_3 {
	struct clk *iface;
	struct clk *axi_m_clk;
	struct clk *axi_s_clk;
	struct clk *ahb_clk;
	struct clk *aux_clk;
	/* seven named resets; the names are claimed outside this chunk */
	struct reset_control *rst[7];
};
 161
/* Resources for IP rev 2.7.0 controllers: 6 clocks typically, 7 for sm8250 */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[7];
	int num_clks;			/* actual number of entries in use */
	struct regulator_bulk_data supplies[2];
	struct reset_control *pci_reset;
	struct clk *pipe_clk;		/* enabled only after PHY power-on */
};
 170
/*
 * Per-device resource storage; exactly one member is used for a given
 * device, matching the qcom_pcie_ops revision selected for it.
 */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
};
 179
 180struct qcom_pcie;
 181
/*
 * Per-IP-revision callbacks:
 * @get_resources: claim clocks/resets/regulators for this revision
 * @init:          power up the controller and bring it out of reset
 * @post_init:     optional extra setup after @init (e.g. pipe clock)
 * @deinit:        undo @init
 * @post_deinit:   undo @post_init
 * @ltssm_enable:  optional; start link training (see qcom_pcie_start_link())
 * @config_sid:    optional; presumably programs the BDF-to-SID mapping
 *                 (PCIE20_PARF_BDF_TO_SID_TABLE_N) — caller not in this chunk
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*post_deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};
 191
/* Driver state for one Qualcomm PCIe root complex instance */
struct qcom_pcie {
	struct dw_pcie *pci;			/* DesignWare core handle */
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;		/* revision-specific resources */
	struct phy *phy;
	struct gpio_desc *reset;		/* PERST# GPIO (logical assert = 1) */
	const struct qcom_pcie_ops *ops;	/* revision-specific callbacks */
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
 203
/* Assert PERST# to hold the endpoint in reset, then let the line settle */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
 209
/* Release PERST# so the endpoint can come out of reset */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	/* give the line time to settle before link training starts */
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
 217
 218static int qcom_pcie_start_link(struct dw_pcie *pci)
 219{
 220        struct qcom_pcie *pcie = to_qcom_pcie(pci);
 221
 222        /* Enable Link Training state machine */
 223        if (pcie->ops->ltssm_enable)
 224                pcie->ops->ltssm_enable(pcie);
 225
 226        return 0;
 227}
 228
 229static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
 230{
 231        u32 val;
 232
 233        /* enable link training */
 234        val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
 235        val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
 236        writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
 237}
 238
/*
 * Claim all clocks, resets and regulators for a rev 2.1.0 controller.
 * Everything is devm-managed. Returns 0 or a negative errno.
 */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get_exclusive(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	/* "ext" reset is optional; a NULL control is returned if absent */
	res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
	if (IS_ERR(res->ext_reset))
		return PTR_ERR(res->ext_reset);

	res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
 293
/* Undo qcom_pcie_init_2_1_0(): stop clocks, assert resets, power down */
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	/* put the PHY back under reset (bit 0, cleared during init) */
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
 310
/*
 * Bring a rev 2.1.0 controller out of reset: enable supplies, deassert
 * resets in the required order, enable clocks, release the PHY and apply
 * per-SoC PHY tuning. The reset deassert/clock ordering is sequence-
 * sensitive; on failure the goto chain unwinds in exact reverse order.
 * Returns 0 or a negative errno.
 */
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* reset the PCIe interface as uboot can leave it undefined state */
	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->ext_reset);
	reset_control_assert(res->phy_reset);

	/* hold the PHY in reset (bit 0) until clocks are running */
	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	ret = reset_control_deassert(res->ext_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ext reset\n");
		goto err_deassert_ext;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_deassert_phy;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		goto err_deassert_pci;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		goto err_deassert_por;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		goto err_deassert_axi;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* PHY de-emphasis/swing/equalization tuning for ipq8064 variants */
	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PCIE20_PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
			       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PCIE20_PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);

	return 0;

err_clks:
	reset_control_assert(res->axi_reset);
err_deassert_axi:
	reset_control_assert(res->por_reset);
err_deassert_por:
	reset_control_assert(res->pci_reset);
err_deassert_pci:
	reset_control_assert(res->phy_reset);
err_deassert_phy:
	reset_control_assert(res->ext_reset);
err_deassert_ext:
	reset_control_assert(res->ahb_reset);
err_deassert_ahb:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
 437
/*
 * Claim the vdda supply, the four named clocks and the core reset for a
 * rev 1.0.0 controller. Everything is devm-managed.
 */
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
 467
/* Undo qcom_pcie_init_1_0_0(): assert reset, stop clocks, power down */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
 479
/*
 * Bring a rev 1.0.0 controller up: release the core reset, enable the
 * clocks in order (aux, iface, master_bus, slave_bus), then the vdda
 * supply, and configure the PARF block. The goto chain unwinds each
 * step in reverse on failure. Returns 0 or a negative errno.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * Set bit 31 of AXI_MSTR_WR_ADDR_HALT when MSI is in use;
		 * exact bit semantics are per QC PARF documentation —
		 * NOTE(review): not derivable from this file, confirm there.
		 */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
 547
 548static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
 549{
 550        u32 val;
 551
 552        /* enable link training */
 553        val = readl(pcie->parf + PCIE20_PARF_LTSSM);
 554        val |= BIT(8);
 555        writel(val, pcie->parf + PCIE20_PARF_LTSSM);
 556}
 557
/*
 * Claim supplies and the five named clocks for a rev 2.3.2 controller.
 * The pipe clock is only enabled later, in qcom_pcie_post_init_2_3_2().
 */
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
 591
/*
 * Undo qcom_pcie_init_2_3_2(): clocks are stopped in reverse enable
 * order, then the supplies are cut. The pipe clock is handled in
 * qcom_pcie_post_deinit_2_3_2().
 */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
 603
 604static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
 605{
 606        struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
 607
 608        clk_disable_unprepare(res->pipe_clk);
 609}
 610
/*
 * Power up a rev 2.3.2 controller: enable supplies and core clocks in
 * order (aux, cfg, master, slave), then release the PHY and program the
 * PARF block. The pipe clock is enabled separately in post_init once
 * the PHY is powered. Unwinds in reverse on failure; returns 0 or a
 * negative errno.
 */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_aux_clk;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	/* bit 31 semantics per QC PARF docs — NOTE(review): confirm there */
	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

err_aux_clk:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
 684
 685static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
 686{
 687        struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
 688        struct dw_pcie *pci = pcie->pci;
 689        struct device *dev = pci->dev;
 690        int ret;
 691
 692        ret = clk_prepare_enable(res->pipe_clk);
 693        if (ret) {
 694                dev_err(dev, "cannot prepare/enable pipe clock\n");
 695                return ret;
 696        }
 697
 698        return 0;
 699}
 700
/*
 * Claim clocks and resets for a rev 2.4.0 controller. ipq4019 lacks the
 * "iface" clock but additionally controls several PHY-side and secure
 * resets from here. Everything is devm-managed; returns 0 or a negative
 * errno.
 */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	if (is_ipq) {
		/*
		 * These resources relates to the PHY or are secure clocks, but
		 * are controlled here for IPQ4019
		 */
		res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
		if (IS_ERR(res->pipe_reset))
			return PTR_ERR(res->pipe_reset);

		res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
									 "axi_m_vmid");
		if (IS_ERR(res->axi_m_vmid_reset))
			return PTR_ERR(res->axi_m_vmid_reset);

		res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
									"axi_s_xpu");
		if (IS_ERR(res->axi_s_xpu_reset))
			return PTR_ERR(res->axi_s_xpu_reset);

		res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
		if (IS_ERR(res->parf_reset))
			return PTR_ERR(res->parf_reset);

		res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
		if (IS_ERR(res->phy_reset))
			return PTR_ERR(res->phy_reset);
	}

	res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
								   "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
								  "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	if (is_ipq) {
		res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
		if (IS_ERR(res->phy_ahb_reset))
			return PTR_ERR(res->phy_ahb_reset);
	}

	return 0;
}
 783
/*
 * Undo qcom_pcie_init_2_4_0(): assert every reset, then stop the clocks.
 * The ipq4019-only controls (pipe, phy, phy_ahb) are NULL on other SoCs,
 * where reset_control_assert() is a no-op.
 */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
 799
/*
 * qcom_pcie_init_2_4_0 - power up the controller for Qcom PCIe IP rev. 2.4.0.
 * @pcie: host controller state
 *
 * Asserts every block reset (with settling delays), releases them again in
 * the order the IP requires, enables the bulk clocks and finally programs
 * the PARF registers for operation.  Returns 0 on success or a negative
 * errno.  On failure, the unwind labels at the bottom re-assert exactly the
 * resets that were already released (in reverse order) and gate the clocks.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	/* Hold all block resets before reprogramming anything. */
	ret = reset_control_assert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master reset\n");
		return ret;
	}

	ret = reset_control_assert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi slave reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert pipe sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy reset\n");
		return ret;
	}

	ret = reset_control_assert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert phy ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_assert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot assert axi master sticky reset\n");
		return ret;
	}

	ret = reset_control_assert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot assert power reset\n");
		return ret;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		return ret;
	}

	usleep_range(10000, 12000);

	/*
	 * Release the resets.  Note the order differs from the assert phase;
	 * each failure jumps to the label that re-asserts everything released
	 * up to that point.
	 */
	ret = reset_control_deassert(res->phy_ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy ahb reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		goto err_rst_phy;
	}

	ret = reset_control_deassert(res->pipe_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe reset\n");
		goto err_rst_pipe;
	}

	ret = reset_control_deassert(res->pipe_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pipe sticky reset\n");
		goto err_rst_pipe_sticky;
	}

	usleep_range(10000, 12000);

	ret = reset_control_deassert(res->axi_m_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master reset\n");
		goto err_rst_axi_m;
	}

	ret = reset_control_deassert(res->axi_m_sticky_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi master sticky reset\n");
		goto err_rst_axi_m_sticky;
	}

	ret = reset_control_deassert(res->axi_s_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi slave reset\n");
		goto err_rst_axi_s;
	}

	ret = reset_control_deassert(res->pwr_reset);
	if (ret) {
		dev_err(dev, "cannot deassert power reset\n");
		goto err_rst_pwr;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_rst_ahb;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret)
		goto err_clks;

	/* enable PCIe clocks and resets */
	/* NOTE(review): BIT(0) of PARF_PHY_CTRL presumably gates PHY power-down — confirm */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	/* Same BIT(4) tweak as qcom_pcie_init_2_7_0(); meaning not documented here */
	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

	/* Unwind: re-assert, in reverse, every reset released before the failure. */
err_clks:
	reset_control_assert(res->ahb_reset);
err_rst_ahb:
	reset_control_assert(res->pwr_reset);
err_rst_pwr:
	reset_control_assert(res->axi_s_reset);
err_rst_axi_s:
	reset_control_assert(res->axi_m_sticky_reset);
err_rst_axi_m_sticky:
	reset_control_assert(res->axi_m_reset);
err_rst_axi_m:
	reset_control_assert(res->pipe_sticky_reset);
err_rst_pipe_sticky:
	reset_control_assert(res->pipe_reset);
err_rst_pipe:
	reset_control_assert(res->phy_reset);
err_rst_phy:
	reset_control_assert(res->phy_ahb_reset);
	return ret;
}
 973
 974static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
 975{
 976        struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
 977        struct dw_pcie *pci = pcie->pci;
 978        struct device *dev = pci->dev;
 979        int i;
 980        const char *rst_names[] = { "axi_m", "axi_s", "pipe",
 981                                    "axi_m_sticky", "sticky",
 982                                    "ahb", "sleep", };
 983
 984        res->iface = devm_clk_get(dev, "iface");
 985        if (IS_ERR(res->iface))
 986                return PTR_ERR(res->iface);
 987
 988        res->axi_m_clk = devm_clk_get(dev, "axi_m");
 989        if (IS_ERR(res->axi_m_clk))
 990                return PTR_ERR(res->axi_m_clk);
 991
 992        res->axi_s_clk = devm_clk_get(dev, "axi_s");
 993        if (IS_ERR(res->axi_s_clk))
 994                return PTR_ERR(res->axi_s_clk);
 995
 996        res->ahb_clk = devm_clk_get(dev, "ahb");
 997        if (IS_ERR(res->ahb_clk))
 998                return PTR_ERR(res->ahb_clk);
 999
1000        res->aux_clk = devm_clk_get(dev, "aux");
1001        if (IS_ERR(res->aux_clk))
1002                return PTR_ERR(res->aux_clk);
1003
1004        for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
1005                res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
1006                if (IS_ERR(res->rst[i]))
1007                        return PTR_ERR(res->rst[i]);
1008        }
1009
1010        return 0;
1011}
1012
1013static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
1014{
1015        struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
1016
1017        clk_disable_unprepare(res->iface);
1018        clk_disable_unprepare(res->axi_m_clk);
1019        clk_disable_unprepare(res->axi_s_clk);
1020        clk_disable_unprepare(res->ahb_clk);
1021        clk_disable_unprepare(res->aux_clk);
1022}
1023
1024static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
1025{
1026        struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
1027        struct dw_pcie *pci = pcie->pci;
1028        struct device *dev = pci->dev;
1029        u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1030        int i, ret;
1031        u32 val;
1032
1033        for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1034                ret = reset_control_assert(res->rst[i]);
1035                if (ret) {
1036                        dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
1037                        return ret;
1038                }
1039        }
1040
1041        usleep_range(2000, 2500);
1042
1043        for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
1044                ret = reset_control_deassert(res->rst[i]);
1045                if (ret) {
1046                        dev_err(dev, "reset #%d deassert failed (%d)\n", i,
1047                                ret);
1048                        return ret;
1049                }
1050        }
1051
1052        /*
1053         * Don't have a way to see if the reset has completed.
1054         * Wait for some time.
1055         */
1056        usleep_range(2000, 2500);
1057
1058        ret = clk_prepare_enable(res->iface);
1059        if (ret) {
1060                dev_err(dev, "cannot prepare/enable core clock\n");
1061                goto err_clk_iface;
1062        }
1063
1064        ret = clk_prepare_enable(res->axi_m_clk);
1065        if (ret) {
1066                dev_err(dev, "cannot prepare/enable core clock\n");
1067                goto err_clk_axi_m;
1068        }
1069
1070        ret = clk_prepare_enable(res->axi_s_clk);
1071        if (ret) {
1072                dev_err(dev, "cannot prepare/enable axi slave clock\n");
1073                goto err_clk_axi_s;
1074        }
1075
1076        ret = clk_prepare_enable(res->ahb_clk);
1077        if (ret) {
1078                dev_err(dev, "cannot prepare/enable ahb clock\n");
1079                goto err_clk_ahb;
1080        }
1081
1082        ret = clk_prepare_enable(res->aux_clk);
1083        if (ret) {
1084                dev_err(dev, "cannot prepare/enable aux clock\n");
1085                goto err_clk_aux;
1086        }
1087
1088        writel(SLV_ADDR_SPACE_SZ,
1089                pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
1090
1091        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1092        val &= ~BIT(0);
1093        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1094
1095        writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1096
1097        writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
1098                | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
1099                AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
1100                pcie->parf + PCIE20_PARF_SYS_CTRL);
1101        writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
1102
1103        writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
1104        writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
1105        writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
1106
1107        val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
1108        val &= ~PCI_EXP_LNKCAP_ASPMS;
1109        writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
1110
1111        writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
1112                PCI_EXP_DEVCTL2);
1113
1114        return 0;
1115
1116err_clk_aux:
1117        clk_disable_unprepare(res->ahb_clk);
1118err_clk_ahb:
1119        clk_disable_unprepare(res->axi_s_clk);
1120err_clk_axi_s:
1121        clk_disable_unprepare(res->axi_m_clk);
1122err_clk_axi_m:
1123        clk_disable_unprepare(res->iface);
1124err_clk_iface:
1125        /*
1126         * Not checking for failure, will anyway return
1127         * the original failure in 'ret'.
1128         */
1129        for (i = 0; i < ARRAY_SIZE(res->rst); i++)
1130                reset_control_assert(res->rst[i]);
1131
1132        return ret;
1133}
1134
1135static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
1136{
1137        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1138        struct dw_pcie *pci = pcie->pci;
1139        struct device *dev = pci->dev;
1140        int ret;
1141
1142        res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
1143        if (IS_ERR(res->pci_reset))
1144                return PTR_ERR(res->pci_reset);
1145
1146        res->supplies[0].supply = "vdda";
1147        res->supplies[1].supply = "vddpe-3v3";
1148        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
1149                                      res->supplies);
1150        if (ret)
1151                return ret;
1152
1153        res->clks[0].id = "aux";
1154        res->clks[1].id = "cfg";
1155        res->clks[2].id = "bus_master";
1156        res->clks[3].id = "bus_slave";
1157        res->clks[4].id = "slave_q2a";
1158        res->clks[5].id = "tbu";
1159        if (of_device_is_compatible(dev->of_node, "qcom,pcie-sm8250")) {
1160                res->clks[6].id = "ddrss_sf_tbu";
1161                res->num_clks = 7;
1162        } else {
1163                res->num_clks = 6;
1164        }
1165
1166        ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
1167        if (ret < 0)
1168                return ret;
1169
1170        res->pipe_clk = devm_clk_get(dev, "pipe");
1171        return PTR_ERR_OR_ZERO(res->pipe_clk);
1172}
1173
1174static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
1175{
1176        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1177        struct dw_pcie *pci = pcie->pci;
1178        struct device *dev = pci->dev;
1179        u32 val;
1180        int ret;
1181
1182        ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
1183        if (ret < 0) {
1184                dev_err(dev, "cannot enable regulators\n");
1185                return ret;
1186        }
1187
1188        ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
1189        if (ret < 0)
1190                goto err_disable_regulators;
1191
1192        ret = reset_control_assert(res->pci_reset);
1193        if (ret < 0) {
1194                dev_err(dev, "cannot deassert pci reset\n");
1195                goto err_disable_clocks;
1196        }
1197
1198        usleep_range(1000, 1500);
1199
1200        ret = reset_control_deassert(res->pci_reset);
1201        if (ret < 0) {
1202                dev_err(dev, "cannot deassert pci reset\n");
1203                goto err_disable_clocks;
1204        }
1205
1206        ret = clk_prepare_enable(res->pipe_clk);
1207        if (ret) {
1208                dev_err(dev, "cannot prepare/enable pipe clock\n");
1209                goto err_disable_clocks;
1210        }
1211
1212        /* configure PCIe to RC mode */
1213        writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
1214
1215        /* enable PCIe clocks and resets */
1216        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
1217        val &= ~BIT(0);
1218        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
1219
1220        /* change DBI base address */
1221        writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
1222
1223        /* MAC PHY_POWERDOWN MUX DISABLE  */
1224        val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
1225        val &= ~BIT(29);
1226        writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
1227
1228        val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1229        val |= BIT(4);
1230        writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
1231
1232        if (IS_ENABLED(CONFIG_PCI_MSI)) {
1233                val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1234                val |= BIT(31);
1235                writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
1236        }
1237
1238        return 0;
1239err_disable_clocks:
1240        clk_bulk_disable_unprepare(res->num_clks, res->clks);
1241err_disable_regulators:
1242        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1243
1244        return ret;
1245}
1246
1247static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
1248{
1249        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1250
1251        clk_bulk_disable_unprepare(res->num_clks, res->clks);
1252        regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1253}
1254
1255static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
1256{
1257        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1258
1259        return clk_prepare_enable(res->pipe_clk);
1260}
1261
1262static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)
1263{
1264        struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1265
1266        clk_disable_unprepare(res->pipe_clk);
1267}
1268
1269static int qcom_pcie_link_up(struct dw_pcie *pci)
1270{
1271        u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1272        u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1273
1274        return !!(val & PCI_EXP_LNKSTA_DLLLA);
1275}
1276
/*
 * qcom_pcie_config_sid_sm8250 - program the PARF BDF-to-SID mapping table.
 * @pcie: host controller state
 *
 * Parses the device tree "iommu-map" property and fills the 256-entry
 * BDF_TO_SID hardware table.  Each 32-bit entry is hashed by the CRC8 of
 * the big-endian BDF; collisions are chained through the entry's NEXT
 * field.  Returns 0 on success (including when no "iommu-map" exists) or
 * -ENOMEM if the scratch copy of the map cannot be allocated.
 */
static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	/* Probe only for the property size; absence is not an error. */
	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	/* NOTE(review): return value unchecked — a short/failed read leaves zeros */
	of_property_read_u32_array(dev->of_node,
		"iommu-map", (u32 *)map, size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zero out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		/* NOTE(review): bdf is u32 but truncated to 16 bits here — confirm intended */
		u16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
			0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
1347
/*
 * qcom_pcie_host_init - dw_pcie_host_ops .host_init callback.
 * @pp: DesignWare port being initialized
 *
 * Sequences the per-IP-revision hooks: init -> phy_power_on -> optional
 * post_init -> release endpoint PERST# -> optional config_sid.  On any
 * failure, the steps completed so far are undone in reverse order via the
 * labels below.  Returns 0 on success or the failing hook's errno.
 */
static int qcom_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Keep the endpoint in reset while the controller comes up. */
	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init) {
		ret = pcie->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->ops->config_sid) {
		ret = pcie->ops->config_sid(pcie);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* config_sid failed after PERST# release: re-assert it first. */
	qcom_ep_reset_assert(pcie);
	if (pcie->ops->post_deinit)
		pcie->ops->post_deinit(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);

	return ret;
}
1391
/* Host-level hook handed to the common DesignWare host code. */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
};
1395
/*
 * Per-IP-revision operation tables, selected per SoC through
 * qcom_pcie_match[].  Rev. 1.9.0 reuses the 2.7.0 handlers and adds
 * SM8250 SID-table programming via config_sid.
 */

/* Qcom IP rev.: 2.1.0  Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0  Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2  Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.post_deinit = qcom_pcie_post_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0  Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3  Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0  Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.post_init = qcom_pcie_post_init_2_7_0,
	.post_deinit = qcom_pcie_post_deinit_2_7_0,
	.config_sid = qcom_pcie_config_sid_sm8250,
};
1458
/* Low-level DesignWare callbacks (qcom_pcie_start_link is defined earlier in this file). */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
1463
1464static int qcom_pcie_probe(struct platform_device *pdev)
1465{
1466        struct device *dev = &pdev->dev;
1467        struct pcie_port *pp;
1468        struct dw_pcie *pci;
1469        struct qcom_pcie *pcie;
1470        int ret;
1471
1472        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1473        if (!pcie)
1474                return -ENOMEM;
1475
1476        pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1477        if (!pci)
1478                return -ENOMEM;
1479
1480        pm_runtime_enable(dev);
1481        ret = pm_runtime_get_sync(dev);
1482        if (ret < 0)
1483                goto err_pm_runtime_put;
1484
1485        pci->dev = dev;
1486        pci->ops = &dw_pcie_ops;
1487        pp = &pci->pp;
1488
1489        pcie->pci = pci;
1490
1491        pcie->ops = of_device_get_match_data(dev);
1492
1493        pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
1494        if (IS_ERR(pcie->reset)) {
1495                ret = PTR_ERR(pcie->reset);
1496                goto err_pm_runtime_put;
1497        }
1498
1499        pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
1500        if (IS_ERR(pcie->parf)) {
1501                ret = PTR_ERR(pcie->parf);
1502                goto err_pm_runtime_put;
1503        }
1504
1505        pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
1506        if (IS_ERR(pcie->elbi)) {
1507                ret = PTR_ERR(pcie->elbi);
1508                goto err_pm_runtime_put;
1509        }
1510
1511        pcie->phy = devm_phy_optional_get(dev, "pciephy");
1512        if (IS_ERR(pcie->phy)) {
1513                ret = PTR_ERR(pcie->phy);
1514                goto err_pm_runtime_put;
1515        }
1516
1517        ret = pcie->ops->get_resources(pcie);
1518        if (ret)
1519                goto err_pm_runtime_put;
1520
1521        pp->ops = &qcom_pcie_dw_ops;
1522
1523        ret = phy_init(pcie->phy);
1524        if (ret) {
1525                pm_runtime_disable(&pdev->dev);
1526                goto err_pm_runtime_put;
1527        }
1528
1529        platform_set_drvdata(pdev, pcie);
1530
1531        ret = dw_pcie_host_init(pp);
1532        if (ret) {
1533                dev_err(dev, "cannot initialize host\n");
1534                pm_runtime_disable(&pdev->dev);
1535                goto err_pm_runtime_put;
1536        }
1537
1538        return 0;
1539
1540err_pm_runtime_put:
1541        pm_runtime_put(dev);
1542        pm_runtime_disable(dev);
1543
1544        return ret;
1545}
1546
/* OF match table: maps SoC compatibles to the per-IP-revision ops above. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-qcs404", .data = &ops_2_4_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &ops_2_7_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &ops_1_9_0 },
	{ }
};
1560
/*
 * Override the class code reported by the Qualcomm root ports so the PCI
 * core treats them as standard PCI-to-PCI bridges.
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* Known Qualcomm root port device IDs that need the class fixup. */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
1572
/*
 * Probe-only, built-in driver: no .remove callback, and sysfs
 * bind/unbind is suppressed to prevent manual unbinding.
 */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1582