linux/drivers/pci/controller/pci-tegra.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ 0x00
#define AFI_AXI_BAR1_SZ 0x04
#define AFI_AXI_BAR2_SZ 0x08
#define AFI_AXI_BAR3_SZ 0x0c
#define AFI_AXI_BAR4_SZ 0x10
#define AFI_AXI_BAR5_SZ 0x14

#define AFI_AXI_BAR0_START      0x18
#define AFI_AXI_BAR1_START      0x1c
#define AFI_AXI_BAR2_START      0x20
#define AFI_AXI_BAR3_START      0x24
#define AFI_AXI_BAR4_START      0x28
#define AFI_AXI_BAR5_START      0x2c

#define AFI_FPCI_BAR0   0x30
#define AFI_FPCI_BAR1   0x34
#define AFI_FPCI_BAR2   0x38
#define AFI_FPCI_BAR3   0x3c
#define AFI_FPCI_BAR4   0x40
#define AFI_FPCI_BAR5   0x44

#define AFI_CACHE_BAR0_SZ       0x48
#define AFI_CACHE_BAR0_ST       0x4c
#define AFI_CACHE_BAR1_SZ       0x50
#define AFI_CACHE_BAR1_ST       0x54

#define AFI_MSI_BAR_SZ          0x60
#define AFI_MSI_FPCI_BAR_ST     0x64
#define AFI_MSI_AXI_BAR_ST      0x68

#define AFI_MSI_VEC0            0x6c
#define AFI_MSI_VEC1            0x70
#define AFI_MSI_VEC2            0x74
#define AFI_MSI_VEC3            0x78
#define AFI_MSI_VEC4            0x7c
#define AFI_MSI_VEC5            0x80
#define AFI_MSI_VEC6            0x84
#define AFI_MSI_VEC7            0x88

#define AFI_MSI_EN_VEC0         0x8c
#define AFI_MSI_EN_VEC1         0x90
#define AFI_MSI_EN_VEC2         0x94
#define AFI_MSI_EN_VEC3         0x98
#define AFI_MSI_EN_VEC4         0x9c
#define AFI_MSI_EN_VEC5         0xa0
#define AFI_MSI_EN_VEC6         0xa4
#define AFI_MSI_EN_VEC7         0xa8

#define AFI_CONFIGURATION               0xac
#define  AFI_CONFIGURATION_EN_FPCI              (1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE       (1 << 31)

#define AFI_FPCI_ERROR_MASKS    0xb0

#define AFI_INTR_MASK           0xb4
#define  AFI_INTR_MASK_INT_MASK (1 << 0)
#define  AFI_INTR_MASK_MSI_MASK (1 << 8)

#define AFI_INTR_CODE                   0xb8
#define  AFI_INTR_CODE_MASK             0xf
#define  AFI_INTR_INI_SLAVE_ERROR       1
#define  AFI_INTR_INI_DECODE_ERROR      2
#define  AFI_INTR_TARGET_ABORT          3
#define  AFI_INTR_MASTER_ABORT          4
#define  AFI_INTR_INVALID_WRITE         5
#define  AFI_INTR_LEGACY                6
#define  AFI_INTR_FPCI_DECODE_ERROR     7
#define  AFI_INTR_AXI_DECODE_ERROR      8
#define  AFI_INTR_FPCI_TIMEOUT          9
#define  AFI_INTR_PE_PRSNT_SENSE        10
#define  AFI_INTR_PE_CLKREQ_SENSE       11
#define  AFI_INTR_CLKCLAMP_SENSE        12
#define  AFI_INTR_RDY4PD_SENSE          13
#define  AFI_INTR_P2P_ERROR             14

#define AFI_INTR_SIGNATURE      0xbc
#define AFI_UPPER_FPCI_ADDRESS  0xc0
#define AFI_SM_INTR_ENABLE      0xc4
#define  AFI_SM_INTR_INTA_ASSERT        (1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT        (1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT        (1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT        (1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT      (1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT      (1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT      (1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT      (1 << 7)

#define AFI_AFI_INTR_ENABLE             0xc8
#define  AFI_INTR_EN_INI_SLVERR         (1 << 0)
#define  AFI_INTR_EN_INI_DECERR         (1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR         (1 << 2)
#define  AFI_INTR_EN_TGT_DECERR         (1 << 3)
#define  AFI_INTR_EN_TGT_WRERR          (1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR       (1 << 5)
#define  AFI_INTR_EN_AXI_DECERR         (1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT       (1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE        (1 << 8)

#define AFI_PCIE_PME            0xf0

#define AFI_PCIE_CONFIG                                 0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)                (1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL               0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK       (0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE     (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420        (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1      (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401        (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL       (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222        (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1      (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211        (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411        (0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111        (0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)            (1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL           (0x7 << 29)

#define AFI_FUSE                        0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS      (1 << 2)

#define AFI_PEX0_CTRL                   0x110
#define AFI_PEX1_CTRL                   0x118
#define  AFI_PEX_CTRL_RST               (1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN         (1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN         (1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN       (1 << 4)

#define AFI_PLLE_CONTROL                0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0              0x168

#define RP_PRIV_XP_DL           0x00000494
#define  RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD       (0x1ff << 1)

#define RP_RX_HDR_LIMIT         0x00000e00
#define  RP_RX_HDR_LIMIT_PW_MASK        (0xff << 8)
#define  RP_RX_HDR_LIMIT_PW             (0x0e << 8)

#define RP_ECTL_2_R1    0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK           0xffff

#define RP_ECTL_4_R1    0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK       (0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT      16

#define RP_ECTL_5_R1    0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK      0xffffffff

#define RP_ECTL_6_R1    0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK      0xffffffff

#define RP_ECTL_2_R2    0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK   0xffff

#define RP_ECTL_4_R2    0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK       (0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT      16

#define RP_ECTL_5_R2    0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK      0xffffffff

#define RP_ECTL_6_R2    0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK      0xffffffff

#define RP_VEND_XP      0x00000f00
#define  RP_VEND_XP_DL_UP                       (1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK           (1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC      (1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK    (0xff << 18)

#define RP_VEND_CTL0    0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK  (0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH       (0x9 << 12)

#define RP_VEND_CTL1    0x00000f48
#define  RP_VEND_CTL1_ERPT      (1 << 13)

#define RP_VEND_XP_BIST 0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE     (1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC    0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT                (0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT                (0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK     (0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD          (0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE             (1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK      (0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD           (0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE              (1 << 31)

#define RP_LINK_CONTROL_STATUS                  0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE  0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK   0x3fff0000

#define RP_LINK_CONTROL_STATUS_2                0x000000b0

#define PADS_CTL_SEL            0x0000009c

#define PADS_CTL                0x000000a0
#define  PADS_CTL_IDDQ_1L       (1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L (1 << 10)

#define PADS_PLL_CTL_TEGRA20                    0x000000b8
#define PADS_PLL_CTL_TEGRA30                    0x000000b4
#define  PADS_PLL_CTL_RST_B4SM                  (1 << 1)
#define  PADS_PLL_CTL_LOCKDET                   (1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK               (0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML       (0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS      (1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL           (2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK             (0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10            (0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5             (1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN           (1 << 22)

#define PADS_REFCLK_CFG0                        0x000000c8
#define PADS_REFCLK_CFG1                        0x000000cc
#define PADS_REFCLK_BIAS                        0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT              2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT            7
#define PADS_REFCLK_CFG_PREDI_SHIFT             8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT              12 /* 15:12 */

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
        struct msi_controller chip;
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
        struct irq_domain *domain;
        struct mutex lock;
        void *virt;
        dma_addr_t phys;
        int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
        struct {
                u8 turnoff_bit;
                u8 ack_bit;
        } pme;
};

struct tegra_pcie_soc {
        unsigned int num_ports;
        const struct tegra_pcie_port_soc *ports;
        unsigned int msi_base_shift;
        unsigned long afi_pex2_ctrl;
        u32 pads_pll_ctl;
        u32 tx_ref_sel;
        u32 pads_refclk_cfg0;
        u32 pads_refclk_cfg1;
        u32 update_fc_threshold;
        bool has_pex_clkreq_en;
        bool has_pex_bias_ctrl;
        bool has_intr_prsnt_sense;
        bool has_cml_clk;
        bool has_gen2;
        bool force_pca_enable;
        bool program_uphy;
        bool update_clamp_threshold;
        bool program_deskew_time;
        bool raw_violation_fixup;
        bool update_fc_timer;
        bool has_cache_bars;
        struct {
                struct {
                        u32 rp_ectl_2_r1;
                        u32 rp_ectl_4_r1;
                        u32 rp_ectl_5_r1;
                        u32 rp_ectl_6_r1;
                        u32 rp_ectl_2_r2;
                        u32 rp_ectl_4_r2;
                        u32 rp_ectl_5_r2;
                        u32 rp_ectl_6_r2;
                } regs;
                bool enable;
        } ectl;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
        return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
        struct device *dev;

        void __iomem *pads;
        void __iomem *afi;
        void __iomem *cfg;
        int irq;

        struct resource cs;
        struct resource io;
        struct resource pio;
        struct resource mem;
        struct resource prefetch;
        struct resource busn;

        struct {
                resource_size_t mem;
                resource_size_t io;
        } offset;

        struct clk *pex_clk;
        struct clk *afi_clk;
        struct clk *pll_e;
        struct clk *cml_clk;

        struct reset_control *pex_rst;
        struct reset_control *afi_rst;
        struct reset_control *pcie_xrst;

        bool legacy_phy;
        struct phy *phy;

        struct tegra_msi msi;

        struct list_head ports;
        u32 xbar_config;

        struct regulator_bulk_data *supplies;
        unsigned int num_supplies;

        const struct tegra_pcie_soc *soc;
        struct dentry *debugfs;
};

struct tegra_pcie_port {
        struct tegra_pcie *pcie;
        struct device_node *np;
        struct list_head list;
        struct resource regs;
        void __iomem *base;
        unsigned int index;
        unsigned int lanes;

        struct phy **phys;

        struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
        struct list_head list;
        unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
                              unsigned long offset)
{
        writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
        return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
                               unsigned long offset)
{
        writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
        return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, making
 * sure that the address whose access generates the desired config
 * transaction falls within this 4 KiB region.
 */
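/*
 * For illustration (an arbitrarily chosen example, not from the TRM):
 * bus 0x01, devfn 0x00, where 0x104 encodes to
 * (0x100 << 16) | (0x01 << 16) | 0x04 = 0x01010004, i.e. the extended
 * register nibble lands in bits [27:24] and the low byte of the register
 * number in bits [7:0].
 */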
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
                                           unsigned int where)
{
        return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
               (PCI_FUNC(devfn) << 8) | (where & 0xff);
}

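/*
 * On the root bus, each root port's register aperture stands in for its
 * configuration space; for everything downstream, the 4 KiB config
 * window is retargeted at the FPCI page containing the requested
 * register before the mapped address is returned.
 */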
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
                                        unsigned int devfn,
                                        int where)
{
        struct tegra_pcie *pcie = bus->sysdata;
        void __iomem *addr = NULL;

        if (bus->number == 0) {
                unsigned int slot = PCI_SLOT(devfn);
                struct tegra_pcie_port *port;

                list_for_each_entry(port, &pcie->ports, list) {
                        if (port->index + 1 == slot) {
                                addr = port->base + (where & ~3);
                                break;
                        }
                }
        } else {
                unsigned int offset;
                u32 base;

                offset = tegra_pcie_conf_offset(bus->number, devfn, where);
                /*
                 * Move the 4 KiB window to the page containing the target
                 * offset within the FPCI region. AFI_FPCI_BAR0 holds the
                 * FPCI address shifted right by 8 bits, hence the >> 8.
                 */
                base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
                afi_writel(pcie, base, AFI_FPCI_BAR0);

                /* move to correct offset within the 4 KiB page */
                addr = pcie->cfg + (offset & (SZ_4K - 1));
        }

        return addr;
}

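/*
 * The root ports' own config registers (bus 0) are assumed to only
 * tolerate aligned 32-bit transactions, hence the read-modify-write
 * accessors there; downstream devices take the regular any-size path.
 */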
static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
                                  int where, int size, u32 *value)
{
        if (bus->number == 0)
                return pci_generic_config_read32(bus, devfn, where, size,
                                                 value);

        return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
                                   int where, int size, u32 value)
{
        if (bus->number == 0)
                return pci_generic_config_write32(bus, devfn, where, size,
                                                  value);

        return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
        .map_bus = tegra_pcie_map_bus,
        .read = tegra_pcie_config_read,
        .write = tegra_pcie_config_write,
};

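/*
 * The PEX2 control register offset differs between SoC generations, so
 * it comes from the per-SoC data rather than a fixed definition.
 */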
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        unsigned long ret = 0;

        switch (port->index) {
        case 0:
                ret = AFI_PEX0_CTRL;
                break;

        case 1:
                ret = AFI_PEX1_CTRL;
                break;

        case 2:
                ret = soc->afi_pex2_ctrl;
                break;
        }

        return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        unsigned long value;

        /* pulse reset signal */
        if (port->reset_gpio) {
                gpiod_set_value(port->reset_gpio, 1);
        } else {
                value = afi_readl(port->pcie, ctrl);
                value &= ~AFI_PEX_CTRL_RST;
                afi_writel(port->pcie, value, ctrl);
        }

        usleep_range(1000, 2000);

        if (port->reset_gpio) {
                gpiod_set_value(port->reset_gpio, 0);
        } else {
                value = afi_readl(port->pcie, ctrl);
                value |= AFI_PEX_CTRL_RST;
                afi_writel(port->pcie, value, ctrl);
        }
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        u32 value;

        /* Enable AER capability */
        value = readl(port->base + RP_VEND_CTL1);
        value |= RP_VEND_CTL1_ERPT;
        writel(value, port->base + RP_VEND_CTL1);

        /* Optimal settings to enhance bandwidth */
        value = readl(port->base + RP_VEND_XP);
        value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
        value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
        writel(value, port->base + RP_VEND_XP);

        /*
         * LTSSM will wait for DLLP to finish before entering L1 or L2,
         * to avoid truncation of PM messages which results in receiver errors
         */
        value = readl(port->base + RP_VEND_XP_BIST);
        value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
        writel(value, port->base + RP_VEND_XP_BIST);

        value = readl(port->base + RP_PRIV_MISC);
        value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
        value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

        if (soc->update_clamp_threshold) {
                value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
                                RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
                value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
                        RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
        }

        writel(value, port->base + RP_PRIV_MISC);
}

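/*
 * Program the per-SoC receiver overrides (CTLE, CDR control and
 * equalization) into the root port's ECTL registers.
 */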
static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        u32 value;

        value = readl(port->base + RP_ECTL_2_R1);
        value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_2_r1;
        writel(value, port->base + RP_ECTL_2_R1);

        value = readl(port->base + RP_ECTL_4_R1);
        value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_4_r1 <<
                                RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
        writel(value, port->base + RP_ECTL_4_R1);

        value = readl(port->base + RP_ECTL_5_R1);
        value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_5_r1;
        writel(value, port->base + RP_ECTL_5_R1);

        value = readl(port->base + RP_ECTL_6_R1);
        value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_6_r1;
        writel(value, port->base + RP_ECTL_6_R1);

        value = readl(port->base + RP_ECTL_2_R2);
        value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_2_r2;
        writel(value, port->base + RP_ECTL_2_R2);

        value = readl(port->base + RP_ECTL_4_R2);
        value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_4_r2 <<
                                RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
        writel(value, port->base + RP_ECTL_4_R2);

        value = readl(port->base + RP_ECTL_5_R2);
        value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_5_r2;
        writel(value, port->base + RP_ECTL_5_R2);

        value = readl(port->base + RP_ECTL_6_R2);
        value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
        value |= soc->ectl.regs.rp_ectl_6_r2;
        writel(value, port->base + RP_ECTL_6_R2);
}

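/*
 * Apply SoC-specific software workarounds for link stability and flow
 * control, and restrict the initially advertised link speed to Gen-1
 * (the link is retrained to Gen-2 after it has come up).
 */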
static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        u32 value;

        /*
         * Sometimes link speed change from Gen2 to Gen1 fails due to
         * instability in deskew logic on lane-0. Increase the deskew
         * retry time to resolve this issue.
         */
        if (soc->program_deskew_time) {
                value = readl(port->base + RP_VEND_CTL0);
                value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
                value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
                writel(value, port->base + RP_VEND_CTL0);
        }

        /* Fixup for read after write violation. */
        if (soc->raw_violation_fixup) {
                value = readl(port->base + RP_RX_HDR_LIMIT);
                value &= ~RP_RX_HDR_LIMIT_PW_MASK;
                value |= RP_RX_HDR_LIMIT_PW;
                writel(value, port->base + RP_RX_HDR_LIMIT);

                value = readl(port->base + RP_PRIV_XP_DL);
                value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
                writel(value, port->base + RP_PRIV_XP_DL);

                value = readl(port->base + RP_VEND_XP);
                value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
                value |= soc->update_fc_threshold;
                writel(value, port->base + RP_VEND_XP);
        }

        if (soc->update_fc_timer) {
                value = readl(port->base + RP_VEND_XP);
                value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
                value |= soc->update_fc_threshold;
                writel(value, port->base + RP_VEND_XP);
        }

        /*
         * The PCIe link doesn't come up with a few legacy PCIe endpoints
         * if the root port advertises both Gen-1 and Gen-2 speeds in
         * Tegra. Hence, the strategy followed here is to initially
         * advertise only Gen-1 and, after the link is up, retrain it to
         * Gen-2 speed.
         */
        value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
        value &= ~PCI_EXP_LNKSTA_CLS;
        value |= PCI_EXP_LNKSTA_CLS_2_5GB;
        writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        unsigned long value;

        /* enable reference clock */
        value = afi_readl(port->pcie, ctrl);
        value |= AFI_PEX_CTRL_REFCLK_EN;

        if (soc->has_pex_clkreq_en)
                value |= AFI_PEX_CTRL_CLKREQ_EN;

        value |= AFI_PEX_CTRL_OVERRIDE_EN;

        afi_writel(port->pcie, value, ctrl);

        tegra_pcie_port_reset(port);

        if (soc->force_pca_enable) {
                value = readl(port->base + RP_VEND_CTL2);
                value |= RP_VEND_CTL2_PCA_ENABLE;
                writel(value, port->base + RP_VEND_CTL2);
        }

        tegra_pcie_enable_rp_features(port);

        if (soc->ectl.enable)
                tegra_pcie_program_ectl_settings(port);

        tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
        const struct tegra_pcie_soc *soc = port->pcie->soc;
        unsigned long value;

        /* assert port reset */
        value = afi_readl(port->pcie, ctrl);
        value &= ~AFI_PEX_CTRL_RST;
        afi_writel(port->pcie, value, ctrl);

        /* disable reference clock */
        value = afi_readl(port->pcie, ctrl);

        if (soc->has_pex_clkreq_en)
                value &= ~AFI_PEX_CTRL_CLKREQ_EN;

        value &= ~AFI_PEX_CTRL_REFCLK_EN;
        afi_writel(port->pcie, value, ctrl);

        /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
        value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
        value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
        value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
        afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

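/*
 * Hand the devm-managed mapping and region back explicitly, so that a
 * port can be torn down (e.g. when its link never comes up) without
 * waiting for the whole device to go away.
 */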
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
        struct tegra_pcie *pcie = port->pcie;
        struct device *dev = pcie->dev;

        devm_iounmap(dev, port->base);
        devm_release_mem_region(dev, port->regs.start,
                                resource_size(&port->regs));
        list_del(&port->list);
        devm_kfree(dev, port);
}

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIE require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
{
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
        struct list_head *windows = &host->windows;
        struct device *dev = pcie->dev;
        int err;

        pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
        pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
        pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
        pci_add_resource(windows, &pcie->busn);

        err = devm_request_pci_bus_resources(dev, windows);
        if (err < 0) {
                pci_free_resource_list(windows);
                return err;
        }

        pci_remap_iospace(&pcie->pio, pcie->io.start);

        return 0;
}

static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
{
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
        struct list_head *windows = &host->windows;

        pci_unmap_iospace(&pcie->pio);
        pci_free_resource_list(windows);
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
        struct tegra_pcie *pcie = pdev->bus->sysdata;
        int irq;

        tegra_cpuidle_pcie_irqs_in_use();

        irq = of_irq_parse_and_map_pci(pdev, slot, pin);
        if (!irq)
                irq = pcie->irq;

        return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
        static const char * const err_msg[] = {
                "Unknown",
                "AXI slave error",
                "AXI decode error",
                "Target abort",
                "Master abort",
                "Invalid write",
                "Legacy interrupt",
                "Response decoding error",
                "AXI response decoding error",
                "Transaction timeout",
                "Slot present pin change",
                "Slot clock request change",
                "TMS clock ramp change",
                "TMS ready for power down",
                "Peer2Peer error",
        };
        struct tegra_pcie *pcie = arg;
        struct device *dev = pcie->dev;
        u32 code, signature;

        code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
        signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
        afi_writel(pcie, 0, AFI_INTR_CODE);

        if (code == AFI_INTR_LEGACY)
                return IRQ_NONE;

        if (code >= ARRAY_SIZE(err_msg))
                code = 0;

        /*
         * do not pollute kernel log with master abort reports since they
         * happen a lot during enumeration
         */
        if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
                dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
        else
                dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

        if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
            code == AFI_INTR_FPCI_DECODE_ERROR) {
                u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
                u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

                if (code == AFI_INTR_MASTER_ABORT)
                        dev_dbg(dev, "  FPCI address: %10llx\n", address);
                else
                        dev_err(dev, "  FPCI address: %10llx\n", address);
        }

        return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
        u32 fpci_bar, size, axi_address;

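        /*
         * Note: the AXI BAR size registers are programmed in units of
         * 4 KiB pages, hence the "size >> 12" conversions below.
         */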
        /* Bar 0: type 1 extended configuration space */
        size = resource_size(&pcie->cs);
        afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

        /* Bar 1: downstream IO bar */
        fpci_bar = 0xfdfc0000;
        size = resource_size(&pcie->io);
        axi_address = pcie->io.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

        /* Bar 2: prefetchable memory BAR */
        fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->prefetch);
        axi_address = pcie->prefetch.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

        /* Bar 3: non prefetchable memory BAR */
        fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->mem);
        axi_address = pcie->mem.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

        /* NULL out the remaining BARs as they are not used */
        afi_writel(pcie, 0, AFI_AXI_BAR4_START);
        afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR4);

        afi_writel(pcie, 0, AFI_AXI_BAR5_START);
        afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR5);

        if (pcie->soc->has_cache_bars) {
                /* map all upstream transactions as uncached */
                afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
                afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
                afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
                afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
        }

        /* MSI translations are setup only when needed */
        afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
        afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

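/* Poll the PADS PLL lock-detect bit; @timeout is given in milliseconds. */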
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
        const struct tegra_pcie_soc *soc = pcie->soc;
        u32 value;

        timeout = jiffies + msecs_to_jiffies(timeout);

        while (time_before(jiffies, timeout)) {
                value = pads_readl(pcie, soc->pads_pll_ctl);
                if (value & PADS_PLL_CTL_LOCKDET)
                        return 0;
        }

        return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        const struct tegra_pcie_soc *soc = pcie->soc;
        u32 value;
        int err;

        /* initialize internal PHY, enable up to 16 PCIE lanes */
        pads_writel(pcie, 0x0, PADS_CTL_SEL);

        /* override IDDQ to 1 on all 4 lanes */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /*
         * Set up the PHY PLL inputs: select PLLE output as refclock and
         * set the TX ref sel to div10 (not div5).
         */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
        value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* reset PLL */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        usleep_range(20, 100);

        /* take PLL out of reset */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value |= PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* wait for the PLL to lock */
        err = tegra_pcie_pll_wait(pcie, 500);
        if (err < 0) {
                dev_err(dev, "PLL failed to lock: %d\n", err);
                return err;
        }

        /* turn off IDDQ override */
        value = pads_readl(pcie, PADS_CTL);
        value &= ~PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /* enable TX/RX data */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
        pads_writel(pcie, value, PADS_CTL);

        return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc *soc = pcie->soc;
        u32 value;

        /* disable TX/RX data */
        value = pads_readl(pcie, PADS_CTL);
        value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
        pads_writel(pcie, value, PADS_CTL);

        /* override IDDQ */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /* reset PLL */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        usleep_range(20, 100);

        return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
        struct device *dev = port->pcie->dev;
        unsigned int i;
        int err;

        for (i = 0; i < port->lanes; i++) {
                err = phy_power_on(port->phys[i]);
                if (err < 0) {
                        dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
                        return err;
                }
        }

        return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
        struct device *dev = port->pcie->dev;
        unsigned int i;
        int err;

        for (i = 0; i < port->lanes; i++) {
                err = phy_power_off(port->phys[i]);
                if (err < 0) {
                        dev_err(dev, "failed to power off PHY#%u: %d\n", i,
                                err);
                        return err;
                }
        }

        return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct tegra_pcie_port *port;
        int err;

        if (pcie->legacy_phy) {
                if (pcie->phy)
                        err = phy_power_on(pcie->phy);
                else
                        err = tegra_pcie_phy_enable(pcie);

                if (err < 0)
                        dev_err(dev, "failed to power on PHY: %d\n", err);

                return err;
        }

        list_for_each_entry(port, &pcie->ports, list) {
                err = tegra_pcie_port_phy_power_on(port);
                if (err < 0) {
                        dev_err(dev,
                                "failed to power on PCIe port %u PHY: %d\n",
                                port->index, err);
                        return err;
                }
        }

        return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct tegra_pcie_port *port;
        int err;

        if (pcie->legacy_phy) {
                if (pcie->phy)
                        err = phy_power_off(pcie->phy);
                else
                        err = tegra_pcie_phy_disable(pcie);

                if (err < 0)
                        dev_err(dev, "failed to power off PHY: %d\n", err);

                return err;
        }

        list_for_each_entry(port, &pcie->ports, list) {
                err = tegra_pcie_port_phy_power_off(port);
                if (err < 0) {
                        dev_err(dev,
                                "failed to power off PCIe port %u PHY: %d\n",
                                port->index, err);
                        return err;
                }
        }

        return 0;
}

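/*
 * One-time AFI-level bring-up: PLLE power-down handshake, per-port lane
 * ownership and CLKREQ routing, the Gen-2 fuse override and error
 * interrupt unmasking.
 */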
static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc *soc = pcie->soc;
        struct tegra_pcie_port *port;
        unsigned long value;

        /* enable PLL power down */
        if (pcie->phy) {
                value = afi_readl(pcie, AFI_PLLE_CONTROL);
                value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
                value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
                afi_writel(pcie, value, AFI_PLLE_CONTROL);
        }

        /* power down PCIe slot clock bias pad */
        if (soc->has_pex_bias_ctrl)
                afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

        /* configure mode and disable all ports */
        value = afi_readl(pcie, AFI_PCIE_CONFIG);
        value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
        value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
        value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

        list_for_each_entry(port, &pcie->ports, list) {
                value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
                value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
        }

        afi_writel(pcie, value, AFI_PCIE_CONFIG);

        if (soc->has_gen2) {
                value = afi_readl(pcie, AFI_FUSE);
                value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
                afi_writel(pcie, value, AFI_FUSE);
        } else {
                value = afi_readl(pcie, AFI_FUSE);
                value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
                afi_writel(pcie, value, AFI_FUSE);
        }

        /* Disable AFI dynamic clock gating and enable PCIe */
        value = afi_readl(pcie, AFI_CONFIGURATION);
        value |= AFI_CONFIGURATION_EN_FPCI;
        value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
        afi_writel(pcie, value, AFI_CONFIGURATION);

        value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
                AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
                AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

        if (soc->has_intr_prsnt_sense)
                value |= AFI_INTR_EN_PRSNT_SENSE;

        afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
        afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

        /* don't enable MSI for now, only when needed */
        afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

        /* disable all exceptions */
        afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        const struct tegra_pcie_soc *soc = pcie->soc;
        int err;

        reset_control_assert(pcie->afi_rst);

        clk_disable_unprepare(pcie->pll_e);
        if (soc->has_cml_clk)
                clk_disable_unprepare(pcie->cml_clk);
        clk_disable_unprepare(pcie->afi_clk);

        if (!dev->pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

        err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
        if (err < 0)
                dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        const struct tegra_pcie_soc *soc = pcie->soc;
        int err;

        reset_control_assert(pcie->pcie_xrst);
        reset_control_assert(pcie->afi_rst);
        reset_control_assert(pcie->pex_rst);

        if (!dev->pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

        /* enable regulators */
        err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
        if (err < 0)
                dev_err(dev, "failed to enable regulators: %d\n", err);

        if (!dev->pm_domain) {
                err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
                if (err) {
                        dev_err(dev, "failed to power ungate: %d\n", err);
                        goto regulator_disable;
                }
                err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
                if (err) {
                        dev_err(dev, "failed to remove clamp: %d\n", err);
                        goto powergate;
                }
        }

        err = clk_prepare_enable(pcie->afi_clk);
        if (err < 0) {
                dev_err(dev, "failed to enable AFI clock: %d\n", err);
                goto powergate;
        }

        if (soc->has_cml_clk) {
                err = clk_prepare_enable(pcie->cml_clk);
                if (err < 0) {
                        dev_err(dev, "failed to enable CML clock: %d\n", err);
                        goto disable_afi_clk;
                }
        }

        err = clk_prepare_enable(pcie->pll_e);
        if (err < 0) {
                dev_err(dev, "failed to enable PLLE clock: %d\n", err);
                goto disable_cml_clk;
        }

        reset_control_deassert(pcie->afi_rst);

        return 0;

disable_cml_clk:
        if (soc->has_cml_clk)
                clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
        clk_disable_unprepare(pcie->afi_clk);
powergate:
        if (!dev->pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
        regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

        return err;
}

static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc *soc = pcie->soc;

        /* Configure the reference clock driver */
        pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

        if (soc->num_ports > 2)
                pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        const struct tegra_pcie_soc *soc = pcie->soc;

        pcie->pex_clk = devm_clk_get(dev, "pex");
        if (IS_ERR(pcie->pex_clk))
                return PTR_ERR(pcie->pex_clk);

        pcie->afi_clk = devm_clk_get(dev, "afi");
        if (IS_ERR(pcie->afi_clk))
                return PTR_ERR(pcie->afi_clk);

        pcie->pll_e = devm_clk_get(dev, "pll_e");
        if (IS_ERR(pcie->pll_e))
                return PTR_ERR(pcie->pll_e);

        if (soc->has_cml_clk) {
                pcie->cml_clk = devm_clk_get(dev, "cml");
                if (IS_ERR(pcie->cml_clk))
                        return PTR_ERR(pcie->cml_clk);
        }

        return 0;
}

static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;

        pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
        if (IS_ERR(pcie->pex_rst))
                return PTR_ERR(pcie->pex_rst);

        pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
        if (IS_ERR(pcie->afi_rst))
                return PTR_ERR(pcie->afi_rst);

        pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
        if (IS_ERR(pcie->pcie_xrst))
                return PTR_ERR(pcie->pcie_xrst);

        return 0;
}

static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        int err;

        pcie->phy = devm_phy_optional_get(dev, "pcie");
        if (IS_ERR(pcie->phy)) {
                err = PTR_ERR(pcie->phy);
                dev_err(dev, "failed to get PHY: %d\n", err);
                return err;
        }

        err = phy_init(pcie->phy);
        if (err < 0) {
                dev_err(dev, "failed to initialize PHY: %d\n", err);
                return err;
        }

        pcie->legacy_phy = true;

        return 0;
}

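/*
 * The PHY framework has no "optional get by index" helper, so build the
 * "<consumer>-<index>" name by hand and treat -ENODEV as "no PHY
 * specified".
 */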
static struct phy *devm_of_phy_optional_get_index(struct device *dev,
                                                  struct device_node *np,
                                                  const char *consumer,
                                                  unsigned int index)
{
        struct phy *phy;
        char *name;

        name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
        if (!name)
                return ERR_PTR(-ENOMEM);

        phy = devm_of_phy_get(dev, np, name);
        kfree(name);

        if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
                phy = NULL;

        return phy;
}

static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
{
        struct device *dev = port->pcie->dev;
        struct phy *phy;
        unsigned int i;
        int err;

        port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
        if (!port->phys)
                return -ENOMEM;

        for (i = 0; i < port->lanes; i++) {
                phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
                if (IS_ERR(phy)) {
                        dev_err(dev, "failed to get PHY#%u: %ld\n", i,
                                PTR_ERR(phy));
                        return PTR_ERR(phy);
                }

                err = phy_init(phy);
                if (err < 0) {
                        dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
                                err);
                        return err;
                }

                port->phys[i] = phy;
        }

        return 0;
}

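/*
 * SoCs without Gen-2 support, and device trees that still carry a
 * single "phys" entry, use the legacy one-PHY-for-everything path;
 * otherwise every lane of every port gets its own PHY.
 */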
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc *soc = pcie->soc;
        struct device_node *np = pcie->dev->of_node;
        struct tegra_pcie_port *port;
        int err;

        if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
                return tegra_pcie_phys_get_legacy(pcie);

        list_for_each_entry(port, &pcie->ports, list) {
                err = tegra_pcie_port_get_phys(port);
                if (err < 0)
                        return err;
        }

        return 0;
}

static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
{
        struct tegra_pcie_port *port;
        struct device *dev = pcie->dev;
        int err, i;

        if (pcie->legacy_phy) {
                err = phy_exit(pcie->phy);
                if (err < 0)
                        dev_err(dev, "failed to teardown PHY: %d\n", err);
                return;
        }

        list_for_each_entry(port, &pcie->ports, list) {
                for (i = 0; i < port->lanes; i++) {
                        err = phy_exit(port->phys[i]);
                        if (err < 0)
                                dev_err(dev, "failed to teardown PHY#%u: %d\n",
                                        i, err);
                }
        }
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *pads, *afi, *res;
        const struct tegra_pcie_soc *soc = pcie->soc;
        int err;

        err = tegra_pcie_clocks_get(pcie);
        if (err) {
                dev_err(dev, "failed to get clocks: %d\n", err);
                return err;
        }

        err = tegra_pcie_resets_get(pcie);
        if (err) {
                dev_err(dev, "failed to get resets: %d\n", err);
                return err;
        }

        if (soc->program_uphy) {
                err = tegra_pcie_phys_get(pcie);
                if (err < 0) {
                        dev_err(dev, "failed to get PHYs: %d\n", err);
                        return err;
                }
        }

        pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
        pcie->pads = devm_ioremap_resource(dev, pads);
        if (IS_ERR(pcie->pads)) {
                err = PTR_ERR(pcie->pads);
                goto phys_put;
        }

        afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
        pcie->afi = devm_ioremap_resource(dev, afi);
        if (IS_ERR(pcie->afi)) {
                err = PTR_ERR(pcie->afi);
                goto phys_put;
        }

        /* request configuration space, but remap later, on demand */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
        if (!res) {
                err = -EADDRNOTAVAIL;
                goto phys_put;
        }

        pcie->cs = *res;

        /* constrain configuration space to 4 KiB */
        pcie->cs.end = pcie->cs.start + SZ_4K - 1;

        pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
        if (IS_ERR(pcie->cfg)) {
                err = PTR_ERR(pcie->cfg);
                goto phys_put;
        }

        /* request interrupt */
        err = platform_get_irq_byname(pdev, "intr");
        if (err < 0) {
                dev_err(dev, "failed to get IRQ: %d\n", err);
                goto phys_put;
        }

        pcie->irq = err;

        err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
        if (err) {
                dev_err(dev, "failed to register IRQ: %d\n", err);
                goto phys_put;
        }

        return 0;

phys_put:
        if (soc->program_uphy)
                tegra_pcie_phys_put(pcie);
        return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
        const struct tegra_pcie_soc *soc = pcie->soc;

        if (pcie->irq > 0)
                free_irq(pcie->irq, pcie);

        if (soc->program_uphy)
                tegra_pcie_phys_put(pcie);

        return 0;
}

1586static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1587{
1588        struct tegra_pcie *pcie = port->pcie;
1589        const struct tegra_pcie_soc *soc = pcie->soc;
1590        int err;
1591        u32 val;
1592        u8 ack_bit;
1593
1594        val = afi_readl(pcie, AFI_PCIE_PME);
1595        val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1596        afi_writel(pcie, val, AFI_PCIE_PME);
1597
1598        ack_bit = soc->ports[port->index].pme.ack_bit;
1599        err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1600                                 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1601        if (err)
1602                dev_err(pcie->dev, "PME ACK not received on port %d\n",
1603                        port->index);
1604
1605        usleep_range(10000, 11000);
1606
1607        val = afi_readl(pcie, AFI_PCIE_PME);
1608        val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1609        afi_writel(pcie, val, AFI_PCIE_PME);
1610}
1611
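/*
 * Allocate the lowest free MSI vector from the chip->used bitmap under
 * chip->lock. INT_PCI_MSI_NR is 8 * 32 = 256 vectors, one bit per
 * position in the eight AFI_MSI_VEC* status registers.
 */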
1612static int tegra_msi_alloc(struct tegra_msi *chip)
1613{
1614        int msi;
1615
1616        mutex_lock(&chip->lock);
1617
1618        msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1619        if (msi < INT_PCI_MSI_NR)
1620                set_bit(msi, chip->used);
1621        else
1622                msi = -ENOSPC;
1623
1624        mutex_unlock(&chip->lock);
1625
1626        return msi;
1627}
1628
1629static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1630{
1631        struct device *dev = chip->chip.dev;
1632
1633        mutex_lock(&chip->lock);
1634
1635        if (!test_bit(irq, chip->used))
1636                dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1637        else
1638                clear_bit(irq, chip->used);
1639
1640        mutex_unlock(&chip->lock);
1641}
1642
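/*
 * All 256 MSI vectors funnel into a single interrupt line. Each of the
 * eight AFI_MSI_VEC* registers reports 32 pending vectors, so the
 * hardware IRQ number is i * 32 + offset. Pending bits are cleared by
 * writing them back, and each register is re-read until it drains.
 */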
1643static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1644{
1645        struct tegra_pcie *pcie = data;
1646        struct device *dev = pcie->dev;
1647        struct tegra_msi *msi = &pcie->msi;
1648        unsigned int i, processed = 0;
1649
1650        for (i = 0; i < 8; i++) {
1651                unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1652
1653                while (reg) {
1654                        unsigned int offset = find_first_bit(&reg, 32);
1655                        unsigned int index = i * 32 + offset;
1656                        unsigned int irq;
1657
1658                        /* clear the interrupt */
1659                        afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1660
1661                        irq = irq_find_mapping(msi->domain, index);
1662                        if (irq) {
1663                                if (test_bit(index, msi->used))
1664                                        generic_handle_irq(irq);
1665                                else
1666                                        dev_info(dev, "unhandled MSI\n");
1667                        } else {
1668                                /*
1669                                 * That's weird: nothing is mapped for
1670                                 * this vector, so just clear it.
1671                                 */
1672                                dev_info(dev, "unexpected MSI\n");
1673                        }
1674
1675                        /* see if there's any more pending in this vector */
1676                        reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1677
1678                        processed++;
1679                }
1680        }
1681
1682        return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1683}
1684
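/*
 * msi_controller callback: allocate a vector, map it into the MSI IRQ
 * domain and program the endpoint with the AFI's MSI target address.
 * The vector number itself serves as the message data, which is what
 * tegra_pcie_msi_irq() decodes on arrival.
 */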
1685static int tegra_msi_setup_irq(struct msi_controller *chip,
1686                               struct pci_dev *pdev, struct msi_desc *desc)
1687{
1688        struct tegra_msi *msi = to_tegra_msi(chip);
1689        struct msi_msg msg;
1690        unsigned int irq;
1691        int hwirq;
1692
1693        hwirq = tegra_msi_alloc(msi);
1694        if (hwirq < 0)
1695                return hwirq;
1696
1697        irq = irq_create_mapping(msi->domain, hwirq);
1698        if (!irq) {
1699                tegra_msi_free(msi, hwirq);
1700                return -EINVAL;
1701        }
1702
1703        irq_set_msi_desc(irq, desc);
1704
1705        msg.address_lo = lower_32_bits(msi->phys);
1706        msg.address_hi = upper_32_bits(msi->phys);
1707        msg.data = hwirq;
1708
1709        pci_write_msi_msg(irq, &msg);
1710
1711        return 0;
1712}
1713
1714static void tegra_msi_teardown_irq(struct msi_controller *chip,
1715                                   unsigned int irq)
1716{
1717        struct tegra_msi *msi = to_tegra_msi(chip);
1718        struct irq_data *d = irq_get_irq_data(irq);
1719        irq_hw_number_t hwirq = irqd_to_hwirq(d);
1720
1721        irq_dispose_mapping(irq);
1722        tegra_msi_free(msi, hwirq);
1723}
1724
1725static struct irq_chip tegra_msi_irq_chip = {
1726        .name = "Tegra PCIe MSI",
1727        .irq_enable = pci_msi_unmask_irq,
1728        .irq_disable = pci_msi_mask_irq,
1729        .irq_mask = pci_msi_mask_irq,
1730        .irq_unmask = pci_msi_unmask_irq,
1731};
1732
1733static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1734                         irq_hw_number_t hwirq)
1735{
1736        irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1737        irq_set_chip_data(irq, domain->host_data);
1738
1739        tegra_cpuidle_pcie_irqs_in_use();
1740
1741        return 0;
1742}
1743
1744static const struct irq_domain_ops msi_domain_ops = {
1745        .map = tegra_msi_map,
1746};
1747
1748static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1749{
1750        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1751        struct platform_device *pdev = to_platform_device(pcie->dev);
1752        struct tegra_msi *msi = &pcie->msi;
1753        struct device *dev = pcie->dev;
1754        int err;
1755
1756        mutex_init(&msi->lock);
1757
1758        msi->chip.dev = dev;
1759        msi->chip.setup_irq = tegra_msi_setup_irq;
1760        msi->chip.teardown_irq = tegra_msi_teardown_irq;
1761
1762        msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1763                                            &msi_domain_ops, &msi->chip);
1764        if (!msi->domain) {
1765                dev_err(dev, "failed to create IRQ domain\n");
1766                return -ENOMEM;
1767        }
1768
1769        err = platform_get_irq_byname(pdev, "msi");
1770        if (err < 0) {
1771                dev_err(dev, "failed to get IRQ: %d\n", err);
1772                goto free_irq_domain;
1773        }
1774
1775        msi->irq = err;
1776
1777        err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1778                          tegra_msi_irq_chip.name, pcie);
1779        if (err < 0) {
1780                dev_err(dev, "failed to request IRQ: %d\n", err);
1781                goto free_irq_domain;
1782        }
1783
1784        /*
1785         * Though the PCIe controller can address a >32-bit address space,
1786         * use a 32-bit coherent mask so that the MSI target address is
1787         * always 32 bits, for endpoints that only support 32-bit targets.
1788         */
1789        err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1790        if (err < 0) {
1791                dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1792                goto free_irq;
1793        }
1794
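        /*
         * Note: the page below is only ever used as a target address for
         * inbound MSI writes and is never accessed through the CPU, which is
         * why DMA_ATTR_NO_KERNEL_MAPPING can skip creating a kernel mapping
         * (msi->virt is then an opaque cookie, not a usable pointer).
         */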
1795        msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1796                                    DMA_ATTR_NO_KERNEL_MAPPING);
1797        if (!msi->virt) {
1798                dev_err(dev, "failed to allocate DMA memory for MSI\n");
1799                err = -ENOMEM;
1800                goto free_irq;
1801        }
1802
1803        host->msi = &msi->chip;
1804
1805        return 0;
1806
1807free_irq:
1808        free_irq(msi->irq, pcie);
1809free_irq_domain:
1810        irq_domain_remove(msi->domain);
1811        return err;
1812}
1813
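/*
 * Tell the AFI where the MSI target page allocated in
 * tegra_pcie_msi_setup() lives, so that inbound writes to it raise MSI
 * interrupts instead of reaching memory. AFI_MSI_BAR_SZ is expressed in
 * 4 KiB units, hence the value 1 for the single page.
 */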
1814static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1815{
1816        const struct tegra_pcie_soc *soc = pcie->soc;
1817        struct tegra_msi *msi = &pcie->msi;
1818        u32 reg;
1819
1820        afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1821        afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1822        /* this register is in 4K increments */
1823        afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1824
1825        /* enable all MSI vectors */
1826        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1827        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1828        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1829        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1830        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1831        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1832        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1833        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1834
1835        /* and unmask the MSI interrupt */
1836        reg = afi_readl(pcie, AFI_INTR_MASK);
1837        reg |= AFI_INTR_MASK_MSI_MASK;
1838        afi_writel(pcie, reg, AFI_INTR_MASK);
1839}
1840
1841static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1842{
1843        struct tegra_msi *msi = &pcie->msi;
1844        unsigned int i, irq;
1845
1846        dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1847                       DMA_ATTR_NO_KERNEL_MAPPING);
1848
1849        if (msi->irq > 0)
1850                free_irq(msi->irq, pcie);
1851
1852        for (i = 0; i < INT_PCI_MSI_NR; i++) {
1853                irq = irq_find_mapping(msi->domain, i);
1854                if (irq > 0)
1855                        irq_dispose_mapping(irq);
1856        }
1857
1858        irq_domain_remove(msi->domain);
1859}
1860
1861static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1862{
1863        u32 value;
1864
1865        /* mask the MSI interrupt */
1866        value = afi_readl(pcie, AFI_INTR_MASK);
1867        value &= ~AFI_INTR_MASK_MSI_MASK;
1868        afi_writel(pcie, value, AFI_INTR_MASK);
1869
1870        /* disable all MSI vectors */
1871        afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1872        afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1873        afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1874        afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1875        afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1876        afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1877        afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1878        afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1879
1880        return 0;
1881}
1882
1883static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1884{
1885        u32 value;
1886
1887        value = afi_readl(pcie, AFI_INTR_MASK);
1888        value &= ~AFI_INTR_MASK_INT_MASK;
1889        afi_writel(pcie, value, AFI_INTR_MASK);
1890}
1891
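/*
 * "lanes" packs one byte per root port, port 0 in the least significant
 * byte (see the "lanes |= value << (index << 3)" accumulation in
 * tegra_pcie_parse_dt()). For example, 0x010004 decodes as 4 lanes on
 * port 0, none on port 1 and 1 lane on port 2.
 */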
1892static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1893                                      u32 *xbar)
1894{
1895        struct device *dev = pcie->dev;
1896        struct device_node *np = dev->of_node;
1897
1898        if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1899                switch (lanes) {
1900                case 0x010004:
1901                        dev_info(dev, "4x1, 1x1 configuration\n");
1902                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1903                        return 0;
1904
1905                case 0x010102:
1906                        dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1907                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1908                        return 0;
1909
1910                case 0x010101:
1911                        dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1912                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1913                        return 0;
1914
1915                default:
1916                        dev_info(dev, "invalid lane configuration in DT, "
1917                                 "using default 2x1, 1x1, 1x1 "
1918                                 "configuration\n");
1919                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1920                        return 0;
1921                }
1922        } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1923                   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1924                switch (lanes) {
1925                case 0x0000104:
1926                        dev_info(dev, "4x1, 1x1 configuration\n");
1927                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1928                        return 0;
1929
1930                case 0x0000102:
1931                        dev_info(dev, "2x1, 1x1 configuration\n");
1932                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1933                        return 0;
1934                }
1935        } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1936                switch (lanes) {
1937                case 0x00000204:
1938                        dev_info(dev, "4x1, 2x1 configuration\n");
1939                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1940                        return 0;
1941
1942                case 0x00020202:
1943                        dev_info(dev, "2x3 configuration\n");
1944                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1945                        return 0;
1946
1947                case 0x00010104:
1948                        dev_info(dev, "4x1, 1x2 configuration\n");
1949                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1950                        return 0;
1951                }
1952        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1953                switch (lanes) {
1954                case 0x00000004:
1955                        dev_info(dev, "single-mode configuration\n");
1956                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1957                        return 0;
1958
1959                case 0x00000202:
1960                        dev_info(dev, "dual-mode configuration\n");
1961                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1962                        return 0;
1963                }
1964        }
1965
1966        return -EINVAL;
1967}
1968
1969/*
1970 * Check whether a given set of supplies is available in a device tree node.
1971 * This is used to check whether the new or the legacy device tree bindings
1972 * should be used.
1973 */
1974static bool of_regulator_bulk_available(struct device_node *np,
1975                                        struct regulator_bulk_data *supplies,
1976                                        unsigned int num_supplies)
1977{
1978        char property[32];
1979        unsigned int i;
1980
1981        for (i = 0; i < num_supplies; i++) {
1982                snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
1983
1984                if (of_find_property(np, property, NULL) == NULL)
1985                        return false;
1986        }
1987
1988        return true;
1989}
1990
1991/*
1992 * Old versions of the device tree binding for this device used a set of power
1993 * supplies that didn't match the hardware inputs. This happened to work for a
1994 * number of cases but is not future proof. However to preserve backwards-
1995 * compatibility with old device trees, this function will try to use the old
1996 * set of supplies.
1997 */
1998static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1999{
2000        struct device *dev = pcie->dev;
2001        struct device_node *np = dev->of_node;
2002
2003        if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
2004                pcie->num_supplies = 3;
2005        else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
2006                pcie->num_supplies = 2;
2007
2008        if (pcie->num_supplies == 0) {
2009                dev_err(dev, "device %pOF not supported in legacy mode\n", np);
2010                return -ENODEV;
2011        }
2012
2013        pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2014                                      sizeof(*pcie->supplies),
2015                                      GFP_KERNEL);
2016        if (!pcie->supplies)
2017                return -ENOMEM;
2018
2019        pcie->supplies[0].supply = "pex-clk";
2020        pcie->supplies[1].supply = "vdd";
2021
2022        if (pcie->num_supplies > 2)
2023                pcie->supplies[2].supply = "avdd";
2024
2025        return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
2026}
2027
2028/*
2029 * Obtains the list of regulators required for a particular generation of the
2030 * IP block.
2031 *
2032 * This would've been nice to do simply by providing static tables for use
2033 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
2034 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
2035 * and either seems to be optional depending on which ports are being used.
2036 */
2037static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2038{
2039        struct device *dev = pcie->dev;
2040        struct device_node *np = dev->of_node;
2041        unsigned int i = 0;
2042
2043        if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2044                pcie->num_supplies = 4;
2045
2046                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2047                                              sizeof(*pcie->supplies),
2048                                              GFP_KERNEL);
2049                if (!pcie->supplies)
2050                        return -ENOMEM;
2051
2052                pcie->supplies[i++].supply = "dvdd-pex";
2053                pcie->supplies[i++].supply = "hvdd-pex-pll";
2054                pcie->supplies[i++].supply = "hvdd-pex";
2055                pcie->supplies[i++].supply = "vddio-pexctl-aud";
2056        } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2057                pcie->num_supplies = 6;
2058
2059                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2060                                              sizeof(*pcie->supplies),
2061                                              GFP_KERNEL);
2062                if (!pcie->supplies)
2063                        return -ENOMEM;
2064
2065                pcie->supplies[i++].supply = "avdd-pll-uerefe";
2066                pcie->supplies[i++].supply = "hvddio-pex";
2067                pcie->supplies[i++].supply = "dvddio-pex";
2068                pcie->supplies[i++].supply = "dvdd-pex-pll";
2069                pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2070                pcie->supplies[i++].supply = "vddio-pex-ctl";
2071        } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2072                pcie->num_supplies = 7;
2073
2074                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2075                                              sizeof(*pcie->supplies),
2076                                              GFP_KERNEL);
2077                if (!pcie->supplies)
2078                        return -ENOMEM;
2079
2080                pcie->supplies[i++].supply = "avddio-pex";
2081                pcie->supplies[i++].supply = "dvddio-pex";
2082                pcie->supplies[i++].supply = "avdd-pex-pll";
2083                pcie->supplies[i++].supply = "hvdd-pex";
2084                pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2085                pcie->supplies[i++].supply = "vddio-pex-ctl";
2086                pcie->supplies[i++].supply = "avdd-pll-erefe";
2087        } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2088                bool need_pexa = false, need_pexb = false;
2089
2090                /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2091                if (lane_mask & 0x0f)
2092                        need_pexa = true;
2093
2094                /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2095                if (lane_mask & 0x30)
2096                        need_pexb = true;
2097
2098                pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2099                                         (need_pexb ? 2 : 0);
2100
2101                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2102                                              sizeof(*pcie->supplies),
2103                                              GFP_KERNEL);
2104                if (!pcie->supplies)
2105                        return -ENOMEM;
2106
2107                pcie->supplies[i++].supply = "avdd-pex-pll";
2108                pcie->supplies[i++].supply = "hvdd-pex";
2109                pcie->supplies[i++].supply = "vddio-pex-ctl";
2110                pcie->supplies[i++].supply = "avdd-plle";
2111
2112                if (need_pexa) {
2113                        pcie->supplies[i++].supply = "avdd-pexa";
2114                        pcie->supplies[i++].supply = "vdd-pexa";
2115                }
2116
2117                if (need_pexb) {
2118                        pcie->supplies[i++].supply = "avdd-pexb";
2119                        pcie->supplies[i++].supply = "vdd-pexb";
2120                }
2121        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2122                pcie->num_supplies = 5;
2123
2124                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2125                                              sizeof(*pcie->supplies),
2126                                              GFP_KERNEL);
2127                if (!pcie->supplies)
2128                        return -ENOMEM;
2129
2130                pcie->supplies[0].supply = "avdd-pex";
2131                pcie->supplies[1].supply = "vdd-pex";
2132                pcie->supplies[2].supply = "avdd-pex-pll";
2133                pcie->supplies[3].supply = "avdd-plle";
2134                pcie->supplies[4].supply = "vddio-pex-clk";
2135        }
2136
2137        if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2138                                        pcie->num_supplies))
2139                return devm_regulator_bulk_get(dev, pcie->num_supplies,
2140                                               pcie->supplies);
2141
2142        /*
2143         * If not all regulators are available for this new scheme, assume
2144         * that the device tree complies with an older version of the device
2145         * tree binding.
2146         */
2147        dev_info(dev, "using legacy DT binding for power supplies\n");
2148
2149        devm_kfree(dev, pcie->supplies);
2150        pcie->num_supplies = 0;
2151
2152        return tegra_pcie_get_legacy_regulators(pcie);
2153}
2154
2155static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2156{
2157        struct device *dev = pcie->dev;
2158        struct device_node *np = dev->of_node, *port;
2159        const struct tegra_pcie_soc *soc = pcie->soc;
2160        struct of_pci_range_parser parser;
2161        struct of_pci_range range;
2162        u32 lanes = 0, mask = 0;
2163        unsigned int lane = 0;
2164        struct resource res;
2165        int err;
2166
2167        if (of_pci_range_parser_init(&parser, np)) {
2168                dev_err(dev, "missing \"ranges\" property\n");
2169                return -EINVAL;
2170        }
2171
2172        for_each_of_pci_range(&parser, &range) {
2173                err = of_pci_range_to_resource(&range, np, &res);
2174                if (err < 0)
2175                        return err;
2176
2177                switch (res.flags & IORESOURCE_TYPE_BITS) {
2178                case IORESOURCE_IO:
2179                        /* Track the bus -> CPU I/O mapping offset. */
2180                        pcie->offset.io = res.start - range.pci_addr;
2181
2182                        memcpy(&pcie->pio, &res, sizeof(res));
2183                        pcie->pio.name = np->full_name;
2184
2185                        /*
2186                         * The Tegra PCIe host bridge uses this to program the
2187                         * mapping of the I/O space to the physical address,
2188                         * so we override the .start and .end fields here that
2189                         * of_pci_range_to_resource() converted to I/O space.
2190                         * We also set the IORESOURCE_MEM type to clarify that
2191                         * the resource is in the physical memory space.
2192                         */
2193                        pcie->io.start = range.cpu_addr;
2194                        pcie->io.end = range.cpu_addr + range.size - 1;
2195                        pcie->io.flags = IORESOURCE_MEM;
2196                        pcie->io.name = "I/O";
2197
2198                        memcpy(&res, &pcie->io, sizeof(res));
2199                        break;
2200
2201                case IORESOURCE_MEM:
2202                        /*
2203                         * Track the bus -> CPU memory mapping offset. This
2204                         * assumes that the prefetchable and non-prefetchable
2205                         * regions will be the last of type IORESOURCE_MEM in
2206                         * the ranges property.
2207                         */
2208                        pcie->offset.mem = res.start - range.pci_addr;
2209
2210                        if (res.flags & IORESOURCE_PREFETCH) {
2211                                memcpy(&pcie->prefetch, &res, sizeof(res));
2212                                pcie->prefetch.name = "prefetchable";
2213                        } else {
2214                                memcpy(&pcie->mem, &res, sizeof(res));
2215                                pcie->mem.name = "non-prefetchable";
2216                        }
2217                        break;
2218                }
2219        }
2220
2221        err = of_pci_parse_bus_range(np, &pcie->busn);
2222        if (err < 0) {
2223                dev_err(dev, "failed to parse bus-range property: %d\n", err);
2224                pcie->busn.name = np->name;
2225                pcie->busn.start = 0;
2226                pcie->busn.end = 0xff;
2227                pcie->busn.flags = IORESOURCE_BUS;
2228        }
2229
2230        /* parse root ports */
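        /*
         * Each root port is a child node of the controller. An illustrative
         * fragment (following the nvidia,tegra20-pcie binding; addresses
         * omitted for brevity):
         *
         *      pci@1,0 {
         *              device_type = "pci";
         *              reg = <0x000800 0 0 0 0>;
         *              nvidia,num-lanes = <2>;
         *      };
         *
         * PCI_SLOT() of the devfn encoded in "reg" yields the 1-based port
         * number, and "nvidia,num-lanes" feeds the packed "lanes" word used
         * for the xbar configuration below.
         */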
2231        for_each_child_of_node(np, port) {
2232                struct tegra_pcie_port *rp;
2233                unsigned int index;
2234                u32 value;
2235                char *label;
2236
2237                err = of_pci_get_devfn(port);
2238                if (err < 0) {
2239                        dev_err(dev, "failed to parse address: %d\n", err);
2240                        return err;
2241                }
2242
2243                index = PCI_SLOT(err);
2244
2245                if (index < 1 || index > soc->num_ports) {
2246                        dev_err(dev, "invalid port number: %u\n", index);
2247                        return -EINVAL;
2248                }
2249
2250                index--;
2251
2252                err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2253                if (err < 0) {
2254                        dev_err(dev, "failed to parse # of lanes: %d\n",
2255                                err);
2256                        return err;
2257                }
2258
2259                if (value > 16) {
2260                        dev_err(dev, "invalid # of lanes: %u\n", value);
2261                        return -EINVAL;
2262                }
2263
2264                lanes |= value << (index << 3);
2265
2266                if (!of_device_is_available(port)) {
2267                        lane += value;
2268                        continue;
2269                }
2270
2271                mask |= ((1 << value) - 1) << lane;
2272                lane += value;
2273
2274                rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2275                if (!rp)
2276                        return -ENOMEM;
2277
2278                err = of_address_to_resource(port, 0, &rp->regs);
2279                if (err < 0) {
2280                        dev_err(dev, "failed to parse address: %d\n", err);
2281                        return err;
2282                }
2283
2284                INIT_LIST_HEAD(&rp->list);
2285                rp->index = index;
2286                rp->lanes = value;
2287                rp->pcie = pcie;
2288                rp->np = port;
2289
2290                rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2291                if (IS_ERR(rp->base))
2292                        return PTR_ERR(rp->base);
2293
2294                label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2295                if (!label) {
2296                        dev_err(dev, "failed to create reset GPIO label\n");
2297                        return -ENOMEM;
2298                }
2299
2300                /*
2301                 * Returns -ENOENT if the reset-gpios property is absent;
2302                 * in that case, fall back to using the per-port AFI
2303                 * register to toggle the PERST# SFIO line.
2304                 */
2305                rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2306                                                             "reset-gpios", 0,
2307                                                             GPIOD_OUT_LOW,
2308                                                             label);
2309                if (IS_ERR(rp->reset_gpio)) {
2310                        if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2311                                rp->reset_gpio = NULL;
2312                        } else {
2313                                dev_err(dev, "failed to get reset GPIO: %ld\n",
2314                                        PTR_ERR(rp->reset_gpio));
2315                                return PTR_ERR(rp->reset_gpio);
2316                        }
2317                }
2318
2319                list_add_tail(&rp->list, &pcie->ports);
2320        }
2321
2322        err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2323        if (err < 0) {
2324                dev_err(dev, "invalid lane configuration\n");
2325                return err;
2326        }
2327
2328        err = tegra_pcie_get_regulators(pcie, mask);
2329        if (err < 0)
2330                return err;
2331
2332        return 0;
2333}
2334
2335/*
2336 * FIXME: If there are no PCIe cards attached, then calling this function
2337 * can result in the increase of the bootup time as there are big timeout
2338 * loops.
2339 */
2340#define TEGRA_PCIE_LINKUP_TIMEOUT       200     /* up to 1.2 seconds */
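/*
 * Worst case the check below takes 3 retries x 2 polling loops x 200
 * iterations x 1 ms minimum sleep = 1.2 s, which is where the figure in
 * the FIXME above comes from (usleep_range() may stretch it further).
 */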
2341static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2342{
2343        struct device *dev = port->pcie->dev;
2344        unsigned int retries = 3;
2345        unsigned long value;
2346
2347        /* override presence detection */
2348        value = readl(port->base + RP_PRIV_MISC);
2349        value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2350        value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2351        writel(value, port->base + RP_PRIV_MISC);
2352
2353        do {
2354                unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2355
2356                do {
2357                        value = readl(port->base + RP_VEND_XP);
2358
2359                        if (value & RP_VEND_XP_DL_UP)
2360                                break;
2361
2362                        usleep_range(1000, 2000);
2363                } while (--timeout);
2364
2365                if (!timeout) {
2366                        dev_dbg(dev, "link %u down, retrying\n", port->index);
2367                        goto retry;
2368                }
2369
2370                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2371
2372                do {
2373                        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2374
2375                        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2376                                return true;
2377
2378                        usleep_range(1000, 2000);
2379                } while (--timeout);
2380
2381retry:
2382                tegra_pcie_port_reset(port);
2383        } while (--retries);
2384
2385        return false;
2386}
2387
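/*
 * Move the link of every port up to 5 GT/s: program the target link
 * speed, wait for any training in progress to settle, then set the
 * Retrain Link bit and poll PCI_EXP_LNKSTA_LT until it clears or
 * LINK_RETRAIN_TIMEOUT expires.
 */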
2388static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2389{
2390        struct device *dev = pcie->dev;
2391        struct tegra_pcie_port *port;
2392        ktime_t deadline;
2393        u32 value;
2394
2395        list_for_each_entry(port, &pcie->ports, list) {
2396                /*
2397                 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2398                 * is not supported by Tegra. tegra_pcie_change_link_speed()
2399                 * is called only for Tegra chips which support Gen2.
2400                 * So there is no harm if the supported link speed is not verified.
2401                 */
2402                value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2403                value &= ~PCI_EXP_LNKSTA_CLS;
2404                value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2405                writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2406
2407                /*
2408                 * Poll until link comes back from recovery to avoid race
2409                 * condition.
2410                 */
2411                deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2412
2413                while (ktime_before(ktime_get(), deadline)) {
2414                        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2415                        if ((value & PCI_EXP_LNKSTA_LT) == 0)
2416                                break;
2417
2418                        usleep_range(2000, 3000);
2419                }
2420
2421                if (value & PCI_EXP_LNKSTA_LT)
2422                        dev_warn(dev, "PCIe port %u link is in recovery\n",
2423                                 port->index);
2424
2425                /* Retrain the link */
2426                value = readl(port->base + RP_LINK_CONTROL_STATUS);
2427                value |= PCI_EXP_LNKCTL_RL;
2428                writel(value, port->base + RP_LINK_CONTROL_STATUS);
2429
2430                deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2431
2432                while (ktime_before(ktime_get(), deadline)) {
2433                        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2434                        if ((value & PCI_EXP_LNKSTA_LT) == 0)
2435                                break;
2436
2437                        usleep_range(2000, 3000);
2438                }
2439
2440                if (value & PCI_EXP_LNKSTA_LT)
2441                        dev_err(dev, "failed to retrain link of port %u\n",
2442                                port->index);
2443        }
2444}
2445
2446static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2447{
2448        struct device *dev = pcie->dev;
2449        struct tegra_pcie_port *port, *tmp;
2450
2451        list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2452                dev_info(dev, "probing port %u, using %u lanes\n",
2453                         port->index, port->lanes);
2454
2455                tegra_pcie_port_enable(port);
2456        }
2457
2458        /* Start LTSSM from Tegra side */
2459        reset_control_deassert(pcie->pcie_xrst);
2460
2461        list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2462                if (tegra_pcie_port_check_link(port))
2463                        continue;
2464
2465                dev_info(dev, "link %u down, ignoring\n", port->index);
2466
2467                tegra_pcie_port_disable(port);
2468                tegra_pcie_port_free(port);
2469        }
2470
2471        if (pcie->soc->has_gen2)
2472                tegra_pcie_change_link_speed(pcie);
2473}
2474
2475static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2476{
2477        struct tegra_pcie_port *port, *tmp;
2478
2479        reset_control_assert(pcie->pcie_xrst);
2480
2481        list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2482                tegra_pcie_port_disable(port);
2483}
2484
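/*
 * Per-SoC port tables: for each root port, the AFI_PCIE_PME bit that
 * requests PME turn-off and the bit on which the ACK is reported back
 * (consumed by tegra_pcie_pme_turnoff()).
 */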
2485static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2486        { .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2487        { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2488};
2489
2490static const struct tegra_pcie_soc tegra20_pcie = {
2491        .num_ports = 2,
2492        .ports = tegra20_pcie_ports,
2493        .msi_base_shift = 0,
2494        .afi_pex2_ctrl = 0x128,
2495        .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2496        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2497        .pads_refclk_cfg0 = 0xfa5cfa5c,
2498        .has_pex_clkreq_en = false,
2499        .has_pex_bias_ctrl = false,
2500        .has_intr_prsnt_sense = false,
2501        .has_cml_clk = false,
2502        .has_gen2 = false,
2503        .force_pca_enable = false,
2504        .program_uphy = true,
2505        .update_clamp_threshold = false,
2506        .program_deskew_time = false,
2507        .raw_violation_fixup = false,
2508        .update_fc_timer = false,
2509        .has_cache_bars = true,
2510        .ectl.enable = false,
2511};
2512
2513static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2514        { .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2515        { .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2516        { .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2517};
2518
2519static const struct tegra_pcie_soc tegra30_pcie = {
2520        .num_ports = 3,
2521        .ports = tegra30_pcie_ports,
2522        .msi_base_shift = 8,
2523        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2524        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2525        .pads_refclk_cfg0 = 0xfa5cfa5c,
2526        .pads_refclk_cfg1 = 0xfa5cfa5c,
2527        .has_pex_clkreq_en = true,
2528        .has_pex_bias_ctrl = true,
2529        .has_intr_prsnt_sense = true,
2530        .has_cml_clk = true,
2531        .has_gen2 = false,
2532        .force_pca_enable = false,
2533        .program_uphy = true,
2534        .update_clamp_threshold = false,
2535        .program_deskew_time = false,
2536        .raw_violation_fixup = false,
2537        .update_fc_timer = false,
2538        .has_cache_bars = false,
2539        .ectl.enable = false,
2540};
2541
2542static const struct tegra_pcie_soc tegra124_pcie = {
2543        .num_ports = 2,
2544        .ports = tegra20_pcie_ports,
2545        .msi_base_shift = 8,
2546        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2547        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2548        .pads_refclk_cfg0 = 0x44ac44ac,
2549        /* FC threshold is bit[25:18] */
2550        .update_fc_threshold = 0x03fc0000,
2551        .has_pex_clkreq_en = true,
2552        .has_pex_bias_ctrl = true,
2553        .has_intr_prsnt_sense = true,
2554        .has_cml_clk = true,
2555        .has_gen2 = true,
2556        .force_pca_enable = false,
2557        .program_uphy = true,
2558        .update_clamp_threshold = true,
2559        .program_deskew_time = false,
2560        .raw_violation_fixup = true,
2561        .update_fc_timer = false,
2562        .has_cache_bars = false,
2563        .ectl.enable = false,
2564};
2565
2566static const struct tegra_pcie_soc tegra210_pcie = {
2567        .num_ports = 2,
2568        .ports = tegra20_pcie_ports,
2569        .msi_base_shift = 8,
2570        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2571        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2572        .pads_refclk_cfg0 = 0x90b890b8,
2573        /* FC threshold is bit[25:18] */
2574        .update_fc_threshold = 0x01800000,
2575        .has_pex_clkreq_en = true,
2576        .has_pex_bias_ctrl = true,
2577        .has_intr_prsnt_sense = true,
2578        .has_cml_clk = true,
2579        .has_gen2 = true,
2580        .force_pca_enable = true,
2581        .program_uphy = true,
2582        .update_clamp_threshold = true,
2583        .program_deskew_time = true,
2584        .raw_violation_fixup = false,
2585        .update_fc_timer = true,
2586        .has_cache_bars = false,
2587        .ectl = {
2588                .regs = {
2589                        .rp_ectl_2_r1 = 0x0000000f,
2590                        .rp_ectl_4_r1 = 0x00000067,
2591                        .rp_ectl_5_r1 = 0x55010000,
2592                        .rp_ectl_6_r1 = 0x00000001,
2593                        .rp_ectl_2_r2 = 0x0000008f,
2594                        .rp_ectl_4_r2 = 0x000000c7,
2595                        .rp_ectl_5_r2 = 0x55010000,
2596                        .rp_ectl_6_r2 = 0x00000001,
2597                },
2598                .enable = true,
2599        },
2600};
2601
2602static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2603        { .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2604        { .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2605        { .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2606};
2607
2608static const struct tegra_pcie_soc tegra186_pcie = {
2609        .num_ports = 3,
2610        .ports = tegra186_pcie_ports,
2611        .msi_base_shift = 8,
2612        .afi_pex2_ctrl = 0x19c,
2613        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2614        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2615        .pads_refclk_cfg0 = 0x80b880b8,
2616        .pads_refclk_cfg1 = 0x000480b8,
2617        .has_pex_clkreq_en = true,
2618        .has_pex_bias_ctrl = true,
2619        .has_intr_prsnt_sense = true,
2620        .has_cml_clk = false,
2621        .has_gen2 = true,
2622        .force_pca_enable = false,
2623        .program_uphy = false,
2624        .update_clamp_threshold = false,
2625        .program_deskew_time = false,
2626        .raw_violation_fixup = false,
2627        .update_fc_timer = false,
2628        .has_cache_bars = false,
2629        .ectl.enable = false,
2630};
2631
2632static const struct of_device_id tegra_pcie_of_match[] = {
2633        { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2634        { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2635        { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2636        { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2637        { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2638        { },
2639};
2640
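/*
 * The debugfs file created below ends up at /sys/kernel/debug/pcie/ports
 * and lists the link state of every port. Illustrative output with the
 * link up and active on port 0:
 *
 *      Index  Status
 *       0     up, active
 */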
2641static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2642{
2643        struct tegra_pcie *pcie = s->private;
2644
2645        if (list_empty(&pcie->ports))
2646                return NULL;
2647
2648        seq_puts(s, "Index  Status\n");
2649
2650        return seq_list_start(&pcie->ports, *pos);
2651}
2652
2653static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2654{
2655        struct tegra_pcie *pcie = s->private;
2656
2657        return seq_list_next(v, &pcie->ports, pos);
2658}
2659
2660static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2661{
2662}
2663
2664static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2665{
2666        bool up = false, active = false;
2667        struct tegra_pcie_port *port;
2668        unsigned int value;
2669
2670        port = list_entry(v, struct tegra_pcie_port, list);
2671
2672        value = readl(port->base + RP_VEND_XP);
2673
2674        if (value & RP_VEND_XP_DL_UP)
2675                up = true;
2676
2677        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2678
2679        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2680                active = true;
2681
2682        seq_printf(s, "%2u     ", port->index);
2683
2684        if (up)
2685                seq_puts(s, "up");
2686
2687        if (active) {
2688                if (up)
2689                        seq_puts(s, ", ");
2690
2691                seq_puts(s, "active");
2692        }
2693
2694        seq_putc(s, '\n');
2695        return 0;
2696}
2697
2698static const struct seq_operations tegra_pcie_ports_seq_ops = {
2699        .start = tegra_pcie_ports_seq_start,
2700        .next = tegra_pcie_ports_seq_next,
2701        .stop = tegra_pcie_ports_seq_stop,
2702        .show = tegra_pcie_ports_seq_show,
2703};
2704
2705static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2706{
2707        struct tegra_pcie *pcie = inode->i_private;
2708        struct seq_file *s;
2709        int err;
2710
2711        err = seq_open(file, &tegra_pcie_ports_seq_ops);
2712        if (err)
2713                return err;
2714
2715        s = file->private_data;
2716        s->private = pcie;
2717
2718        return 0;
2719}
2720
2721static const struct file_operations tegra_pcie_ports_ops = {
2722        .owner = THIS_MODULE,
2723        .open = tegra_pcie_ports_open,
2724        .read = seq_read,
2725        .llseek = seq_lseek,
2726        .release = seq_release,
2727};
2728
2729static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2730{
2731        debugfs_remove_recursive(pcie->debugfs);
2732        pcie->debugfs = NULL;
2733}
2734
2735static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2736{
2737        struct dentry *file;
2738
2739        pcie->debugfs = debugfs_create_dir("pcie", NULL);
2740        if (!pcie->debugfs)
2741                return -ENOMEM;
2742
2743        file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2744                                   pcie, &tegra_pcie_ports_ops);
2745        if (!file)
2746                goto remove;
2747
2748        return 0;
2749
2750remove:
2751        tegra_pcie_debugfs_exit(pcie);
2752        return -ENOMEM;
2753}
2754
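/*
 * Probe order: parse the DT, acquire clocks, resets, PHYs and the AFI,
 * pads and config-space resources, set up MSI, power up via runtime PM,
 * then scan and populate the root bus. The labels at the bottom unwind
 * these steps in reverse on failure.
 */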
2755static int tegra_pcie_probe(struct platform_device *pdev)
2756{
2757        struct device *dev = &pdev->dev;
2758        struct pci_host_bridge *host;
2759        struct tegra_pcie *pcie;
2760        struct pci_bus *child;
2761        int err;
2762
2763        host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2764        if (!host)
2765                return -ENOMEM;
2766
2767        pcie = pci_host_bridge_priv(host);
2768        host->sysdata = pcie;
2769        platform_set_drvdata(pdev, pcie);
2770
2771        pcie->soc = of_device_get_match_data(dev);
2772        INIT_LIST_HEAD(&pcie->ports);
2773        pcie->dev = dev;
2774
2775        err = tegra_pcie_parse_dt(pcie);
2776        if (err < 0)
2777                return err;
2778
2779        err = tegra_pcie_get_resources(pcie);
2780        if (err < 0) {
2781                dev_err(dev, "failed to request resources: %d\n", err);
2782                return err;
2783        }
2784
2785        err = tegra_pcie_msi_setup(pcie);
2786        if (err < 0) {
2787                dev_err(dev, "failed to enable MSI support: %d\n", err);
2788                goto put_resources;
2789        }
2790
2791        pm_runtime_enable(pcie->dev);
2792        err = pm_runtime_get_sync(pcie->dev);
2793        if (err < 0) {
2794                dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2795                goto pm_runtime_put;
2796        }
2797
2798        err = tegra_pcie_request_resources(pcie);
2799        if (err)
2800                goto pm_runtime_put;
2801
2802        host->busnr = pcie->busn.start;
2803        host->dev.parent = &pdev->dev;
2804        host->ops = &tegra_pcie_ops;
2805        host->map_irq = tegra_pcie_map_irq;
2806        host->swizzle_irq = pci_common_swizzle;
2807
2808        err = pci_scan_root_bus_bridge(host);
2809        if (err < 0) {
2810                dev_err(dev, "failed to register host: %d\n", err);
2811                goto free_resources;
2812        }
2813
2814        pci_bus_size_bridges(host->bus);
2815        pci_bus_assign_resources(host->bus);
2816
2817        list_for_each_entry(child, &host->bus->children, node)
2818                pcie_bus_configure_settings(child);
2819
2820        pci_bus_add_devices(host->bus);
2821
2822        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2823                err = tegra_pcie_debugfs_init(pcie);
2824                if (err < 0)
2825                        dev_err(dev, "failed to setup debugfs: %d\n", err);
2826        }
2827
2828        return 0;
2829
2830free_resources:
2831        tegra_pcie_free_resources(pcie);
2832pm_runtime_put:
2833        pm_runtime_put_sync(pcie->dev);
2834        pm_runtime_disable(pcie->dev);
2835teardown_msi:
2836        tegra_pcie_msi_teardown(pcie);
2837put_resources:
2838        tegra_pcie_put_resources(pcie);
2839        return err;
2840}
2841
2842static int tegra_pcie_remove(struct platform_device *pdev)
2843{
2844        struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2845        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2846        struct tegra_pcie_port *port, *tmp;
2847
2848        if (IS_ENABLED(CONFIG_DEBUG_FS))
2849                tegra_pcie_debugfs_exit(pcie);
2850
2851        pci_stop_root_bus(host->bus);
2852        pci_remove_root_bus(host->bus);
2853        tegra_pcie_free_resources(pcie);
2854        pm_runtime_put_sync(pcie->dev);
2855        pm_runtime_disable(pcie->dev);
2856
2857        if (IS_ENABLED(CONFIG_PCI_MSI))
2858                tegra_pcie_msi_teardown(pcie);
2859
2860        tegra_pcie_put_resources(pcie);
2861
2862        list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2863                tegra_pcie_port_free(port);
2864
2865        return 0;
2866}
2867
2868static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2869{
2870        struct tegra_pcie *pcie = dev_get_drvdata(dev);
2871        struct tegra_pcie_port *port;
2872        int err;
2873
2874        list_for_each_entry(port, &pcie->ports, list)
2875                tegra_pcie_pme_turnoff(port);
2876
2877        tegra_pcie_disable_ports(pcie);
2878
2879        /*
2880         * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2881         * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2882         */
2883        tegra_pcie_disable_interrupts(pcie);
2884
2885        if (pcie->soc->program_uphy) {
2886                err = tegra_pcie_phy_power_off(pcie);
2887                if (err < 0)
2888                        dev_err(dev, "failed to power off PHY(s): %d\n", err);
2889        }
2890
2891        reset_control_assert(pcie->pex_rst);
2892        clk_disable_unprepare(pcie->pex_clk);
2893
2894        if (IS_ENABLED(CONFIG_PCI_MSI))
2895                tegra_pcie_disable_msi(pcie);
2896
2897        pinctrl_pm_select_idle_state(dev);
2898        tegra_pcie_power_off(pcie);
2899
2900        return 0;
2901}
2902
2903static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2904{
2905        struct tegra_pcie *pcie = dev_get_drvdata(dev);
2906        int err;
2907
2908        err = tegra_pcie_power_on(pcie);
2909        if (err) {
2910                dev_err(dev, "failed to power on Tegra PCIe: %d\n", err);
2911                return err;
2912        }
2913
2914        err = pinctrl_pm_select_default_state(dev);
2915        if (err < 0) {
2916                dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2917                goto poweroff;
2918        }
2919
2920        tegra_pcie_enable_controller(pcie);
2921        tegra_pcie_setup_translations(pcie);
2922
2923        if (IS_ENABLED(CONFIG_PCI_MSI))
2924                tegra_pcie_enable_msi(pcie);
2925
2926        err = clk_prepare_enable(pcie->pex_clk);
2927        if (err) {
2928                dev_err(dev, "failed to enable PEX clock: %d\n", err);
2929                goto pex_dpd_enable;
2930        }
2931
2932        reset_control_deassert(pcie->pex_rst);
2933
2934        if (pcie->soc->program_uphy) {
2935                err = tegra_pcie_phy_power_on(pcie);
2936                if (err < 0) {
2937                        dev_err(dev, "failed to power on PHY(s): %d\n", err);
2938                        goto disable_pex_clk;
2939                }
2940        }
2941
2942        tegra_pcie_apply_pad_settings(pcie);
2943        tegra_pcie_enable_ports(pcie);
2944
2945        return 0;
2946
2947disable_pex_clk:
2948        reset_control_assert(pcie->pex_rst);
2949        clk_disable_unprepare(pcie->pex_clk);
2950pex_dpd_enable:
2951        pinctrl_pm_select_idle_state(dev);
2952poweroff:
2953        tegra_pcie_power_off(pcie);
2954
2955        return err;
2956}
2957
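/*
 * The same suspend/resume pair serves both runtime PM and the noirq
 * phase of system sleep. tegra_pcie_pm_suspend() broadcasts PME turn-off
 * to all ports before powering the controller down.
 */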
2958static const struct dev_pm_ops tegra_pcie_pm_ops = {
2959        SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2960        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2961                                      tegra_pcie_pm_resume)
2962};
2963
2964static struct platform_driver tegra_pcie_driver = {
2965        .driver = {
2966                .name = "tegra-pcie",
2967                .of_match_table = tegra_pcie_of_match,
2968                .suppress_bind_attrs = true,
2969                .pm = &tegra_pcie_pm_ops,
2970        },
2971        .probe = tegra_pcie_probe,
2972        .remove = tegra_pcie_remove,
2973};
2974module_platform_driver(tegra_pcie_driver);
2975MODULE_LICENSE("GPL");
2976