/* linux/drivers/pci/host/pci-tegra.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCIe host controller driver for Tegra SoCs
   4 *
   5 * Copyright (c) 2010, CompuLab, Ltd.
   6 * Author: Mike Rapoport <mike@compulab.co.il>
   7 *
   8 * Based on NVIDIA PCIe driver
   9 * Copyright (c) 2008-2009, NVIDIA Corporation.
  10 *
  11 * Bits taken from arch/arm/mach-dove/pcie.c
  12 *
  13 * Author: Thierry Reding <treding@nvidia.com>
  14 */
  15
  16#include <linux/clk.h>
  17#include <linux/debugfs.h>
  18#include <linux/delay.h>
  19#include <linux/export.h>
  20#include <linux/interrupt.h>
  21#include <linux/iopoll.h>
  22#include <linux/irq.h>
  23#include <linux/irqdomain.h>
  24#include <linux/kernel.h>
  25#include <linux/init.h>
  26#include <linux/module.h>
  27#include <linux/msi.h>
  28#include <linux/of_address.h>
  29#include <linux/of_pci.h>
  30#include <linux/of_platform.h>
  31#include <linux/pci.h>
  32#include <linux/phy/phy.h>
  33#include <linux/platform_device.h>
  34#include <linux/reset.h>
  35#include <linux/sizes.h>
  36#include <linux/slab.h>
  37#include <linux/vmalloc.h>
  38#include <linux/regulator/consumer.h>
  39
  40#include <soc/tegra/cpuidle.h>
  41#include <soc/tegra/pmc.h>
  42
/* total MSI vectors: 8 vector registers x 32 bits each */
#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

/* AXI BAR sizes, one register per BAR */
#define AFI_AXI_BAR0_SZ 0x00
#define AFI_AXI_BAR1_SZ 0x04
#define AFI_AXI_BAR2_SZ 0x08
#define AFI_AXI_BAR3_SZ 0x0c
#define AFI_AXI_BAR4_SZ 0x10
#define AFI_AXI_BAR5_SZ 0x14

/* AXI BAR start addresses */
#define AFI_AXI_BAR0_START      0x18
#define AFI_AXI_BAR1_START      0x1c
#define AFI_AXI_BAR2_START      0x20
#define AFI_AXI_BAR3_START      0x24
#define AFI_AXI_BAR4_START      0x28
#define AFI_AXI_BAR5_START      0x2c

/* FPCI-side base addresses the AXI BARs translate to */
#define AFI_FPCI_BAR0   0x30
#define AFI_FPCI_BAR1   0x34
#define AFI_FPCI_BAR2   0x38
#define AFI_FPCI_BAR3   0x3c
#define AFI_FPCI_BAR4   0x40
#define AFI_FPCI_BAR5   0x44

/* cacheable-region BARs (zeroed in tegra_pcie_setup_translations) */
#define AFI_CACHE_BAR0_SZ       0x48
#define AFI_CACHE_BAR0_ST       0x4c
#define AFI_CACHE_BAR1_SZ       0x50
#define AFI_CACHE_BAR1_ST       0x54

/* MSI memory window translation */
#define AFI_MSI_BAR_SZ          0x60
#define AFI_MSI_FPCI_BAR_ST     0x64
#define AFI_MSI_AXI_BAR_ST      0x68

/* MSI vector status registers (32 vectors each) */
#define AFI_MSI_VEC0            0x6c
#define AFI_MSI_VEC1            0x70
#define AFI_MSI_VEC2            0x74
#define AFI_MSI_VEC3            0x78
#define AFI_MSI_VEC4            0x7c
#define AFI_MSI_VEC5            0x80
#define AFI_MSI_VEC6            0x84
#define AFI_MSI_VEC7            0x88

/* MSI vector enable registers */
#define AFI_MSI_EN_VEC0         0x8c
#define AFI_MSI_EN_VEC1         0x90
#define AFI_MSI_EN_VEC2         0x94
#define AFI_MSI_EN_VEC3         0x98
#define AFI_MSI_EN_VEC4         0x9c
#define AFI_MSI_EN_VEC5         0xa0
#define AFI_MSI_EN_VEC6         0xa4
#define AFI_MSI_EN_VEC7         0xa8

#define AFI_CONFIGURATION               0xac
#define  AFI_CONFIGURATION_EN_FPCI      (1 << 0)

#define AFI_FPCI_ERROR_MASKS    0xb0

#define AFI_INTR_MASK           0xb4
#define  AFI_INTR_MASK_INT_MASK (1 << 0)
#define  AFI_INTR_MASK_MSI_MASK (1 << 8)

/* interrupt code register; values index err_msg[] in tegra_pcie_isr() */
#define AFI_INTR_CODE                   0xb8
#define  AFI_INTR_CODE_MASK             0xf
#define  AFI_INTR_INI_SLAVE_ERROR       1
#define  AFI_INTR_INI_DECODE_ERROR      2
#define  AFI_INTR_TARGET_ABORT          3
#define  AFI_INTR_MASTER_ABORT          4
#define  AFI_INTR_INVALID_WRITE         5
#define  AFI_INTR_LEGACY                6
#define  AFI_INTR_FPCI_DECODE_ERROR     7
#define  AFI_INTR_AXI_DECODE_ERROR      8
#define  AFI_INTR_FPCI_TIMEOUT          9
#define  AFI_INTR_PE_PRSNT_SENSE        10
#define  AFI_INTR_PE_CLKREQ_SENSE       11
#define  AFI_INTR_CLKCLAMP_SENSE        12
#define  AFI_INTR_RDY4PD_SENSE          13
#define  AFI_INTR_P2P_ERROR             14

#define AFI_INTR_SIGNATURE      0xbc
#define AFI_UPPER_FPCI_ADDRESS  0xc0
#define AFI_SM_INTR_ENABLE      0xc4
#define  AFI_SM_INTR_INTA_ASSERT        (1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT        (1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT        (1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT        (1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT      (1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT      (1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT      (1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT      (1 << 7)

#define AFI_AFI_INTR_ENABLE             0xc8
#define  AFI_INTR_EN_INI_SLVERR         (1 << 0)
#define  AFI_INTR_EN_INI_DECERR         (1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR         (1 << 2)
#define  AFI_INTR_EN_TGT_DECERR         (1 << 3)
#define  AFI_INTR_EN_TGT_WRERR          (1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR       (1 << 5)
#define  AFI_INTR_EN_AXI_DECERR         (1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT       (1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE        (1 << 8)

#define AFI_PCIE_PME            0xf0

/* lane crossbar / port-enable configuration; encoding is per-SoC */
#define AFI_PCIE_CONFIG                                 0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)                (1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL               0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK       (0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE     (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420        (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1      (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401        (0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL       (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222        (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1      (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211        (0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411        (0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111        (0x2 << 20)

#define AFI_FUSE                        0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS      (1 << 2)

/* per-port control registers (selected by tegra_pcie_port_get_pex_ctrl) */
#define AFI_PEX0_CTRL                   0x110
#define AFI_PEX1_CTRL                   0x118
#define AFI_PEX2_CTRL                   0x128
#define  AFI_PEX_CTRL_RST               (1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN         (1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN         (1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN       (1 << 4)

#define AFI_PLLE_CONTROL                0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0              0x168

/* root-port register space (accessed through port->base) */
#define RP_VEND_XP      0x00000f00
#define  RP_VEND_XP_DL_UP       (1 << 30)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC    0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)

#define RP_LINK_CONTROL_STATUS                  0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE  0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK   0x3fff0000

/* PADS (PHY) register space */
#define PADS_CTL_SEL            0x0000009c

#define PADS_CTL                0x000000a0
#define  PADS_CTL_IDDQ_1L       (1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L (1 << 10)

/* PLL control register moved between SoC generations */
#define PADS_PLL_CTL_TEGRA20                    0x000000b8
#define PADS_PLL_CTL_TEGRA30                    0x000000b4
#define  PADS_PLL_CTL_RST_B4SM                  (1 << 1)
#define  PADS_PLL_CTL_LOCKDET                   (1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK               (0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML       (0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS      (1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL           (2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK             (0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10            (0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5             (1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN           (1 << 22)

#define PADS_REFCLK_CFG0                        0x000000c8
#define PADS_REFCLK_CFG1                        0x000000cc
#define PADS_REFCLK_BIAS                        0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT              2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT            7
#define PADS_REFCLK_CFG_PREDI_SHIFT             8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT              12 /* 15:12 */

#define PME_ACK_TIMEOUT 10000
 227
/* MSI controller state for the whole PCIe complex */
struct tegra_msi {
        struct msi_controller chip;
        DECLARE_BITMAP(used, INT_PCI_MSI_NR);   /* allocated-vector bitmap */
        struct irq_domain *domain;
        unsigned long pages;            /* backing pages for the MSI target */
        struct mutex lock;              /* presumably serializes vector allocation */
        u64 phys;                       /* physical MSI target address */
        int irq;                        /* parent interrupt */
};
 237
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
        struct {
                u8 turnoff_bit;         /* PME turnoff bit position (per port) */
                u8 ack_bit;             /* PME acknowledge bit position */
        } pme;
};
 245
/* per-SoC capabilities and register-layout quirks */
struct tegra_pcie_soc {
        unsigned int num_ports;                 /* number of root ports */
        const struct tegra_pcie_port_soc *ports;
        unsigned int msi_base_shift;            /* presumably aligns the MSI base; verify */
        u32 pads_pll_ctl;                       /* PADS_PLL_CTL_TEGRA20 or _TEGRA30 */
        u32 tx_ref_sel;                         /* TX clock reference divider select */
        u32 pads_refclk_cfg0;                   /* refclk driver config, ports 0-1 */
        u32 pads_refclk_cfg1;                   /* refclk driver config, ports 2+ */
        bool has_pex_clkreq_en;
        bool has_pex_bias_ctrl;
        bool has_intr_prsnt_sense;
        bool has_cml_clk;
        bool has_gen2;
        bool force_pca_enable;                  /* set RP_VEND_CTL2_PCA_ENABLE on ports */
        bool program_uphy;
};
 262
 263static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
 264{
 265        return container_of(chip, struct tegra_msi, chip);
 266}
 267
/* driver state for one Tegra PCIe host controller */
struct tegra_pcie {
        struct device *dev;

        void __iomem *pads;     /* PADS (PHY) register block */
        void __iomem *afi;      /* AFI register block */
        void __iomem *cfg;      /* 4 KiB config-space window (see map_bus) */
        int irq;

        struct resource cs;     /* extended configuration space aperture */
        struct resource io;
        struct resource pio;
        struct resource mem;
        struct resource prefetch;
        struct resource busn;

        /* resource-to-bus address offsets for the host bridge windows */
        struct {
                resource_size_t mem;
                resource_size_t io;
        } offset;

        struct clk *pex_clk;
        struct clk *afi_clk;
        struct clk *pll_e;
        struct clk *cml_clk;

        struct reset_control *pex_rst;
        struct reset_control *afi_rst;
        struct reset_control *pcie_xrst;

        bool legacy_phy;        /* true: single PHY / direct pads programming */
        struct phy *phy;

        struct tegra_msi msi;

        struct list_head ports; /* list of struct tegra_pcie_port */
        u32 xbar_config;        /* AFI_PCIE_CONFIG lane crossbar value */

        struct regulator_bulk_data *supplies;
        unsigned int num_supplies;

        const struct tegra_pcie_soc *soc;
        struct dentry *debugfs;
};
 311
/* state for one root port of the controller */
struct tegra_pcie_port {
        struct tegra_pcie *pcie;        /* owning controller */
        struct device_node *np;
        struct list_head list;          /* linked on pcie->ports */
        struct resource regs;
        void __iomem *base;             /* root-port register window */
        unsigned int index;             /* port number (selects AFI_PEXn_CTRL) */
        unsigned int lanes;

        struct phy **phys;              /* one PHY per lane (non-legacy path) */
};
 323
/* bus-number bookkeeping; not referenced in this part of the file — verify use */
struct tegra_pcie_bus {
        struct list_head list;
        unsigned int nr;
};
 328
 329static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
 330                              unsigned long offset)
 331{
 332        writel(value, pcie->afi + offset);
 333}
 334
 335static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
 336{
 337        return readl(pcie->afi + offset);
 338}
 339
 340static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
 341                               unsigned long offset)
 342{
 343        writel(value, pcie->pads + offset);
 344}
 345
 346static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
 347{
 348        return readl(pcie->pads + offset);
 349}
 350
 351/*
 352 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 353 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 354 * register accesses are mapped:
 355 *
 356 *    [27:24] extended register number
 357 *    [23:16] bus number
 358 *    [15:11] device number
 359 *    [10: 8] function number
 360 *    [ 7: 0] register number
 361 *
 362 * Mapping the whole extended configuration space would require 256 MiB of
 363 * virtual address space, only a small part of which will actually be used.
 364 *
 365 * To work around this, a 4 KiB region is used to generate the required
 366 * configuration transaction with relevant B:D:F and register offset values.
 367 * This is achieved by dynamically programming base address and size of
 368 * AFI_AXI_BAR used for end point config space mapping to make sure that the
 369 * address (access to which generates correct config transaction) falls in
 370 * this 4 KiB region.
 371 */
 372static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
 373                                           unsigned int where)
 374{
 375        return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
 376               (PCI_FUNC(devfn) << 8) | (where & 0xff);
 377}
 378
 379static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
 380                                        unsigned int devfn,
 381                                        int where)
 382{
 383        struct tegra_pcie *pcie = bus->sysdata;
 384        void __iomem *addr = NULL;
 385
 386        if (bus->number == 0) {
 387                unsigned int slot = PCI_SLOT(devfn);
 388                struct tegra_pcie_port *port;
 389
 390                list_for_each_entry(port, &pcie->ports, list) {
 391                        if (port->index + 1 == slot) {
 392                                addr = port->base + (where & ~3);
 393                                break;
 394                        }
 395                }
 396        } else {
 397                unsigned int offset;
 398                u32 base;
 399
 400                offset = tegra_pcie_conf_offset(bus->number, devfn, where);
 401
 402                /* move 4 KiB window to offset within the FPCI region */
 403                base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
 404                afi_writel(pcie, base, AFI_FPCI_BAR0);
 405
 406                /* move to correct offset within the 4 KiB page */
 407                addr = pcie->cfg + (offset & (SZ_4K - 1));
 408        }
 409
 410        return addr;
 411}
 412
 413static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 414                                  int where, int size, u32 *value)
 415{
 416        if (bus->number == 0)
 417                return pci_generic_config_read32(bus, devfn, where, size,
 418                                                 value);
 419
 420        return pci_generic_config_read(bus, devfn, where, size, value);
 421}
 422
 423static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
 424                                   int where, int size, u32 value)
 425{
 426        if (bus->number == 0)
 427                return pci_generic_config_write32(bus, devfn, where, size,
 428                                                  value);
 429
 430        return pci_generic_config_write(bus, devfn, where, size, value);
 431}
 432
/* config-space accessors handed to the PCI core */
static struct pci_ops tegra_pcie_ops = {
        .map_bus = tegra_pcie_map_bus,
        .read = tegra_pcie_config_read,
        .write = tegra_pcie_config_write,
};
 438
 439static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 440{
 441        unsigned long ret = 0;
 442
 443        switch (port->index) {
 444        case 0:
 445                ret = AFI_PEX0_CTRL;
 446                break;
 447
 448        case 1:
 449                ret = AFI_PEX1_CTRL;
 450                break;
 451
 452        case 2:
 453                ret = AFI_PEX2_CTRL;
 454                break;
 455        }
 456
 457        return ret;
 458}
 459
 460static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
 461{
 462        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 463        unsigned long value;
 464
 465        /* pulse reset signal */
 466        value = afi_readl(port->pcie, ctrl);
 467        value &= ~AFI_PEX_CTRL_RST;
 468        afi_writel(port->pcie, value, ctrl);
 469
 470        usleep_range(1000, 2000);
 471
 472        value = afi_readl(port->pcie, ctrl);
 473        value |= AFI_PEX_CTRL_RST;
 474        afi_writel(port->pcie, value, ctrl);
 475}
 476
 477static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 478{
 479        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 480        const struct tegra_pcie_soc *soc = port->pcie->soc;
 481        unsigned long value;
 482
 483        /* enable reference clock */
 484        value = afi_readl(port->pcie, ctrl);
 485        value |= AFI_PEX_CTRL_REFCLK_EN;
 486
 487        if (soc->has_pex_clkreq_en)
 488                value |= AFI_PEX_CTRL_CLKREQ_EN;
 489
 490        value |= AFI_PEX_CTRL_OVERRIDE_EN;
 491
 492        afi_writel(port->pcie, value, ctrl);
 493
 494        tegra_pcie_port_reset(port);
 495
 496        if (soc->force_pca_enable) {
 497                value = readl(port->base + RP_VEND_CTL2);
 498                value |= RP_VEND_CTL2_PCA_ENABLE;
 499                writel(value, port->base + RP_VEND_CTL2);
 500        }
 501}
 502
 503static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 504{
 505        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 506        const struct tegra_pcie_soc *soc = port->pcie->soc;
 507        unsigned long value;
 508
 509        /* assert port reset */
 510        value = afi_readl(port->pcie, ctrl);
 511        value &= ~AFI_PEX_CTRL_RST;
 512        afi_writel(port->pcie, value, ctrl);
 513
 514        /* disable reference clock */
 515        value = afi_readl(port->pcie, ctrl);
 516
 517        if (soc->has_pex_clkreq_en)
 518                value &= ~AFI_PEX_CTRL_CLKREQ_EN;
 519
 520        value &= ~AFI_PEX_CTRL_REFCLK_EN;
 521        afi_writel(port->pcie, value, ctrl);
 522}
 523
/*
 * Tear down a root port allocated with devm_* helpers: unmap its
 * registers, release the memory region, unlink it from pcie->ports and
 * free the structure. Order matters: the mapping must go before the
 * region, and the port must leave the list before being freed.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
        struct tegra_pcie *pcie = port->pcie;
        struct device *dev = pcie->dev;

        devm_iounmap(dev, port->base);
        devm_release_mem_region(dev, port->regs.start,
                                resource_size(&port->regs));
        list_del(&port->list);
        devm_kfree(dev, port);
}
 535
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
        /* force the class code to PCI-to-PCI bridge */
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* root-port device IDs — presumably per Tegra generation; verify against TRM */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
 545
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
        /* set the Relaxed Ordering enable bit in Device Control */
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/* applied to every device on the bus, not just Tegra parts */
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 552
 553static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
 554{
 555        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 556        struct list_head *windows = &host->windows;
 557        struct device *dev = pcie->dev;
 558        int err;
 559
 560        pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
 561        pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
 562        pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
 563        pci_add_resource(windows, &pcie->busn);
 564
 565        err = devm_request_pci_bus_resources(dev, windows);
 566        if (err < 0) {
 567                pci_free_resource_list(windows);
 568                return err;
 569        }
 570
 571        pci_remap_iospace(&pcie->pio, pcie->io.start);
 572
 573        return 0;
 574}
 575
 576static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
 577{
 578        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
 579        struct list_head *windows = &host->windows;
 580
 581        pci_unmap_iospace(&pcie->pio);
 582        pci_free_resource_list(windows);
 583}
 584
 585static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 586{
 587        struct tegra_pcie *pcie = pdev->bus->sysdata;
 588        int irq;
 589
 590        tegra_cpuidle_pcie_irqs_in_use();
 591
 592        irq = of_irq_parse_and_map_pci(pdev, slot, pin);
 593        if (!irq)
 594                irq = pcie->irq;
 595
 596        return irq;
 597}
 598
 599static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 600{
 601        const char *err_msg[] = {
 602                "Unknown",
 603                "AXI slave error",
 604                "AXI decode error",
 605                "Target abort",
 606                "Master abort",
 607                "Invalid write",
 608                "Legacy interrupt",
 609                "Response decoding error",
 610                "AXI response decoding error",
 611                "Transaction timeout",
 612                "Slot present pin change",
 613                "Slot clock request change",
 614                "TMS clock ramp change",
 615                "TMS ready for power down",
 616                "Peer2Peer error",
 617        };
 618        struct tegra_pcie *pcie = arg;
 619        struct device *dev = pcie->dev;
 620        u32 code, signature;
 621
 622        code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
 623        signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
 624        afi_writel(pcie, 0, AFI_INTR_CODE);
 625
 626        if (code == AFI_INTR_LEGACY)
 627                return IRQ_NONE;
 628
 629        if (code >= ARRAY_SIZE(err_msg))
 630                code = 0;
 631
 632        /*
 633         * do not pollute kernel log with master abort reports since they
 634         * happen a lot during enumeration
 635         */
 636        if (code == AFI_INTR_MASTER_ABORT)
 637                dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
 638        else
 639                dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
 640
 641        if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 642            code == AFI_INTR_FPCI_DECODE_ERROR) {
 643                u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
 644                u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 645
 646                if (code == AFI_INTR_MASTER_ABORT)
 647                        dev_dbg(dev, "  FPCI address: %10llx\n", address);
 648                else
 649                        dev_err(dev, "  FPCI address: %10llx\n", address);
 650        }
 651
 652        return IRQ_HANDLED;
 653}
 654
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
        u32 fpci_bar, size, axi_address;

        /* Bar 0: type 1 extended configuration space */
        size = resource_size(&pcie->cs);
        afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
        /* BAR sizes appear to be programmed in 4 KiB units (>> 12) */
        afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

        /* Bar 1: downstream IO bar */
        fpci_bar = 0xfdfc0000;
        size = resource_size(&pcie->io);
        axi_address = pcie->io.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

        /* Bar 2: prefetchable memory BAR */
        fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->prefetch);
        axi_address = pcie->prefetch.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

        /* Bar 3: non prefetchable memory BAR */
        fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
        size = resource_size(&pcie->mem);
        axi_address = pcie->mem.start;
        afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
        afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

        /* NULL out the remaining BARs as they are not used */
        afi_writel(pcie, 0, AFI_AXI_BAR4_START);
        afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR4);

        afi_writel(pcie, 0, AFI_AXI_BAR5_START);
        afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
        afi_writel(pcie, 0, AFI_FPCI_BAR5);

        /* map all upstream transactions as uncached */
        afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
        afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
        afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
        afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

        /* MSI translations are setup only when needed */
        afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
        afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
 717
 718static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 719{
 720        const struct tegra_pcie_soc *soc = pcie->soc;
 721        u32 value;
 722
 723        timeout = jiffies + msecs_to_jiffies(timeout);
 724
 725        while (time_before(jiffies, timeout)) {
 726                value = pads_readl(pcie, soc->pads_pll_ctl);
 727                if (value & PADS_PLL_CTL_LOCKDET)
 728                        return 0;
 729        }
 730
 731        return -ETIMEDOUT;
 732}
 733
/*
 * Bring up the integrated (pads-programmed) PCIe PHY: select the pad
 * control source, configure the PHY PLL inputs, pulse the PLL reset and
 * wait for lock, then release IDDQ and enable the TX/RX data paths.
 * Returns 0 on success or the error from the PLL lock wait.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
        struct device *dev = pcie->dev;
        const struct tegra_pcie_soc *soc = pcie->soc;
        u32 value;
        int err;

        /* initialize internal PHY, enable up to 16 PCIE lanes */
        pads_writel(pcie, 0x0, PADS_CTL_SEL);

        /* override IDDQ to 1 on all 4 lanes */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /*
         * Set up PHY PLL inputs select PLLE output as refclock,
         * set TX ref sel to div10 (not div5).
         */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
        value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* reset PLL */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value &= ~PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        usleep_range(20, 100);

        /* take PLL out of reset  */
        value = pads_readl(pcie, soc->pads_pll_ctl);
        value |= PADS_PLL_CTL_RST_B4SM;
        pads_writel(pcie, value, soc->pads_pll_ctl);

        /* wait for the PLL to lock */
        err = tegra_pcie_pll_wait(pcie, 500);
        if (err < 0) {
                dev_err(dev, "PLL failed to lock: %d\n", err);
                return err;
        }

        /* turn off IDDQ override */
        value = pads_readl(pcie, PADS_CTL);
        value &= ~PADS_CTL_IDDQ_1L;
        pads_writel(pcie, value, PADS_CTL);

        /* enable TX/RX data */
        value = pads_readl(pcie, PADS_CTL);
        value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
        pads_writel(pcie, value, PADS_CTL);

        return 0;
}
 789
 790static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
 791{
 792        const struct tegra_pcie_soc *soc = pcie->soc;
 793        u32 value;
 794
 795        /* disable TX/RX data */
 796        value = pads_readl(pcie, PADS_CTL);
 797        value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
 798        pads_writel(pcie, value, PADS_CTL);
 799
 800        /* override IDDQ */
 801        value = pads_readl(pcie, PADS_CTL);
 802        value |= PADS_CTL_IDDQ_1L;
 803        pads_writel(pcie, value, PADS_CTL);
 804
 805        /* reset PLL */
 806        value = pads_readl(pcie, soc->pads_pll_ctl);
 807        value &= ~PADS_PLL_CTL_RST_B4SM;
 808        pads_writel(pcie, value, soc->pads_pll_ctl);
 809
 810        usleep_range(20, 100);
 811
 812        return 0;
 813}
 814
 815static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
 816{
 817        struct device *dev = port->pcie->dev;
 818        unsigned int i;
 819        int err;
 820
 821        for (i = 0; i < port->lanes; i++) {
 822                err = phy_power_on(port->phys[i]);
 823                if (err < 0) {
 824                        dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
 825                        return err;
 826                }
 827        }
 828
 829        return 0;
 830}
 831
 832static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
 833{
 834        struct device *dev = port->pcie->dev;
 835        unsigned int i;
 836        int err;
 837
 838        for (i = 0; i < port->lanes; i++) {
 839                err = phy_power_off(port->phys[i]);
 840                if (err < 0) {
 841                        dev_err(dev, "failed to power off PHY#%u: %d\n", i,
 842                                err);
 843                        return err;
 844                }
 845        }
 846
 847        return 0;
 848}
 849
/*
 * Power on the controller PHY(s) and program the reference clock pads.
 *
 * In legacy-PHY mode a single PHY covers the whole controller (or, when
 * none was provided, the open-coded tegra_pcie_phy_enable() path is used).
 * Otherwise each root port carries one PHY per lane.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	/* new-style bindings: per-lane PHYs on every port */
	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* SoCs with more than two ports have a second REFCLK config register */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
 887
 888static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
 889{
 890        struct device *dev = pcie->dev;
 891        struct tegra_pcie_port *port;
 892        int err;
 893
 894        if (pcie->legacy_phy) {
 895                if (pcie->phy)
 896                        err = phy_power_off(pcie->phy);
 897                else
 898                        err = tegra_pcie_phy_disable(pcie);
 899
 900                if (err < 0)
 901                        dev_err(dev, "failed to power off PHY: %d\n", err);
 902
 903                return err;
 904        }
 905
 906        list_for_each_entry(port, &pcie->ports, list) {
 907                err = tegra_pcie_port_phy_power_off(port);
 908                if (err < 0) {
 909                        dev_err(dev,
 910                                "failed to power off PCIe port %u PHY: %d\n",
 911                                port->index, err);
 912                        return err;
 913                }
 914        }
 915
 916        return 0;
 917}
 918
/*
 * Bring up the AFI/PCIe logic: program PLL power-down control, the lane
 * crossbar configuration and the Gen2 fuse bit, power on the PHY(s) where
 * the SoC requires it, release the interface reset and enable the AFI
 * error interrupt sources.
 *
 * Returns 0 on success or a negative error code if PHY power-on fails.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ... then re-enable only the ports described in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow or forbid Gen2 operation depending on SoC capability */
	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupts (plus PRSNT sense where supported) */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
 993
 994static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
 995{
 996        int err;
 997
 998        reset_control_assert(pcie->pcie_xrst);
 999
1000        if (pcie->soc->program_uphy) {
1001                err = tegra_pcie_phy_power_off(pcie);
1002                if (err < 0)
1003                        dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
1004                                err);
1005        }
1006}
1007
/*
 * Power down the PCIe block, roughly mirroring tegra_pcie_power_on() in
 * reverse: assert resets, stop clocks, power-gate the partition (unless a
 * PM domain handles gating) and disable the supplies. Regulator-disable
 * failures are only warned about since nothing can be done at this point.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
	clk_disable_unprepare(pcie->pex_clk);

	/* with a PM domain attached, power gating is the domain's job */
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1030
1031static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1032{
1033        struct device *dev = pcie->dev;
1034        const struct tegra_pcie_soc *soc = pcie->soc;
1035        int err;
1036
1037        reset_control_assert(pcie->pcie_xrst);
1038        reset_control_assert(pcie->afi_rst);
1039        reset_control_assert(pcie->pex_rst);
1040
1041        if (!dev->pm_domain)
1042                tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1043
1044        /* enable regulators */
1045        err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1046        if (err < 0)
1047                dev_err(dev, "failed to enable regulators: %d\n", err);
1048
1049        if (dev->pm_domain) {
1050                err = clk_prepare_enable(pcie->pex_clk);
1051                if (err) {
1052                        dev_err(dev, "failed to enable PEX clock: %d\n", err);
1053                        return err;
1054                }
1055                reset_control_deassert(pcie->pex_rst);
1056        } else {
1057                err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
1058                                                        pcie->pex_clk,
1059                                                        pcie->pex_rst);
1060                if (err) {
1061                        dev_err(dev, "powerup sequence failed: %d\n", err);
1062                        return err;
1063                }
1064        }
1065
1066        reset_control_deassert(pcie->afi_rst);
1067
1068        err = clk_prepare_enable(pcie->afi_clk);
1069        if (err < 0) {
1070                dev_err(dev, "failed to enable AFI clock: %d\n", err);
1071                return err;
1072        }
1073
1074        if (soc->has_cml_clk) {
1075                err = clk_prepare_enable(pcie->cml_clk);
1076                if (err < 0) {
1077                        dev_err(dev, "failed to enable CML clock: %d\n", err);
1078                        return err;
1079                }
1080        }
1081
1082        err = clk_prepare_enable(pcie->pll_e);
1083        if (err < 0) {
1084                dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1085                return err;
1086        }
1087
1088        return 0;
1089}
1090
/*
 * Look up all clocks required by the controller: "pex", "afi", "pll_e"
 * and, on SoCs that have one, "cml". The lookups are devm-managed so no
 * explicit release is needed.
 *
 * Returns 0 on success or the first devm_clk_get() error.
 */
static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;

	pcie->pex_clk = devm_clk_get(dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pll_e = devm_clk_get(dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}
1116
/*
 * Look up the three reset controls used by the driver: "pex", "afi" and
 * "pcie_x". All are devm-managed exclusive resets.
 *
 * Returns 0 on success or the first lookup error.
 */
static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;

	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
	if (IS_ERR(pcie->pex_rst))
		return PTR_ERR(pcie->pex_rst);

	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
	if (IS_ERR(pcie->afi_rst))
		return PTR_ERR(pcie->afi_rst);

	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
	if (IS_ERR(pcie->pcie_xrst))
		return PTR_ERR(pcie->pcie_xrst);

	return 0;
}
1135
/*
 * Legacy PHY binding: a single, optional "pcie" PHY for the whole
 * controller. The PHY is initialized here and the driver is flagged as
 * running in legacy-PHY mode so the power-on/off paths take the
 * single-PHY branch. Note pcie->phy may be NULL (optional lookup);
 * phy_init() tolerates that.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}
1158
1159static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1160                                                  struct device_node *np,
1161                                                  const char *consumer,
1162                                                  unsigned int index)
1163{
1164        struct phy *phy;
1165        char *name;
1166
1167        name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1168        if (!name)
1169                return ERR_PTR(-ENOMEM);
1170
1171        phy = devm_of_phy_get(dev, np, name);
1172        kfree(name);
1173
1174        if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1175                phy = NULL;
1176
1177        return phy;
1178}
1179
1180static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1181{
1182        struct device *dev = port->pcie->dev;
1183        struct phy *phy;
1184        unsigned int i;
1185        int err;
1186
1187        port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1188        if (!port->phys)
1189                return -ENOMEM;
1190
1191        for (i = 0; i < port->lanes; i++) {
1192                phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1193                if (IS_ERR(phy)) {
1194                        dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1195                                PTR_ERR(phy));
1196                        return PTR_ERR(phy);
1197                }
1198
1199                err = phy_init(phy);
1200                if (err < 0) {
1201                        dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1202                                err);
1203                        return err;
1204                }
1205
1206                port->phys[i] = phy;
1207        }
1208
1209        return 0;
1210}
1211
/*
 * Select the PHY binding style and acquire the PHY(s) accordingly:
 * SoCs without Gen2 support, or device trees carrying a top-level "phys"
 * property, use the legacy single-PHY binding; otherwise each root port
 * gets one PHY per lane.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}
1230
1231static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1232{
1233        struct tegra_pcie_port *port;
1234        struct device *dev = pcie->dev;
1235        int err, i;
1236
1237        if (pcie->legacy_phy) {
1238                err = phy_exit(pcie->phy);
1239                if (err < 0)
1240                        dev_err(dev, "failed to teardown PHY: %d\n", err);
1241                return;
1242        }
1243
1244        list_for_each_entry(port, &pcie->ports, list) {
1245                for (i = 0; i < port->lanes; i++) {
1246                        err = phy_exit(port->phys[i]);
1247                        if (err < 0)
1248                                dev_err(dev, "failed to teardown PHY#%u: %d\n",
1249                                        i, err);
1250                }
1251        }
1252}
1253
1254
/*
 * Acquire every resource the controller needs: clocks, resets, PHY(s)
 * (on SoCs that program them), the "pads"/"afi" register windows, the
 * configuration-space window ("cs", remapped to 4 KiB here and on demand
 * later) and the controller interrupt.
 *
 * Only the PHYs need explicit unwinding on failure; everything else is
 * devm-managed.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}
1337
1338static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1339{
1340        const struct tegra_pcie_soc *soc = pcie->soc;
1341
1342        if (pcie->irq > 0)
1343                free_irq(pcie->irq, pcie);
1344
1345        if (soc->program_uphy)
1346                tegra_pcie_phys_put(pcie);
1347
1348        return 0;
1349}
1350
/*
 * Perform the PME turn-off handshake for one root port: set the port's
 * turnoff bit in AFI_PCIE_PME, poll for the matching ACK bit (up to
 * PME_ACK_TIMEOUT), then clear the turnoff bit again. A missing ACK is
 * logged but otherwise ignored.
 */
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	/* ~10 ms delay after the ACK — presumably to let the link settle
	 * before the turnoff bit is cleared; TODO confirm against the TRM.
	 */
	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}
1376
1377static int tegra_msi_alloc(struct tegra_msi *chip)
1378{
1379        int msi;
1380
1381        mutex_lock(&chip->lock);
1382
1383        msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1384        if (msi < INT_PCI_MSI_NR)
1385                set_bit(msi, chip->used);
1386        else
1387                msi = -ENOSPC;
1388
1389        mutex_unlock(&chip->lock);
1390
1391        return msi;
1392}
1393
1394static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1395{
1396        struct device *dev = chip->chip.dev;
1397
1398        mutex_lock(&chip->lock);
1399
1400        if (!test_bit(irq, chip->used))
1401                dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1402        else
1403                clear_bit(irq, chip->used);
1404
1405        mutex_unlock(&chip->lock);
1406}
1407
/*
 * Top-level MSI interrupt handler. Pending MSIs are reported in eight
 * 32-bit vector registers (AFI_MSI_VEC0..7); each set bit corresponds to
 * one of the INT_PCI_MSI_NR (8 * 32) hardware slots. Each pending bit is
 * acknowledged in hardware and dispatched to its mapped Linux IRQ.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	/* only claim the interrupt if at least one MSI was serviced */
	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1449
/*
 * msi_controller .setup_irq callback: allocate a free hardware MSI slot,
 * map it to a Linux IRQ and program the endpoint's MSI message so that it
 * writes the slot number (hwirq) to the AFI MSI doorbell address
 * (msi->phys).
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		/* release the slot again if no virtual IRQ could be mapped */
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	/* the MSI target is the doorbell page; the payload is the slot */
	msg.address_lo = lower_32_bits(msi->phys);
	msg.address_hi = upper_32_bits(msi->phys);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
1478
1479static void tegra_msi_teardown_irq(struct msi_controller *chip,
1480                                   unsigned int irq)
1481{
1482        struct tegra_msi *msi = to_tegra_msi(chip);
1483        struct irq_data *d = irq_get_irq_data(irq);
1484        irq_hw_number_t hwirq = irqd_to_hwirq(d);
1485
1486        irq_dispose_mapping(irq);
1487        tegra_msi_free(msi, hwirq);
1488}
1489
/* IRQ chip for Tegra MSIs; all operations use the generic PCI MSI mask ops */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1497
/*
 * IRQ domain .map callback: attach the MSI irq chip with the simple-IRQ
 * flow handler to a newly created virtual IRQ.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	/* notify the cpuidle driver that PCIe interrupts are now in use */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1508
/* only .map is needed; unmapping goes through irq_dispose_mapping() */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1512
1513static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1514{
1515        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1516        struct platform_device *pdev = to_platform_device(pcie->dev);
1517        struct tegra_msi *msi = &pcie->msi;
1518        struct device *dev = pcie->dev;
1519        int err;
1520
1521        mutex_init(&msi->lock);
1522
1523        msi->chip.dev = dev;
1524        msi->chip.setup_irq = tegra_msi_setup_irq;
1525        msi->chip.teardown_irq = tegra_msi_teardown_irq;
1526
1527        msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1528                                            &msi_domain_ops, &msi->chip);
1529        if (!msi->domain) {
1530                dev_err(dev, "failed to create IRQ domain\n");
1531                return -ENOMEM;
1532        }
1533
1534        err = platform_get_irq_byname(pdev, "msi");
1535        if (err < 0) {
1536                dev_err(dev, "failed to get IRQ: %d\n", err);
1537                goto err;
1538        }
1539
1540        msi->irq = err;
1541
1542        err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1543                          tegra_msi_irq_chip.name, pcie);
1544        if (err < 0) {
1545                dev_err(dev, "failed to request IRQ: %d\n", err);
1546                goto err;
1547        }
1548
1549        /* setup AFI/FPCI range */
1550        msi->pages = __get_free_pages(GFP_KERNEL, 0);
1551        msi->phys = virt_to_phys((void *)msi->pages);
1552        host->msi = &msi->chip;
1553
1554        return 0;
1555
1556err:
1557        irq_domain_remove(msi->domain);
1558        return err;
1559}
1560
/*
 * Program the AFI so endpoint MSI writes target the doorbell page at
 * msi->phys (allocated in tegra_pcie_msi_setup()), enable all 8 * 32
 * vectors and unmask the MSI summary interrupt.
 */
static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}
1587
1588static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1589{
1590        struct tegra_msi *msi = &pcie->msi;
1591        unsigned int i, irq;
1592
1593        free_pages(msi->pages, 0);
1594
1595        if (msi->irq > 0)
1596                free_irq(msi->irq, pcie);
1597
1598        for (i = 0; i < INT_PCI_MSI_NR; i++) {
1599                irq = irq_find_mapping(msi->domain, i);
1600                if (irq > 0)
1601                        irq_dispose_mapping(irq);
1602        }
1603
1604        irq_domain_remove(msi->domain);
1605}
1606
/*
 * Inverse of tegra_pcie_enable_msi(): mask the MSI summary interrupt in
 * the AFI and clear all vector enables. Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	return 0;
}
1628
/*
 * Translate the lane configuration from the device tree (apparently one
 * byte per root port, each byte holding that port's lane count) into the
 * matching AFI_PCIE_CONFIG crossbar value. The valid encodings differ per
 * SoC generation, hence the per-compatible switches.
 *
 * Returns 0 and stores the selected value in *xbar, or -EINVAL if the
 * lane configuration is not supported on this SoC. Note that Tegra186
 * never fails: an invalid configuration falls back to 2:1:1.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			/* tolerate bad DT data rather than failing probe */
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1705
1706/*
1707 * Check whether a given set of supplies is available in a device tree node.
1708 * This is used to check whether the new or the legacy device tree bindings
1709 * should be used.
1710 */
1711static bool of_regulator_bulk_available(struct device_node *np,
1712                                        struct regulator_bulk_data *supplies,
1713                                        unsigned int num_supplies)
1714{
1715        char property[32];
1716        unsigned int i;
1717
1718        for (i = 0; i < num_supplies; i++) {
1719                snprintf(property, 32, "%s-supply", supplies[i].supply);
1720
1721                if (of_find_property(np, property, NULL) == NULL)
1722                        return false;
1723        }
1724
1725        return true;
1726}
1727
1728/*
1729 * Old versions of the device tree binding for this device used a set of power
1730 * supplies that didn't match the hardware inputs. This happened to work for a
1731 * number of cases but is not future proof. However to preserve backwards-
1732 * compatibility with old device trees, this function will try to use the old
1733 * set of supplies.
1734 */
static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	/* legacy bindings exist only for Tegra30 (3 supplies) and Tegra20 (2) */
	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
		pcie->num_supplies = 3;
	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
		pcie->num_supplies = 2;

	if (pcie->num_supplies == 0) {
		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
		return -ENODEV;
	}

	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
				      sizeof(*pcie->supplies),
				      GFP_KERNEL);
	if (!pcie->supplies)
		return -ENOMEM;

	/* "pex-clk" and "vdd" are common; Tegra30 additionally uses "avdd" */
	pcie->supplies[0].supply = "pex-clk";
	pcie->supplies[1].supply = "vdd";

	if (pcie->num_supplies > 2)
		pcie->supplies[2].supply = "avdd";

	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
}
1764
1765/*
1766 * Obtains the list of regulators required for a particular generation of the
1767 * IP block.
1768 *
1769 * This would've been nice to do simply by providing static tables for use
1770 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1771 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1772 * and either seems to be optional depending on which ports are being used.
1773 */
1774static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1775{
1776        struct device *dev = pcie->dev;
1777        struct device_node *np = dev->of_node;
1778        unsigned int i = 0;
1779
1780        if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1781                pcie->num_supplies = 4;
1782
1783                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1784                                              sizeof(*pcie->supplies),
1785                                              GFP_KERNEL);
1786                if (!pcie->supplies)
1787                        return -ENOMEM;
1788
1789                pcie->supplies[i++].supply = "dvdd-pex";
1790                pcie->supplies[i++].supply = "hvdd-pex-pll";
1791                pcie->supplies[i++].supply = "hvdd-pex";
1792                pcie->supplies[i++].supply = "vddio-pexctl-aud";
1793        } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1794                pcie->num_supplies = 6;
1795
1796                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1797                                              sizeof(*pcie->supplies),
1798                                              GFP_KERNEL);
1799                if (!pcie->supplies)
1800                        return -ENOMEM;
1801
1802                pcie->supplies[i++].supply = "avdd-pll-uerefe";
1803                pcie->supplies[i++].supply = "hvddio-pex";
1804                pcie->supplies[i++].supply = "dvddio-pex";
1805                pcie->supplies[i++].supply = "dvdd-pex-pll";
1806                pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1807                pcie->supplies[i++].supply = "vddio-pex-ctl";
1808        } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1809                pcie->num_supplies = 7;
1810
1811                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1812                                              sizeof(*pcie->supplies),
1813                                              GFP_KERNEL);
1814                if (!pcie->supplies)
1815                        return -ENOMEM;
1816
1817                pcie->supplies[i++].supply = "avddio-pex";
1818                pcie->supplies[i++].supply = "dvddio-pex";
1819                pcie->supplies[i++].supply = "avdd-pex-pll";
1820                pcie->supplies[i++].supply = "hvdd-pex";
1821                pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1822                pcie->supplies[i++].supply = "vddio-pex-ctl";
1823                pcie->supplies[i++].supply = "avdd-pll-erefe";
1824        } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1825                bool need_pexa = false, need_pexb = false;
1826
1827                /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1828                if (lane_mask & 0x0f)
1829                        need_pexa = true;
1830
1831                /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1832                if (lane_mask & 0x30)
1833                        need_pexb = true;
1834
1835                pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1836                                         (need_pexb ? 2 : 0);
1837
1838                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1839                                              sizeof(*pcie->supplies),
1840                                              GFP_KERNEL);
1841                if (!pcie->supplies)
1842                        return -ENOMEM;
1843
1844                pcie->supplies[i++].supply = "avdd-pex-pll";
1845                pcie->supplies[i++].supply = "hvdd-pex";
1846                pcie->supplies[i++].supply = "vddio-pex-ctl";
1847                pcie->supplies[i++].supply = "avdd-plle";
1848
1849                if (need_pexa) {
1850                        pcie->supplies[i++].supply = "avdd-pexa";
1851                        pcie->supplies[i++].supply = "vdd-pexa";
1852                }
1853
1854                if (need_pexb) {
1855                        pcie->supplies[i++].supply = "avdd-pexb";
1856                        pcie->supplies[i++].supply = "vdd-pexb";
1857                }
1858        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1859                pcie->num_supplies = 5;
1860
1861                pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1862                                              sizeof(*pcie->supplies),
1863                                              GFP_KERNEL);
1864                if (!pcie->supplies)
1865                        return -ENOMEM;
1866
1867                pcie->supplies[0].supply = "avdd-pex";
1868                pcie->supplies[1].supply = "vdd-pex";
1869                pcie->supplies[2].supply = "avdd-pex-pll";
1870                pcie->supplies[3].supply = "avdd-plle";
1871                pcie->supplies[4].supply = "vddio-pex-clk";
1872        }
1873
1874        if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
1875                                        pcie->num_supplies))
1876                return devm_regulator_bulk_get(dev, pcie->num_supplies,
1877                                               pcie->supplies);
1878
1879        /*
1880         * If not all regulators are available for this new scheme, assume
1881         * that the device tree complies with an older version of the device
1882         * tree binding.
1883         */
1884        dev_info(dev, "using legacy DT binding for power supplies\n");
1885
1886        devm_kfree(dev, pcie->supplies);
1887        pcie->num_supplies = 0;
1888
1889        return tegra_pcie_get_legacy_regulators(pcie);
1890}
1891
1892static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1893{
1894        struct device *dev = pcie->dev;
1895        struct device_node *np = dev->of_node, *port;
1896        const struct tegra_pcie_soc *soc = pcie->soc;
1897        struct of_pci_range_parser parser;
1898        struct of_pci_range range;
1899        u32 lanes = 0, mask = 0;
1900        unsigned int lane = 0;
1901        struct resource res;
1902        int err;
1903
1904        if (of_pci_range_parser_init(&parser, np)) {
1905                dev_err(dev, "missing \"ranges\" property\n");
1906                return -EINVAL;
1907        }
1908
1909        for_each_of_pci_range(&parser, &range) {
1910                err = of_pci_range_to_resource(&range, np, &res);
1911                if (err < 0)
1912                        return err;
1913
1914                switch (res.flags & IORESOURCE_TYPE_BITS) {
1915                case IORESOURCE_IO:
1916                        /* Track the bus -> CPU I/O mapping offset. */
1917                        pcie->offset.io = res.start - range.pci_addr;
1918
1919                        memcpy(&pcie->pio, &res, sizeof(res));
1920                        pcie->pio.name = np->full_name;
1921
1922                        /*
1923                         * The Tegra PCIe host bridge uses this to program the
1924                         * mapping of the I/O space to the physical address,
1925                         * so we override the .start and .end fields here that
1926                         * of_pci_range_to_resource() converted to I/O space.
1927                         * We also set the IORESOURCE_MEM type to clarify that
1928                         * the resource is in the physical memory space.
1929                         */
1930                        pcie->io.start = range.cpu_addr;
1931                        pcie->io.end = range.cpu_addr + range.size - 1;
1932                        pcie->io.flags = IORESOURCE_MEM;
1933                        pcie->io.name = "I/O";
1934
1935                        memcpy(&res, &pcie->io, sizeof(res));
1936                        break;
1937
1938                case IORESOURCE_MEM:
1939                        /*
1940                         * Track the bus -> CPU memory mapping offset. This
1941                         * assumes that the prefetchable and non-prefetchable
1942                         * regions will be the last of type IORESOURCE_MEM in
1943                         * the ranges property.
1944                         * */
1945                        pcie->offset.mem = res.start - range.pci_addr;
1946
1947                        if (res.flags & IORESOURCE_PREFETCH) {
1948                                memcpy(&pcie->prefetch, &res, sizeof(res));
1949                                pcie->prefetch.name = "prefetchable";
1950                        } else {
1951                                memcpy(&pcie->mem, &res, sizeof(res));
1952                                pcie->mem.name = "non-prefetchable";
1953                        }
1954                        break;
1955                }
1956        }
1957
1958        err = of_pci_parse_bus_range(np, &pcie->busn);
1959        if (err < 0) {
1960                dev_err(dev, "failed to parse ranges property: %d\n", err);
1961                pcie->busn.name = np->name;
1962                pcie->busn.start = 0;
1963                pcie->busn.end = 0xff;
1964                pcie->busn.flags = IORESOURCE_BUS;
1965        }
1966
1967        /* parse root ports */
1968        for_each_child_of_node(np, port) {
1969                struct tegra_pcie_port *rp;
1970                unsigned int index;
1971                u32 value;
1972
1973                err = of_pci_get_devfn(port);
1974                if (err < 0) {
1975                        dev_err(dev, "failed to parse address: %d\n", err);
1976                        return err;
1977                }
1978
1979                index = PCI_SLOT(err);
1980
1981                if (index < 1 || index > soc->num_ports) {
1982                        dev_err(dev, "invalid port number: %d\n", index);
1983                        return -EINVAL;
1984                }
1985
1986                index--;
1987
1988                err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1989                if (err < 0) {
1990                        dev_err(dev, "failed to parse # of lanes: %d\n",
1991                                err);
1992                        return err;
1993                }
1994
1995                if (value > 16) {
1996                        dev_err(dev, "invalid # of lanes: %u\n", value);
1997                        return -EINVAL;
1998                }
1999
2000                lanes |= value << (index << 3);
2001
2002                if (!of_device_is_available(port)) {
2003                        lane += value;
2004                        continue;
2005                }
2006
2007                mask |= ((1 << value) - 1) << lane;
2008                lane += value;
2009
2010                rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2011                if (!rp)
2012                        return -ENOMEM;
2013
2014                err = of_address_to_resource(port, 0, &rp->regs);
2015                if (err < 0) {
2016                        dev_err(dev, "failed to parse address: %d\n", err);
2017                        return err;
2018                }
2019
2020                INIT_LIST_HEAD(&rp->list);
2021                rp->index = index;
2022                rp->lanes = value;
2023                rp->pcie = pcie;
2024                rp->np = port;
2025
2026                rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2027                if (IS_ERR(rp->base))
2028                        return PTR_ERR(rp->base);
2029
2030                list_add_tail(&rp->list, &pcie->ports);
2031        }
2032
2033        err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2034        if (err < 0) {
2035                dev_err(dev, "invalid lane configuration\n");
2036                return err;
2037        }
2038
2039        err = tegra_pcie_get_regulators(pcie, mask);
2040        if (err < 0)
2041                return err;
2042
2043        return 0;
2044}
2045
2046/*
2047 * FIXME: If there are no PCIe cards attached, then calling this function
2048 * can result in the increase of the bootup time as there are big timeout
2049 * loops.
2050 */
2051#define TEGRA_PCIE_LINKUP_TIMEOUT       200     /* up to 1.2 seconds */
2052static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2053{
2054        struct device *dev = port->pcie->dev;
2055        unsigned int retries = 3;
2056        unsigned long value;
2057
2058        /* override presence detection */
2059        value = readl(port->base + RP_PRIV_MISC);
2060        value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2061        value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2062        writel(value, port->base + RP_PRIV_MISC);
2063
2064        do {
2065                unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2066
2067                do {
2068                        value = readl(port->base + RP_VEND_XP);
2069
2070                        if (value & RP_VEND_XP_DL_UP)
2071                                break;
2072
2073                        usleep_range(1000, 2000);
2074                } while (--timeout);
2075
2076                if (!timeout) {
2077                        dev_err(dev, "link %u down, retrying\n", port->index);
2078                        goto retry;
2079                }
2080
2081                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2082
2083                do {
2084                        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2085
2086                        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2087                                return true;
2088
2089                        usleep_range(1000, 2000);
2090                } while (--timeout);
2091
2092retry:
2093                tegra_pcie_port_reset(port);
2094        } while (--retries);
2095
2096        return false;
2097}
2098
2099static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2100{
2101        struct device *dev = pcie->dev;
2102        struct tegra_pcie_port *port, *tmp;
2103
2104        list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2105                dev_info(dev, "probing port %u, using %u lanes\n",
2106                         port->index, port->lanes);
2107
2108                tegra_pcie_port_enable(port);
2109
2110                if (tegra_pcie_port_check_link(port))
2111                        continue;
2112
2113                dev_info(dev, "link %u down, ignoring\n", port->index);
2114
2115                tegra_pcie_port_disable(port);
2116                tegra_pcie_port_free(port);
2117        }
2118}
2119
2120static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2121{
2122        struct tegra_pcie_port *port, *tmp;
2123
2124        list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2125                tegra_pcie_port_disable(port);
2126}
2127
/* Per-port PME turn-off/ack bit positions for Tegra20-style controllers. */
static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
};

/* SoC-specific configuration for Tegra20. */
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};

/* Tegra30 has a third root port with its own PME bit positions. */
static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
};

/* SoC-specific configuration for Tegra30. */
static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.ports = tegra30_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};

/* SoC-specific configuration for Tegra124 (reuses the Tegra20 port table). */
static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = true,
};

/* SoC-specific configuration for Tegra210. */
static const struct tegra_pcie_soc tegra210_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
	.program_uphy = true,
};

/* Tegra186 PME bit positions differ from Tegra30 for the third port. */
static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
};

/* SoC-specific configuration for Tegra186 (UPHY is programmed elsewhere). */
static const struct tegra_pcie_soc tegra186_pcie = {
	.num_ports = 3,
	.ports = tegra186_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
	.pads_refclk_cfg1 = 0x000480b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = false,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
};

/* Compatible strings, newest first; .data selects the SoC configuration. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2235
2236static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2237{
2238        struct tegra_pcie *pcie = s->private;
2239
2240        if (list_empty(&pcie->ports))
2241                return NULL;
2242
2243        seq_printf(s, "Index  Status\n");
2244
2245        return seq_list_start(&pcie->ports, *pos);
2246}
2247
/* seq_file next callback: advance to the next entry in the ports list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}

/* seq_file stop callback: nothing to release. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2258
2259static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2260{
2261        bool up = false, active = false;
2262        struct tegra_pcie_port *port;
2263        unsigned int value;
2264
2265        port = list_entry(v, struct tegra_pcie_port, list);
2266
2267        value = readl(port->base + RP_VEND_XP);
2268
2269        if (value & RP_VEND_XP_DL_UP)
2270                up = true;
2271
2272        value = readl(port->base + RP_LINK_CONTROL_STATUS);
2273
2274        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2275                active = true;
2276
2277        seq_printf(s, "%2u     ", port->index);
2278
2279        if (up)
2280                seq_printf(s, "up");
2281
2282        if (active) {
2283                if (up)
2284                        seq_printf(s, ", ");
2285
2286                seq_printf(s, "active");
2287        }
2288
2289        seq_printf(s, "\n");
2290        return 0;
2291}
2292
/* Iterator operations for the debugfs "ports" file. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2299
2300static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2301{
2302        struct tegra_pcie *pcie = inode->i_private;
2303        struct seq_file *s;
2304        int err;
2305
2306        err = seq_open(file, &tegra_pcie_ports_seq_ops);
2307        if (err)
2308                return err;
2309
2310        s = file->private_data;
2311        s->private = pcie;
2312
2313        return 0;
2314}
2315
/* File operations for the debugfs "ports" file (standard seq_file plumbing). */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2323
/* Remove the debugfs hierarchy; safe to call with a NULL dentry. */
static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
{
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
}
2329
2330static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2331{
2332        struct dentry *file;
2333
2334        pcie->debugfs = debugfs_create_dir("pcie", NULL);
2335        if (!pcie->debugfs)
2336                return -ENOMEM;
2337
2338        file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2339                                   pcie, &tegra_pcie_ports_ops);
2340        if (!file)
2341                goto remove;
2342
2343        return 0;
2344
2345remove:
2346        tegra_pcie_debugfs_exit(pcie);
2347        return -ENOMEM;
2348}
2349
2350static int tegra_pcie_probe(struct platform_device *pdev)
2351{
2352        struct device *dev = &pdev->dev;
2353        struct pci_host_bridge *host;
2354        struct tegra_pcie *pcie;
2355        struct pci_bus *child;
2356        int err;
2357
2358        host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2359        if (!host)
2360                return -ENOMEM;
2361
2362        pcie = pci_host_bridge_priv(host);
2363        host->sysdata = pcie;
2364        platform_set_drvdata(pdev, pcie);
2365
2366        pcie->soc = of_device_get_match_data(dev);
2367        INIT_LIST_HEAD(&pcie->ports);
2368        pcie->dev = dev;
2369
2370        err = tegra_pcie_parse_dt(pcie);
2371        if (err < 0)
2372                return err;
2373
2374        err = tegra_pcie_get_resources(pcie);
2375        if (err < 0) {
2376                dev_err(dev, "failed to request resources: %d\n", err);
2377                return err;
2378        }
2379
2380        err = tegra_pcie_msi_setup(pcie);
2381        if (err < 0) {
2382                dev_err(dev, "failed to enable MSI support: %d\n", err);
2383                goto put_resources;
2384        }
2385
2386        pm_runtime_enable(pcie->dev);
2387        err = pm_runtime_get_sync(pcie->dev);
2388        if (err) {
2389                dev_err(dev, "fail to enable pcie controller: %d\n", err);
2390                goto teardown_msi;
2391        }
2392
2393        err = tegra_pcie_request_resources(pcie);
2394        if (err)
2395                goto pm_runtime_put;
2396
2397        host->busnr = pcie->busn.start;
2398        host->dev.parent = &pdev->dev;
2399        host->ops = &tegra_pcie_ops;
2400        host->map_irq = tegra_pcie_map_irq;
2401        host->swizzle_irq = pci_common_swizzle;
2402
2403        err = pci_scan_root_bus_bridge(host);
2404        if (err < 0) {
2405                dev_err(dev, "failed to register host: %d\n", err);
2406                goto free_resources;
2407        }
2408
2409        pci_bus_size_bridges(host->bus);
2410        pci_bus_assign_resources(host->bus);
2411
2412        list_for_each_entry(child, &host->bus->children, node)
2413                pcie_bus_configure_settings(child);
2414
2415        pci_bus_add_devices(host->bus);
2416
2417        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2418                err = tegra_pcie_debugfs_init(pcie);
2419                if (err < 0)
2420                        dev_err(dev, "failed to setup debugfs: %d\n", err);
2421        }
2422
2423        return 0;
2424
2425free_resources:
2426        tegra_pcie_free_resources(pcie);
2427pm_runtime_put:
2428        pm_runtime_put_sync(pcie->dev);
2429        pm_runtime_disable(pcie->dev);
2430teardown_msi:
2431        tegra_pcie_msi_teardown(pcie);
2432put_resources:
2433        tegra_pcie_put_resources(pcie);
2434        return err;
2435}
2436
/*
 * Undo everything done by tegra_pcie_probe() in reverse order: debugfs,
 * root bus, mapped resources, runtime PM reference, MSI and hardware
 * resources, then any remaining per-port state.
 */
static int tegra_pcie_remove(struct platform_device *pdev)
{
	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct tegra_pcie_port *port, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_pcie_debugfs_exit(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	tegra_pcie_free_resources(pcie);
	/* drop the reference taken in probe; triggers runtime suspend */
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_msi_teardown(pcie);

	tegra_pcie_put_resources(pcie);

	/* free ports that survived probe (links that came up) */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_free(port);

	return 0;
}
2462
/*
 * Suspend callback (used for both runtime PM and noirq system sleep, see
 * tegra_pcie_pm_ops): send PME turn-off to each port, disable the ports,
 * MSI and controller, then cut power.
 */
static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);

	tegra_pcie_disable_ports(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);

	tegra_pcie_disable_controller(pcie);
	tegra_pcie_power_off(pcie);

	return 0;
}
2481
/*
 * Resume callback (runtime PM and noirq system sleep): power the controller
 * back up, re-enable it, restore address translations, MSI and the root
 * ports. Powers back off if the controller fails to enable.
 */
static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "tegra pcie power on fail: %d\n", err);
		return err;
	}
	err = tegra_pcie_enable_controller(pcie);
	if (err) {
		dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
		goto poweroff;
	}
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);

	tegra_pcie_enable_ports(pcie);

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);

	return err;
}
2511
/* The same suspend/resume pair serves runtime PM and noirq system sleep. */
static const struct dev_pm_ops tegra_pcie_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
				      tegra_pcie_pm_resume)
};

static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		/* unbinding a live host bridge is unsafe */
		.suppress_bind_attrs = true,
		.pm = &tegra_pcie_pm_ops,
	},
	.probe = tegra_pcie_probe,
	.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");
2530