linux/drivers/pci/host/pci-tegra.c
   1/*
   2 * PCIe host controller driver for Tegra SoCs
   3 *
   4 * Copyright (c) 2010, CompuLab, Ltd.
   5 * Author: Mike Rapoport <mike@compulab.co.il>
   6 *
   7 * Based on NVIDIA PCIe driver
   8 * Copyright (c) 2008-2009, NVIDIA Corporation.
   9 *
  10 * Bits taken from arch/arm/mach-dove/pcie.c
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2 of the License, or
  15 * (at your option) any later version.
  16 *
  17 * This program is distributed in the hope that it will be useful, but WITHOUT
  18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  19 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  20 * more details.
  21 *
  22 * You should have received a copy of the GNU General Public License along
  23 * with this program; if not, write to the Free Software Foundation, Inc.,
  24 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  25 */
  26
  27#include <linux/clk.h>
  28#include <linux/debugfs.h>
  29#include <linux/delay.h>
  30#include <linux/export.h>
  31#include <linux/interrupt.h>
  32#include <linux/irq.h>
  33#include <linux/irqdomain.h>
  34#include <linux/kernel.h>
  35#include <linux/module.h>
  36#include <linux/msi.h>
  37#include <linux/of_address.h>
  38#include <linux/of_pci.h>
  39#include <linux/of_platform.h>
  40#include <linux/pci.h>
  41#include <linux/phy/phy.h>
  42#include <linux/platform_device.h>
  43#include <linux/reset.h>
  44#include <linux/sizes.h>
  45#include <linux/slab.h>
  46#include <linux/vmalloc.h>
  47#include <linux/regulator/consumer.h>
  48
  49#include <soc/tegra/cpuidle.h>
  50#include <soc/tegra/pmc.h>
  51
  52#include <asm/mach/irq.h>
  53#include <asm/mach/map.h>
  54#include <asm/mach/pci.h>
  55
  56#define INT_PCI_MSI_NR (8 * 32)
  57
  58/* register definitions */
  59
  60#define AFI_AXI_BAR0_SZ 0x00
  61#define AFI_AXI_BAR1_SZ 0x04
  62#define AFI_AXI_BAR2_SZ 0x08
  63#define AFI_AXI_BAR3_SZ 0x0c
  64#define AFI_AXI_BAR4_SZ 0x10
  65#define AFI_AXI_BAR5_SZ 0x14
  66
  67#define AFI_AXI_BAR0_START      0x18
  68#define AFI_AXI_BAR1_START      0x1c
  69#define AFI_AXI_BAR2_START      0x20
  70#define AFI_AXI_BAR3_START      0x24
  71#define AFI_AXI_BAR4_START      0x28
  72#define AFI_AXI_BAR5_START      0x2c
  73
  74#define AFI_FPCI_BAR0   0x30
  75#define AFI_FPCI_BAR1   0x34
  76#define AFI_FPCI_BAR2   0x38
  77#define AFI_FPCI_BAR3   0x3c
  78#define AFI_FPCI_BAR4   0x40
  79#define AFI_FPCI_BAR5   0x44
  80
  81#define AFI_CACHE_BAR0_SZ       0x48
  82#define AFI_CACHE_BAR0_ST       0x4c
  83#define AFI_CACHE_BAR1_SZ       0x50
  84#define AFI_CACHE_BAR1_ST       0x54
  85
  86#define AFI_MSI_BAR_SZ          0x60
  87#define AFI_MSI_FPCI_BAR_ST     0x64
  88#define AFI_MSI_AXI_BAR_ST      0x68
  89
  90#define AFI_MSI_VEC0            0x6c
  91#define AFI_MSI_VEC1            0x70
  92#define AFI_MSI_VEC2            0x74
  93#define AFI_MSI_VEC3            0x78
  94#define AFI_MSI_VEC4            0x7c
  95#define AFI_MSI_VEC5            0x80
  96#define AFI_MSI_VEC6            0x84
  97#define AFI_MSI_VEC7            0x88
  98
  99#define AFI_MSI_EN_VEC0         0x8c
 100#define AFI_MSI_EN_VEC1         0x90
 101#define AFI_MSI_EN_VEC2         0x94
 102#define AFI_MSI_EN_VEC3         0x98
 103#define AFI_MSI_EN_VEC4         0x9c
 104#define AFI_MSI_EN_VEC5         0xa0
 105#define AFI_MSI_EN_VEC6         0xa4
 106#define AFI_MSI_EN_VEC7         0xa8
 107
 108#define AFI_CONFIGURATION               0xac
 109#define  AFI_CONFIGURATION_EN_FPCI      (1 << 0)
 110
 111#define AFI_FPCI_ERROR_MASKS    0xb0
 112
 113#define AFI_INTR_MASK           0xb4
 114#define  AFI_INTR_MASK_INT_MASK (1 << 0)
 115#define  AFI_INTR_MASK_MSI_MASK (1 << 8)
 116
 117#define AFI_INTR_CODE                   0xb8
 118#define  AFI_INTR_CODE_MASK             0xf
 119#define  AFI_INTR_INI_SLAVE_ERROR       1
 120#define  AFI_INTR_INI_DECODE_ERROR      2
 121#define  AFI_INTR_TARGET_ABORT          3
 122#define  AFI_INTR_MASTER_ABORT          4
 123#define  AFI_INTR_INVALID_WRITE         5
 124#define  AFI_INTR_LEGACY                6
 125#define  AFI_INTR_FPCI_DECODE_ERROR     7
 126#define  AFI_INTR_AXI_DECODE_ERROR      8
 127#define  AFI_INTR_FPCI_TIMEOUT          9
 128#define  AFI_INTR_PE_PRSNT_SENSE        10
 129#define  AFI_INTR_PE_CLKREQ_SENSE       11
 130#define  AFI_INTR_CLKCLAMP_SENSE        12
 131#define  AFI_INTR_RDY4PD_SENSE          13
 132#define  AFI_INTR_P2P_ERROR             14
 133
 134#define AFI_INTR_SIGNATURE      0xbc
 135#define AFI_UPPER_FPCI_ADDRESS  0xc0
 136#define AFI_SM_INTR_ENABLE      0xc4
 137#define  AFI_SM_INTR_INTA_ASSERT        (1 << 0)
 138#define  AFI_SM_INTR_INTB_ASSERT        (1 << 1)
 139#define  AFI_SM_INTR_INTC_ASSERT        (1 << 2)
 140#define  AFI_SM_INTR_INTD_ASSERT        (1 << 3)
 141#define  AFI_SM_INTR_INTA_DEASSERT      (1 << 4)
 142#define  AFI_SM_INTR_INTB_DEASSERT      (1 << 5)
 143#define  AFI_SM_INTR_INTC_DEASSERT      (1 << 6)
 144#define  AFI_SM_INTR_INTD_DEASSERT      (1 << 7)
 145
 146#define AFI_AFI_INTR_ENABLE             0xc8
 147#define  AFI_INTR_EN_INI_SLVERR         (1 << 0)
 148#define  AFI_INTR_EN_INI_DECERR         (1 << 1)
 149#define  AFI_INTR_EN_TGT_SLVERR         (1 << 2)
 150#define  AFI_INTR_EN_TGT_DECERR         (1 << 3)
 151#define  AFI_INTR_EN_TGT_WRERR          (1 << 4)
 152#define  AFI_INTR_EN_DFPCI_DECERR       (1 << 5)
 153#define  AFI_INTR_EN_AXI_DECERR         (1 << 6)
 154#define  AFI_INTR_EN_FPCI_TIMEOUT       (1 << 7)
 155#define  AFI_INTR_EN_PRSNT_SENSE        (1 << 8)
 156
 157#define AFI_PCIE_CONFIG                                 0x0f8
 158#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)                (1 << ((x) + 1))
 159#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL               0xe
 160#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK       (0xf << 20)
 161#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE     (0x0 << 20)
 162#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420        (0x0 << 20)
 163#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1      (0x0 << 20)
 164#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL       (0x1 << 20)
 165#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222        (0x1 << 20)
 166#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1      (0x1 << 20)
 167#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411        (0x2 << 20)
 168
 169#define AFI_FUSE                        0x104
 170#define  AFI_FUSE_PCIE_T0_GEN2_DIS      (1 << 2)
 171
 172#define AFI_PEX0_CTRL                   0x110
 173#define AFI_PEX1_CTRL                   0x118
 174#define AFI_PEX2_CTRL                   0x128
 175#define  AFI_PEX_CTRL_RST               (1 << 0)
 176#define  AFI_PEX_CTRL_CLKREQ_EN         (1 << 1)
 177#define  AFI_PEX_CTRL_REFCLK_EN         (1 << 3)
 178#define  AFI_PEX_CTRL_OVERRIDE_EN       (1 << 4)
 179
 180#define AFI_PLLE_CONTROL                0x160
 181#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
 182#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 183
 184#define AFI_PEXBIAS_CTRL_0              0x168
 185
 186#define RP_VEND_XP      0x00000F00
 187#define  RP_VEND_XP_DL_UP       (1 << 30)
 188
 189#define RP_PRIV_MISC    0x00000FE0
 190#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
 191#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
 192
 193#define RP_LINK_CONTROL_STATUS                  0x00000090
 194#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE  0x20000000
 195#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK   0x3fff0000
 196
 197#define PADS_CTL_SEL            0x0000009C
 198
 199#define PADS_CTL                0x000000A0
 200#define  PADS_CTL_IDDQ_1L       (1 << 0)
 201#define  PADS_CTL_TX_DATA_EN_1L (1 << 6)
 202#define  PADS_CTL_RX_DATA_EN_1L (1 << 10)
 203
 204#define PADS_PLL_CTL_TEGRA20                    0x000000B8
 205#define PADS_PLL_CTL_TEGRA30                    0x000000B4
 206#define  PADS_PLL_CTL_RST_B4SM                  (1 << 1)
 207#define  PADS_PLL_CTL_LOCKDET                   (1 << 8)
 208#define  PADS_PLL_CTL_REFCLK_MASK               (0x3 << 16)
 209#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML       (0 << 16)
 210#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS      (1 << 16)
 211#define  PADS_PLL_CTL_REFCLK_EXTERNAL           (2 << 16)
 212#define  PADS_PLL_CTL_TXCLKREF_MASK             (0x1 << 20)
 213#define  PADS_PLL_CTL_TXCLKREF_DIV10            (0 << 20)
 214#define  PADS_PLL_CTL_TXCLKREF_DIV5             (1 << 20)
 215#define  PADS_PLL_CTL_TXCLKREF_BUF_EN           (1 << 22)
 216
 217#define PADS_REFCLK_CFG0                        0x000000C8
 218#define PADS_REFCLK_CFG1                        0x000000CC
 219#define PADS_REFCLK_BIAS                        0x000000D0
 220
 221/*
 222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 223 * entries, one entry per PCIe port. These field definitions and desired
 224 * values aren't in the TRM, but do come from NVIDIA.
 225 */
 226#define PADS_REFCLK_CFG_TERM_SHIFT              2  /* 6:2 */
 227#define PADS_REFCLK_CFG_E_TERM_SHIFT            7
 228#define PADS_REFCLK_CFG_PREDI_SHIFT             8  /* 11:8 */
 229#define PADS_REFCLK_CFG_DRVI_SHIFT              12 /* 15:12 */
 230
 231/* Default value provided by HW engineering is 0xfa5c */
 232#define PADS_REFCLK_CFG_VALUE \
 233        ( \
 234                (0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
 235                (0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
 236                (0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
 237                (0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)     \
 238        )
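
/*
 * Worked decomposition of the value above (a sanity check, not extra TRM
 * data): 0x17 << 2 = 0x005c, 0x0 << 7 = 0x0000, 0xa << 8 = 0x0a00 and
 * 0xf << 12 = 0xf000, which OR together to 0xfa5c, matching the default
 * quoted by HW engineering. PADS_REFCLK_CFG0 carries this 16-bit entry
 * twice, once for each of ports 0 and 1; see tegra_pcie_phy_enable().
 */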
 239
 240struct tegra_msi {
 241        struct msi_controller chip;
 242        DECLARE_BITMAP(used, INT_PCI_MSI_NR);
 243        struct irq_domain *domain;
 244        unsigned long pages;
 245        struct mutex lock;
 246        int irq;
 247};
 248
 249/* used to differentiate between Tegra SoC generations */
 250struct tegra_pcie_soc_data {
 251        unsigned int num_ports;
 252        unsigned int msi_base_shift;
 253        u32 pads_pll_ctl;
 254        u32 tx_ref_sel;
 255        bool has_pex_clkreq_en;
 256        bool has_pex_bias_ctrl;
 257        bool has_intr_prsnt_sense;
 258        bool has_cml_clk;
 259        bool has_gen2;
 260};
 261
 262static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
 263{
 264        return container_of(chip, struct tegra_msi, chip);
 265}
 266
 267struct tegra_pcie {
 268        struct device *dev;
 269
 270        void __iomem *pads;
 271        void __iomem *afi;
 272        int irq;
 273
 274        struct list_head buses;
 275        struct resource *cs;
 276
 277        struct resource all;
 278        struct resource io;
 279        struct resource pio;
 280        struct resource mem;
 281        struct resource prefetch;
 282        struct resource busn;
 283
 284        struct clk *pex_clk;
 285        struct clk *afi_clk;
 286        struct clk *pll_e;
 287        struct clk *cml_clk;
 288
 289        struct reset_control *pex_rst;
 290        struct reset_control *afi_rst;
 291        struct reset_control *pcie_xrst;
 292
 293        struct phy *phy;
 294
 295        struct tegra_msi msi;
 296
 297        struct list_head ports;
 298        unsigned int num_ports;
 299        u32 xbar_config;
 300
 301        struct regulator_bulk_data *supplies;
 302        unsigned int num_supplies;
 303
 304        const struct tegra_pcie_soc_data *soc_data;
 305        struct dentry *debugfs;
 306};
 307
 308struct tegra_pcie_port {
 309        struct tegra_pcie *pcie;
 310        struct list_head list;
 311        struct resource regs;
 312        void __iomem *base;
 313        unsigned int index;
 314        unsigned int lanes;
 315};
 316
 317struct tegra_pcie_bus {
 318        struct vm_struct *area;
 319        struct list_head list;
 320        unsigned int nr;
 321};
 322
 323static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
 324{
 325        return sys->private_data;
 326}
 327
 328static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
 329                              unsigned long offset)
 330{
 331        writel(value, pcie->afi + offset);
 332}
 333
 334static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
 335{
 336        return readl(pcie->afi + offset);
 337}
 338
 339static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
 340                               unsigned long offset)
 341{
 342        writel(value, pcie->pads + offset);
 343}
 344
 345static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
 346{
 347        return readl(pcie->pads + offset);
 348}
 349
 350/*
 351 * The configuration space mapping on Tegra is somewhat similar to the ECAM
  352 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 353 * register accesses are mapped:
 354 *
 355 *    [27:24] extended register number
 356 *    [23:16] bus number
 357 *    [15:11] device number
 358 *    [10: 8] function number
 359 *    [ 7: 0] register number
 360 *
 361 * Mapping the whole extended configuration space would require 256 MiB of
 362 * virtual address space, only a small part of which will actually be used.
  363 * To work around this, 1 MiB of virtual address space is allocated per bus
  364 * when the bus is first accessed. When the physical range is mapped, the
  365 * bus number bits are hidden so that the extended register number bits
 366 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 367 *
 368 *    [19:16] extended register number
 369 *    [15:11] device number
 370 *    [10: 8] function number
 371 *    [ 7: 0] register number
 372 *
 373 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 374 * address space via the MMU.
 375 */
 376static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
 377{
 378        return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
 379               (PCI_FUNC(devfn) << 8) | (where & 0xfc);
 380}
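
/*
 * Worked example (hypothetical request, for illustration only): a config
 * access to register 0x144 of device 02.1 on a downstream bus has
 * devfn = 0x11 and where = 0x144, so the offset into the 1 MiB per-bus
 * window is
 *
 *   ((0x144 & 0xf00) << 8) | (2 << 11) | (1 << 8) | (0x144 & 0xfc)
 *     = 0x10000 | 0x1000 | 0x100 | 0x44 = 0x11144
 *
 * i.e. extended register 1 in bits [19:16], device 2 in [15:11], function
 * 1 in [10:8] and register 0x44 in [7:0], matching the virtual layout
 * described above.
 */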
 381
 382static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 383                                                   unsigned int busnr)
 384{
 385        pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
 386                        L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
 387        phys_addr_t cs = pcie->cs->start;
 388        struct tegra_pcie_bus *bus;
 389        unsigned int i;
 390        int err;
 391
 392        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
 393        if (!bus)
 394                return ERR_PTR(-ENOMEM);
 395
 396        INIT_LIST_HEAD(&bus->list);
 397        bus->nr = busnr;
 398
 399        /* allocate 1 MiB of virtual addresses */
 400        bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
 401        if (!bus->area) {
 402                err = -ENOMEM;
 403                goto free;
 404        }
 405
  406        /* map each of the 16 chunks of 64 KiB */
 407        for (i = 0; i < 16; i++) {
 408                unsigned long virt = (unsigned long)bus->area->addr +
 409                                     i * SZ_64K;
 410                phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
 411
 412                err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 413                if (err < 0) {
 414                        dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
 415                                err);
 416                        goto unmap;
 417                }
 418        }
 419
 420        return bus;
 421
 422unmap:
 423        vunmap(bus->area->addr);
 424free:
 425        kfree(bus);
 426        return ERR_PTR(err);
 427}
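
/*
 * For example (illustrative numbers only), with busnr = 5 the loop above
 * maps chunk i = 2, at virtual offset 2 * SZ_64K, to physical address
 * cs + 2 * SZ_16M + 5 * SZ_64K, i.e. the 64 KiB holding extended register
 * "page" 2 of bus 5. Stepping the physical address by 16 MiB per chunk is
 * what hides the bus number bits and makes the extended register number
 * show up at bits [19:16] of the virtual mapping.
 */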
 428
 429/*
 430 * Look up a virtual address mapping for the specified bus number. If no such
 431 * mapping exists, try to create one.
 432 */
 433static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 434                                        unsigned int busnr)
 435{
 436        struct tegra_pcie_bus *bus;
 437
 438        list_for_each_entry(bus, &pcie->buses, list)
 439                if (bus->nr == busnr)
 440                        return (void __iomem *)bus->area->addr;
 441
 442        bus = tegra_pcie_bus_alloc(pcie, busnr);
 443        if (IS_ERR(bus))
 444                return NULL;
 445
 446        list_add_tail(&bus->list, &pcie->buses);
 447
 448        return (void __iomem *)bus->area->addr;
 449}
 450
 451static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
 452                                             unsigned int devfn,
 453                                             int where)
 454{
 455        struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
 456        void __iomem *addr = NULL;
 457
 458        if (bus->number == 0) {
 459                unsigned int slot = PCI_SLOT(devfn);
 460                struct tegra_pcie_port *port;
 461
 462                list_for_each_entry(port, &pcie->ports, list) {
 463                        if (port->index + 1 == slot) {
 464                                addr = port->base + (where & ~3);
 465                                break;
 466                        }
 467                }
 468        } else {
 469                addr = tegra_pcie_bus_map(pcie, bus->number);
 470                if (!addr) {
 471                        dev_err(pcie->dev,
 472                                "failed to map cfg. space for bus %u\n",
 473                                bus->number);
 474                        return NULL;
 475                }
 476
 477                addr += tegra_pcie_conf_offset(devfn, where);
 478        }
 479
 480        return addr;
 481}
 482
 483static struct pci_ops tegra_pcie_ops = {
 484        .map_bus = tegra_pcie_conf_address,
 485        .read = pci_generic_config_read32,
 486        .write = pci_generic_config_write32,
 487};
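
/*
 * Note that all accesses go through the generic 32-bit accessors: .map_bus
 * returns a dword-aligned address (hence the "where & ~3" and "where & 0xfc"
 * masking above) and pci_generic_config_read32()/_write32() take care of
 * extracting or read-modify-writing the requested byte or word.
 */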
 488
 489static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
 490{
 491        unsigned long ret = 0;
 492
 493        switch (port->index) {
 494        case 0:
 495                ret = AFI_PEX0_CTRL;
 496                break;
 497
 498        case 1:
 499                ret = AFI_PEX1_CTRL;
 500                break;
 501
 502        case 2:
 503                ret = AFI_PEX2_CTRL;
 504                break;
 505        }
 506
 507        return ret;
 508}
 509
 510static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
 511{
 512        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 513        unsigned long value;
 514
 515        /* pulse reset signal */
 516        value = afi_readl(port->pcie, ctrl);
 517        value &= ~AFI_PEX_CTRL_RST;
 518        afi_writel(port->pcie, value, ctrl);
 519
 520        usleep_range(1000, 2000);
 521
 522        value = afi_readl(port->pcie, ctrl);
 523        value |= AFI_PEX_CTRL_RST;
 524        afi_writel(port->pcie, value, ctrl);
 525}
 526
 527static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 528{
 529        const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 530        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 531        unsigned long value;
 532
 533        /* enable reference clock */
 534        value = afi_readl(port->pcie, ctrl);
 535        value |= AFI_PEX_CTRL_REFCLK_EN;
 536
 537        if (soc->has_pex_clkreq_en)
 538                value |= AFI_PEX_CTRL_CLKREQ_EN;
 539
 540        value |= AFI_PEX_CTRL_OVERRIDE_EN;
 541
 542        afi_writel(port->pcie, value, ctrl);
 543
 544        tegra_pcie_port_reset(port);
 545}
 546
 547static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 548{
 549        const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 550        unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 551        unsigned long value;
 552
 553        /* assert port reset */
 554        value = afi_readl(port->pcie, ctrl);
 555        value &= ~AFI_PEX_CTRL_RST;
 556        afi_writel(port->pcie, value, ctrl);
 557
 558        /* disable reference clock */
 559        value = afi_readl(port->pcie, ctrl);
 560
 561        if (soc->has_pex_clkreq_en)
 562                value &= ~AFI_PEX_CTRL_CLKREQ_EN;
 563
 564        value &= ~AFI_PEX_CTRL_REFCLK_EN;
 565        afi_writel(port->pcie, value, ctrl);
 566}
 567
 568static void tegra_pcie_port_free(struct tegra_pcie_port *port)
 569{
 570        struct tegra_pcie *pcie = port->pcie;
 571
 572        devm_iounmap(pcie->dev, port->base);
 573        devm_release_mem_region(pcie->dev, port->regs.start,
 574                                resource_size(&port->regs));
 575        list_del(&port->list);
 576        devm_kfree(pcie->dev, port);
 577}
 578
 579/* Tegra PCIE root complex wrongly reports device class */
 580static void tegra_pcie_fixup_class(struct pci_dev *dev)
 581{
 582        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
 583}
 584DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
 585DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
 586DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
 587DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
 588
 589/* Tegra PCIE requires relaxed ordering */
 590static void tegra_pcie_relax_enable(struct pci_dev *dev)
 591{
 592        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 593}
 594DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 595
 596static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 597{
 598        struct tegra_pcie *pcie = sys_to_pcie(sys);
 599        int err;
 600
 601        err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
 602        if (err < 0)
 603                return err;
 604
 605        err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
 606        if (err)
 607                return err;
 608
 609        pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 610        pci_add_resource_offset(&sys->resources, &pcie->prefetch,
 611                                sys->mem_offset);
 612        pci_add_resource(&sys->resources, &pcie->busn);
 613
 614        pci_ioremap_io(pcie->pio.start, pcie->io.start);
 615
 616        return 1;
 617}
 618
 619static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 620{
 621        struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
 622        int irq;
 623
 624        tegra_cpuidle_pcie_irqs_in_use();
 625
 626        irq = of_irq_parse_and_map_pci(pdev, slot, pin);
 627        if (!irq)
 628                irq = pcie->irq;
 629
 630        return irq;
 631}
 632
 633static irqreturn_t tegra_pcie_isr(int irq, void *arg)
 634{
 635        const char *err_msg[] = {
 636                "Unknown",
 637                "AXI slave error",
 638                "AXI decode error",
 639                "Target abort",
 640                "Master abort",
 641                "Invalid write",
 642                "Legacy interrupt",
 643                "Response decoding error",
 644                "AXI response decoding error",
 645                "Transaction timeout",
 646                "Slot present pin change",
 647                "Slot clock request change",
 648                "TMS clock ramp change",
 649                "TMS ready for power down",
 650                "Peer2Peer error",
 651        };
 652        struct tegra_pcie *pcie = arg;
 653        u32 code, signature;
 654
 655        code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
 656        signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
 657        afi_writel(pcie, 0, AFI_INTR_CODE);
 658
 659        if (code == AFI_INTR_LEGACY)
 660                return IRQ_NONE;
 661
 662        if (code >= ARRAY_SIZE(err_msg))
 663                code = 0;
 664
 665        /*
 666         * do not pollute kernel log with master abort reports since they
 667         * happen a lot during enumeration
 668         */
 669        if (code == AFI_INTR_MASTER_ABORT)
 670                dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
 671                        signature);
 672        else
 673                dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
 674                        signature);
 675
 676        if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
 677            code == AFI_INTR_FPCI_DECODE_ERROR) {
 678                u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
 679                u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
 680
 681                if (code == AFI_INTR_MASTER_ABORT)
 682                        dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
 683                else
 684                        dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
 685        }
 686
 687        return IRQ_HANDLED;
 688}
 689
 690/*
 691 * FPCI map is as follows:
 692 * - 0xfdfc000000: I/O space
 693 * - 0xfdfe000000: type 0 configuration space
 694 * - 0xfdff000000: type 1 configuration space
 695 * - 0xfe00000000: type 0 extended configuration space
 696 * - 0xfe10000000: type 1 extended configuration space
 697 */
 698static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
 699{
 700        u32 fpci_bar, size, axi_address;
 701
 702        /* Bar 0: type 1 extended configuration space */
 703        fpci_bar = 0xfe100000;
 704        size = resource_size(pcie->cs);
 705        axi_address = pcie->cs->start;
 706        afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
 707        afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
 708        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
 709
 710        /* Bar 1: downstream IO bar */
 711        fpci_bar = 0xfdfc0000;
 712        size = resource_size(&pcie->io);
 713        axi_address = pcie->io.start;
 714        afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
 715        afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
 716        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
 717
 718        /* Bar 2: prefetchable memory BAR */
 719        fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
 720        size = resource_size(&pcie->prefetch);
 721        axi_address = pcie->prefetch.start;
 722        afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
 723        afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
 724        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
 725
 726        /* Bar 3: non prefetchable memory BAR */
 727        fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
 728        size = resource_size(&pcie->mem);
 729        axi_address = pcie->mem.start;
 730        afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
 731        afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
 732        afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
 733
 734        /* NULL out the remaining BARs as they are not used */
 735        afi_writel(pcie, 0, AFI_AXI_BAR4_START);
 736        afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
 737        afi_writel(pcie, 0, AFI_FPCI_BAR4);
 738
 739        afi_writel(pcie, 0, AFI_AXI_BAR5_START);
 740        afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
 741        afi_writel(pcie, 0, AFI_FPCI_BAR5);
 742
 743        /* map all upstream transactions as uncached */
 744        afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
 745        afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
 746        afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
 747        afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
 748
  749        /* MSI translations are set up only when needed */
 750        afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
 751        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 752        afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
 753        afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 754}
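
/*
 * The memory BAR encoding used above packs FPCI address bits [39:12] into
 * bits [31:4] of the AFI_FPCI_BARn value (bit 0 is set only for the two
 * memory windows; its exact meaning isn't documented here). Worked example
 * with a made-up window: pcie->prefetch.start = 0x20000000 gives
 * ((0x20000000 >> 12) << 4) | 0x1 = 0x00200001, i.e. a 1:1 mapping to FPCI
 * address 0x20000000. The fixed values 0xfe100000 and 0xfdfc0000 decode the
 * same way to 0xfe10000000 and 0xfdfc000000 from the FPCI map above.
 */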
 755
 756static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 757{
 758        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 759        u32 value;
 760
 761        timeout = jiffies + msecs_to_jiffies(timeout);
 762
 763        while (time_before(jiffies, timeout)) {
 764                value = pads_readl(pcie, soc->pads_pll_ctl);
 765                if (value & PADS_PLL_CTL_LOCKDET)
 766                        return 0;
 767        }
 768
 769        return -ETIMEDOUT;
 770}
 771
 772static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
 773{
 774        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 775        u32 value;
 776        int err;
 777
 778        /* initialize internal PHY, enable up to 16 PCIE lanes */
 779        pads_writel(pcie, 0x0, PADS_CTL_SEL);
 780
 781        /* override IDDQ to 1 on all 4 lanes */
 782        value = pads_readl(pcie, PADS_CTL);
 783        value |= PADS_CTL_IDDQ_1L;
 784        pads_writel(pcie, value, PADS_CTL);
 785
 786        /*
  787          * Set up PHY PLL inputs: select PLLE output as refclock,
 788         * set TX ref sel to div10 (not div5).
 789         */
 790        value = pads_readl(pcie, soc->pads_pll_ctl);
 791        value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
 792        value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 793        pads_writel(pcie, value, soc->pads_pll_ctl);
 794
 795        /* reset PLL */
 796        value = pads_readl(pcie, soc->pads_pll_ctl);
 797        value &= ~PADS_PLL_CTL_RST_B4SM;
 798        pads_writel(pcie, value, soc->pads_pll_ctl);
 799
 800        usleep_range(20, 100);
 801
 802        /* take PLL out of reset  */
 803        value = pads_readl(pcie, soc->pads_pll_ctl);
 804        value |= PADS_PLL_CTL_RST_B4SM;
 805        pads_writel(pcie, value, soc->pads_pll_ctl);
 806
 807        /* Configure the reference clock driver */
 808        value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
 809        pads_writel(pcie, value, PADS_REFCLK_CFG0);
 810        if (soc->num_ports > 2)
 811                pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
 812
 813        /* wait for the PLL to lock */
 814        err = tegra_pcie_pll_wait(pcie, 500);
 815        if (err < 0) {
 816                dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
 817                return err;
 818        }
 819
 820        /* turn off IDDQ override */
 821        value = pads_readl(pcie, PADS_CTL);
 822        value &= ~PADS_CTL_IDDQ_1L;
 823        pads_writel(pcie, value, PADS_CTL);
 824
 825        /* enable TX/RX data */
 826        value = pads_readl(pcie, PADS_CTL);
 827        value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
 828        pads_writel(pcie, value, PADS_CTL);
 829
 830        return 0;
 831}
 832
 833static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
 834{
 835        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 836        struct tegra_pcie_port *port;
 837        unsigned long value;
 838        int err;
 839
 840        /* enable PLL power down */
 841        if (pcie->phy) {
 842                value = afi_readl(pcie, AFI_PLLE_CONTROL);
 843                value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
 844                value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
 845                afi_writel(pcie, value, AFI_PLLE_CONTROL);
 846        }
 847
 848        /* power down PCIe slot clock bias pad */
 849        if (soc->has_pex_bias_ctrl)
 850                afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
 851
 852        /* configure mode and disable all ports */
 853        value = afi_readl(pcie, AFI_PCIE_CONFIG);
 854        value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
 855        value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
 856
 857        list_for_each_entry(port, &pcie->ports, list)
 858                value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
 859
 860        afi_writel(pcie, value, AFI_PCIE_CONFIG);
 861
 862        if (soc->has_gen2) {
 863                value = afi_readl(pcie, AFI_FUSE);
 864                value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
 865                afi_writel(pcie, value, AFI_FUSE);
 866        } else {
 867                value = afi_readl(pcie, AFI_FUSE);
 868                value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
 869                afi_writel(pcie, value, AFI_FUSE);
 870        }
 871
 872        if (!pcie->phy)
 873                err = tegra_pcie_phy_enable(pcie);
 874        else
 875                err = phy_power_on(pcie->phy);
 876
 877        if (err < 0) {
 878                dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
 879                return err;
 880        }
 881
 882        /* take the PCIe interface module out of reset */
 883        reset_control_deassert(pcie->pcie_xrst);
 884
 885        /* finally enable PCIe */
 886        value = afi_readl(pcie, AFI_CONFIGURATION);
 887        value |= AFI_CONFIGURATION_EN_FPCI;
 888        afi_writel(pcie, value, AFI_CONFIGURATION);
 889
 890        value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
 891                AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
 892                AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
 893
 894        if (soc->has_intr_prsnt_sense)
 895                value |= AFI_INTR_EN_PRSNT_SENSE;
 896
 897        afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
 898        afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
 899
 900        /* don't enable MSI for now, only when needed */
 901        afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
 902
 903        /* disable all exceptions */
 904        afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
 905
 906        return 0;
 907}
 908
 909static void tegra_pcie_power_off(struct tegra_pcie *pcie)
 910{
 911        int err;
 912
 913        /* TODO: disable and unprepare clocks? */
 914
 915        err = phy_power_off(pcie->phy);
 916        if (err < 0)
 917                dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
 918
 919        reset_control_assert(pcie->pcie_xrst);
 920        reset_control_assert(pcie->afi_rst);
 921        reset_control_assert(pcie->pex_rst);
 922
 923        tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
 924
 925        err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
 926        if (err < 0)
 927                dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
 928}
 929
 930static int tegra_pcie_power_on(struct tegra_pcie *pcie)
 931{
 932        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 933        int err;
 934
 935        reset_control_assert(pcie->pcie_xrst);
 936        reset_control_assert(pcie->afi_rst);
 937        reset_control_assert(pcie->pex_rst);
 938
 939        tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
 940
 941        /* enable regulators */
 942        err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
 943        if (err < 0)
 944                dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
 945
 946        err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
 947                                                pcie->pex_clk,
 948                                                pcie->pex_rst);
 949        if (err) {
 950                dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
 951                return err;
 952        }
 953
 954        reset_control_deassert(pcie->afi_rst);
 955
 956        err = clk_prepare_enable(pcie->afi_clk);
 957        if (err < 0) {
 958                dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
 959                return err;
 960        }
 961
 962        if (soc->has_cml_clk) {
 963                err = clk_prepare_enable(pcie->cml_clk);
 964                if (err < 0) {
 965                        dev_err(pcie->dev, "failed to enable CML clock: %d\n",
 966                                err);
 967                        return err;
 968                }
 969        }
 970
 971        err = clk_prepare_enable(pcie->pll_e);
 972        if (err < 0) {
 973                dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
 974                return err;
 975        }
 976
 977        return 0;
 978}
 979
 980static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
 981{
 982        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
 983
 984        pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
 985        if (IS_ERR(pcie->pex_clk))
 986                return PTR_ERR(pcie->pex_clk);
 987
 988        pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
 989        if (IS_ERR(pcie->afi_clk))
 990                return PTR_ERR(pcie->afi_clk);
 991
 992        pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
 993        if (IS_ERR(pcie->pll_e))
 994                return PTR_ERR(pcie->pll_e);
 995
 996        if (soc->has_cml_clk) {
 997                pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
 998                if (IS_ERR(pcie->cml_clk))
 999                        return PTR_ERR(pcie->cml_clk);
1000        }
1001
1002        return 0;
1003}
1004
1005static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1006{
1007        pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1008        if (IS_ERR(pcie->pex_rst))
1009                return PTR_ERR(pcie->pex_rst);
1010
1011        pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1012        if (IS_ERR(pcie->afi_rst))
1013                return PTR_ERR(pcie->afi_rst);
1014
1015        pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1016        if (IS_ERR(pcie->pcie_xrst))
1017                return PTR_ERR(pcie->pcie_xrst);
1018
1019        return 0;
1020}
1021
1022static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1023{
1024        struct platform_device *pdev = to_platform_device(pcie->dev);
1025        struct resource *pads, *afi, *res;
1026        int err;
1027
1028        err = tegra_pcie_clocks_get(pcie);
1029        if (err) {
1030                dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1031                return err;
1032        }
1033
1034        err = tegra_pcie_resets_get(pcie);
1035        if (err) {
1036                dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1037                return err;
1038        }
1039
1040        pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1041        if (IS_ERR(pcie->phy)) {
1042                err = PTR_ERR(pcie->phy);
1043                dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1044                return err;
1045        }
1046
1047        err = phy_init(pcie->phy);
1048        if (err < 0) {
1049                dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1050                return err;
1051        }
1052
1053        err = tegra_pcie_power_on(pcie);
1054        if (err) {
1055                dev_err(&pdev->dev, "failed to power up: %d\n", err);
1056                return err;
1057        }
1058
1059        pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1060        pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1061        if (IS_ERR(pcie->pads)) {
1062                err = PTR_ERR(pcie->pads);
1063                goto poweroff;
1064        }
1065
1066        afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1067        pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1068        if (IS_ERR(pcie->afi)) {
1069                err = PTR_ERR(pcie->afi);
1070                goto poweroff;
1071        }
1072
1073        /* request configuration space, but remap later, on demand */
1074        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1075        if (!res) {
1076                err = -EADDRNOTAVAIL;
1077                goto poweroff;
1078        }
1079
1080        pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1081                                           resource_size(res), res->name);
1082        if (!pcie->cs) {
1083                err = -EADDRNOTAVAIL;
1084                goto poweroff;
1085        }
1086
1087        /* request interrupt */
1088        err = platform_get_irq_byname(pdev, "intr");
1089        if (err < 0) {
1090                dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1091                goto poweroff;
1092        }
1093
1094        pcie->irq = err;
1095
1096        err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1097        if (err) {
1098                dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1099                goto poweroff;
1100        }
1101
1102        return 0;
1103
1104poweroff:
1105        tegra_pcie_power_off(pcie);
1106        return err;
1107}
1108
1109static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1110{
1111        int err;
1112
1113        if (pcie->irq > 0)
1114                free_irq(pcie->irq, pcie);
1115
1116        tegra_pcie_power_off(pcie);
1117
1118        err = phy_exit(pcie->phy);
1119        if (err < 0)
1120                dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1121
1122        return 0;
1123}
1124
1125static int tegra_msi_alloc(struct tegra_msi *chip)
1126{
1127        int msi;
1128
1129        mutex_lock(&chip->lock);
1130
1131        msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1132        if (msi < INT_PCI_MSI_NR)
1133                set_bit(msi, chip->used);
1134        else
1135                msi = -ENOSPC;
1136
1137        mutex_unlock(&chip->lock);
1138
1139        return msi;
1140}
1141
1142static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1143{
1144        struct device *dev = chip->chip.dev;
1145
1146        mutex_lock(&chip->lock);
1147
1148        if (!test_bit(irq, chip->used))
1149                dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1150        else
1151                clear_bit(irq, chip->used);
1152
1153        mutex_unlock(&chip->lock);
1154}
1155
1156static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1157{
1158        struct tegra_pcie *pcie = data;
1159        struct tegra_msi *msi = &pcie->msi;
1160        unsigned int i, processed = 0;
1161
1162        for (i = 0; i < 8; i++) {
1163                unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1164
1165                while (reg) {
1166                        unsigned int offset = find_first_bit(&reg, 32);
1167                        unsigned int index = i * 32 + offset;
1168                        unsigned int irq;
1169
1170                        /* clear the interrupt */
1171                        afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1172
1173                        irq = irq_find_mapping(msi->domain, index);
1174                        if (irq) {
1175                                if (test_bit(index, msi->used))
1176                                        generic_handle_irq(irq);
1177                                else
1178                                        dev_info(pcie->dev, "unhandled MSI\n");
1179                        } else {
1180                                /*
1181                                 * that's weird who triggered this?
1182                                 * just clear it
1183                                 */
1184                                dev_info(pcie->dev, "unexpected MSI\n");
1185                        }
1186
1187                        /* see if there's any more pending in this vector */
1188                        reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1189
1190                        processed++;
1191                }
1192        }
1193
1194        return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1195}
1196
1197static int tegra_msi_setup_irq(struct msi_controller *chip,
1198                               struct pci_dev *pdev, struct msi_desc *desc)
1199{
1200        struct tegra_msi *msi = to_tegra_msi(chip);
1201        struct msi_msg msg;
1202        unsigned int irq;
1203        int hwirq;
1204
1205        hwirq = tegra_msi_alloc(msi);
1206        if (hwirq < 0)
1207                return hwirq;
1208
1209        irq = irq_create_mapping(msi->domain, hwirq);
1210        if (!irq) {
1211                tegra_msi_free(msi, hwirq);
1212                return -EINVAL;
1213        }
1214
1215        irq_set_msi_desc(irq, desc);
1216
1217        msg.address_lo = virt_to_phys((void *)msi->pages);
1218        /* 32 bit address only */
1219        msg.address_hi = 0;
1220        msg.data = hwirq;
1221
1222        pci_write_msi_msg(irq, &msg);
1223
1224        return 0;
1225}
1226
1227static void tegra_msi_teardown_irq(struct msi_controller *chip,
1228                                   unsigned int irq)
1229{
1230        struct tegra_msi *msi = to_tegra_msi(chip);
1231        struct irq_data *d = irq_get_irq_data(irq);
1232        irq_hw_number_t hwirq = irqd_to_hwirq(d);
1233
1234        irq_dispose_mapping(irq);
1235        tegra_msi_free(msi, hwirq);
1236}
1237
1238static struct irq_chip tegra_msi_irq_chip = {
1239        .name = "Tegra PCIe MSI",
1240        .irq_enable = pci_msi_unmask_irq,
1241        .irq_disable = pci_msi_mask_irq,
1242        .irq_mask = pci_msi_mask_irq,
1243        .irq_unmask = pci_msi_unmask_irq,
1244};
1245
1246static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1247                         irq_hw_number_t hwirq)
1248{
1249        irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1250        irq_set_chip_data(irq, domain->host_data);
1251
1252        tegra_cpuidle_pcie_irqs_in_use();
1253
1254        return 0;
1255}
1256
1257static const struct irq_domain_ops msi_domain_ops = {
1258        .map = tegra_msi_map,
1259};
1260
1261static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1262{
1263        struct platform_device *pdev = to_platform_device(pcie->dev);
1264        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1265        struct tegra_msi *msi = &pcie->msi;
1266        unsigned long base;
1267        int err;
1268        u32 reg;
1269
1270        mutex_init(&msi->lock);
1271
1272        msi->chip.dev = pcie->dev;
1273        msi->chip.setup_irq = tegra_msi_setup_irq;
1274        msi->chip.teardown_irq = tegra_msi_teardown_irq;
1275
1276        msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1277                                            &msi_domain_ops, &msi->chip);
1278        if (!msi->domain) {
1279                dev_err(&pdev->dev, "failed to create IRQ domain\n");
1280                return -ENOMEM;
1281        }
1282
1283        err = platform_get_irq_byname(pdev, "msi");
1284        if (err < 0) {
1285                dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1286                goto err;
1287        }
1288
1289        msi->irq = err;
1290
1291        err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1292                          tegra_msi_irq_chip.name, pcie);
1293        if (err < 0) {
1294                dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1295                goto err;
1296        }
1297
1298        /* setup AFI/FPCI range */
1299        msi->pages = __get_free_pages(GFP_KERNEL, 0);
1300        base = virt_to_phys((void *)msi->pages);
1301
1302        afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1303        afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1304        /* this register is in 4K increments */
1305        afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1306
1307        /* enable all MSI vectors */
1308        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1309        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1310        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1311        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1312        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1313        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1314        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1315        afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1316
1317        /* and unmask the MSI interrupt */
1318        reg = afi_readl(pcie, AFI_INTR_MASK);
1319        reg |= AFI_INTR_MASK_MSI_MASK;
1320        afi_writel(pcie, reg, AFI_INTR_MASK);
1321
1322        return 0;
1323
1324err:
1325        irq_domain_remove(msi->domain);
1326        return err;
1327}
1328
1329static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1330{
1331        struct tegra_msi *msi = &pcie->msi;
1332        unsigned int i, irq;
1333        u32 value;
1334
1335        /* mask the MSI interrupt */
1336        value = afi_readl(pcie, AFI_INTR_MASK);
1337        value &= ~AFI_INTR_MASK_MSI_MASK;
1338        afi_writel(pcie, value, AFI_INTR_MASK);
1339
1340        /* disable all MSI vectors */
1341        afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1342        afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1343        afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1344        afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1345        afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1346        afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1347        afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1348        afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1349
1350        free_pages(msi->pages, 0);
1351
1352        if (msi->irq > 0)
1353                free_irq(msi->irq, pcie);
1354
1355        for (i = 0; i < INT_PCI_MSI_NR; i++) {
1356                irq = irq_find_mapping(msi->domain, i);
1357                if (irq > 0)
1358                        irq_dispose_mapping(irq);
1359        }
1360
1361        irq_domain_remove(msi->domain);
1362
1363        return 0;
1364}
1365
1366static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1367                                      u32 *xbar)
1368{
1369        struct device_node *np = pcie->dev->of_node;
1370
1371        if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1372                switch (lanes) {
1373                case 0x0000104:
1374                        dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1375                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1376                        return 0;
1377
1378                case 0x0000102:
1379                        dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1380                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1381                        return 0;
1382                }
1383        } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1384                switch (lanes) {
1385                case 0x00000204:
1386                        dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1387                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1388                        return 0;
1389
1390                case 0x00020202:
1391                        dev_info(pcie->dev, "2x3 configuration\n");
1392                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1393                        return 0;
1394
1395                case 0x00010104:
1396                        dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1397                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1398                        return 0;
1399                }
1400        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1401                switch (lanes) {
1402                case 0x00000004:
1403                        dev_info(pcie->dev, "single-mode configuration\n");
1404                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1405                        return 0;
1406
1407                case 0x00000202:
1408                        dev_info(pcie->dev, "dual-mode configuration\n");
1409                        *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1410                        return 0;
1411                }
1412        }
1413
1414        return -EINVAL;
1415}
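
/*
 * The magic "lanes" values above pack one root port per byte, port 0 in the
 * least significant byte, each byte holding that port's lane count
 * (presumably assembled from the per-port device tree nodes further down in
 * this file). For instance 0x00020202 is three ports with two lanes each,
 * while 0x0000104 is port 0 with four lanes and port 1 with one lane.
 */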
1416
1417/*
1418 * Check whether a given set of supplies is available in a device tree node.
1419 * This is used to check whether the new or the legacy device tree bindings
1420 * should be used.
1421 */
1422static bool of_regulator_bulk_available(struct device_node *np,
1423                                        struct regulator_bulk_data *supplies,
1424                                        unsigned int num_supplies)
1425{
1426        char property[32];
1427        unsigned int i;
1428
1429        for (i = 0; i < num_supplies; i++) {
1430                snprintf(property, 32, "%s-supply", supplies[i].supply);
1431
1432                if (of_find_property(np, property, NULL) == NULL)
1433                        return false;
1434        }
1435
1436        return true;
1437}
1438
1439/*
1440 * Old versions of the device tree binding for this device used a set of power
1441 * supplies that didn't match the hardware inputs. This happened to work for a
 1442 * number of cases but is not future-proof. However, to preserve backwards-
1443 * compatibility with old device trees, this function will try to use the old
1444 * set of supplies.
1445 */
1446static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1447{
1448        struct device_node *np = pcie->dev->of_node;
1449
1450        if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1451                pcie->num_supplies = 3;
1452        else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1453                pcie->num_supplies = 2;
1454
1455        if (pcie->num_supplies == 0) {
1456                dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1457                        np->full_name);
1458                return -ENODEV;
1459        }
1460
1461        pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1462                                      sizeof(*pcie->supplies),
1463                                      GFP_KERNEL);
1464        if (!pcie->supplies)
1465                return -ENOMEM;
1466
1467        pcie->supplies[0].supply = "pex-clk";
1468        pcie->supplies[1].supply = "vdd";
1469
1470        if (pcie->num_supplies > 2)
1471                pcie->supplies[2].supply = "avdd";
1472
1473        return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1474                                       pcie->supplies);
1475}
1476
1477/*
1478 * Obtains the list of regulators required for a particular generation of the
1479 * IP block.
1480 *
1481 * This would've been nice to do simply by providing static tables for use
1482 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 1483 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1484 * and either seems to be optional depending on which ports are being used.
1485 */
1486static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1487{
1488        struct device_node *np = pcie->dev->of_node;
1489        unsigned int i = 0;
1490
1491        if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1492                pcie->num_supplies = 7;
1493
1494                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1495                                              sizeof(*pcie->supplies),
1496                                              GFP_KERNEL);
1497                if (!pcie->supplies)
1498                        return -ENOMEM;
1499
1500                pcie->supplies[i++].supply = "avddio-pex";
1501                pcie->supplies[i++].supply = "dvddio-pex";
1502                pcie->supplies[i++].supply = "avdd-pex-pll";
1503                pcie->supplies[i++].supply = "hvdd-pex";
1504                pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1505                pcie->supplies[i++].supply = "vddio-pex-ctl";
1506                pcie->supplies[i++].supply = "avdd-pll-erefe";
1507        } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1508                bool need_pexa = false, need_pexb = false;
1509
1510                /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1511                if (lane_mask & 0x0f)
1512                        need_pexa = true;
1513
1514                /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1515                if (lane_mask & 0x30)
1516                        need_pexb = true;
1517
1518                pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1519                                         (need_pexb ? 2 : 0);
1520
1521                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1522                                              sizeof(*pcie->supplies),
1523                                              GFP_KERNEL);
1524                if (!pcie->supplies)
1525                        return -ENOMEM;
1526
1527                pcie->supplies[i++].supply = "avdd-pex-pll";
1528                pcie->supplies[i++].supply = "hvdd-pex";
1529                pcie->supplies[i++].supply = "vddio-pex-ctl";
1530                pcie->supplies[i++].supply = "avdd-plle";
1531
1532                if (need_pexa) {
1533                        pcie->supplies[i++].supply = "avdd-pexa";
1534                        pcie->supplies[i++].supply = "vdd-pexa";
1535                }
1536
1537                if (need_pexb) {
1538                        pcie->supplies[i++].supply = "avdd-pexb";
1539                        pcie->supplies[i++].supply = "vdd-pexb";
1540                }
1541        } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1542                pcie->num_supplies = 5;
1543
1544                pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1545                                              sizeof(*pcie->supplies),
1546                                              GFP_KERNEL);
1547                if (!pcie->supplies)
1548                        return -ENOMEM;
1549
1550                pcie->supplies[0].supply = "avdd-pex";
1551                pcie->supplies[1].supply = "vdd-pex";
1552                pcie->supplies[2].supply = "avdd-pex-pll";
1553                pcie->supplies[3].supply = "avdd-plle";
1554                pcie->supplies[4].supply = "vddio-pex-clk";
1555        }
1556
1557        if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1558                                        pcie->num_supplies))
1559                return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1560                                               pcie->supplies);
1561
1562        /*
1563         * If not all regulators are available for this new scheme, assume
1564         * that the device tree complies with an older version of the device
1565         * tree binding.
1566         */
1567        dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1568
1569        devm_kfree(pcie->dev, pcie->supplies);
1570        pcie->num_supplies = 0;
1571
1572        return tegra_pcie_get_legacy_regulators(pcie);
1573}
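/*
 * The supply names requested above are resolved through the standard
 * regulator consumer binding, i.e. "<name>-supply" properties on the
 * controller's device tree node. A purely illustrative Tegra30 fragment,
 * with placeholder regulator phandles, might contain:
 *
 *	avdd-pex-pll-supply = <&avdd_1v05>;
 *	hvdd-pex-supply = <&vdd_3v3>;
 *	vddio-pex-ctl-supply = <&vdd_3v3>;
 *	avdd-plle-supply = <&avdd_1v05>;
 */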
1574
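/*
 * tegra_pcie_parse_dt() - extract the controller configuration from DT
 *
 * Walks the "ranges" property to set up the I/O, prefetchable and
 * non-prefetchable apertures, parses the bus range and the per-port child
 * nodes (lane counts and register windows), derives the lane crossbar
 * configuration and finally looks up the required power supplies.
 */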
1575static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1576{
1577        const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1578        struct device_node *np = pcie->dev->of_node, *port;
1579        struct of_pci_range_parser parser;
1580        struct of_pci_range range;
1581        u32 lanes = 0, mask = 0;
1582        unsigned int lane = 0;
1583        struct resource res;
1584        int err;
1585
1586        memset(&pcie->all, 0, sizeof(pcie->all));
1587        pcie->all.flags = IORESOURCE_MEM;
1588        pcie->all.name = np->full_name;
1589        pcie->all.start = ~0;
1590        pcie->all.end = 0;
1591
1592        if (of_pci_range_parser_init(&parser, np)) {
1593                dev_err(pcie->dev, "missing \"ranges\" property\n");
1594                return -EINVAL;
1595        }
1596
1597        for_each_of_pci_range(&parser, &range) {
1598                err = of_pci_range_to_resource(&range, np, &res);
1599                if (err < 0)
1600                        return err;
1601
1602                switch (res.flags & IORESOURCE_TYPE_BITS) {
1603                case IORESOURCE_IO:
1604                        memcpy(&pcie->pio, &res, sizeof(res));
1605                        pcie->pio.name = np->full_name;
1606
1607                        /*
1608                         * The Tegra PCIe host bridge uses this to program the
1609                         * mapping of the I/O space to the physical address,
1610                         * so we override the .start and .end fields here that
1611                         * of_pci_range_to_resource() converted to I/O space.
1612                         * We also set the IORESOURCE_MEM type to clarify that
1613                         * the resource is in the physical memory space.
1614                         */
1615                        pcie->io.start = range.cpu_addr;
1616                        pcie->io.end = range.cpu_addr + range.size - 1;
1617                        pcie->io.flags = IORESOURCE_MEM;
1618                        pcie->io.name = "I/O";
1619
1620                        memcpy(&res, &pcie->io, sizeof(res));
1621                        break;
1622
1623                case IORESOURCE_MEM:
1624                        if (res.flags & IORESOURCE_PREFETCH) {
1625                                memcpy(&pcie->prefetch, &res, sizeof(res));
1626                                pcie->prefetch.name = "prefetchable";
1627                        } else {
1628                                memcpy(&pcie->mem, &res, sizeof(res));
1629                                pcie->mem.name = "non-prefetchable";
1630                        }
1631                        break;
1632                }
1633
1634                if (res.start <= pcie->all.start)
1635                        pcie->all.start = res.start;
1636
1637                if (res.end >= pcie->all.end)
1638                        pcie->all.end = res.end;
1639        }
1640
1641        err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1642        if (err < 0)
1643                return err;
1644
1645        err = of_pci_parse_bus_range(np, &pcie->busn);
1646        if (err < 0) {
1647                dev_err(pcie->dev, "failed to parse bus-range property: %d\n",
1648                        err);
1649                pcie->busn.name = np->name;
1650                pcie->busn.start = 0;
1651                pcie->busn.end = 0xff;
1652                pcie->busn.flags = IORESOURCE_BUS;
1653        }
1654
1655        /* parse root ports */
1656        for_each_child_of_node(np, port) {
1657                struct tegra_pcie_port *rp;
1658                unsigned int index;
1659                u32 value;
1660
1661                err = of_pci_get_devfn(port);
1662                if (err < 0) {
1663                        dev_err(pcie->dev, "failed to parse address: %d\n",
1664                                err);
1665                        return err;
1666                }
1667
1668                index = PCI_SLOT(err);
1669
1670                if (index < 1 || index > soc->num_ports) {
1671                        dev_err(pcie->dev, "invalid port number: %u\n", index);
1672                        return -EINVAL;
1673                }
1674
1675                index--;
1676
1677                err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1678                if (err < 0) {
1679                        dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1680                                err);
1681                        return err;
1682                }
1683
1684                if (value > 16) {
1685                        dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1686                        return -EINVAL;
1687                }
1688
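                /*
                 * Pack this port's lane count into its own byte of "lanes";
                 * tegra_pcie_get_xbar_config() matches the combined value
                 * against the lane configurations supported by the SoC.
                 */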
1689                lanes |= value << (index << 3);
1690
1691                if (!of_device_is_available(port)) {
1692                        lane += value;
1693                        continue;
1694                }
1695
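                /*
                 * Record which physical lanes belong to enabled ports; the
                 * resulting mask is passed to tegra_pcie_get_regulators() so
                 * that, on Tegra30, only the PEXA/PEXB supplies for lanes in
                 * use are requested.
                 */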
1696                mask |= ((1 << value) - 1) << lane;
1697                lane += value;
1698
1699                rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1700                if (!rp)
1701                        return -ENOMEM;
1702
1703                err = of_address_to_resource(port, 0, &rp->regs);
1704                if (err < 0) {
1705                        dev_err(pcie->dev, "failed to parse address: %d\n",
1706                                err);
1707                        return err;
1708                }
1709
1710                INIT_LIST_HEAD(&rp->list);
1711                rp->index = index;
1712                rp->lanes = value;
1713                rp->pcie = pcie;
1714
1715                rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1716                if (IS_ERR(rp->base))
1717                        return PTR_ERR(rp->base);
1718
1719                list_add_tail(&rp->list, &pcie->ports);
1720        }
1721
1722        err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1723        if (err < 0) {
1724                dev_err(pcie->dev, "invalid lane configuration\n");
1725                return err;
1726        }
1727
1728        err = tegra_pcie_get_regulators(pcie, mask);
1729        if (err < 0)
1730                return err;
1731
1732        return 0;
1733}
1734
1735/*
1736 * FIXME: If there are no PCIe cards attached, then calling this function
1737 * can significantly increase boot time because of the long timeout
1738 * loops below.
1739 */
1740#define TEGRA_PCIE_LINKUP_TIMEOUT       200     /* up to 1.2 seconds */
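/*
 * Checks whether the link on a root port has come up. Presence detection is
 * overridden first, then the data link layer is polled in two stages: for
 * DL_UP in RP_VEND_XP, then for DL_LINK_ACTIVE in the link control/status
 * register. Each poll runs up to TEGRA_PCIE_LINKUP_TIMEOUT iterations with a
 * 1-2 ms sleep, and the whole sequence is retried up to three times with a
 * port reset in between.
 */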
1741static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1742{
1743        unsigned int retries = 3;
1744        unsigned long value;
1745
1746        /* override presence detection */
1747        value = readl(port->base + RP_PRIV_MISC);
1748        value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1749        value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1750        writel(value, port->base + RP_PRIV_MISC);
1751
1752        do {
1753                unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1754
1755                do {
1756                        value = readl(port->base + RP_VEND_XP);
1757
1758                        if (value & RP_VEND_XP_DL_UP)
1759                                break;
1760
1761                        usleep_range(1000, 2000);
1762                } while (--timeout);
1763
1764                if (!timeout) {
1765                        dev_err(port->pcie->dev, "link %u down, retrying\n",
1766                                port->index);
1767                        goto retry;
1768                }
1769
1770                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1771
1772                do {
1773                        value = readl(port->base + RP_LINK_CONTROL_STATUS);
1774
1775                        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1776                                return true;
1777
1778                        usleep_range(1000, 2000);
1779                } while (--timeout);
1780
1781retry:
1782                tegra_pcie_port_reset(port);
1783        } while (--retries);
1784
1785        return false;
1786}
1787
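/*
 * Brings up all root ports: each port is enabled and its link checked; ports
 * whose link fails to train are disabled and freed so that only working
 * ports are registered. The host bridge is then registered through the ARM
 * pci_common_init_dev() path using the hw_pci callbacks set up below.
 */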
1788static int tegra_pcie_enable(struct tegra_pcie *pcie)
1789{
1790        struct tegra_pcie_port *port, *tmp;
1791        struct hw_pci hw;
1792
1793        list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1794                dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1795                         port->index, port->lanes);
1796
1797                tegra_pcie_port_enable(port);
1798
1799                if (tegra_pcie_port_check_link(port))
1800                        continue;
1801
1802                dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1803
1804                tegra_pcie_port_disable(port);
1805                tegra_pcie_port_free(port);
1806        }
1807
1808        memset(&hw, 0, sizeof(hw));
1809
1810#ifdef CONFIG_PCI_MSI
1811        hw.msi_ctrl = &pcie->msi.chip;
1812#endif
1813
1814        hw.nr_controllers = 1;
1815        hw.private_data = (void **)&pcie;
1816        hw.setup = tegra_pcie_setup;
1817        hw.map_irq = tegra_pcie_map_irq;
1818        hw.ops = &tegra_pcie_ops;
1819
1820        pci_common_init_dev(pcie->dev, &hw);
1821
1822        return 0;
1823}
1824
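/*
 * Per-SoC capability tables, selected via the .data pointer of the OF match
 * table below: number of root ports, MSI base shift, PADS PLL control and TX
 * reference clock selection, and which optional features (CLKREQ, bias
 * control, presence sense, CML clock, Gen2) the IP revision supports.
 */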
1825static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1826        .num_ports = 2,
1827        .msi_base_shift = 0,
1828        .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
1829        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
1830        .has_pex_clkreq_en = false,
1831        .has_pex_bias_ctrl = false,
1832        .has_intr_prsnt_sense = false,
1833        .has_cml_clk = false,
1834        .has_gen2 = false,
1835};
1836
1837static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1838        .num_ports = 3,
1839        .msi_base_shift = 8,
1840        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1841        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1842        .has_pex_clkreq_en = true,
1843        .has_pex_bias_ctrl = true,
1844        .has_intr_prsnt_sense = true,
1845        .has_cml_clk = true,
1846        .has_gen2 = false,
1847};
1848
1849static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1850        .num_ports = 2,
1851        .msi_base_shift = 8,
1852        .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1853        .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1854        .has_pex_clkreq_en = true,
1855        .has_pex_bias_ctrl = true,
1856        .has_intr_prsnt_sense = true,
1857        .has_cml_clk = true,
1858        .has_gen2 = true,
1859};
1860
1861static const struct of_device_id tegra_pcie_of_match[] = {
1862        { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1863        { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1864        { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1865        { },
1866};
1867MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1868
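/*
 * debugfs interface: a "ports" seq_file under the "pcie" directory reports
 * the link state of every registered root port. Assuming debugfs is mounted
 * at /sys/kernel/debug, reading it looks roughly like:
 *
 *	# cat /sys/kernel/debug/pcie/ports
 *	Index  Status
 *	 0     up, active
 */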
1869static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1870{
1871        struct tegra_pcie *pcie = s->private;
1872
1873        if (list_empty(&pcie->ports))
1874                return NULL;
1875
1876        seq_puts(s, "Index  Status\n");
1877
1878        return seq_list_start(&pcie->ports, *pos);
1879}
1880
1881static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
1882{
1883        struct tegra_pcie *pcie = s->private;
1884
1885        return seq_list_next(v, &pcie->ports, pos);
1886}
1887
1888static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
1889{
1890}
1891
1892static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1893{
1894        bool up = false, active = false;
1895        struct tegra_pcie_port *port;
1896        unsigned int value;
1897
1898        port = list_entry(v, struct tegra_pcie_port, list);
1899
1900        value = readl(port->base + RP_VEND_XP);
1901
1902        if (value & RP_VEND_XP_DL_UP)
1903                up = true;
1904
1905        value = readl(port->base + RP_LINK_CONTROL_STATUS);
1906
1907        if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1908                active = true;
1909
1910        seq_printf(s, "%2u     ", port->index);
1911
1912        if (up)
1913                seq_puts(s, "up");
1914
1915        if (active) {
1916                if (up)
1917                        seq_puts(s, ", ");
1918
1919                seq_puts(s, "active");
1920        }
1921
1922        seq_puts(s, "\n");
1923        return 0;
1924}
1925
1926static const struct seq_operations tegra_pcie_ports_seq_ops = {
1927        .start = tegra_pcie_ports_seq_start,
1928        .next = tegra_pcie_ports_seq_next,
1929        .stop = tegra_pcie_ports_seq_stop,
1930        .show = tegra_pcie_ports_seq_show,
1931};
1932
1933static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
1934{
1935        struct tegra_pcie *pcie = inode->i_private;
1936        struct seq_file *s;
1937        int err;
1938
1939        err = seq_open(file, &tegra_pcie_ports_seq_ops);
1940        if (err)
1941                return err;
1942
1943        s = file->private_data;
1944        s->private = pcie;
1945
1946        return 0;
1947}
1948
1949static const struct file_operations tegra_pcie_ports_ops = {
1950        .owner = THIS_MODULE,
1951        .open = tegra_pcie_ports_open,
1952        .read = seq_read,
1953        .llseek = seq_lseek,
1954        .release = seq_release,
1955};
1956
1957static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
1958{
1959        struct dentry *file;
1960
1961        pcie->debugfs = debugfs_create_dir("pcie", NULL);
1962        if (!pcie->debugfs)
1963                return -ENOMEM;
1964
1965        file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
1966                                   pcie, &tegra_pcie_ports_ops);
1967        if (!file)
1968                goto remove;
1969
1970        return 0;
1971
1972remove:
1973        debugfs_remove_recursive(pcie->debugfs);
1974        pcie->debugfs = NULL;
1975        return -ENOMEM;
1976}
1977
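/*
 * Probe order: match the per-SoC data, parse the controller's DT node, claim
 * clocks, resets and register windows, bring up the controller and program
 * the AFI address translations, optionally enable MSI support, and finally
 * enumerate the root ports before exposing the debugfs interface.
 */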
1978static int tegra_pcie_probe(struct platform_device *pdev)
1979{
1980        const struct of_device_id *match;
1981        struct tegra_pcie *pcie;
1982        int err;
1983
1984        match = of_match_device(tegra_pcie_of_match, &pdev->dev);
1985        if (!match)
1986                return -ENODEV;
1987
1988        pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1989        if (!pcie)
1990                return -ENOMEM;
1991
1992        INIT_LIST_HEAD(&pcie->buses);
1993        INIT_LIST_HEAD(&pcie->ports);
1994        pcie->soc_data = match->data;
1995        pcie->dev = &pdev->dev;
1996
1997        err = tegra_pcie_parse_dt(pcie);
1998        if (err < 0)
1999                return err;
2000
2001        pcibios_min_mem = 0;
2002
2003        err = tegra_pcie_get_resources(pcie);
2004        if (err < 0) {
2005                dev_err(&pdev->dev, "failed to request resources: %d\n", err);
2006                return err;
2007        }
2008
2009        err = tegra_pcie_enable_controller(pcie);
2010        if (err)
2011                goto put_resources;
2012
2013        /* setup the AFI address translations */
2014        tegra_pcie_setup_translations(pcie);
2015
2016        if (IS_ENABLED(CONFIG_PCI_MSI)) {
2017                err = tegra_pcie_enable_msi(pcie);
2018                if (err < 0) {
2019                        dev_err(&pdev->dev,
2020                                "failed to enable MSI support: %d\n",
2021                                err);
2022                        goto put_resources;
2023                }
2024        }
2025
2026        err = tegra_pcie_enable(pcie);
2027        if (err < 0) {
2028                dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
2029                goto disable_msi;
2030        }
2031
2032        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2033                err = tegra_pcie_debugfs_init(pcie);
2034                if (err < 0)
2035                        dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
2036                                err);
2037        }
2038
2039        platform_set_drvdata(pdev, pcie);
2040        return 0;
2041
2042disable_msi:
2043        if (IS_ENABLED(CONFIG_PCI_MSI))
2044                tegra_pcie_disable_msi(pcie);
2045put_resources:
2046        tegra_pcie_put_resources(pcie);
2047        return err;
2048}
2049
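/*
 * The driver provides no .remove callback, so .suppress_bind_attrs keeps the
 * device from being unbound through sysfs once it has been probed.
 */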
2050static struct platform_driver tegra_pcie_driver = {
2051        .driver = {
2052                .name = "tegra-pcie",
2053                .of_match_table = tegra_pcie_of_match,
2054                .suppress_bind_attrs = true,
2055        },
2056        .probe = tegra_pcie_probe,
2057};
2058module_platform_driver(tegra_pcie_driver);
2059
2060MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
2061MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
2062MODULE_LICENSE("GPL v2");
2063