linux/drivers/pci/controller/pcie-brcmstb.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/* Copyright (C) 2009 - 2019 Broadcom */
   3
   4#include <linux/bitfield.h>
   5#include <linux/bitops.h>
   6#include <linux/clk.h>
   7#include <linux/compiler.h>
   8#include <linux/delay.h>
   9#include <linux/init.h>
  10#include <linux/interrupt.h>
  11#include <linux/io.h>
  12#include <linux/ioport.h>
  13#include <linux/irqchip/chained_irq.h>
  14#include <linux/irqdomain.h>
  15#include <linux/kernel.h>
  16#include <linux/list.h>
  17#include <linux/log2.h>
  18#include <linux/module.h>
  19#include <linux/msi.h>
  20#include <linux/of_address.h>
  21#include <linux/of_irq.h>
  22#include <linux/of_pci.h>
  23#include <linux/of_platform.h>
  24#include <linux/pci.h>
  25#include <linux/pci-ecam.h>
  26#include <linux/printk.h>
  27#include <linux/reset.h>
  28#include <linux/sizes.h>
  29#include <linux/slab.h>
  30#include <linux/string.h>
  31#include <linux/types.h>
  32
  33#include "../pci.h"
  34
  35/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
  36#define BRCM_PCIE_CAP_REGS                              0x00ac
  37
  38/* Broadcom STB PCIe Register Offsets */
  39#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1                         0x0188
  40#define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK  0xc
  41#define  PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN                  0x0
  42
  43#define PCIE_RC_CFG_PRIV1_ID_VAL3                       0x043c
  44#define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK      0xffffff
  45
  46#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY                       0x04dc
  47#define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK    0xc00
  48
  49#define PCIE_RC_DL_MDIO_ADDR                            0x1100
  50#define PCIE_RC_DL_MDIO_WR_DATA                         0x1104
  51#define PCIE_RC_DL_MDIO_RD_DATA                         0x1108
  52
  53#define PCIE_MISC_MISC_CTRL                             0x4008
  54#define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK         0x1000
  55#define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK      0x2000
  56#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK        0x300000
  57
  58#define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK             0xf8000000
  59#define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK             0x07c00000
  60#define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK             0x0000001f
  61#define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
  62
  63#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO                0x400c
  64#define PCIE_MEM_WIN0_LO(win)   \
  65                PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
  66
  67#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI                0x4010
  68#define PCIE_MEM_WIN0_HI(win)   \
  69                PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
  70
  71#define PCIE_MISC_RC_BAR1_CONFIG_LO                     0x402c
  72#define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK          0x1f
  73
  74#define PCIE_MISC_RC_BAR2_CONFIG_LO                     0x4034
  75#define  PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK          0x1f
  76#define PCIE_MISC_RC_BAR2_CONFIG_HI                     0x4038
  77
  78#define PCIE_MISC_RC_BAR3_CONFIG_LO                     0x403c
  79#define  PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK          0x1f
  80
  81#define PCIE_MISC_MSI_BAR_CONFIG_LO                     0x4044
  82#define PCIE_MISC_MSI_BAR_CONFIG_HI                     0x4048
  83
  84#define PCIE_MISC_MSI_DATA_CONFIG                       0x404c
  85#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32               0xffe06540
  86#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8                0xfff86540
  87
  88#define PCIE_MISC_PCIE_CTRL                             0x4064
  89#define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK      0x1
  90#define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK            0x4
  91
  92#define PCIE_MISC_PCIE_STATUS                           0x4068
  93#define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK           0x80
  94#define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK      0x20
  95#define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK      0x10
  96#define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK    0x40
  97
  98#define PCIE_MISC_REVISION                              0x406c
  99#define  BRCM_PCIE_HW_REV_33                            0x0303
 100
 101#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT                0x4070
 102#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK    0xfff00000
 103#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK     0xfff0
 104#define PCIE_MEM_WIN0_BASE_LIMIT(win)   \
 105                PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)
 106
 107#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI                   0x4080
 108#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK        0xff
 109#define PCIE_MEM_WIN0_BASE_HI(win)      \
 110                PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)
 111
 112#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI                  0x4084
 113#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK      0xff
 114#define PCIE_MEM_WIN0_LIMIT_HI(win)     \
 115                PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
 116
 117#define PCIE_MISC_HARD_PCIE_HARD_DEBUG                                  0x4204
 118#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK        0x2
 119#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK                0x08000000
 120
 121
 122#define PCIE_INTR2_CPU_BASE             0x4300
 123#define PCIE_MSI_INTR2_BASE             0x4500
 124/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
 125#define  MSI_INT_STATUS                 0x0
 126#define  MSI_INT_CLR                    0x8
 127#define  MSI_INT_MASK_SET               0x10
 128#define  MSI_INT_MASK_CLR               0x14
 129
 130#define PCIE_EXT_CFG_DATA                               0x8000
 131#define PCIE_EXT_CFG_INDEX                              0x9000
 132
 133#define  PCIE_RGR1_SW_INIT_1_PERST_MASK                 0x1
 134#define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT                0x0
 135
 136#define RGR1_SW_INIT_1_INIT_GENERIC_MASK                0x2
 137#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT               0x1
 138#define RGR1_SW_INIT_1_INIT_7278_MASK                   0x1
 139#define RGR1_SW_INIT_1_INIT_7278_SHIFT                  0x0
 140
 141/* PCIe parameters */
 142#define BRCM_NUM_PCIE_OUT_WINS          0x4
 143#define BRCM_INT_PCI_MSI_NR             32
 144#define BRCM_INT_PCI_MSI_LEGACY_NR      8
 145#define BRCM_INT_PCI_MSI_SHIFT          0
 146
  147/* MSI target addresses */
 148#define BRCM_MSI_TARGET_ADDR_LT_4GB     0x0fffffffcULL
 149#define BRCM_MSI_TARGET_ADDR_GT_4GB     0xffffffffcULL
 150
 151/* MDIO registers */
 152#define MDIO_PORT0                      0x0
 153#define MDIO_DATA_MASK                  0x7fffffff
 154#define MDIO_PORT_MASK                  0xf0000
 155#define MDIO_REGAD_MASK                 0xffff
 156#define MDIO_CMD_MASK                   0xfff00000
 157#define MDIO_CMD_READ                   0x1
 158#define MDIO_CMD_WRITE                  0x0
 159#define MDIO_DATA_DONE_MASK             0x80000000
 160#define MDIO_RD_DONE(x)                 (((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
 161#define MDIO_WT_DONE(x)                 (((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
 162#define SSC_REGS_ADDR                   0x1100
 163#define SET_ADDR_OFFSET                 0x1f
 164#define SSC_CNTL_OFFSET                 0x2
 165#define SSC_CNTL_OVRD_EN_MASK           0x8000
 166#define SSC_CNTL_OVRD_VAL_MASK          0x4000
 167#define SSC_STATUS_OFFSET               0x1
 168#define SSC_STATUS_SSC_MASK             0x400
 169#define SSC_STATUS_PLL_LOCK_MASK        0x800
 170#define PCIE_BRCM_MAX_MEMC              3
 171
 172#define IDX_ADDR(pcie)                  (pcie->reg_offsets[EXT_CFG_INDEX])
 173#define DATA_ADDR(pcie)                 (pcie->reg_offsets[EXT_CFG_DATA])
 174#define PCIE_RGR1_SW_INIT_1(pcie)       (pcie->reg_offsets[RGR1_SW_INIT_1])
 175
 176/* Rescal registers */
 177#define PCIE_DVT_PMU_PCIE_PHY_CTRL                              0xc700
 178#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS                  0x3
 179#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK         0x4
 180#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT        0x2
 181#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK             0x2
 182#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT            0x1
 183#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK             0x1
 184#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT            0x0
 185
 186/* Forward declarations */
 187struct brcm_pcie;
 188static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
 189static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
 190static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
 191static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
 192
 193enum {
 194        RGR1_SW_INIT_1,
 195        EXT_CFG_INDEX,
 196        EXT_CFG_DATA,
 197};
 198
 199enum {
 200        RGR1_SW_INIT_1_INIT_MASK,
 201        RGR1_SW_INIT_1_INIT_SHIFT,
 202};
 203
 204enum pcie_type {
 205        GENERIC,
 206        BCM7278,
 207        BCM2711,
 208};
 209
 210struct pcie_cfg_data {
 211        const int *offsets;
 212        const enum pcie_type type;
 213        void (*perst_set)(struct brcm_pcie *pcie, u32 val);
 214        void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
 215};
 216
 217static const int pcie_offsets[] = {
 218        [RGR1_SW_INIT_1] = 0x9210,
 219        [EXT_CFG_INDEX]  = 0x9000,
 220        [EXT_CFG_DATA]   = 0x9004,
 221};
 222
 223static const struct pcie_cfg_data generic_cfg = {
 224        .offsets        = pcie_offsets,
 225        .type           = GENERIC,
 226        .perst_set      = brcm_pcie_perst_set_generic,
 227        .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
 228};
 229
 230static const int pcie_offset_bcm7278[] = {
 231        [RGR1_SW_INIT_1] = 0xc010,
 232        [EXT_CFG_INDEX] = 0x9000,
 233        [EXT_CFG_DATA] = 0x9004,
 234};
 235
 236static const struct pcie_cfg_data bcm7278_cfg = {
 237        .offsets        = pcie_offset_bcm7278,
 238        .type           = BCM7278,
 239        .perst_set      = brcm_pcie_perst_set_7278,
 240        .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
 241};
 242
 243static const struct pcie_cfg_data bcm2711_cfg = {
 244        .offsets        = pcie_offsets,
 245        .type           = BCM2711,
 246        .perst_set      = brcm_pcie_perst_set_generic,
 247        .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
 248};
 249
 250struct brcm_msi {
 251        struct device           *dev;
 252        void __iomem            *base;
 253        struct device_node      *np;
 254        struct irq_domain       *msi_domain;
 255        struct irq_domain       *inner_domain;
 256        struct mutex            lock; /* guards the alloc/free operations */
 257        u64                     target_addr;
 258        int                     irq;
 259        /* used indicates which MSI interrupts have been alloc'd */
 260        unsigned long           used;
 261        bool                    legacy;
 262        /* Some chips have MSIs in bits [31..24] of a shared register. */
 263        int                     legacy_shift;
  264        int                     nr; /* Number of MSIs available; depends on chip */
 265        /* This is the base pointer for interrupt status/set/clr regs */
 266        void __iomem            *intr_base;
 267};
 268
  269/* Internal PCIe Host Controller Information. */
 270struct brcm_pcie {
 271        struct device           *dev;
 272        void __iomem            *base;
 273        struct clk              *clk;
 274        struct device_node      *np;
 275        bool                    ssc;
 276        int                     gen;
 277        u64                     msi_target_addr;
 278        struct brcm_msi         *msi;
 279        const int               *reg_offsets;
 280        enum pcie_type          type;
 281        struct reset_control    *rescal;
 282        int                     num_memc;
 283        u64                     memc_size[PCIE_BRCM_MAX_MEMC];
 284        u32                     hw_rev;
 285        void                    (*perst_set)(struct brcm_pcie *pcie, u32 val);
 286        void                    (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
 287};
 288
 289/*
  290 * Convert the size of the inbound "BAR" region to the non-linear
  291 * values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE.
 292 */
 293static int brcm_pcie_encode_ibar_size(u64 size)
 294{
 295        int log2_in = ilog2(size);
 296
 297        if (log2_in >= 12 && log2_in <= 15)
 298                /* Covers 4KB to 32KB (inclusive) */
 299                return (log2_in - 12) + 0x1c;
 300        else if (log2_in >= 16 && log2_in <= 35)
  301                /* Covers 64KB to 32GB (inclusive) */
 302                return log2_in - 15;
 303        /* Something is awry so disable */
 304        return 0;
 305}
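
/*
 * Worked examples of the encoding above:
 *   SZ_16K -> (14 - 12) + 0x1c = 0x1e
 *   SZ_64K -> 16 - 15 = 0x01
 *   SZ_1G  -> 30 - 15 = 0x0f
 *   SZ_4G  -> 32 - 15 = 0x11
 * Sizes below 4KB, or 64GB and above, encode as 0 and disable the BAR.
 */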
 306
 307static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
 308{
 309        u32 pkt = 0;
 310
 311        pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
 312        pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
 313        pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
 314
 315        return pkt;
 316}
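
/*
 * For illustration, given the masks above: a write packet for MDIO_PORT0,
 * regad 0x1f is FIELD_PREP(MDIO_REGAD_MASK, 0x1f) = 0x0000001f, while the
 * matching read packet adds FIELD_PREP(MDIO_CMD_MASK, MDIO_CMD_READ) =
 * 0x00100000, giving 0x0010001f.
 */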
 317
 318/* negative return value indicates error */
 319static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
 320{
 321        int tries;
 322        u32 data;
 323
 324        writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
 325                   base + PCIE_RC_DL_MDIO_ADDR);
 326        readl(base + PCIE_RC_DL_MDIO_ADDR);
 327
 328        data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
 329        for (tries = 0; !MDIO_RD_DONE(data) && tries < 10; tries++) {
 330                udelay(10);
 331                data = readl(base + PCIE_RC_DL_MDIO_RD_DATA);
 332        }
 333
 334        *val = FIELD_GET(MDIO_DATA_MASK, data);
 335        return MDIO_RD_DONE(data) ? 0 : -EIO;
 336}
 337
 338/* negative return value indicates error */
 339static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
 340                                u8 regad, u16 wrdata)
 341{
 342        int tries;
 343        u32 data;
 344
 345        writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
 346                   base + PCIE_RC_DL_MDIO_ADDR);
 347        readl(base + PCIE_RC_DL_MDIO_ADDR);
 348        writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
 349
 350        data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
 351        for (tries = 0; !MDIO_WT_DONE(data) && tries < 10; tries++) {
 352                udelay(10);
 353                data = readl(base + PCIE_RC_DL_MDIO_WR_DATA);
 354        }
 355
 356        return MDIO_WT_DONE(data) ? 0 : -EIO;
 357}
 358
 359/*
 360 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 361 * return value indicates error.
 362 */
 363static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
 364{
 365        int pll, ssc;
 366        int ret;
 367        u32 tmp;
 368
 369        ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
 370                                   SSC_REGS_ADDR);
 371        if (ret < 0)
 372                return ret;
 373
 374        ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
 375                                  SSC_CNTL_OFFSET, &tmp);
 376        if (ret < 0)
 377                return ret;
 378
 379        u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
 380        u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
 381        ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
 382                                   SSC_CNTL_OFFSET, tmp);
 383        if (ret < 0)
 384                return ret;
 385
 386        usleep_range(1000, 2000);
 387        ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
 388                                  SSC_STATUS_OFFSET, &tmp);
 389        if (ret < 0)
 390                return ret;
 391
 392        ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
 393        pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);
 394
 395        return ssc && pll ? 0 : -EIO;
 396}
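
/*
 * Summary of the sequence above: select the SSC register block over MDIO
 * via SET_ADDR_OFFSET, set the OVRD_EN and OVRD_VAL bits in SSC_CNTL, then,
 * after a short delay, check SSC_STATUS for both the SSC and PLL_LOCK bits.
 */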
 397
 398/* Limits operation to a specific generation (1, 2, or 3) */
 399static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
 400{
 401        u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
 402        u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
 403
 404        lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
 405        writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
 406
 407        lnkctl2 = (lnkctl2 & ~0xf) | gen;
 408        writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
 409}
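
/*
 * Note: both PCI_EXP_LNKCAP_SLS and the LNKCTL2 target link speed live in
 * the low four bits, so e.g. gen = 2 caps the link at 5.0 GT/s and
 * gen = 3 at 8.0 GT/s.
 */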
 410
 411static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
 412                                       unsigned int win, u64 cpu_addr,
 413                                       u64 pcie_addr, u64 size)
 414{
 415        u32 cpu_addr_mb_high, limit_addr_mb_high;
 416        phys_addr_t cpu_addr_mb, limit_addr_mb;
 417        int high_addr_shift;
 418        u32 tmp;
 419
 420        /* Set the base of the pcie_addr window */
 421        writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
 422        writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
 423
 424        /* Write the addr base & limit lower bits (in MBs) */
 425        cpu_addr_mb = cpu_addr / SZ_1M;
 426        limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
 427
 428        tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
 429        u32p_replace_bits(&tmp, cpu_addr_mb,
 430                          PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
 431        u32p_replace_bits(&tmp, limit_addr_mb,
 432                          PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
 433        writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
 434
 435        /* Write the cpu & limit addr upper bits */
 436        high_addr_shift =
 437                HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
 438
 439        cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
 440        tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
 441        u32p_replace_bits(&tmp, cpu_addr_mb_high,
 442                          PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
 443        writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
 444
 445        limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
 446        tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
 447        u32p_replace_bits(&tmp, limit_addr_mb_high,
 448                          PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
 449        writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
 450}
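
/*
 * For illustration, assuming cpu_addr = 0xc0000000 and size = SZ_64M (and
 * the other bits of BASE_LIMIT at 0): cpu_addr_mb = 0xc00 and
 * limit_addr_mb = 0xc3f, so BASE_LIMIT ends up as 0xc3f0c000, while the
 * BASE_HI/LIMIT_HI registers take the MB values shifted right by
 * HWEIGHT32(0xfff0) = 12, i.e. 0 for both in this case.
 */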
 451
 452static struct irq_chip brcm_msi_irq_chip = {
 453        .name            = "BRCM STB PCIe MSI",
 454        .irq_ack         = irq_chip_ack_parent,
 455        .irq_mask        = pci_msi_mask_irq,
 456        .irq_unmask      = pci_msi_unmask_irq,
 457};
 458
 459static struct msi_domain_info brcm_msi_domain_info = {
 460        /* Multi MSI is supported by the controller, but not by this driver */
 461        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
 462        .chip   = &brcm_msi_irq_chip,
 463};
 464
 465static void brcm_pcie_msi_isr(struct irq_desc *desc)
 466{
 467        struct irq_chip *chip = irq_desc_get_chip(desc);
 468        unsigned long status, virq;
 469        struct brcm_msi *msi;
 470        struct device *dev;
 471        u32 bit;
 472
 473        chained_irq_enter(chip, desc);
 474        msi = irq_desc_get_handler_data(desc);
 475        dev = msi->dev;
 476
 477        status = readl(msi->intr_base + MSI_INT_STATUS);
 478        status >>= msi->legacy_shift;
 479
 480        for_each_set_bit(bit, &status, msi->nr) {
 481                virq = irq_find_mapping(msi->inner_domain, bit);
 482                if (virq)
 483                        generic_handle_irq(virq);
 484                else
 485                        dev_dbg(dev, "unexpected MSI\n");
 486        }
 487
 488        chained_irq_exit(chip, desc);
 489}
 490
 491static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 492{
 493        struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
 494
 495        msg->address_lo = lower_32_bits(msi->target_addr);
 496        msg->address_hi = upper_32_bits(msi->target_addr);
 497        msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
 498}
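
/*
 * For illustration: (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) is 0x6540,
 * so hwirq 5 produces msg->data = 0x6545.
 */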
 499
 500static int brcm_msi_set_affinity(struct irq_data *irq_data,
 501                                 const struct cpumask *mask, bool force)
 502{
 503        return -EINVAL;
 504}
 505
 506static void brcm_msi_ack_irq(struct irq_data *data)
 507{
 508        struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
 509        const int shift_amt = data->hwirq + msi->legacy_shift;
 510
 511        writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
 512}
 513
 514
 515static struct irq_chip brcm_msi_bottom_irq_chip = {
 516        .name                   = "BRCM STB MSI",
 517        .irq_compose_msi_msg    = brcm_msi_compose_msi_msg,
 518        .irq_set_affinity       = brcm_msi_set_affinity,
 519        .irq_ack                = brcm_msi_ack_irq,
 520};
 521
 522static int brcm_msi_alloc(struct brcm_msi *msi)
 523{
 524        int hwirq;
 525
 526        mutex_lock(&msi->lock);
 527        hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
 528        mutex_unlock(&msi->lock);
 529
 530        return hwirq;
 531}
 532
 533static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
 534{
 535        mutex_lock(&msi->lock);
 536        bitmap_release_region(&msi->used, hwirq, 0);
 537        mutex_unlock(&msi->lock);
 538}
 539
 540static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 541                                 unsigned int nr_irqs, void *args)
 542{
 543        struct brcm_msi *msi = domain->host_data;
 544        int hwirq;
 545
 546        hwirq = brcm_msi_alloc(msi);
 547
 548        if (hwirq < 0)
 549                return hwirq;
 550
 551        irq_domain_set_info(domain, virq, (irq_hw_number_t)hwirq,
 552                            &brcm_msi_bottom_irq_chip, domain->host_data,
 553                            handle_edge_irq, NULL, NULL);
 554        return 0;
 555}
 556
 557static void brcm_irq_domain_free(struct irq_domain *domain,
 558                                 unsigned int virq, unsigned int nr_irqs)
 559{
 560        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 561        struct brcm_msi *msi = irq_data_get_irq_chip_data(d);
 562
 563        brcm_msi_free(msi, d->hwirq);
 564}
 565
 566static const struct irq_domain_ops msi_domain_ops = {
 567        .alloc  = brcm_irq_domain_alloc,
 568        .free   = brcm_irq_domain_free,
 569};
 570
 571static int brcm_allocate_domains(struct brcm_msi *msi)
 572{
 573        struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
 574        struct device *dev = msi->dev;
 575
 576        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
 577        if (!msi->inner_domain) {
 578                dev_err(dev, "failed to create IRQ domain\n");
 579                return -ENOMEM;
 580        }
 581
 582        msi->msi_domain = pci_msi_create_irq_domain(fwnode,
 583                                                    &brcm_msi_domain_info,
 584                                                    msi->inner_domain);
 585        if (!msi->msi_domain) {
 586                dev_err(dev, "failed to create MSI domain\n");
 587                irq_domain_remove(msi->inner_domain);
 588                return -ENOMEM;
 589        }
 590
 591        return 0;
 592}
 593
 594static void brcm_free_domains(struct brcm_msi *msi)
 595{
 596        irq_domain_remove(msi->msi_domain);
 597        irq_domain_remove(msi->inner_domain);
 598}
 599
 600static void brcm_msi_remove(struct brcm_pcie *pcie)
 601{
 602        struct brcm_msi *msi = pcie->msi;
 603
 604        if (!msi)
 605                return;
 606        irq_set_chained_handler(msi->irq, NULL);
 607        irq_set_handler_data(msi->irq, NULL);
 608        brcm_free_domains(msi);
 609}
 610
 611static void brcm_msi_set_regs(struct brcm_msi *msi)
 612{
 613        u32 val = __GENMASK(31, msi->legacy_shift);
 614
 615        writel(val, msi->intr_base + MSI_INT_MASK_CLR);
 616        writel(val, msi->intr_base + MSI_INT_CLR);
 617
 618        /*
  619         * Bit 0 of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed as an MSI
  620         * enable, which we set to 1.
 621         */
 622        writel(lower_32_bits(msi->target_addr) | 0x1,
 623               msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
 624        writel(upper_32_bits(msi->target_addr),
 625               msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
 626
 627        val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
 628        writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
 629}
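
/*
 * For illustration: with legacy_shift = 24 the mask written above is
 * __GENMASK(31, 24) = 0xff000000 (the 8 legacy MSI bits); with
 * legacy_shift = 0 it is 0xffffffff (all 32 MSIs).
 */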
 630
 631static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
 632{
 633        struct brcm_msi *msi;
 634        int irq, ret;
 635        struct device *dev = pcie->dev;
 636
 637        irq = irq_of_parse_and_map(dev->of_node, 1);
 638        if (irq <= 0) {
 639                dev_err(dev, "cannot map MSI interrupt\n");
 640                return -ENODEV;
 641        }
 642
 643        msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
 644        if (!msi)
 645                return -ENOMEM;
 646
 647        mutex_init(&msi->lock);
 648        msi->dev = dev;
 649        msi->base = pcie->base;
 650        msi->np = pcie->np;
 651        msi->target_addr = pcie->msi_target_addr;
 652        msi->irq = irq;
 653        msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
 654
 655        if (msi->legacy) {
 656                msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
 657                msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
 658                msi->legacy_shift = 24;
 659        } else {
 660                msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
 661                msi->nr = BRCM_INT_PCI_MSI_NR;
 662                msi->legacy_shift = 0;
 663        }
 664
 665        ret = brcm_allocate_domains(msi);
 666        if (ret)
 667                return ret;
 668
 669        irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);
 670
 671        brcm_msi_set_regs(msi);
 672        pcie->msi = msi;
 673
 674        return 0;
 675}
 676
 677/* The controller is capable of serving in both RC and EP roles */
 678static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
 679{
 680        void __iomem *base = pcie->base;
 681        u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
 682
 683        return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
 684}
 685
 686static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
 687{
 688        u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
 689        u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
 690        u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);
 691
 692        return dla && plu;
 693}
 694
 695static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
 696                                        int where)
 697{
 698        struct brcm_pcie *pcie = bus->sysdata;
 699        void __iomem *base = pcie->base;
 700        int idx;
 701
 702        /* Accesses to the RC go right to the RC registers if slot==0 */
 703        if (pci_is_root_bus(bus))
 704                return PCI_SLOT(devfn) ? NULL : base + where;
 705
 706        /* For devices, write to the config space index register */
 707        idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
 708        writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
 709        return base + PCIE_EXT_CFG_DATA + where;
 710}
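
/*
 * For illustration, assuming the standard ECAM encoding from
 * <linux/pci-ecam.h>: a config access to bus 1, device 1, function 0 at
 * offset 0x10 writes PCIE_ECAM_OFFSET(1, PCI_DEVFN(1, 0), 0) = 0x00108000
 * to PCIE_EXT_CFG_INDEX and then accesses base + PCIE_EXT_CFG_DATA + 0x10.
 */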
 711
 712static struct pci_ops brcm_pcie_ops = {
 713        .map_bus = brcm_pcie_map_conf,
 714        .read = pci_generic_config_read,
 715        .write = pci_generic_config_write,
 716};
 717
 718static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
 719{
 720        u32 tmp, mask =  RGR1_SW_INIT_1_INIT_GENERIC_MASK;
 721        u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
 722
 723        tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 724        tmp = (tmp & ~mask) | ((val << shift) & mask);
 725        writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 726}
 727
 728static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
 729{
 730        u32 tmp, mask =  RGR1_SW_INIT_1_INIT_7278_MASK;
 731        u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
 732
 733        tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 734        tmp = (tmp & ~mask) | ((val << shift) & mask);
 735        writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 736}
 737
 738static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
 739{
 740        u32 tmp;
 741
 742        /* Perst bit has moved and assert value is 0 */
 743        tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
 744        u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
 745        writel(tmp, pcie->base +  PCIE_MISC_PCIE_CTRL);
 746}
 747
 748static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
 749{
 750        u32 tmp;
 751
 752        tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 753        u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
 754        writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
 755}
 756
 757static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
 758                                                        u64 *rc_bar2_size,
 759                                                        u64 *rc_bar2_offset)
 760{
 761        struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 762        struct resource_entry *entry;
 763        struct device *dev = pcie->dev;
 764        u64 lowest_pcie_addr = ~(u64)0;
 765        int ret, i = 0;
 766        u64 size = 0;
 767
 768        resource_list_for_each_entry(entry, &bridge->dma_ranges) {
 769                u64 pcie_beg = entry->res->start - entry->offset;
 770
 771                size += entry->res->end - entry->res->start + 1;
 772                if (pcie_beg < lowest_pcie_addr)
 773                        lowest_pcie_addr = pcie_beg;
 774        }
 775
 776        if (lowest_pcie_addr == ~(u64)0) {
 777                dev_err(dev, "DT node has no dma-ranges\n");
 778                return -EINVAL;
 779        }
 780
 781        ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
 782                                                  PCIE_BRCM_MAX_MEMC);
 783
 784        if (ret <= 0) {
 785                /* Make an educated guess */
 786                pcie->num_memc = 1;
 787                pcie->memc_size[0] = 1ULL << fls64(size - 1);
 788        } else {
 789                pcie->num_memc = ret;
 790        }
 791
 792        /* Each memc is viewed through a "port" that is a power of 2 */
 793        for (i = 0, size = 0; i < pcie->num_memc; i++)
 794                size += pcie->memc_size[i];
 795
 796        /* System memory starts at this address in PCIe-space */
 797        *rc_bar2_offset = lowest_pcie_addr;
 798        /* The sum of all memc views must also be a power of 2 */
 799        *rc_bar2_size = 1ULL << fls64(size - 1);
 800
 801        /*
 802         * We validate the inbound memory view even though we should trust
  803         * whatever the device-tree provides. This is because of a HW issue on
  804         * early Raspberry Pi 4 revisions (bcm2711). It turns out the
  805         * firmware has to dynamically edit dma-ranges due to a bug in the
  806         * PCIe controller integration, which prohibits any access above the
  807         * lower 3GB of memory. Given this, we decided to keep the dma-ranges
  808         * in check, avoiding hard-to-debug device-tree related issues in the
  809         * future:
 810         *
 811         * The PCIe host controller by design must set the inbound viewport to
 812         * be a contiguous arrangement of all of the system's memory.  In
  813         * addition, its size must be a power of two.  To further complicate
 814         * matters, the viewport must start on a pcie-address that is aligned
 815         * on a multiple of its size.  If a portion of the viewport does not
 816         * represent system memory -- e.g. 3GB of memory requires a 4GB
 817         * viewport -- we can map the outbound memory in or after 3GB and even
 818         * though the viewport will overlap the outbound memory the controller
 819         * will know to send outbound memory downstream and everything else
 820         * upstream.
 821         *
 822         * For example:
 823         *
 824         * - The best-case scenario, memory up to 3GB, is to place the inbound
 825         *   region in the first 4GB of pcie-space, as some legacy devices can
  826         *   only address 32 bits. We would also like to put the MSI under 4GB
  827         *   as well, since some devices require a 32-bit MSI target address.
 828         *
 829         * - If the system memory is 4GB or larger we cannot start the inbound
 830         *   region at location 0 (since we have to allow some space for
  831         *   outbound memory @ 3GB). So instead it will start at the 1x
  832         *   multiple of its size.
 833         */
 834        if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
 835            (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
 836                dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
 837                        *rc_bar2_size, *rc_bar2_offset);
 838                return -EINVAL;
 839        }
 840
 841        return 0;
 842}
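
/*
 * For illustration: a single dma-range covering 3GB of system memory at
 * PCIe address 0, with no "brcm,scb-sizes" property, yields num_memc = 1,
 * rc_bar2_offset = 0 and rc_bar2_size rounded up to 4GB, which passes the
 * checks above.
 */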
 843
 844static int brcm_pcie_setup(struct brcm_pcie *pcie)
 845{
 846        struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
 847        u64 rc_bar2_offset, rc_bar2_size;
 848        void __iomem *base = pcie->base;
 849        struct device *dev = pcie->dev;
 850        struct resource_entry *entry;
 851        bool ssc_good = false;
 852        struct resource *res;
 853        int num_out_wins = 0;
 854        u16 nlw, cls, lnksta;
 855        int i, ret, memc;
 856        u32 tmp, burst, aspm_support;
 857
 858        /* Reset the bridge */
 859        pcie->bridge_sw_init_set(pcie, 1);
 860        usleep_range(100, 200);
 861
 862        /* Take the bridge out of reset */
 863        pcie->bridge_sw_init_set(pcie, 0);
 864
 865        tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
 866        tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
 867        writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
 868        /* Wait for SerDes to be stable */
 869        usleep_range(100, 200);
 870
 871        /*
  872         * SCB_MAX_BURST_SIZE is a two-bit field.  For GENERIC chips it
 873         * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
 874         * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
 875         */
 876        if (pcie->type == BCM2711)
 877                burst = 0x0; /* 128B */
 878        else if (pcie->type == BCM7278)
 879                burst = 0x3; /* 512 bytes */
 880        else
 881                burst = 0x2; /* 512 bytes */
 882
 883        /* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
 884        tmp = readl(base + PCIE_MISC_MISC_CTRL);
 885        u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
 886        u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
 887        u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
 888        writel(tmp, base + PCIE_MISC_MISC_CTRL);
 889
 890        ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
 891                                                    &rc_bar2_offset);
 892        if (ret)
 893                return ret;
 894
 895        tmp = lower_32_bits(rc_bar2_offset);
 896        u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
 897                          PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
 898        writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
 899        writel(upper_32_bits(rc_bar2_offset),
 900               base + PCIE_MISC_RC_BAR2_CONFIG_HI);
 901
 902        tmp = readl(base + PCIE_MISC_MISC_CTRL);
 903        for (memc = 0; memc < pcie->num_memc; memc++) {
 904                u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;
 905
 906                if (memc == 0)
 907                        u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
 908                else if (memc == 1)
 909                        u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
 910                else if (memc == 2)
 911                        u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
 912        }
 913        writel(tmp, base + PCIE_MISC_MISC_CTRL);
 914
 915        /*
 916         * We ideally want the MSI target address to be located in the 32bit
 917         * addressable memory area. Some devices might depend on it. This is
 918         * possible either when the inbound window is located above the lower
 919         * 4GB or when the inbound area is smaller than 4GB (taking into
 920         * account the rounding-up we're forced to perform).
 921         */
 922        if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
 923                pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
 924        else
 925                pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
 926
 927        /* disable the PCIe->GISB memory window (RC_BAR1) */
 928        tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
 929        tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
 930        writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
 931
 932        /* disable the PCIe->SCB memory window (RC_BAR3) */
 933        tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
 934        tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
 935        writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
 936
 937        if (pcie->gen)
 938                brcm_pcie_set_gen(pcie, pcie->gen);
 939
 940        /* Unassert the fundamental reset */
 941        pcie->perst_set(pcie, 0);
 942
 943        /*
  944         * Give the RC/EP time to wake up before trying to configure the RC.
 945         * Intermittently check status for link-up, up to a total of 100ms.
 946         */
 947        for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
 948                msleep(5);
 949
 950        if (!brcm_pcie_link_up(pcie)) {
 951                dev_err(dev, "link down\n");
 952                return -ENODEV;
 953        }
 954
 955        if (!brcm_pcie_rc_mode(pcie)) {
 956                dev_err(dev, "PCIe misconfigured; is in EP mode\n");
 957                return -EINVAL;
 958        }
 959
 960        resource_list_for_each_entry(entry, &bridge->windows) {
 961                res = entry->res;
 962
 963                if (resource_type(res) != IORESOURCE_MEM)
 964                        continue;
 965
 966                if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
 967                        dev_err(pcie->dev, "too many outbound wins\n");
 968                        return -EINVAL;
 969                }
 970
 971                brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
 972                                           res->start - entry->offset,
 973                                           resource_size(res));
 974                num_out_wins++;
 975        }
 976
 977        /* Don't advertise L0s capability if 'aspm-no-l0s' */
 978        aspm_support = PCIE_LINK_STATE_L1;
 979        if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
 980                aspm_support |= PCIE_LINK_STATE_L0S;
 981        tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
 982        u32p_replace_bits(&tmp, aspm_support,
 983                PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
 984        writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
 985
 986        /*
 987         * For config space accesses on the RC, show the right class for
 988         * a PCIe-PCIe bridge (the default setting is to be EP mode).
 989         */
 990        tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
 991        u32p_replace_bits(&tmp, 0x060400,
 992                          PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
 993        writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
 994
 995        if (pcie->ssc) {
 996                ret = brcm_pcie_set_ssc(pcie);
 997                if (ret == 0)
 998                        ssc_good = true;
 999                else
1000                        dev_err(dev, "failed attempt to enter ssc mode\n");
1001        }
1002
1003        lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
1004        cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
1005        nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
1006        dev_info(dev, "link up, %s x%u %s\n",
1007                 pci_speed_string(pcie_link_speed[cls]), nlw,
1008                 ssc_good ? "(SSC)" : "(!SSC)");
1009
1010        /* PCIe->SCB endian mode for BAR */
1011        tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
1012        u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
1013                PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
1014        writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
1015
1016        /*
 1017         * The refclk from the RC should be gated with the CLKREQ# input when
 1018         * ASPM L0s/L1 is enabled, so set the CLKREQ_DEBUG_ENABLE field to 1.
1019         */
1020        tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1021        tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
1022        writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1023
1024        return 0;
1025}
1026
1027/* L23 is a low-power PCIe link state */
1028static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
1029{
1030        void __iomem *base = pcie->base;
1031        int l23, i;
1032        u32 tmp;
1033
1034        /* Assert request for L23 */
1035        tmp = readl(base + PCIE_MISC_PCIE_CTRL);
1036        u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
1037        writel(tmp, base + PCIE_MISC_PCIE_CTRL);
1038
1039        /* Wait up to 36 msec for L23 */
1040        tmp = readl(base + PCIE_MISC_PCIE_STATUS);
1041        l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
1042        for (i = 0; i < 15 && !l23; i++) {
1043                usleep_range(2000, 2400);
1044                tmp = readl(base + PCIE_MISC_PCIE_STATUS);
1045                l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
1046                                tmp);
1047        }
1048
1049        if (!l23)
1050                dev_err(pcie->dev, "failed to enter low-power link state\n");
1051}
1052
1053static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
1054{
1055        static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
1056                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
1057                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
1058                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
1059        static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
1060                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
1061                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
1062                PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
1063        const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
1064        const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
1065        u32 tmp, combined_mask = 0;
1066        u32 val;
1067        void __iomem *base = pcie->base;
1068        int i, ret;
1069
1070        for (i = beg; i != end; start ? i++ : i--) {
1071                val = start ? BIT_MASK(shifts[i]) : 0;
1072                tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
1073                tmp = (tmp & ~masks[i]) | (val & masks[i]);
1074                writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
1075                usleep_range(50, 200);
1076                combined_mask |= masks[i];
1077        }
1078
1079        tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
1080        val = start ? combined_mask : 0;
1081
1082        ret = (tmp & combined_mask) == val ? 0 : -EIO;
1083        if (ret)
1084                dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));
1085
1086        return ret;
1087}
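
/*
 * Note: when starting, the loop above sets the PWRDN, RESET and DIG_RESET
 * fields one at a time in that order (with a short delay after each write)
 * and then verifies the combined result; when stopping, it clears the same
 * fields in the reverse order.
 */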
1088
1089static inline int brcm_phy_start(struct brcm_pcie *pcie)
1090{
1091        return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
1092}
1093
1094static inline int brcm_phy_stop(struct brcm_pcie *pcie)
1095{
1096        return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
1097}
1098
1099static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
1100{
1101        void __iomem *base = pcie->base;
 1102        u32 tmp;
1103
1104        if (brcm_pcie_link_up(pcie))
1105                brcm_pcie_enter_l23(pcie);
1106        /* Assert fundamental reset */
1107        pcie->perst_set(pcie, 1);
1108
1109        /* Deassert request for L23 in case it was asserted */
1110        tmp = readl(base + PCIE_MISC_PCIE_CTRL);
1111        u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
1112        writel(tmp, base + PCIE_MISC_PCIE_CTRL);
1113
1114        /* Turn off SerDes */
1115        tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1116        u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
1117        writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1118
1119        /* Shutdown PCIe bridge */
1120        pcie->bridge_sw_init_set(pcie, 1);
1121}
1122
1123static int brcm_pcie_suspend(struct device *dev)
1124{
1125        struct brcm_pcie *pcie = dev_get_drvdata(dev);
1126        int ret;
1127
1128        brcm_pcie_turn_off(pcie);
1129        ret = brcm_phy_stop(pcie);
1130        clk_disable_unprepare(pcie->clk);
1131
1132        return ret;
1133}
1134
1135static int brcm_pcie_resume(struct device *dev)
1136{
1137        struct brcm_pcie *pcie = dev_get_drvdata(dev);
1138        void __iomem *base;
1139        u32 tmp;
1140        int ret;
1141
1142        base = pcie->base;
1143        clk_prepare_enable(pcie->clk);
1144
1145        ret = brcm_phy_start(pcie);
1146        if (ret)
1147                goto err;
1148
1149        /* Take bridge out of reset so we can access the SERDES reg */
1150        pcie->bridge_sw_init_set(pcie, 0);
1151
1152        /* SERDES_IDDQ = 0 */
1153        tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1154        u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
1155        writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
1156
1157        /* wait for serdes to be stable */
1158        udelay(100);
1159
1160        ret = brcm_pcie_setup(pcie);
1161        if (ret)
1162                goto err;
1163
1164        if (pcie->msi)
1165                brcm_msi_set_regs(pcie->msi);
1166
1167        return 0;
1168
1169err:
1170        clk_disable_unprepare(pcie->clk);
1171        return ret;
1172}
1173
1174static void __brcm_pcie_remove(struct brcm_pcie *pcie)
1175{
1176        brcm_msi_remove(pcie);
1177        brcm_pcie_turn_off(pcie);
1178        brcm_phy_stop(pcie);
1179        reset_control_assert(pcie->rescal);
1180        clk_disable_unprepare(pcie->clk);
1181}
1182
1183static int brcm_pcie_remove(struct platform_device *pdev)
1184{
1185        struct brcm_pcie *pcie = platform_get_drvdata(pdev);
1186        struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
1187
1188        pci_stop_root_bus(bridge->bus);
1189        pci_remove_root_bus(bridge->bus);
1190        __brcm_pcie_remove(pcie);
1191
1192        return 0;
1193}
1194
1195static const struct of_device_id brcm_pcie_match[] = {
1196        { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
1197        { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
1198        { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
1199        { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
1200        { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
1201        {},
1202};
1203
1204static int brcm_pcie_probe(struct platform_device *pdev)
1205{
1206        struct device_node *np = pdev->dev.of_node, *msi_np;
1207        struct pci_host_bridge *bridge;
1208        const struct pcie_cfg_data *data;
1209        struct brcm_pcie *pcie;
1210        int ret;
1211
1212        bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
1213        if (!bridge)
1214                return -ENOMEM;
1215
1216        data = of_device_get_match_data(&pdev->dev);
1217        if (!data) {
1218                pr_err("failed to look up compatible string\n");
1219                return -EINVAL;
1220        }
1221
1222        pcie = pci_host_bridge_priv(bridge);
1223        pcie->dev = &pdev->dev;
1224        pcie->np = np;
1225        pcie->reg_offsets = data->offsets;
1226        pcie->type = data->type;
1227        pcie->perst_set = data->perst_set;
1228        pcie->bridge_sw_init_set = data->bridge_sw_init_set;
1229
1230        pcie->base = devm_platform_ioremap_resource(pdev, 0);
1231        if (IS_ERR(pcie->base))
1232                return PTR_ERR(pcie->base);
1233
1234        pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
1235        if (IS_ERR(pcie->clk))
1236                return PTR_ERR(pcie->clk);
1237
1238        ret = of_pci_get_max_link_speed(np);
1239        pcie->gen = (ret < 0) ? 0 : ret;
1240
1241        pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
1242
1243        ret = clk_prepare_enable(pcie->clk);
1244        if (ret) {
1245                dev_err(&pdev->dev, "could not enable clock\n");
1246                return ret;
1247        }
1248        pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
1249        if (IS_ERR(pcie->rescal)) {
1250                clk_disable_unprepare(pcie->clk);
1251                return PTR_ERR(pcie->rescal);
1252        }
1253
1254        ret = reset_control_deassert(pcie->rescal);
1255        if (ret)
1256                dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
1257
1258        ret = brcm_phy_start(pcie);
1259        if (ret) {
1260                reset_control_assert(pcie->rescal);
1261                clk_disable_unprepare(pcie->clk);
1262                return ret;
1263        }
1264
1265        ret = brcm_pcie_setup(pcie);
1266        if (ret)
1267                goto fail;
1268
1269        pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
1270
1271        msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
1272        if (pci_msi_enabled() && msi_np == pcie->np) {
1273                ret = brcm_pcie_enable_msi(pcie);
1274                if (ret) {
 1275                        dev_err(pcie->dev, "probe of internal MSI failed\n");
1276                        goto fail;
1277                }
1278        }
1279
1280        bridge->ops = &brcm_pcie_ops;
1281        bridge->sysdata = pcie;
1282
1283        platform_set_drvdata(pdev, pcie);
1284
1285        return pci_host_probe(bridge);
1286fail:
1287        __brcm_pcie_remove(pcie);
1288        return ret;
1289}
1290
1291MODULE_DEVICE_TABLE(of, brcm_pcie_match);
1292
1293static const struct dev_pm_ops brcm_pcie_pm_ops = {
1294        .suspend = brcm_pcie_suspend,
1295        .resume = brcm_pcie_resume,
1296};
1297
1298static struct platform_driver brcm_pcie_driver = {
1299        .probe = brcm_pcie_probe,
1300        .remove = brcm_pcie_remove,
1301        .driver = {
1302                .name = "brcm-pcie",
1303                .of_match_table = brcm_pcie_match,
1304                .pm = &brcm_pcie_pm_ops,
1305        },
1306};
1307module_platform_driver(brcm_pcie_driver);
1308
1309MODULE_LICENSE("GPL");
1310MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
1311MODULE_AUTHOR("Broadcom");
1312