linux/drivers/pci/controller/pcie-iproc.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
   4 * Copyright (C) 2015 Broadcom Corporation
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/pci.h>
   9#include <linux/msi.h>
  10#include <linux/clk.h>
  11#include <linux/module.h>
  12#include <linux/mbus.h>
  13#include <linux/slab.h>
  14#include <linux/delay.h>
  15#include <linux/interrupt.h>
  16#include <linux/irqchip/arm-gic-v3.h>
  17#include <linux/platform_device.h>
  18#include <linux/of_address.h>
  19#include <linux/of_pci.h>
  20#include <linux/of_irq.h>
  21#include <linux/of_platform.h>
  22#include <linux/phy/phy.h>
  23
  24#include "pcie-iproc.h"
  25
  26#define EP_PERST_SOURCE_SELECT_SHIFT    2
  27#define EP_PERST_SOURCE_SELECT          BIT(EP_PERST_SOURCE_SELECT_SHIFT)
  28#define EP_MODE_SURVIVE_PERST_SHIFT     1
  29#define EP_MODE_SURVIVE_PERST           BIT(EP_MODE_SURVIVE_PERST_SHIFT)
  30#define RC_PCIE_RST_OUTPUT_SHIFT        0
  31#define RC_PCIE_RST_OUTPUT              BIT(RC_PCIE_RST_OUTPUT_SHIFT)
  32#define PAXC_RESET_MASK                 0x7f
  33
  34#define GIC_V3_CFG_SHIFT                0
  35#define GIC_V3_CFG                      BIT(GIC_V3_CFG_SHIFT)
  36
  37#define MSI_ENABLE_CFG_SHIFT            0
  38#define MSI_ENABLE_CFG                  BIT(MSI_ENABLE_CFG_SHIFT)
  39
  40#define CFG_IND_ADDR_MASK               0x00001ffc
  41
  42#define CFG_ADDR_BUS_NUM_SHIFT          20
  43#define CFG_ADDR_BUS_NUM_MASK           0x0ff00000
  44#define CFG_ADDR_DEV_NUM_SHIFT          15
  45#define CFG_ADDR_DEV_NUM_MASK           0x000f8000
  46#define CFG_ADDR_FUNC_NUM_SHIFT         12
  47#define CFG_ADDR_FUNC_NUM_MASK          0x00007000
  48#define CFG_ADDR_REG_NUM_SHIFT          2
  49#define CFG_ADDR_REG_NUM_MASK           0x00000ffc
  50#define CFG_ADDR_CFG_TYPE_SHIFT         0
  51#define CFG_ADDR_CFG_TYPE_MASK          0x00000003
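
/*
 * Together, the fields above encode a configuration address as
 * bus[27:20] | device[19:15] | function[14:12] | register[11:2] | type[1:0].
 */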
  52
  53#define SYS_RC_INTX_MASK                0xf
  54
  55#define PCIE_PHYLINKUP_SHIFT            3
  56#define PCIE_PHYLINKUP                  BIT(PCIE_PHYLINKUP_SHIFT)
  57#define PCIE_DL_ACTIVE_SHIFT            2
  58#define PCIE_DL_ACTIVE                  BIT(PCIE_DL_ACTIVE_SHIFT)
  59
  60#define APB_ERR_EN_SHIFT                0
  61#define APB_ERR_EN                      BIT(APB_ERR_EN_SHIFT)
  62
  63#define CFG_RETRY_STATUS                0xffff0001
  64#define CFG_RETRY_STATUS_TIMEOUT_US     500000 /* 500 milliseconds */
  65
  66/* derive the enum index of the outbound/inbound mapping registers */
  67#define MAP_REG(base_reg, index)        ((base_reg) + (index) * 2)
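/*
 * e.g. MAP_REG(IPROC_PCIE_OARR0, 1) == IPROC_PCIE_OARR1, since each OARR/OMAP
 * (or IARR/IMAP) pair occupies two consecutive enum slots
 */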
  68
  69/*
  70 * Maximum number of outbound mapping window sizes that can be supported by any
  71 * OARR/OMAP mapping pair
  72 */
  73#define MAX_NUM_OB_WINDOW_SIZES         4
  74
  75#define OARR_VALID_SHIFT                0
  76#define OARR_VALID                      BIT(OARR_VALID_SHIFT)
  77#define OARR_SIZE_CFG_SHIFT             1
  78
  79/*
  80 * Maximum number of inbound mapping region sizes that can be supported by an
  81 * IARR
  82 */
  83#define MAX_NUM_IB_REGION_SIZES         9
  84
  85#define IMAP_VALID_SHIFT                0
  86#define IMAP_VALID                      BIT(IMAP_VALID_SHIFT)
  87
  88#define IPROC_PCI_PM_CAP                0x48
  89#define IPROC_PCI_PM_CAP_MASK           0xffff
  90#define IPROC_PCI_EXP_CAP               0xac
  91
  92#define IPROC_PCIE_REG_INVALID          0xffff
  93
  94/**
  95 * iProc PCIe outbound mapping controller specific parameters
  96 *
  97 * @window_sizes: list of supported outbound mapping window sizes in MB
  98 * @nr_sizes: number of supported outbound mapping window sizes
  99 */
 100struct iproc_pcie_ob_map {
 101        resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
 102        unsigned int nr_sizes;
 103};
 104
 105static const struct iproc_pcie_ob_map paxb_ob_map[] = {
 106        {
 107                /* OARR0/OMAP0 */
 108                .window_sizes = { 128, 256 },
 109                .nr_sizes = 2,
 110        },
 111        {
 112                /* OARR1/OMAP1 */
 113                .window_sizes = { 128, 256 },
 114                .nr_sizes = 2,
 115        },
 116};
 117
 118static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
 119        {
 120                /* OARR0/OMAP0 */
 121                .window_sizes = { 128, 256 },
 122                .nr_sizes = 2,
 123        },
 124        {
 125                /* OARR1/OMAP1 */
 126                .window_sizes = { 128, 256 },
 127                .nr_sizes = 2,
 128        },
 129        {
 130                /* OARR2/OMAP2 */
 131                .window_sizes = { 128, 256, 512, 1024 },
 132                .nr_sizes = 4,
 133        },
 134        {
 135                /* OARR3/OMAP3 */
 136                .window_sizes = { 128, 256, 512, 1024 },
 137                .nr_sizes = 4,
 138        },
 139};
 140
 141/**
 142 * iProc PCIe inbound mapping type
 143 */
 144enum iproc_pcie_ib_map_type {
 145        /* for DDR memory */
 146        IPROC_PCIE_IB_MAP_MEM = 0,
 147
 148        /* for device I/O memory */
 149        IPROC_PCIE_IB_MAP_IO,
 150
 151        /* invalid or unused */
 152        IPROC_PCIE_IB_MAP_INVALID
 153};
 154
 155/**
 156 * iProc PCIe inbound mapping controller specific parameters
 157 *
 158 * @type: inbound mapping region type
 159 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 160 * SZ_1G
 161 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
  162 * GB, depending on the size unit
 163 * @nr_sizes: number of supported inbound mapping region sizes
 164 * @nr_windows: number of supported inbound mapping windows for the region
 165 * @imap_addr_offset: register offset between the upper and lower 32-bit
 166 * IMAP address registers
 167 * @imap_window_offset: register offset between each IMAP window
 168 */
 169struct iproc_pcie_ib_map {
 170        enum iproc_pcie_ib_map_type type;
 171        unsigned int size_unit;
 172        resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
 173        unsigned int nr_sizes;
 174        unsigned int nr_windows;
 175        u16 imap_addr_offset;
 176        u16 imap_window_offset;
 177};
 178
 179static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
 180        {
 181                /* IARR0/IMAP0 */
 182                .type = IPROC_PCIE_IB_MAP_IO,
 183                .size_unit = SZ_1K,
 184                .region_sizes = { 32 },
 185                .nr_sizes = 1,
 186                .nr_windows = 8,
 187                .imap_addr_offset = 0x40,
 188                .imap_window_offset = 0x4,
 189        },
 190        {
 191                /* IARR1/IMAP1 (currently unused) */
 192                .type = IPROC_PCIE_IB_MAP_INVALID,
 193        },
 194        {
 195                /* IARR2/IMAP2 */
 196                .type = IPROC_PCIE_IB_MAP_MEM,
 197                .size_unit = SZ_1M,
 198                .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
 199                                  16384 },
 200                .nr_sizes = 9,
 201                .nr_windows = 1,
 202                .imap_addr_offset = 0x4,
 203                .imap_window_offset = 0x8,
 204        },
 205        {
 206                /* IARR3/IMAP3 */
 207                .type = IPROC_PCIE_IB_MAP_MEM,
 208                .size_unit = SZ_1G,
 209                .region_sizes = { 1, 2, 4, 8, 16, 32 },
 210                .nr_sizes = 6,
 211                .nr_windows = 8,
 212                .imap_addr_offset = 0x4,
 213                .imap_window_offset = 0x8,
 214        },
 215        {
 216                /* IARR4/IMAP4 */
 217                .type = IPROC_PCIE_IB_MAP_MEM,
 218                .size_unit = SZ_1G,
 219                .region_sizes = { 32, 64, 128, 256, 512 },
 220                .nr_sizes = 5,
 221                .nr_windows = 8,
 222                .imap_addr_offset = 0x4,
 223                .imap_window_offset = 0x8,
 224        },
 225};
 226
 227/*
 228 * iProc PCIe host registers
 229 */
 230enum iproc_pcie_reg {
 231        /* clock/reset signal control */
 232        IPROC_PCIE_CLK_CTRL = 0,
 233
 234        /*
 235         * To allow MSI to be steered to an external MSI controller (e.g., ARM
 236         * GICv3 ITS)
 237         */
 238        IPROC_PCIE_MSI_GIC_MODE,
 239
 240        /*
 241         * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
 242         * window where the MSI posted writes are written, for the writes to be
 243         * interpreted as MSI writes.
 244         */
 245        IPROC_PCIE_MSI_BASE_ADDR,
 246        IPROC_PCIE_MSI_WINDOW_SIZE,
 247
 248        /*
 249         * To hold the address of the register where the MSI writes are
  250         * programmed.  When ARM GICv3 ITS is used, this should be programmed
 251         * with the address of the GITS_TRANSLATER register.
 252         */
 253        IPROC_PCIE_MSI_ADDR_LO,
 254        IPROC_PCIE_MSI_ADDR_HI,
 255
 256        /* enable MSI */
 257        IPROC_PCIE_MSI_EN_CFG,
 258
 259        /* allow access to root complex configuration space */
 260        IPROC_PCIE_CFG_IND_ADDR,
 261        IPROC_PCIE_CFG_IND_DATA,
 262
 263        /* allow access to device configuration space */
 264        IPROC_PCIE_CFG_ADDR,
 265        IPROC_PCIE_CFG_DATA,
 266
 267        /* enable INTx */
 268        IPROC_PCIE_INTX_EN,
 269
 270        /* outbound address mapping */
 271        IPROC_PCIE_OARR0,
 272        IPROC_PCIE_OMAP0,
 273        IPROC_PCIE_OARR1,
 274        IPROC_PCIE_OMAP1,
 275        IPROC_PCIE_OARR2,
 276        IPROC_PCIE_OMAP2,
 277        IPROC_PCIE_OARR3,
 278        IPROC_PCIE_OMAP3,
 279
 280        /* inbound address mapping */
 281        IPROC_PCIE_IARR0,
 282        IPROC_PCIE_IMAP0,
 283        IPROC_PCIE_IARR1,
 284        IPROC_PCIE_IMAP1,
 285        IPROC_PCIE_IARR2,
 286        IPROC_PCIE_IMAP2,
 287        IPROC_PCIE_IARR3,
 288        IPROC_PCIE_IMAP3,
 289        IPROC_PCIE_IARR4,
 290        IPROC_PCIE_IMAP4,
 291
 292        /* link status */
 293        IPROC_PCIE_LINK_STATUS,
 294
 295        /* enable APB error for unsupported requests */
 296        IPROC_PCIE_APB_ERR_EN,
 297
 298        /* total number of core registers */
 299        IPROC_PCIE_MAX_NUM_REG,
 300};
 301
 302/* iProc PCIe PAXB BCMA registers */
 303static const u16 iproc_pcie_reg_paxb_bcma[] = {
 304        [IPROC_PCIE_CLK_CTRL]           = 0x000,
 305        [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 306        [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 307        [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 308        [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 309        [IPROC_PCIE_INTX_EN]            = 0x330,
 310        [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 311};
 312
 313/* iProc PCIe PAXB registers */
 314static const u16 iproc_pcie_reg_paxb[] = {
 315        [IPROC_PCIE_CLK_CTRL]           = 0x000,
 316        [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 317        [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 318        [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 319        [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 320        [IPROC_PCIE_INTX_EN]            = 0x330,
 321        [IPROC_PCIE_OARR0]              = 0xd20,
 322        [IPROC_PCIE_OMAP0]              = 0xd40,
 323        [IPROC_PCIE_OARR1]              = 0xd28,
 324        [IPROC_PCIE_OMAP1]              = 0xd48,
 325        [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 326        [IPROC_PCIE_APB_ERR_EN]         = 0xf40,
 327};
 328
 329/* iProc PCIe PAXB v2 registers */
 330static const u16 iproc_pcie_reg_paxb_v2[] = {
 331        [IPROC_PCIE_CLK_CTRL]           = 0x000,
 332        [IPROC_PCIE_CFG_IND_ADDR]       = 0x120,
 333        [IPROC_PCIE_CFG_IND_DATA]       = 0x124,
 334        [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 335        [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 336        [IPROC_PCIE_INTX_EN]            = 0x330,
 337        [IPROC_PCIE_OARR0]              = 0xd20,
 338        [IPROC_PCIE_OMAP0]              = 0xd40,
 339        [IPROC_PCIE_OARR1]              = 0xd28,
 340        [IPROC_PCIE_OMAP1]              = 0xd48,
 341        [IPROC_PCIE_OARR2]              = 0xd60,
 342        [IPROC_PCIE_OMAP2]              = 0xd68,
 343        [IPROC_PCIE_OARR3]              = 0xdf0,
 344        [IPROC_PCIE_OMAP3]              = 0xdf8,
 345        [IPROC_PCIE_IARR0]              = 0xd00,
 346        [IPROC_PCIE_IMAP0]              = 0xc00,
 347        [IPROC_PCIE_IARR2]              = 0xd10,
 348        [IPROC_PCIE_IMAP2]              = 0xcc0,
 349        [IPROC_PCIE_IARR3]              = 0xe00,
 350        [IPROC_PCIE_IMAP3]              = 0xe08,
 351        [IPROC_PCIE_IARR4]              = 0xe68,
 352        [IPROC_PCIE_IMAP4]              = 0xe70,
 353        [IPROC_PCIE_LINK_STATUS]        = 0xf0c,
 354        [IPROC_PCIE_APB_ERR_EN]         = 0xf40,
 355};
 356
 357/* iProc PCIe PAXC v1 registers */
 358static const u16 iproc_pcie_reg_paxc[] = {
 359        [IPROC_PCIE_CLK_CTRL]           = 0x000,
 360        [IPROC_PCIE_CFG_IND_ADDR]       = 0x1f0,
 361        [IPROC_PCIE_CFG_IND_DATA]       = 0x1f4,
 362        [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 363        [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 364};
 365
 366/* iProc PCIe PAXC v2 registers */
 367static const u16 iproc_pcie_reg_paxc_v2[] = {
 368        [IPROC_PCIE_MSI_GIC_MODE]       = 0x050,
 369        [IPROC_PCIE_MSI_BASE_ADDR]      = 0x074,
 370        [IPROC_PCIE_MSI_WINDOW_SIZE]    = 0x078,
 371        [IPROC_PCIE_MSI_ADDR_LO]        = 0x07c,
 372        [IPROC_PCIE_MSI_ADDR_HI]        = 0x080,
 373        [IPROC_PCIE_MSI_EN_CFG]         = 0x09c,
 374        [IPROC_PCIE_CFG_IND_ADDR]       = 0x1f0,
 375        [IPROC_PCIE_CFG_IND_DATA]       = 0x1f4,
 376        [IPROC_PCIE_CFG_ADDR]           = 0x1f8,
 377        [IPROC_PCIE_CFG_DATA]           = 0x1fc,
 378};
 379
 380/*
  381 * List of device IDs of controllers that have a corrupted capability list and
  382 * require a SW fixup
 383 */
 384static const u16 iproc_pcie_corrupt_cap_did[] = {
 385        0x16cd,
 386        0x16f0,
 387        0xd802,
 388        0xd804
 389};
 390
 391static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
 392{
 393        struct iproc_pcie *pcie = bus->sysdata;
 394        return pcie;
 395}
 396
 397static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset)
 398{
 399        return !!(reg_offset == IPROC_PCIE_REG_INVALID);
 400}
 401
 402static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie,
 403                                        enum iproc_pcie_reg reg)
 404{
 405        return pcie->reg_offsets[reg];
 406}
 407
 408static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie,
 409                                      enum iproc_pcie_reg reg)
 410{
 411        u16 offset = iproc_pcie_reg_offset(pcie, reg);
 412
 413        if (iproc_pcie_reg_is_invalid(offset))
 414                return 0;
 415
 416        return readl(pcie->base + offset);
 417}
 418
 419static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
 420                                        enum iproc_pcie_reg reg, u32 val)
 421{
 422        u16 offset = iproc_pcie_reg_offset(pcie, reg);
 423
 424        if (iproc_pcie_reg_is_invalid(offset))
 425                return;
 426
 427        writel(val, pcie->base + offset);
 428}
 429
 430/**
  431 * APB error forwarding can be disabled while accessing the configuration
 432 * registers of the endpoint device, to prevent unsupported requests
 433 * (typically seen during enumeration with multi-function devices) from
 434 * triggering a system exception.
 435 */
 436static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
 437                                              bool disable)
 438{
 439        struct iproc_pcie *pcie = iproc_data(bus);
 440        u32 val;
 441
 442        if (bus->number && pcie->has_apb_err_disable) {
 443                val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
 444                if (disable)
 445                        val &= ~APB_ERR_EN;
 446                else
 447                        val |= APB_ERR_EN;
 448                iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
 449        }
 450}
 451
 452static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie,
 453                                               unsigned int busno,
 454                                               unsigned int slot,
 455                                               unsigned int fn,
 456                                               int where)
 457{
 458        u16 offset;
 459        u32 val;
 460
 461        /* EP device access */
 462        val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
 463                (slot << CFG_ADDR_DEV_NUM_SHIFT) |
 464                (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
 465                (where & CFG_ADDR_REG_NUM_MASK) |
 466                (1 & CFG_ADDR_CFG_TYPE_MASK);
 467
 468        iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val);
 469        offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA);
 470
 471        if (iproc_pcie_reg_is_invalid(offset))
 472                return NULL;
 473
 474        return (pcie->base + offset);
 475}
 476
 477static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
 478{
 479        int timeout = CFG_RETRY_STATUS_TIMEOUT_US;
 480        unsigned int data;
 481
 482        /*
 483         * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
 484         * affects config reads of the Vendor ID.  For config writes or any
 485         * other config reads, the Root may automatically reissue the
 486         * configuration request again as a new request.
 487         *
 488         * For config reads, this hardware returns CFG_RETRY_STATUS data
 489         * when it receives a CRS completion, regardless of the address of
 490         * the read or the CRS Software Visibility Enable bit.  As a
 491         * partial workaround for this, we retry in software any read that
 492         * returns CFG_RETRY_STATUS.
 493         *
 494         * Note that a non-Vendor ID config register may have a value of
 495         * CFG_RETRY_STATUS.  If we read that, we can't distinguish it from
 496         * a CRS completion, so we will incorrectly retry the read and
 497         * eventually return the wrong data (0xffffffff).
 498         */
 499        data = readl(cfg_data_p);
 500        while (data == CFG_RETRY_STATUS && timeout--) {
 501                udelay(1);
 502                data = readl(cfg_data_p);
 503        }
 504
 505        if (data == CFG_RETRY_STATUS)
 506                data = 0xffffffff;
 507
 508        return data;
 509}
 510
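/*
 * Fix up config reads for PAXC controllers with a corrupted capability list:
 * synthesize a PM capability that chains to a PCIe capability advertising a
 * v2 root port, and mask off CRS Software Visibility support.
 */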
 511static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
 512{
 513        u32 i, dev_id;
 514
 515        switch (where & ~0x3) {
 516        case PCI_VENDOR_ID:
 517                dev_id = *val >> 16;
 518
 519                /*
 520                 * Activate fixup for those controllers that have corrupted
 521                 * capability list registers
 522                 */
 523                for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
 524                        if (dev_id == iproc_pcie_corrupt_cap_did[i])
 525                                pcie->fix_paxc_cap = true;
 526                break;
 527
 528        case IPROC_PCI_PM_CAP:
 529                if (pcie->fix_paxc_cap) {
 530                        /* advertise PM, force next capability to PCIe */
 531                        *val &= ~IPROC_PCI_PM_CAP_MASK;
 532                        *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
 533                }
 534                break;
 535
 536        case IPROC_PCI_EXP_CAP:
 537                if (pcie->fix_paxc_cap) {
 538                        /* advertise root port, version 2, terminate here */
 539                        *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
 540                                PCI_CAP_ID_EXP;
 541                }
 542                break;
 543
 544        case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
 545                /* Don't advertise CRS SV support */
 546                *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
 547                break;
 548
 549        default:
 550                break;
 551        }
 552}
 553
 554static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
 555                                  int where, int size, u32 *val)
 556{
 557        struct iproc_pcie *pcie = iproc_data(bus);
 558        unsigned int slot = PCI_SLOT(devfn);
 559        unsigned int fn = PCI_FUNC(devfn);
 560        unsigned int busno = bus->number;
 561        void __iomem *cfg_data_p;
 562        unsigned int data;
 563        int ret;
 564
 565        /* root complex access */
 566        if (busno == 0) {
 567                ret = pci_generic_config_read32(bus, devfn, where, size, val);
 568                if (ret == PCIBIOS_SUCCESSFUL)
 569                        iproc_pcie_fix_cap(pcie, where, val);
 570
 571                return ret;
 572        }
 573
 574        cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
 575
 576        if (!cfg_data_p)
 577                return PCIBIOS_DEVICE_NOT_FOUND;
 578
 579        data = iproc_pcie_cfg_retry(cfg_data_p);
 580
 581        *val = data;
 582        if (size <= 2)
 583                *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
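                /* e.g. a 2-byte read at offset 0x2 extracts bits [31:16] of the dword */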
 584
 585        /*
 586         * For PAXC and PAXCv2, the total number of PFs that one can enumerate
 587         * depends on the firmware configuration. Unfortunately, due to an ASIC
 588         * bug, unconfigured PFs cannot be properly hidden from the root
 589         * complex. As a result, write access to these PFs will cause bus lock
  590         * up on the embedded processor.
  591         *
  592         * Since all unconfigured PFs are left with an incorrect, stale device
  593         * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch such accesses
  594         * early here and reject them all.
 595         */
 596#define DEVICE_ID_MASK     0xffff0000
 597#define DEVICE_ID_SHIFT    16
 598        if (pcie->rej_unconfig_pf &&
 599            (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
 600                if ((*val & DEVICE_ID_MASK) ==
 601                    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
 602                        return PCIBIOS_FUNC_NOT_SUPPORTED;
 603
 604        return PCIBIOS_SUCCESSFUL;
 605}
 606
 607/**
  608 * Note that accesses to the configuration registers are protected at the higher layer
 609 * by 'pci_lock' in drivers/pci/access.c
 610 */
 611static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
 612                                            int busno, unsigned int devfn,
 613                                            int where)
 614{
  615        unsigned int slot = PCI_SLOT(devfn);
  616        unsigned int fn = PCI_FUNC(devfn);
 617        u16 offset;
 618
 619        /* root complex access */
 620        if (busno == 0) {
 621                if (slot > 0 || fn > 0)
 622                        return NULL;
 623
 624                iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
 625                                     where & CFG_IND_ADDR_MASK);
 626                offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
 627                if (iproc_pcie_reg_is_invalid(offset))
 628                        return NULL;
 629                else
 630                        return (pcie->base + offset);
 631        }
 632
 633        /*
 634         * PAXC is connected to an internally emulated EP within the SoC.  It
 635         * allows only one device.
 636         */
 637        if (pcie->ep_is_internal)
 638                if (slot > 0)
 639                        return NULL;
 640
 641        return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
 642}
 643
 644static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
 645                                                unsigned int devfn,
 646                                                int where)
 647{
 648        return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
 649                                      where);
 650}
 651
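/*
 * Raw accessors for the host bridge's own (bus 0) config space, used during
 * controller bring-up (e.g. the link check below) outside the normal pci_ops
 * path.
 */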
 652static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
 653                                       unsigned int devfn, int where,
 654                                       int size, u32 *val)
 655{
 656        void __iomem *addr;
 657
 658        addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
 659        if (!addr) {
 660                *val = ~0;
 661                return PCIBIOS_DEVICE_NOT_FOUND;
 662        }
 663
 664        *val = readl(addr);
 665
 666        if (size <= 2)
 667                *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
 668
 669        return PCIBIOS_SUCCESSFUL;
 670}
 671
 672static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
 673                                        unsigned int devfn, int where,
 674                                        int size, u32 val)
 675{
 676        void __iomem *addr;
 677        u32 mask, tmp;
 678
 679        addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
 680        if (!addr)
 681                return PCIBIOS_DEVICE_NOT_FOUND;
 682
 683        if (size == 4) {
 684                writel(val, addr);
 685                return PCIBIOS_SUCCESSFUL;
 686        }
 687
 688        mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
 689        tmp = readl(addr) & mask;
 690        tmp |= val << ((where & 0x3) * 8);
 691        writel(tmp, addr);
 692
 693        return PCIBIOS_SUCCESSFUL;
 694}
 695
 696static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
 697                                    int where, int size, u32 *val)
 698{
 699        int ret;
 700        struct iproc_pcie *pcie = iproc_data(bus);
 701
 702        iproc_pcie_apb_err_disable(bus, true);
 703        if (pcie->iproc_cfg_read)
 704                ret = iproc_pcie_config_read(bus, devfn, where, size, val);
 705        else
 706                ret = pci_generic_config_read32(bus, devfn, where, size, val);
 707        iproc_pcie_apb_err_disable(bus, false);
 708
 709        return ret;
 710}
 711
 712static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
 713                                     int where, int size, u32 val)
 714{
 715        int ret;
 716
 717        iproc_pcie_apb_err_disable(bus, true);
 718        ret = pci_generic_config_write32(bus, devfn, where, size, val);
 719        iproc_pcie_apb_err_disable(bus, false);
 720
 721        return ret;
 722}
 723
 724static struct pci_ops iproc_pcie_ops = {
 725        .map_bus = iproc_pcie_bus_map_cfg_bus,
 726        .read = iproc_pcie_config_read32,
 727        .write = iproc_pcie_config_write32,
 728};
 729
 730static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert)
 731{
 732        u32 val;
 733
 734        /*
 735         * PAXC and the internal emulated endpoint device downstream should not
 736         * be reset.  If firmware has been loaded on the endpoint device at an
 737         * earlier boot stage, reset here causes issues.
 738         */
 739        if (pcie->ep_is_internal)
 740                return;
 741
 742        if (assert) {
 743                val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
 744                val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
 745                        ~RC_PCIE_RST_OUTPUT;
 746                iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
 747                udelay(250);
 748        } else {
 749                val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
 750                val |= RC_PCIE_RST_OUTPUT;
 751                iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
 752                msleep(100);
 753        }
 754}
 755
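/*
 * Assert PERST# and keep the downstream device in reset when the controller
 * is being shut down.
 */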
 756int iproc_pcie_shutdown(struct iproc_pcie *pcie)
 757{
 758        iproc_pcie_perst_ctrl(pcie, true);
 759        msleep(500);
 760
 761        return 0;
 762}
 763EXPORT_SYMBOL_GPL(iproc_pcie_shutdown);
 764
 765static int iproc_pcie_check_link(struct iproc_pcie *pcie)
 766{
 767        struct device *dev = pcie->dev;
 768        u32 hdr_type, link_ctrl, link_status, class, val;
 769        bool link_is_active = false;
 770
 771        /*
 772         * PAXC connects to emulated endpoint devices directly and does not
 773         * have a Serdes.  Therefore skip the link detection logic here.
 774         */
 775        if (pcie->ep_is_internal)
 776                return 0;
 777
 778        val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
 779        if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
 780                dev_err(dev, "PHY or data link is INACTIVE!\n");
 781                return -ENODEV;
 782        }
 783
 784        /* make sure we are not in EP mode */
 785        iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
 786        if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
 787                dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
 788                return -EFAULT;
 789        }
 790
 791        /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
 792#define PCI_BRIDGE_CTRL_REG_OFFSET      0x43c
 793#define PCI_CLASS_BRIDGE_MASK           0xffff00
 794#define PCI_CLASS_BRIDGE_SHIFT          8
 795        iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
 796                                    4, &class);
 797        class &= ~PCI_CLASS_BRIDGE_MASK;
 798        class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
 799        iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
 800                                     4, class);
 801
 802        /* check link status to see if link is active */
 803        iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
 804                                    2, &link_status);
 805        if (link_status & PCI_EXP_LNKSTA_NLW)
 806                link_is_active = true;
 807
 808        if (!link_is_active) {
 809                /* try GEN 1 link speed */
 810#define PCI_TARGET_LINK_SPEED_MASK      0xf
 811#define PCI_TARGET_LINK_SPEED_GEN2      0x2
 812#define PCI_TARGET_LINK_SPEED_GEN1      0x1
 813                iproc_pci_raw_config_read32(pcie, 0,
 814                                            IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
 815                                            4, &link_ctrl);
 816                if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
 817                    PCI_TARGET_LINK_SPEED_GEN2) {
 818                        link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
 819                        link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
 820                        iproc_pci_raw_config_write32(pcie, 0,
 821                                        IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2,
 822                                        4, link_ctrl);
 823                        msleep(100);
 824
 825                        iproc_pci_raw_config_read32(pcie, 0,
 826                                        IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
 827                                        2, &link_status);
 828                        if (link_status & PCI_EXP_LNKSTA_NLW)
 829                                link_is_active = true;
 830                }
 831        }
 832
 833        dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
 834
 835        return link_is_active ? 0 : -ENODEV;
 836}
 837
 838static void iproc_pcie_enable(struct iproc_pcie *pcie)
 839{
 840        iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
 841}
 842
 843static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
 844                                          int window_idx)
 845{
 846        u32 val;
 847
 848        val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
 849
 850        return !!(val & OARR_VALID);
 851}
 852
 853static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
 854                                      int size_idx, u64 axi_addr, u64 pci_addr)
 855{
 856        struct device *dev = pcie->dev;
 857        u16 oarr_offset, omap_offset;
 858
 859        /*
 860         * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
 861         * on window index.
 862         */
 863        oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
 864                                                          window_idx));
 865        omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
 866                                                          window_idx));
 867        if (iproc_pcie_reg_is_invalid(oarr_offset) ||
 868            iproc_pcie_reg_is_invalid(omap_offset))
 869                return -EINVAL;
 870
 871        /*
 872         * Program the OARR registers.  The upper 32-bit OARR register is
 873         * always right after the lower 32-bit OARR register.
 874         */
 875        writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
 876               OARR_VALID, pcie->base + oarr_offset);
 877        writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
 878
 879        /* now program the OMAP registers */
 880        writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
 881        writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
 882
 883        dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
 884                window_idx, oarr_offset, &axi_addr, &pci_addr);
 885        dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
 886                readl(pcie->base + oarr_offset),
 887                readl(pcie->base + oarr_offset + 4));
 888        dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
 889                readl(pcie->base + omap_offset),
 890                readl(pcie->base + omap_offset + 4));
 891
 892        return 0;
 893}
 894
 895/**
 896 * Some iProc SoCs require the SW to configure the outbound address mapping
 897 *
 898 * Outbound address translation:
 899 *
 900 * iproc_pcie_address = axi_address - axi_offset
 901 * OARR = iproc_pcie_address
 902 * OMAP = pci_addr
 903 *
 904 * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 905 */
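/*
 * Illustrative example (hypothetical numbers): with axi_offset = 0, mapping a
 * 256 MB window at AXI 0x60000000 to PCI address 0x60000000 programs
 * OARR = 0x60000000 (plus the valid bit and the 256 MB size index) and
 * OMAP = 0x60000000.
 */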
 906static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 907                               u64 pci_addr, resource_size_t size)
 908{
 909        struct iproc_pcie_ob *ob = &pcie->ob;
 910        struct device *dev = pcie->dev;
 911        int ret = -EINVAL, window_idx, size_idx;
 912
 913        if (axi_addr < ob->axi_offset) {
 914                dev_err(dev, "axi address %pap less than offset %pap\n",
 915                        &axi_addr, &ob->axi_offset);
 916                return -EINVAL;
 917        }
 918
 919        /*
 920         * Translate the AXI address to the internal address used by the iProc
 921         * PCIe core before programming the OARR
 922         */
 923        axi_addr -= ob->axi_offset;
 924
 925        /* iterate through all OARR/OMAP mapping windows */
 926        for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
 927                const struct iproc_pcie_ob_map *ob_map =
 928                        &pcie->ob_map[window_idx];
 929
 930                /*
 931                 * If current outbound window is already in use, move on to the
 932                 * next one.
 933                 */
 934                if (iproc_pcie_ob_is_valid(pcie, window_idx))
 935                        continue;
 936
 937                /*
 938                 * Iterate through all supported window sizes within the
 939                 * OARR/OMAP pair to find a match.  Go through the window sizes
 940                 * in a descending order.
 941                 */
 942                for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
 943                     size_idx--) {
 944                        resource_size_t window_size =
 945                                ob_map->window_sizes[size_idx] * SZ_1M;
 946
 947                        if (size < window_size)
 948                                continue;
 949
 950                        if (!IS_ALIGNED(axi_addr, window_size) ||
 951                            !IS_ALIGNED(pci_addr, window_size)) {
 952                                dev_err(dev,
 953                                        "axi %pap or pci %pap not aligned\n",
 954                                        &axi_addr, &pci_addr);
 955                                return -EINVAL;
 956                        }
 957
 958                        /*
 959                         * Match found!  Program both OARR and OMAP and mark
 960                         * them as a valid entry.
 961                         */
 962                        ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
 963                                                  axi_addr, pci_addr);
 964                        if (ret)
 965                                goto err_ob;
 966
 967                        size -= window_size;
 968                        if (size == 0)
 969                                return 0;
 970
 971                        /*
 972                         * If we are here, we are done with the current window,
 973                         * but not yet finished all mappings.  Need to move on
 974                         * to the next window.
 975                         */
 976                        axi_addr += window_size;
 977                        pci_addr += window_size;
 978                        break;
 979                }
 980        }
 981
 982err_ob:
 983        dev_err(dev, "unable to configure outbound mapping\n");
 984        dev_err(dev,
 985                "axi %pap, axi offset %pap, pci %pap, res size %pap\n",
 986                &axi_addr, &ob->axi_offset, &pci_addr, &size);
 987
 988        return ret;
 989}
 990
 991static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 992                                 struct list_head *resources)
 993{
 994        struct device *dev = pcie->dev;
 995        struct resource_entry *window;
 996        int ret;
 997
 998        resource_list_for_each_entry(window, resources) {
 999                struct resource *res = window->res;
1000                u64 res_type = resource_type(res);
1001
1002                switch (res_type) {
1003                case IORESOURCE_IO:
1004                case IORESOURCE_BUS:
1005                        break;
1006                case IORESOURCE_MEM:
1007                        ret = iproc_pcie_setup_ob(pcie, res->start,
1008                                                  res->start - window->offset,
1009                                                  resource_size(res));
1010                        if (ret)
1011                                return ret;
1012                        break;
1013                default:
1014                        dev_err(dev, "invalid resource %pR\n", res);
1015                        return -EINVAL;
1016                }
1017        }
1018
1019        return 0;
1020}
1021
1022static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
1023                                           int region_idx)
1024{
1025        const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
1026        u32 val;
1027
1028        val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
1029
1030        return !!(val & (BIT(ib_map->nr_sizes) - 1));
1031}
1032
1033static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
1034                                            enum iproc_pcie_ib_map_type type)
1035{
1036        return !!(ib_map->type == type);
1037}
1038
1039static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
1040                               int size_idx, int nr_windows, u64 axi_addr,
1041                               u64 pci_addr, resource_size_t size)
1042{
1043        struct device *dev = pcie->dev;
1044        const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
1045        u16 iarr_offset, imap_offset;
1046        u32 val;
1047        int window_idx;
1048
1049        iarr_offset = iproc_pcie_reg_offset(pcie,
1050                                MAP_REG(IPROC_PCIE_IARR0, region_idx));
1051        imap_offset = iproc_pcie_reg_offset(pcie,
1052                                MAP_REG(IPROC_PCIE_IMAP0, region_idx));
1053        if (iproc_pcie_reg_is_invalid(iarr_offset) ||
1054            iproc_pcie_reg_is_invalid(imap_offset))
1055                return -EINVAL;
1056
1057        dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
1058                region_idx, iarr_offset, &axi_addr, &pci_addr);
1059
1060        /*
1061         * Program the IARR registers.  The upper 32-bit IARR register is
1062         * always right after the lower 32-bit IARR register.
1063         */
1064        writel(lower_32_bits(pci_addr) | BIT(size_idx),
1065               pcie->base + iarr_offset);
1066        writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
1067
1068        dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
1069                readl(pcie->base + iarr_offset),
1070                readl(pcie->base + iarr_offset + 4));
1071
1072        /*
1073         * Now program the IMAP registers.  Each IARR region may have one or
1074         * more IMAP windows.
1075         */
1076        size >>= ilog2(nr_windows);
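        /* each of the nr_windows IMAP windows covers an equal slice of the region */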
1077        for (window_idx = 0; window_idx < nr_windows; window_idx++) {
1078                val = readl(pcie->base + imap_offset);
1079                val |= lower_32_bits(axi_addr) | IMAP_VALID;
1080                writel(val, pcie->base + imap_offset);
1081                writel(upper_32_bits(axi_addr),
1082                       pcie->base + imap_offset + ib_map->imap_addr_offset);
1083
1084                dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
1085                        window_idx, readl(pcie->base + imap_offset),
1086                        readl(pcie->base + imap_offset +
1087                              ib_map->imap_addr_offset));
1088
1089                imap_offset += ib_map->imap_window_offset;
1090                axi_addr += size;
1091        }
1092
1093        return 0;
1094}
1095
1096static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
1097                               struct of_pci_range *range,
1098                               enum iproc_pcie_ib_map_type type)
1099{
1100        struct device *dev = pcie->dev;
1101        struct iproc_pcie_ib *ib = &pcie->ib;
1102        int ret;
1103        unsigned int region_idx, size_idx;
1104        u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
1105        resource_size_t size = range->size;
1106
1107        /* iterate through all IARR mapping regions */
1108        for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
1109                const struct iproc_pcie_ib_map *ib_map =
1110                        &pcie->ib_map[region_idx];
1111
1112                /*
1113                 * If current inbound region is already in use or not a
1114                 * compatible type, move on to the next.
1115                 */
1116                if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
1117                    !iproc_pcie_ib_check_type(ib_map, type))
1118                        continue;
1119
1120                /* iterate through all supported region sizes to find a match */
1121                for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
1122                        resource_size_t region_size =
1123                        ib_map->region_sizes[size_idx] * ib_map->size_unit;
1124
1125                        if (size != region_size)
1126                                continue;
1127
1128                        if (!IS_ALIGNED(axi_addr, region_size) ||
1129                            !IS_ALIGNED(pci_addr, region_size)) {
1130                                dev_err(dev,
1131                                        "axi %pap or pci %pap not aligned\n",
1132                                        &axi_addr, &pci_addr);
1133                                return -EINVAL;
1134                        }
1135
1136                        /* Match found!  Program IARR and all IMAP windows. */
1137                        ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
1138                                                  ib_map->nr_windows, axi_addr,
1139                                                  pci_addr, size);
1140                        if (ret)
1141                                goto err_ib;
1142                        else
1143                                return 0;
1144
1145                }
1146        }
1147        ret = -EINVAL;
1148
1149err_ib:
1150        dev_err(dev, "unable to configure inbound mapping\n");
1151        dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
1152                &axi_addr, &pci_addr, &size);
1153
1154        return ret;
1155}
1156
1157static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
1158{
1159        struct of_pci_range range;
1160        struct of_pci_range_parser parser;
1161        int ret;
1162
1163        /* Get the dma-ranges from DT */
1164        ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
1165        if (ret)
1166                return ret;
1167
1168        for_each_of_pci_range(&parser, &range) {
1169                /* Each range entry corresponds to an inbound mapping region */
1170                ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
1171                if (ret)
1172                        return ret;
1173        }
1174
1175        return 0;
1176}
1177
 1178static int iproc_pcie_get_msi(struct iproc_pcie *pcie,
 1179                              struct device_node *msi_node,
 1180                              u64 *msi_addr)
1181{
1182        struct device *dev = pcie->dev;
1183        int ret;
1184        struct resource res;
1185
1186        /*
 1187         * Check if the MSI controller node is an ARM GICv3 ITS, which is the only
1188         * supported external MSI controller that requires steering.
1189         */
1190        if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
1191                dev_err(dev, "unable to find compatible MSI controller\n");
1192                return -ENODEV;
1193        }
1194
1195        /* derive GITS_TRANSLATER address from GICv3 */
1196        ret = of_address_to_resource(msi_node, 0, &res);
1197        if (ret < 0) {
1198                dev_err(dev, "unable to obtain MSI controller resources\n");
1199                return ret;
1200        }
1201
1202        *msi_addr = res.start + GITS_TRANSLATER;
1203        return 0;
1204}
1205
1206static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
1207{
1208        int ret;
1209        struct of_pci_range range;
1210
1211        memset(&range, 0, sizeof(range));
1212        range.size = SZ_32K;
1213        range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
1214
1215        ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
1216        return ret;
1217}
1218
1219static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
1220                                         bool enable)
1221{
1222        u32 val;
1223
1224        if (!enable) {
1225                /*
1226                 * Disable PAXC MSI steering. All write transfers will be
1227                 * treated as non-MSI transfers
1228                 */
1229                val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
1230                val &= ~MSI_ENABLE_CFG;
1231                iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
1232                return;
1233        }
1234
1235        /*
1236         * Program bits [43:13] of address of GITS_TRANSLATER register into
1237         * bits [30:0] of the MSI base address register.  In fact, in all iProc
1238         * based SoCs, all I/O register bases are well below the 32-bit
1239         * boundary, so we can safely assume bits [43:32] are always zeros.
1240         */
1241        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
1242                             (u32)(msi_addr >> 13));
1243
1244        /* use a default 8K window size */
1245        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
1246
1247        /* steering MSI to GICv3 ITS */
1248        val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
1249        val |= GIC_V3_CFG;
1250        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
1251
1252        /*
1253         * Program bits [43:2] of address of GITS_TRANSLATER register into the
1254         * iProc MSI address registers.
1255         */
1256        msi_addr >>= 2;
1257        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
1258                             upper_32_bits(msi_addr));
1259        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
1260                             lower_32_bits(msi_addr));
1261
1262        /* enable MSI */
1263        val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
1264        val |= MSI_ENABLE_CFG;
1265        iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
1266}
1267
1268static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
1269                                struct device_node *msi_node)
1270{
1271        struct device *dev = pcie->dev;
1272        int ret;
1273        u64 msi_addr;
1274
 1275        ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
1276        if (ret < 0) {
1277                dev_err(dev, "msi steering failed\n");
1278                return ret;
1279        }
1280
1281        switch (pcie->type) {
1282        case IPROC_PCIE_PAXB_V2:
1283                ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
1284                if (ret)
1285                        return ret;
1286                break;
1287        case IPROC_PCIE_PAXC_V2:
1288                iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
1289                break;
1290        default:
1291                return -EINVAL;
1292        }
1293
1294        return 0;
1295}
1296
1297static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
1298{
1299        struct device_node *msi_node;
1300        int ret;
1301
1302        /*
1303         * Either the "msi-parent" or the "msi-map" phandle needs to exist
1304         * for us to obtain the MSI node.
1305         */
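        /*
         * Illustrative device tree fragments (names are examples only):
         *   msi-parent = <&msi0>;
         * or, when MSIs are steered to a GICv3 ITS:
         *   msi-map = <0x0 &gic_its 0x0 0x10000>;
         */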
1306
1307        msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
1308        if (!msi_node) {
1309                const __be32 *msi_map = NULL;
1310                int len;
1311                u32 phandle;
1312
1313                msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
1314                if (!msi_map)
1315                        return -ENODEV;
1316
1317                phandle = be32_to_cpup(msi_map + 1);
1318                msi_node = of_find_node_by_phandle(phandle);
1319                if (!msi_node)
1320                        return -ENODEV;
1321        }
1322
1323        /*
1324         * Certain revisions of the iProc PCIe controller require additional
1325         * configurations to steer the MSI writes towards an external MSI
1326         * controller.
1327         */
1328        if (pcie->need_msi_steer) {
1329                ret = iproc_pcie_msi_steer(pcie, msi_node);
1330                if (ret)
1331                        return ret;
1332        }
1333
1334        /*
1335         * If another MSI controller is being used, the call below should fail
1336         * but that is okay
1337         */
1338        return iproc_msi_init(pcie, msi_node);
1339}
1340
1341static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
1342{
1343        iproc_msi_exit(pcie);
1344}
1345
1346static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
1347{
1348        struct device *dev = pcie->dev;
1349        unsigned int reg_idx;
1350        const u16 *regs;
1351
1352        switch (pcie->type) {
1353        case IPROC_PCIE_PAXB_BCMA:
1354                regs = iproc_pcie_reg_paxb_bcma;
1355                break;
1356        case IPROC_PCIE_PAXB:
1357                regs = iproc_pcie_reg_paxb;
1358                pcie->iproc_cfg_read = true;
1359                pcie->has_apb_err_disable = true;
1360                if (pcie->need_ob_cfg) {
1361                        pcie->ob_map = paxb_ob_map;
1362                        pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
1363                }
1364                break;
1365        case IPROC_PCIE_PAXB_V2:
1366                regs = iproc_pcie_reg_paxb_v2;
1367                pcie->has_apb_err_disable = true;
1368                if (pcie->need_ob_cfg) {
1369                        pcie->ob_map = paxb_v2_ob_map;
1370                        pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
1371                }
1372                pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
1373                pcie->ib_map = paxb_v2_ib_map;
1374                pcie->need_msi_steer = true;
1375                dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n",
1376                         CFG_RETRY_STATUS);
1377                break;
1378        case IPROC_PCIE_PAXC:
1379                regs = iproc_pcie_reg_paxc;
1380                pcie->ep_is_internal = true;
1381                pcie->iproc_cfg_read = true;
1382                pcie->rej_unconfig_pf = true;
1383                break;
1384        case IPROC_PCIE_PAXC_V2:
1385                regs = iproc_pcie_reg_paxc_v2;
1386                pcie->ep_is_internal = true;
1387                pcie->iproc_cfg_read = true;
1388                pcie->rej_unconfig_pf = true;
1389                pcie->need_msi_steer = true;
1390                break;
1391        default:
1392                dev_err(dev, "incompatible iProc PCIe interface\n");
1393                return -EINVAL;
1394        }
1395
1396        pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
1397                                         sizeof(*pcie->reg_offsets),
1398                                         GFP_KERNEL);
1399        if (!pcie->reg_offsets)
1400                return -ENOMEM;
1401
1402        /* go through the register table and populate all valid registers */
1403        pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
1404                IPROC_PCIE_REG_INVALID : regs[0];
1405        for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
1406                pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
1407                        regs[reg_idx] : IPROC_PCIE_REG_INVALID;
1408
1409        return 0;
1410}
1411
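/*
 * Common setup entry point for the bus front-end drivers: initialize
 * per-revision parameters, bring up the PHY, toggle PERST#, program the
 * outbound/inbound mappings, verify the link, then scan and add the root bus.
 */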
1412int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
1413{
1414        struct device *dev;
1415        int ret;
1416        struct pci_bus *child;
1417        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1418
1419        dev = pcie->dev;
1420
1421        ret = iproc_pcie_rev_init(pcie);
1422        if (ret) {
1423                dev_err(dev, "unable to initialize controller parameters\n");
1424                return ret;
1425        }
1426
1427        ret = devm_request_pci_bus_resources(dev, res);
1428        if (ret)
1429                return ret;
1430
1431        ret = phy_init(pcie->phy);
1432        if (ret) {
1433                dev_err(dev, "unable to initialize PCIe PHY\n");
1434                return ret;
1435        }
1436
1437        ret = phy_power_on(pcie->phy);
1438        if (ret) {
1439                dev_err(dev, "unable to power on PCIe PHY\n");
1440                goto err_exit_phy;
1441        }
1442
1443        iproc_pcie_perst_ctrl(pcie, true);
1444        iproc_pcie_perst_ctrl(pcie, false);
1445
1446        if (pcie->need_ob_cfg) {
1447                ret = iproc_pcie_map_ranges(pcie, res);
1448                if (ret) {
1449                        dev_err(dev, "map failed\n");
1450                        goto err_power_off_phy;
1451                }
1452        }
1453
1454        if (pcie->need_ib_cfg) {
1455                ret = iproc_pcie_map_dma_ranges(pcie);
1456                if (ret && ret != -ENOENT)
1457                        goto err_power_off_phy;
1458        }
1459
1460        ret = iproc_pcie_check_link(pcie);
1461        if (ret) {
1462                dev_err(dev, "no PCIe EP device detected\n");
1463                goto err_power_off_phy;
1464        }
1465
1466        iproc_pcie_enable(pcie);
1467
1468        if (IS_ENABLED(CONFIG_PCI_MSI))
1469                if (iproc_pcie_msi_enable(pcie))
1470                        dev_info(dev, "not using iProc MSI\n");
1471
1472        list_splice_init(res, &host->windows);
1473        host->busnr = 0;
1474        host->dev.parent = dev;
1475        host->ops = &iproc_pcie_ops;
1476        host->sysdata = pcie;
1477        host->map_irq = pcie->map_irq;
1478        host->swizzle_irq = pci_common_swizzle;
1479
1480        ret = pci_scan_root_bus_bridge(host);
1481        if (ret < 0) {
1482                dev_err(dev, "failed to scan host: %d\n", ret);
1483                goto err_power_off_phy;
1484        }
1485
1486        pci_assign_unassigned_bus_resources(host->bus);
1487
1488        pcie->root_bus = host->bus;
1489
1490        list_for_each_entry(child, &host->bus->children, node)
1491                pcie_bus_configure_settings(child);
1492
1493        pci_bus_add_devices(host->bus);
1494
1495        return 0;
1496
1497err_power_off_phy:
1498        phy_power_off(pcie->phy);
1499err_exit_phy:
1500        phy_exit(pcie->phy);
1501        return ret;
1502}
1503EXPORT_SYMBOL(iproc_pcie_setup);
1504
1505int iproc_pcie_remove(struct iproc_pcie *pcie)
1506{
1507        pci_stop_root_bus(pcie->root_bus);
1508        pci_remove_root_bus(pcie->root_bus);
1509
1510        iproc_pcie_msi_disable(pcie);
1511
1512        phy_power_off(pcie->phy);
1513        phy_exit(pcie->phy);
1514
1515        return 0;
1516}
1517EXPORT_SYMBOL(iproc_pcie_remove);
1518
1519/*
 1520 * The MSI parsing logic in certain revisions of Broadcom PAXC-based root
 1521 * complexes does not work and needs to be disabled
1522 */
1523static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
1524{
1525        struct iproc_pcie *pcie = iproc_data(pdev->bus);
1526
1527        if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
1528                iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
1529}
1530DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
1531                        quirk_paxc_disable_msi_parsing);
1532DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
1533                        quirk_paxc_disable_msi_parsing);
1534DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
1535                        quirk_paxc_disable_msi_parsing);
1536
1537MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
1538MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
1539MODULE_LICENSE("GPL v2");
1540