linux/drivers/pci/controller/pcie-mobiveil.c
// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Mobiveil PCIe Host controller
 *
 * Copyright (c) 2018 Mobiveil Inc.
 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"

/* register offsets and bit positions */

/*
 * translation tables are grouped into windows; the registers of each window
 * are grouped into blocks of 4 or 16 registers each
 */
#define PAB_REG_BLOCK_SIZE      16
#define PAB_EXT_REG_BLOCK_SIZE  4

#define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE))
#define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE))
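/* e.g. PAB_REG_ADDR(0x0ba0, 1) = 0x0bb0 and PAB_EXT_REG_ADDR(0xbaf0, 1) = 0xbaf4 */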

#define LTSSM_STATUS            0x0404
#define  LTSSM_STATUS_L0_MASK   0x3f
#define  LTSSM_STATUS_L0        0x2d

#define PAB_CTRL                0x0808
#define  AMBA_PIO_ENABLE_SHIFT  0
#define  PEX_PIO_ENABLE_SHIFT   1
#define  PAGE_SEL_SHIFT         13
#define  PAGE_SEL_MASK          0x3f
#define  PAGE_LO_MASK           0x3ff
#define  PAGE_SEL_EN            0xc00
#define  PAGE_SEL_OFFSET_SHIFT  10

#define PAB_AXI_PIO_CTRL        0x0840
#define  APIO_EN_MASK           0xf

#define PAB_PEX_PIO_CTRL        0x08c0
#define  PIO_ENABLE_SHIFT       0

#define PAB_INTP_AMBA_MISC_ENB          0x0b0c
#define PAB_INTP_AMBA_MISC_STAT         0x0b1c
#define  PAB_INTP_INTX_MASK             0x01e0
#define  PAB_INTP_MSI_MASK              0x8

#define PAB_AXI_AMAP_CTRL(win)  PAB_REG_ADDR(0x0ba0, win)
#define  WIN_ENABLE_SHIFT       0
#define  WIN_TYPE_SHIFT         1

#define PAB_EXT_AXI_AMAP_SIZE(win)      PAB_EXT_REG_ADDR(0xbaf0, win)

#define PAB_AXI_AMAP_AXI_WIN(win)       PAB_REG_ADDR(0x0ba4, win)
#define  AXI_WINDOW_ALIGN_MASK          3

#define PAB_AXI_AMAP_PEX_WIN_L(win)     PAB_REG_ADDR(0x0ba8, win)
#define  PAB_BUS_SHIFT          24
#define  PAB_DEVICE_SHIFT       19
#define  PAB_FUNCTION_SHIFT     16

#define PAB_AXI_AMAP_PEX_WIN_H(win)     PAB_REG_ADDR(0x0bac, win)
#define PAB_INTP_AXI_PIO_CLASS          0x474

#define PAB_PEX_AMAP_CTRL(win)  PAB_REG_ADDR(0x4ba0, win)
#define  AMAP_CTRL_EN_SHIFT     0
#define  AMAP_CTRL_TYPE_SHIFT   1

#define PAB_EXT_PEX_AMAP_SIZEN(win)     PAB_EXT_REG_ADDR(0xbef0, win)
#define PAB_PEX_AMAP_AXI_WIN(win)       PAB_REG_ADDR(0x4ba4, win)
#define PAB_PEX_AMAP_PEX_WIN_L(win)     PAB_REG_ADDR(0x4ba8, win)
#define PAB_PEX_AMAP_PEX_WIN_H(win)     PAB_REG_ADDR(0x4bac, win)

/* starting offset of INTX bits in status register */
#define PAB_INTX_START  5

/* supported number of MSI interrupts */
#define PCI_NUM_MSI     16

/* MSI registers */
#define MSI_BASE_LO_OFFSET      0x04
#define MSI_BASE_HI_OFFSET      0x08
#define MSI_SIZE_OFFSET         0x0c
#define MSI_ENABLE_OFFSET       0x14
#define MSI_STATUS_OFFSET       0x18
#define MSI_DATA_OFFSET         0x20
#define MSI_ADDR_L_OFFSET       0x24
#define MSI_ADDR_H_OFFSET       0x28

/* outbound and inbound window definitions */
#define WIN_NUM_0               0
#define WIN_NUM_1               1
#define CFG_WINDOW_TYPE         0
#define IO_WINDOW_TYPE          1
#define MEM_WINDOW_TYPE         2
#define IB_WIN_SIZE             ((u64)256 * 1024 * 1024 * 1024)
#define MAX_PIO_WINDOWS         8

/* Parameters for the link-up wait loop; usleep_range() values in microseconds */
#define LINK_WAIT_MAX_RETRIES   10
#define LINK_WAIT_MIN           90000
#define LINK_WAIT_MAX           100000

struct mobiveil_msi {                   /* MSI information */
        struct mutex lock;              /* protect bitmap variable */
        struct irq_domain *msi_domain;
        struct irq_domain *dev_domain;
        phys_addr_t msi_pages_phys;
        int num_of_vectors;
        DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI);
};

struct mobiveil_pcie {
        struct platform_device *pdev;
        struct list_head resources;
        void __iomem *config_axi_slave_base;    /* endpoint config base */
        void __iomem *csr_axi_slave_base;       /* root port config base */
        void __iomem *apb_csr_base;     /* MSI register base */
        phys_addr_t pcie_reg_base;      /* Physical PCIe Controller Base */
        struct irq_domain *intx_domain;
        raw_spinlock_t intx_mask_lock;
        int irq;
        int apio_wins;
        int ppio_wins;
        int ob_wins_configured;         /* configured outbound windows */
        int ib_wins_configured;         /* configured inbound windows */
        struct resource *ob_io_res;
        char root_bus_nr;
        struct mobiveil_msi msi;
};

static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value,
                const u32 reg)
{
        writel_relaxed(value, pcie->csr_axi_slave_base + reg);
}

static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg)
{
        return readl_relaxed(pcie->csr_axi_slave_base + reg);
}

static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
        return (csr_readl(pcie, LTSSM_STATUS) &
                LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}

static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
        struct mobiveil_pcie *pcie = bus->sysdata;

        /* Only one device down on each root port */
        if ((bus->number == pcie->root_bus_nr) && (devfn > 0))
                return false;

        /*
         * Do not read more than one device on the bus directly
         * attached to RC
         */
        if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
                return false;

        return true;
}

/*
 * mobiveil_pcie_map_bus - routine to get the configuration base of either
 * root port or endpoint
 */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
                                        unsigned int devfn, int where)
{
        struct mobiveil_pcie *pcie = bus->sysdata;

        if (!mobiveil_pcie_valid_device(bus, devfn))
                return NULL;

        if (bus->number == pcie->root_bus_nr) {
                /* RC config access */
                return pcie->csr_axi_slave_base + where;
        }

        /*
         * EP config access (in Config/APIO space)
         * Program PEX Address base (31..16 bits) with appropriate value
         * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
         * Relies on pci_lock serialization
         */
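        /*
         * For illustration: a config access to bus 2, device 0, function 0
         * programs (2 << PAB_BUS_SHIFT) = 0x02000000 into the window below.
         */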
        csr_writel(pcie, bus->number << PAB_BUS_SHIFT |
                        PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
                        PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT,
                        PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
        return pcie->config_axi_slave_base + where;
}

static struct pci_ops mobiveil_pcie_ops = {
        .map_bus = mobiveil_pcie_map_bus,
        .read = pci_generic_config_read,
        .write = pci_generic_config_write,
};

static void mobiveil_pcie_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
        struct device *dev = &pcie->pdev->dev;
        struct mobiveil_msi *msi = &pcie->msi;
        u32 msi_data, msi_addr_lo, msi_addr_hi;
        u32 intr_status, msi_status;
        unsigned long shifted_status;
        u32 bit, virq, val, mask;

        /*
         * The core provides a single interrupt for both INTx/MSI messages.
         * So we'll read both INTx and MSI status
         */

        chained_irq_enter(chip, desc);

        /* read INTx status */
        val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
        mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        intr_status = val & mask;

        /* Handle INTx */
        if (intr_status & PAB_INTP_INTX_MASK) {
                shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
                shifted_status &= PAB_INTP_INTX_MASK;
                shifted_status >>= PAB_INTX_START;
                do {
                        for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
                                virq = irq_find_mapping(pcie->intx_domain,
                                                bit + 1);
                                if (virq)
                                        generic_handle_irq(virq);
                                else
                                        dev_err_ratelimited(dev,
                                                "unexpected IRQ, INT%d\n", bit);

                                /* clear the interrupt we just handled */
                                csr_writel(pcie,
                                        1 << (PAB_INTX_START + bit),
                                        PAB_INTP_AMBA_MISC_STAT);
                        }

                        /* re-read status in case an INTx was reasserted */
                        shifted_status = csr_readl(pcie,
                                        PAB_INTP_AMBA_MISC_STAT);
                        shifted_status &= PAB_INTP_INTX_MASK;
                        shifted_status >>= PAB_INTX_START;
                } while (shifted_status != 0);
        }

        /* read extra MSI status register */
        msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

        /* handle MSI interrupts */
        while (msi_status & 1) {
                msi_data = readl_relaxed(pcie->apb_csr_base
                                + MSI_DATA_OFFSET);

                /*
                 * The MSI_STATUS_OFFSET register only clears once both the
                 * MSI data and the MSI address have been popped from the
                 * hardware FIFO, hence the two dummy address reads below.
                 */
                msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
                                MSI_ADDR_L_OFFSET);
                msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
                                MSI_ADDR_H_OFFSET);
                dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
                                msi_data, msi_addr_hi, msi_addr_lo);

                virq = irq_find_mapping(msi->dev_domain, msi_data);
                if (virq)
                        generic_handle_irq(virq);

                msi_status = readl_relaxed(pcie->apb_csr_base +
                                MSI_STATUS_OFFSET);
        }

        /* Clear the interrupt status */
        csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
        chained_irq_exit(chip, desc);
}

static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct platform_device *pdev = pcie->pdev;
        struct device_node *node = dev->of_node;
        struct resource *res;

        /* map config resource */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "config_axi_slave");
        pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pcie->config_axi_slave_base))
                return PTR_ERR(pcie->config_axi_slave_base);
        pcie->ob_io_res = res;

        /* map csr resource */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                        "csr_axi_slave");
        pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pcie->csr_axi_slave_base))
                return PTR_ERR(pcie->csr_axi_slave_base);
        pcie->pcie_reg_base = res->start;

        /* map MSI config resource */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
        pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pcie->apb_csr_base))
                return PTR_ERR(pcie->apb_csr_base);

        /* read the number of windows requested */
        if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
                pcie->apio_wins = MAX_PIO_WINDOWS;

        if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
                pcie->ppio_wins = MAX_PIO_WINDOWS;

        pcie->irq = platform_get_irq(pdev, 0);
        if (pcie->irq <= 0) {
                dev_err(dev, "failed to map IRQ: %d\n", pcie->irq);
                return -ENODEV;
        }

        irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);

        return 0;
}

/*
 * select_paged_register - routine to access paged register of root complex
 *
 * RC registers are paged; to access one, the upper 6 bits of its offset are
 * written into the pg_sel field of the PAB_CTRL register, and the lower
 * 10 bits, combined with PAGE_SEL_EN, are used as the offset within the
 * selected page.
 */
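/*
 * Worked example: for offset 0x4ba0, pg_sel = (0x4ba0 >> 10) & 0x3f = 0x12
 * and the paged access goes to (0x4ba0 & 0x3ff) | PAGE_SEL_EN = 0xfa0.
 */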
static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset)
{
        int pab_ctrl_dw, pg_sel;

        /* clear pg_sel field */
        pab_ctrl_dw = csr_readl(pcie, PAB_CTRL);
        pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT));

        /* set pg_sel field */
        pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK;
        pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT));
        csr_writel(pcie, pab_ctrl_dw, PAB_CTRL);
}

static void write_paged_register(struct mobiveil_pcie *pcie,
                u32 val, u32 offset)
{
        u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;

        select_paged_register(pcie, offset);
        csr_writel(pcie, val, off);
}

static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset)
{
        u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN;

        select_paged_register(pcie, offset);
        return csr_readl(pcie, off);
}

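/*
 * program_ib_windows - program an inbound (PEX to AXI) translation window;
 * the size is assumed to be a power of two, since ~(size - 1) is used as
 * the window mask.
 */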
static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
                int pci_addr, u32 type, u64 size)
{
        int pio_ctrl_val;
        int amap_ctrl_dw;
        u64 size64 = ~(size - 1);

        if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
                dev_err(&pcie->pdev->dev,
                        "ERROR: max inbound windows reached !\n");
                return;
        }

        pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL);
        csr_writel(pcie,
                pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL);
        amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num));
        amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT));
        amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT));

        write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64),
                                PAB_PEX_AMAP_CTRL(win_num));

        write_paged_register(pcie, upper_32_bits(size64),
                                PAB_EXT_PEX_AMAP_SIZEN(win_num));

        write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num));
        write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num));
        write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num));

        /* keep the accounting used by the check above in sync */
        pcie->ib_wins_configured++;
}

/*
 * routine to program the outbound windows
 */
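/*
 * As with the inbound path, the size is assumed to be a power of two, since
 * ~(size - 1) is programmed as the window mask.
 */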
static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
                u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size)
{
        u32 value, type;
        u64 size64 = ~(size - 1);

        if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
                dev_err(&pcie->pdev->dev,
                        "ERROR: max outbound windows reached !\n");
                return;
        }

        /*
         * program the Enable bit, the window Type and the lower bits of the
         * window size in the PAB_AXI_AMAP_CTRL register
         */
        type = config_io_bit;
        value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
        csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
                        lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num));

        write_paged_register(pcie, upper_32_bits(size64),
                                PAB_EXT_AXI_AMAP_SIZE(win_num));

        /*
         * program AXI window base with appropriate value in
         * PAB_AXI_AMAP_AXI_WIN0 register
         */
        value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num));
        csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK),
                        PAB_AXI_AMAP_AXI_WIN(win_num));

        value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num));

        csr_writel(pcie, lower_32_bits(pci_addr),
                        PAB_AXI_AMAP_PEX_WIN_L(win_num));
        csr_writel(pcie, upper_32_bits(pci_addr),
                        PAB_AXI_AMAP_PEX_WIN_H(win_num));

        pcie->ob_wins_configured++;
}

static int mobiveil_bringup_link(struct mobiveil_pcie *pcie)
{
        int retries;

        /* check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (mobiveil_pcie_link_up(pcie))
                        return 0;

                usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
        }
        dev_err(&pcie->pdev->dev, "link never came up\n");
        return -ETIMEDOUT;
}

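/*
 * Program the controller's MSI capture window: inbound MSI writes that
 * target pcie_reg_base are latched into the MSI FIFO and later drained via
 * the MSI_DATA/MSI_ADDR_* registers in mobiveil_pcie_isr().
 */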
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
        phys_addr_t msg_addr = pcie->pcie_reg_base;
        struct mobiveil_msi *msi = &pcie->msi;

        pcie->msi.num_of_vectors = PCI_NUM_MSI;
        msi->msi_pages_phys = (phys_addr_t)msg_addr;

        writel_relaxed(lower_32_bits(msg_addr),
                pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
        writel_relaxed(upper_32_bits(msg_addr),
                pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
        writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
        writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}

static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
        u32 value, pab_ctrl, type = 0;
        int err;
        struct resource_entry *win, *tmp;

        err = mobiveil_bringup_link(pcie);
        if (err) {
                dev_info(&pcie->pdev->dev, "link bring-up failed\n");
                return err;
        }

        /*
         * enable I/O, memory and bus-master access in the Command register
         * of the PAB config space
         */
        value = csr_readl(pcie, PCI_COMMAND);
        csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                PCI_COMMAND_MASTER, PCI_COMMAND);

        /*
         * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
         * register
         */
        pab_ctrl = csr_readl(pcie, PAB_CTRL);
        csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) |
                (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL);

        csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
                PAB_INTP_AMBA_MISC_ENB);

        /*
         * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
         * PAB_AXI_PIO_CTRL Register
         */
        value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
        csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL);

        /*
         * we'll program one outbound window for config reads and
         * another default inbound window for all the upstream traffic;
         * the rest of the outbound windows will be configured according to
         * the "ranges" property defined in the device tree
         */

        /* config outbound translation window */
        program_ob_windows(pcie, pcie->ob_wins_configured,
                        pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE,
                        resource_size(pcie->ob_io_res));

        /* memory inbound translation window */
        program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
                type = 0;
                if (resource_type(win->res) == IORESOURCE_MEM)
                        type = MEM_WINDOW_TYPE;
                if (resource_type(win->res) == IORESOURCE_IO)
                        type = IO_WINDOW_TYPE;
                if (type) {
                        /* configure outbound translation window */
                        program_ob_windows(pcie, pcie->ob_wins_configured,
                                win->res->start, 0, type,
                                resource_size(win->res));
                }
        }

        /* setup MSI hardware registers */
        mobiveil_pcie_enable_msi(pcie);

        return err;
}

static void mobiveil_mask_intx_irq(struct irq_data *data)
{
        struct irq_desc *desc = irq_to_desc(data->irq);
        struct mobiveil_pcie *pcie;
        unsigned long flags;
        u32 mask, shifted_val;

        pcie = irq_desc_get_chip_data(desc);
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
        shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB);
        raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}

static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
        struct irq_desc *desc = irq_to_desc(data->irq);
        struct mobiveil_pcie *pcie;
        unsigned long flags;
        u32 shifted_val, mask;

        pcie = irq_desc_get_chip_data(desc);
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
        shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB);
        raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}

static struct irq_chip intx_irq_chip = {
        .name = "mobiveil_pcie:intx",
        .irq_enable = mobiveil_unmask_intx_irq,
        .irq_disable = mobiveil_mask_intx_irq,
        .irq_mask = mobiveil_mask_intx_irq,
        .irq_unmask = mobiveil_unmask_intx_irq,
};

/* routine to setup the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);
        return 0;
}

/* INTx domain operations structure */
static const struct irq_domain_ops intx_domain_ops = {
        .map = mobiveil_pcie_intx_map,
};

static struct irq_chip mobiveil_msi_irq_chip = {
        .name = "Mobiveil PCIe MSI",
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mobiveil_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
        .chip   = &mobiveil_msi_irq_chip,
};

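/*
 * Each MSI vector gets its own doorbell address inside the captured window:
 * pcie_reg_base + hwirq * sizeof(int), with the hwirq number as the payload.
 */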
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
        phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));

        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq;

        dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
                const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

static struct irq_chip mobiveil_msi_bottom_irq_chip = {
        .name                   = "Mobiveil MSI",
        .irq_compose_msi_msg    = mobiveil_compose_msi_msg,
        .irq_set_affinity       = mobiveil_msi_set_affinity,
};

static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
                unsigned int virq, unsigned int nr_irqs, void *args)
{
        struct mobiveil_pcie *pcie = domain->host_data;
        struct mobiveil_msi *msi = &pcie->msi;
        unsigned long bit;

        WARN_ON(nr_irqs != 1);
        mutex_lock(&msi->lock);

        bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
        if (bit >= msi->num_of_vectors) {
                mutex_unlock(&msi->lock);
                return -ENOSPC;
        }

        set_bit(bit, msi->msi_irq_in_use);

        mutex_unlock(&msi->lock);

        irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
                                domain->host_data, handle_level_irq,
                                NULL, NULL);
        return 0;
}

static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
                unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
        struct mobiveil_msi *msi = &pcie->msi;

        mutex_lock(&msi->lock);

        if (!test_bit(d->hwirq, msi->msi_irq_in_use)) {
                dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
                        d->hwirq);
        } else {
                __clear_bit(d->hwirq, msi->msi_irq_in_use);
        }

        mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc  = mobiveil_irq_msi_domain_alloc,
        .free   = mobiveil_irq_msi_domain_free,
};

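/*
 * Two-level MSI setup: dev_domain hands out the controller's hardware
 * vectors (msi_domain_ops above), while msi_domain layers the generic
 * PCI MSI/MSI-X handling from mobiveil_msi_domain_info on top of it.
 */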
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
        struct mobiveil_msi *msi = &pcie->msi;

        mutex_init(&pcie->msi.lock);
        msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
                                                &msi_domain_ops, pcie);
        if (!msi->dev_domain) {
                dev_err(dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        msi->msi_domain = pci_msi_create_irq_domain(fwnode,
                                &mobiveil_msi_domain_info, msi->dev_domain);
        if (!msi->msi_domain) {
                dev_err(dev, "failed to create MSI domain\n");
                irq_domain_remove(msi->dev_domain);
                return -ENOMEM;
        }
        return 0;
}

static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
        struct device *dev = &pcie->pdev->dev;
        struct device_node *node = dev->of_node;
        int ret;

        /* setup INTx */
        pcie->intx_domain = irq_domain_add_linear(node,
                                PCI_NUM_INTX, &intx_domain_ops, pcie);

        if (!pcie->intx_domain) {
                dev_err(dev, "Failed to get an INTx IRQ domain\n");
                return -ENODEV;
        }

        raw_spin_lock_init(&pcie->intx_mask_lock);

        /* setup MSI */
        ret = mobiveil_allocate_msi_domains(pcie);
        if (ret)
                return ret;

        return 0;
}

static int mobiveil_pcie_probe(struct platform_device *pdev)
{
        struct mobiveil_pcie *pcie;
        struct pci_bus *bus;
        struct pci_bus *child;
        struct pci_host_bridge *bridge;
        struct device *dev = &pdev->dev;
        resource_size_t iobase;
        int ret;

        /* allocate the PCIe port */
        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
        if (!bridge)
                return -ENODEV;

        pcie = pci_host_bridge_priv(bridge);
        if (!pcie)
                return -ENOMEM;

        pcie->pdev = pdev;

        ret = mobiveil_pcie_parse_dt(pcie);
        if (ret) {
                dev_err(dev, "Parsing DT failed, ret: %d\n", ret);
                return ret;
        }

        INIT_LIST_HEAD(&pcie->resources);

        /* parse the host bridge base addresses from the device tree file */
        ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
                                                    &pcie->resources, &iobase);
        if (ret) {
                dev_err(dev, "Getting bridge resources failed\n");
                return -ENOMEM;
        }

        /*
         * configure all inbound and outbound windows and prepare the RC for
         * config access
         */
        ret = mobiveil_host_init(pcie);
        if (ret) {
                dev_err(dev, "Failed to initialize host\n");
                goto error;
        }

        /* fixup for PCIe class register */
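        /* the upper 16 bits of the value (0x0604) are PCI_CLASS_BRIDGE_PCI */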
        csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);

        /* initialize the IRQ domains */
        ret = mobiveil_pcie_init_irq_domain(pcie);
        if (ret) {
                dev_err(dev, "Failed creating IRQ Domain\n");
                goto error;
        }

        ret = devm_request_pci_bus_resources(dev, &pcie->resources);
        if (ret)
                goto error;

        /* Initialize bridge */
        list_splice_init(&pcie->resources, &bridge->windows);
        bridge->dev.parent = dev;
        bridge->sysdata = pcie;
        bridge->busnr = pcie->root_bus_nr;
        bridge->ops = &mobiveil_pcie_ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        /* setup the kernel resources for the newly added PCIe root bus */
        ret = pci_scan_root_bus_bridge(bridge);
        if (ret)
                goto error;

        bus = bridge->bus;

        pci_assign_unassigned_bus_resources(bus);
        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);
        pci_bus_add_devices(bus);

        return 0;
error:
        pci_free_resource_list(&pcie->resources);
        return ret;
}

static const struct of_device_id mobiveil_pcie_of_match[] = {
        {.compatible = "mbvl,gpex40-pcie",},
        {},
};

MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match);

static struct platform_driver mobiveil_pcie_driver = {
        .probe = mobiveil_pcie_probe,
        .driver = {
                        .name = "mobiveil-pcie",
                        .of_match_table = mobiveil_pcie_of_match,
                        .suppress_bind_attrs = true,
                },
};

builtin_platform_driver(mobiveil_pcie_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");