/* linux/drivers/pci/controller/pcie-xilinx-nwl.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * PCIe host controller driver for NWL PCIe Bridge
   4 * Based on pcie-xilinx.c, pci-tegra.c
   5 *
   6 * (C) Copyright 2014 - 2015, Xilinx, Inc.
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/delay.h>
  11#include <linux/interrupt.h>
  12#include <linux/irq.h>
  13#include <linux/irqdomain.h>
  14#include <linux/kernel.h>
  15#include <linux/init.h>
  16#include <linux/msi.h>
  17#include <linux/of_address.h>
  18#include <linux/of_pci.h>
  19#include <linux/of_platform.h>
  20#include <linux/of_irq.h>
  21#include <linux/pci.h>
  22#include <linux/platform_device.h>
  23#include <linux/irqchip/chained_irq.h>
  24
  25#include "../pci.h"
  26
  27/* Bridge core config registers */
  28#define BRCFG_PCIE_RX0                  0x00000000
  29#define BRCFG_INTERRUPT                 0x00000010
  30#define BRCFG_PCIE_RX_MSG_FILTER        0x00000020
  31
  32/* Egress - Bridge translation registers */
  33#define E_BREG_CAPABILITIES             0x00000200
  34#define E_BREG_CONTROL                  0x00000208
  35#define E_BREG_BASE_LO                  0x00000210
  36#define E_BREG_BASE_HI                  0x00000214
  37#define E_ECAM_CAPABILITIES             0x00000220
  38#define E_ECAM_CONTROL                  0x00000228
  39#define E_ECAM_BASE_LO                  0x00000230
  40#define E_ECAM_BASE_HI                  0x00000234
  41#define E_DREG_CTRL                     0x00000288
  42#define E_DREG_BASE_LO                  0x00000290
  43
  44#define DREG_DMA_EN                     BIT(0)
  45#define DREG_DMA_BASE_LO                0xFD0F0000
  46
  47/* Ingress - address translations */
  48#define I_MSII_CAPABILITIES             0x00000300
  49#define I_MSII_CONTROL                  0x00000308
  50#define I_MSII_BASE_LO                  0x00000310
  51#define I_MSII_BASE_HI                  0x00000314
  52
  53#define I_ISUB_CONTROL                  0x000003E8
  54#define SET_ISUB_CONTROL                BIT(0)
  55/* Rxed msg fifo  - Interrupt status registers */
  56#define MSGF_MISC_STATUS                0x00000400
  57#define MSGF_MISC_MASK                  0x00000404
  58#define MSGF_LEG_STATUS                 0x00000420
  59#define MSGF_LEG_MASK                   0x00000424
  60#define MSGF_MSI_STATUS_LO              0x00000440
  61#define MSGF_MSI_STATUS_HI              0x00000444
  62#define MSGF_MSI_MASK_LO                0x00000448
  63#define MSGF_MSI_MASK_HI                0x0000044C
  64/* Root DMA Interrupt register */
  65#define MSGF_DMA_MASK                   0x00000464
  66
  67#define MSGF_INTR_EN                    BIT(0)
  68
  69/* Msg filter mask bits */
  70#define CFG_ENABLE_PM_MSG_FWD           BIT(1)
  71#define CFG_ENABLE_INT_MSG_FWD          BIT(2)
  72#define CFG_ENABLE_ERR_MSG_FWD          BIT(3)
  73#define CFG_ENABLE_MSG_FILTER_MASK      (CFG_ENABLE_PM_MSG_FWD | \
  74                                        CFG_ENABLE_INT_MSG_FWD | \
  75                                        CFG_ENABLE_ERR_MSG_FWD)
  76
  77/* Misc interrupt status mask bits */
  78#define MSGF_MISC_SR_RXMSG_AVAIL        BIT(0)
  79#define MSGF_MISC_SR_RXMSG_OVER         BIT(1)
  80#define MSGF_MISC_SR_SLAVE_ERR          BIT(4)
  81#define MSGF_MISC_SR_MASTER_ERR         BIT(5)
  82#define MSGF_MISC_SR_I_ADDR_ERR         BIT(6)
  83#define MSGF_MISC_SR_E_ADDR_ERR         BIT(7)
  84#define MSGF_MISC_SR_FATAL_AER          BIT(16)
  85#define MSGF_MISC_SR_NON_FATAL_AER      BIT(17)
  86#define MSGF_MISC_SR_CORR_AER           BIT(18)
  87#define MSGF_MISC_SR_UR_DETECT          BIT(20)
  88#define MSGF_MISC_SR_NON_FATAL_DEV      BIT(22)
  89#define MSGF_MISC_SR_FATAL_DEV          BIT(23)
  90#define MSGF_MISC_SR_LINK_DOWN          BIT(24)
  91#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH   BIT(25)
  92#define MSGF_MSIC_SR_LINK_BWIDTH        BIT(26)
  93
  94#define MSGF_MISC_SR_MASKALL            (MSGF_MISC_SR_RXMSG_AVAIL | \
  95                                        MSGF_MISC_SR_RXMSG_OVER | \
  96                                        MSGF_MISC_SR_SLAVE_ERR | \
  97                                        MSGF_MISC_SR_MASTER_ERR | \
  98                                        MSGF_MISC_SR_I_ADDR_ERR | \
  99                                        MSGF_MISC_SR_E_ADDR_ERR | \
 100                                        MSGF_MISC_SR_FATAL_AER | \
 101                                        MSGF_MISC_SR_NON_FATAL_AER | \
 102                                        MSGF_MISC_SR_CORR_AER | \
 103                                        MSGF_MISC_SR_UR_DETECT | \
 104                                        MSGF_MISC_SR_NON_FATAL_DEV | \
 105                                        MSGF_MISC_SR_FATAL_DEV | \
 106                                        MSGF_MISC_SR_LINK_DOWN | \
 107                                        MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
 108                                        MSGF_MSIC_SR_LINK_BWIDTH)
 109
 110/* Legacy interrupt status mask bits */
 111#define MSGF_LEG_SR_INTA                BIT(0)
 112#define MSGF_LEG_SR_INTB                BIT(1)
 113#define MSGF_LEG_SR_INTC                BIT(2)
 114#define MSGF_LEG_SR_INTD                BIT(3)
 115#define MSGF_LEG_SR_MASKALL             (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
 116                                        MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
 117
 118/* MSI interrupt status mask bits */
 119#define MSGF_MSI_SR_LO_MASK             GENMASK(31, 0)
 120#define MSGF_MSI_SR_HI_MASK             GENMASK(31, 0)
 121
 122#define MSII_PRESENT                    BIT(0)
 123#define MSII_ENABLE                     BIT(0)
 124#define MSII_STATUS_ENABLE              BIT(15)
 125
 126/* Bridge config interrupt mask */
 127#define BRCFG_INTERRUPT_MASK            BIT(0)
 128#define BREG_PRESENT                    BIT(0)
 129#define BREG_ENABLE                     BIT(0)
 130#define BREG_ENABLE_FORCE               BIT(1)
 131
 132/* E_ECAM status mask bits */
 133#define E_ECAM_PRESENT                  BIT(0)
 134#define E_ECAM_CR_ENABLE                BIT(0)
 135#define E_ECAM_SIZE_LOC                 GENMASK(20, 16)
 136#define E_ECAM_SIZE_SHIFT               16
 137#define ECAM_BUS_LOC_SHIFT              20
 138#define ECAM_DEV_LOC_SHIFT              12
 139#define NWL_ECAM_VALUE_DEFAULT          12
 140
 141#define CFG_DMA_REG_BAR                 GENMASK(2, 0)
 142
 143#define INT_PCI_MSI_NR                  (2 * 32)
 144
 145/* Readin the PS_LINKUP */
 146#define PS_LINKUP_OFFSET                0x00000238
 147#define PCIE_PHY_LINKUP_BIT             BIT(0)
 148#define PHY_RDY_LINKUP_BIT              BIT(1)
 149
 150/* Parameters for the waiting for link up routine */
 151#define LINK_WAIT_MAX_RETRIES          10
 152#define LINK_WAIT_USLEEP_MIN           90000
 153#define LINK_WAIT_USLEEP_MAX           100000
 154
 155struct nwl_msi {                        /* MSI information */
 156        struct irq_domain *msi_domain;
 157        unsigned long *bitmap;
 158        struct irq_domain *dev_domain;
 159        struct mutex lock;              /* protect bitmap variable */
 160        int irq_msi0;
 161        int irq_msi1;
 162};
 163
 164struct nwl_pcie {
 165        struct device *dev;
 166        void __iomem *breg_base;
 167        void __iomem *pcireg_base;
 168        void __iomem *ecam_base;
 169        phys_addr_t phys_breg_base;     /* Physical Bridge Register Base */
 170        phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
 171        phys_addr_t phys_ecam_base;     /* Physical Configuration Base */
 172        u32 breg_size;
 173        u32 pcie_reg_size;
 174        u32 ecam_size;
 175        int irq_intx;
 176        int irq_misc;
 177        u32 ecam_value;
 178        u8 last_busno;
 179        u8 root_busno;
 180        struct nwl_msi msi;
 181        struct irq_domain *legacy_irq_domain;
 182        struct clk *clk;
 183        raw_spinlock_t leg_mask_lock;
 184};
 185
 186static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
 187{
 188        return readl(pcie->breg_base + off);
 189}
 190
 191static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
 192{
 193        writel(val, pcie->breg_base + off);
 194}
 195
 196static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
 197{
 198        if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
 199                return true;
 200        return false;
 201}
 202
 203static bool nwl_phy_link_up(struct nwl_pcie *pcie)
 204{
 205        if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
 206                return true;
 207        return false;
 208}
 209
 210static int nwl_wait_for_link(struct nwl_pcie *pcie)
 211{
 212        struct device *dev = pcie->dev;
 213        int retries;
 214
 215        /* check if the link is up or not */
 216        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 217                if (nwl_phy_link_up(pcie))
 218                        return 0;
 219                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 220        }
 221
 222        dev_err(dev, "PHY link never came up\n");
 223        return -ETIMEDOUT;
 224}
 225
 226static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
 227{
 228        struct nwl_pcie *pcie = bus->sysdata;
 229
 230        /* Check link before accessing downstream ports */
 231        if (bus->number != pcie->root_busno) {
 232                if (!nwl_pcie_link_up(pcie))
 233                        return false;
 234        }
 235
 236        /* Only one device down on each root port */
 237        if (bus->number == pcie->root_busno && devfn > 0)
 238                return false;
 239
 240        return true;
 241}
 242
 243/**
 244 * nwl_pcie_map_bus - Get configuration base
 245 *
 246 * @bus: Bus structure of current bus
 247 * @devfn: Device/function
 248 * @where: Offset from base
 249 *
 250 * Return: Base address of the configuration space needed to be
 251 *         accessed.
 252 */
 253static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 254                                      int where)
 255{
 256        struct nwl_pcie *pcie = bus->sysdata;
 257        int relbus;
 258
 259        if (!nwl_pcie_valid_device(bus, devfn))
 260                return NULL;
 261
 262        relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
 263                        (devfn << ECAM_DEV_LOC_SHIFT);
 264
 265        return pcie->ecam_base + relbus + where;
 266}
 267
/* PCIe operations: ECAM-mapped config space with generic accessors */
static struct pci_ops nwl_pcie_ops = {
        .map_bus = nwl_pcie_map_bus,
        .read  = pci_generic_config_read,
        .write = pci_generic_config_write,
};
 274
/*
 * nwl_pcie_misc_handler - Handler for the shared "misc" bridge IRQ.
 *
 * Logs every asserted miscellaneous event (FIFO overflow, slave/master
 * errors, address-translation errors, AER and device errors, link
 * bandwidth events) and acknowledges them by writing the status bits
 * back to MSGF_MISC_STATUS.  Returns IRQ_NONE when no enabled status
 * bit is set, so the core can account for spurious/shared interrupts.
 */
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
        struct nwl_pcie *pcie = data;
        struct device *dev = pcie->dev;
        u32 misc_stat;

        /* Checking for misc interrupts */
        misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
                                     MSGF_MISC_SR_MASKALL;
        if (!misc_stat)
                return IRQ_NONE;

        if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
                dev_err(dev, "Received Message FIFO Overflow\n");

        if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
                dev_err(dev, "Slave error\n");

        if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
                dev_err(dev, "Master error\n");

        if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
                dev_err(dev, "In Misc Ingress address translation error\n");

        if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
                dev_err(dev, "In Misc Egress address translation error\n");

        if (misc_stat & MSGF_MISC_SR_FATAL_AER)
                dev_err(dev, "Fatal Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
                dev_err(dev, "Non-Fatal Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_CORR_AER)
                dev_err(dev, "Correctable Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_UR_DETECT)
                dev_err(dev, "Unsupported request Detected\n");

        if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
                dev_err(dev, "Non-Fatal Error Detected\n");

        if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
                dev_err(dev, "Fatal Error Detected\n");

        if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
                dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");

        if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
                dev_info(dev, "Link Bandwidth Management Status bit set\n");

        /* Clear misc interrupt status (write-1-to-clear) */
        nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

        return IRQ_HANDLED;
}
 331
/*
 * nwl_pcie_leg_handler - Chained handler for the parent "intx" IRQ.
 *
 * INTx is level-triggered, so keep re-reading MSGF_LEG_STATUS and
 * dispatching each asserted line (bit 0 == INTA ... bit 3 == INTD)
 * until all status bits are clear; the endpoint deasserts the line
 * once its own handler has run.
 */
static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct nwl_pcie *pcie;
        unsigned long status;
        u32 bit;
        u32 virq;

        chained_irq_enter(chip, desc);
        pcie = irq_desc_get_handler_data(desc);

        while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
                                MSGF_LEG_SR_MASKALL) != 0) {
                for_each_set_bit(bit, &status, PCI_NUM_INTX) {
                        /* status bit == hwirq in the legacy domain */
                        virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
                        if (virq)
                                generic_handle_irq(virq);
                }
        }

        chained_irq_exit(chip, desc);
}
 354
/*
 * nwl_pcie_handle_msi_irq - Dispatch pending MSIs from one status bank.
 * @pcie: host bridge state
 * @status_reg: MSGF_MSI_STATUS_LO or MSGF_MSI_STATUS_HI
 *
 * Re-reads the status register until it is clear.  Each set bit is
 * acknowledged (write-1-to-clear) *before* its virq is handled so an
 * MSI arriving while the handler runs is not lost.
 */
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
        struct nwl_msi *msi;
        unsigned long status;
        u32 bit;
        u32 virq;

        msi = &pcie->msi;

        while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
                for_each_set_bit(bit, &status, 32) {
                        /* Ack first, then dispatch */
                        nwl_bridge_writel(pcie, 1 << bit, status_reg);
                        virq = irq_find_mapping(msi->dev_domain, bit);
                        if (virq)
                                generic_handle_irq(virq);
                }
        }
}
 373
 374static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
 375{
 376        struct irq_chip *chip = irq_desc_get_chip(desc);
 377        struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
 378
 379        chained_irq_enter(chip, desc);
 380        nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
 381        chained_irq_exit(chip, desc);
 382}
 383
 384static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 385{
 386        struct irq_chip *chip = irq_desc_get_chip(desc);
 387        struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
 388
 389        chained_irq_enter(chip, desc);
 390        nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
 391        chained_irq_exit(chip, desc);
 392}
 393
 394static void nwl_mask_leg_irq(struct irq_data *data)
 395{
 396        struct irq_desc *desc = irq_to_desc(data->irq);
 397        struct nwl_pcie *pcie;
 398        unsigned long flags;
 399        u32 mask;
 400        u32 val;
 401
 402        pcie = irq_desc_get_chip_data(desc);
 403        mask = 1 << (data->hwirq - 1);
 404        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
 405        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
 406        nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
 407        raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 408}
 409
 410static void nwl_unmask_leg_irq(struct irq_data *data)
 411{
 412        struct irq_desc *desc = irq_to_desc(data->irq);
 413        struct nwl_pcie *pcie;
 414        unsigned long flags;
 415        u32 mask;
 416        u32 val;
 417
 418        pcie = irq_desc_get_chip_data(desc);
 419        mask = 1 << (data->hwirq - 1);
 420        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
 421        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
 422        nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
 423        raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
 424}
 425
/* irq_chip for INTA-INTD; mask/unmask toggle bits in MSGF_LEG_MASK */
static struct irq_chip nwl_leg_irq_chip = {
        .name = "nwl_pcie:legacy",
        .irq_enable = nwl_unmask_leg_irq,
        .irq_disable = nwl_mask_leg_irq,
        .irq_mask = nwl_mask_leg_irq,
        .irq_unmask = nwl_unmask_leg_irq,
};
 433
 434static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
 435                          irq_hw_number_t hwirq)
 436{
 437        irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
 438        irq_set_chip_data(irq, domain->host_data);
 439        irq_set_status_flags(irq, IRQ_LEVEL);
 440
 441        return 0;
 442}
 443
/* INTx domain ops; pci_irqd_intx_xlate maps DT INTx 1-4 to hwirq 0-3 */
static const struct irq_domain_ops legacy_domain_ops = {
        .map = nwl_legacy_map,
        .xlate = pci_irqd_intx_xlate,
};
 448
#ifdef CONFIG_PCI_MSI
/* Top-level MSI irq_chip seen by endpoint drivers */
static struct irq_chip nwl_msi_irq_chip = {
        .name = "nwl_pcie:msi",
        .irq_enable = unmask_msi_irq,
        .irq_disable = mask_msi_irq,
        .irq_mask = mask_msi_irq,
        .irq_unmask = unmask_msi_irq,

};

/* Advertise multi-vector MSI with the default domain/chip ops */
static struct msi_domain_info nwl_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_MULTI_PCI_MSI),
        .chip = &nwl_msi_irq_chip,
};
#endif
 465
 466static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 467{
 468        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
 469        phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
 470
 471        msg->address_lo = lower_32_bits(msi_addr);
 472        msg->address_hi = upper_32_bits(msi_addr);
 473        msg->data = data->hwirq;
 474}
 475
/* MSI affinity cannot be steered by this bridge; report -EINVAL */
static int nwl_msi_set_affinity(struct irq_data *irq_data,
                                const struct cpumask *mask, bool force)
{
        return -EINVAL;
}
 481
/* Inner (hwirq-level) MSI chip: composes the message, no affinity */
static struct irq_chip nwl_irq_chip = {
        .name = "Xilinx MSI",
        .irq_compose_msi_msg = nwl_compose_msi_msg,
        .irq_set_affinity = nwl_msi_set_affinity,
};
 487
 488static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 489                                unsigned int nr_irqs, void *args)
 490{
 491        struct nwl_pcie *pcie = domain->host_data;
 492        struct nwl_msi *msi = &pcie->msi;
 493        int bit;
 494        int i;
 495
 496        mutex_lock(&msi->lock);
 497        bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
 498                                         nr_irqs, 0);
 499        if (bit >= INT_PCI_MSI_NR) {
 500                mutex_unlock(&msi->lock);
 501                return -ENOSPC;
 502        }
 503
 504        bitmap_set(msi->bitmap, bit, nr_irqs);
 505
 506        for (i = 0; i < nr_irqs; i++) {
 507                irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
 508                                    domain->host_data, handle_simple_irq,
 509                                NULL, NULL);
 510        }
 511        mutex_unlock(&msi->lock);
 512        return 0;
 513}
 514
 515static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 516                                unsigned int nr_irqs)
 517{
 518        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
 519        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
 520        struct nwl_msi *msi = &pcie->msi;
 521
 522        mutex_lock(&msi->lock);
 523        bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
 524        mutex_unlock(&msi->lock);
 525}
 526
/* Allocation ops for the inner (hwirq) MSI domain */
static const struct irq_domain_ops dev_msi_domain_ops = {
        .alloc  = nwl_irq_domain_alloc,
        .free   = nwl_irq_domain_free,
};
 531
/*
 * nwl_pcie_init_msi_irq_domain - Create the two-level MSI IRQ domains.
 *
 * Builds an inner linear domain for hwirq bookkeeping and stacks the
 * PCI MSI domain on top of it.  Compiles to "return 0" when
 * CONFIG_PCI_MSI is disabled.
 */
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
        struct device *dev = pcie->dev;
        struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
        struct nwl_msi *msi = &pcie->msi;

        msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
                                                &dev_msi_domain_ops, pcie);
        if (!msi->dev_domain) {
                dev_err(dev, "failed to create dev IRQ domain\n");
                return -ENOMEM;
        }
        msi->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                    &nwl_msi_domain_info,
                                                    msi->dev_domain);
        if (!msi->msi_domain) {
                dev_err(dev, "failed to create msi IRQ domain\n");
                /* unwind the inner domain so nothing is leaked */
                irq_domain_remove(msi->dev_domain);
                return -ENOMEM;
        }
#endif
        return 0;
}
 556
 557static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 558{
 559        struct device *dev = pcie->dev;
 560        struct device_node *node = dev->of_node;
 561        struct device_node *legacy_intc_node;
 562
 563        legacy_intc_node = of_get_next_child(node, NULL);
 564        if (!legacy_intc_node) {
 565                dev_err(dev, "No legacy intc node found\n");
 566                return -EINVAL;
 567        }
 568
 569        pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
 570                                                        PCI_NUM_INTX,
 571                                                        &legacy_domain_ops,
 572                                                        pcie);
 573        of_node_put(legacy_intc_node);
 574        if (!pcie->legacy_irq_domain) {
 575                dev_err(dev, "failed to create IRQ domain\n");
 576                return -ENOMEM;
 577        }
 578
 579        raw_spin_lock_init(&pcie->leg_mask_lock);
 580        nwl_pcie_init_msi_irq_domain(pcie);
 581        return 0;
 582}
 583
 584static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
 585{
 586        struct device *dev = pcie->dev;
 587        struct platform_device *pdev = to_platform_device(dev);
 588        struct nwl_msi *msi = &pcie->msi;
 589        unsigned long base;
 590        int ret;
 591        int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);
 592
 593        mutex_init(&msi->lock);
 594
 595        msi->bitmap = kzalloc(size, GFP_KERNEL);
 596        if (!msi->bitmap)
 597                return -ENOMEM;
 598
 599        /* Get msi_1 IRQ number */
 600        msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
 601        if (msi->irq_msi1 < 0) {
 602                dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
 603                ret = -EINVAL;
 604                goto err;
 605        }
 606
 607        irq_set_chained_handler_and_data(msi->irq_msi1,
 608                                         nwl_pcie_msi_handler_high, pcie);
 609
 610        /* Get msi_0 IRQ number */
 611        msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
 612        if (msi->irq_msi0 < 0) {
 613                dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
 614                ret = -EINVAL;
 615                goto err;
 616        }
 617
 618        irq_set_chained_handler_and_data(msi->irq_msi0,
 619                                         nwl_pcie_msi_handler_low, pcie);
 620
 621        /* Check for msii_present bit */
 622        ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
 623        if (!ret) {
 624                dev_err(dev, "MSI not present\n");
 625                ret = -EIO;
 626                goto err;
 627        }
 628
 629        /* Enable MSII */
 630        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
 631                          MSII_ENABLE, I_MSII_CONTROL);
 632
 633        /* Enable MSII status */
 634        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
 635                          MSII_STATUS_ENABLE, I_MSII_CONTROL);
 636
 637        /* setup AFI/FPCI range */
 638        base = pcie->phys_pcie_reg_base;
 639        nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
 640        nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);
 641
 642        /*
 643         * For high range MSI interrupts: disable, clear any pending,
 644         * and enable
 645         */
 646        nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);
 647
 648        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie,  MSGF_MSI_STATUS_HI) &
 649                          MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);
 650
 651        nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
 652
 653        /*
 654         * For low range MSI interrupts: disable, clear any pending,
 655         * and enable
 656         */
 657        nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);
 658
 659        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
 660                          MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);
 661
 662        nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
 663
 664        return 0;
 665err:
 666        kfree(msi->bitmap);
 667        msi->bitmap = NULL;
 668        return ret;
 669}
 670
 671static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 672{
 673        struct device *dev = pcie->dev;
 674        struct platform_device *pdev = to_platform_device(dev);
 675        u32 breg_val, ecam_val, first_busno = 0;
 676        int err;
 677
 678        breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
 679        if (!breg_val) {
 680                dev_err(dev, "BREG is not present\n");
 681                return breg_val;
 682        }
 683
 684        /* Write bridge_off to breg base */
 685        nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
 686                          E_BREG_BASE_LO);
 687        nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
 688                          E_BREG_BASE_HI);
 689
 690        /* Enable BREG */
 691        nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
 692                          E_BREG_CONTROL);
 693
 694        /* Disable DMA channel registers */
 695        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
 696                          CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);
 697
 698        /* Enable Ingress subtractive decode translation */
 699        nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);
 700
 701        /* Enable msg filtering details */
 702        nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
 703                          BRCFG_PCIE_RX_MSG_FILTER);
 704
 705        err = nwl_wait_for_link(pcie);
 706        if (err)
 707                return err;
 708
 709        ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
 710        if (!ecam_val) {
 711                dev_err(dev, "ECAM is not present\n");
 712                return ecam_val;
 713        }
 714
 715        /* Enable ECAM */
 716        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
 717                          E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
 718
 719        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
 720                          (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
 721                          E_ECAM_CONTROL);
 722
 723        nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
 724                          E_ECAM_BASE_LO);
 725        nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
 726                          E_ECAM_BASE_HI);
 727
 728        /* Get bus range */
 729        ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
 730        pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
 731        /* Write primary, secondary and subordinate bus numbers */
 732        ecam_val = first_busno;
 733        ecam_val |= (first_busno + 1) << 8;
 734        ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
 735        writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
 736
 737        if (nwl_pcie_link_up(pcie))
 738                dev_info(dev, "Link is UP\n");
 739        else
 740                dev_info(dev, "Link is DOWN\n");
 741
 742        /* Get misc IRQ number */
 743        pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
 744        if (pcie->irq_misc < 0) {
 745                dev_err(dev, "failed to get misc IRQ %d\n",
 746                        pcie->irq_misc);
 747                return -EINVAL;
 748        }
 749
 750        err = devm_request_irq(dev, pcie->irq_misc,
 751                               nwl_pcie_misc_handler, IRQF_SHARED,
 752                               "nwl_pcie:misc", pcie);
 753        if (err) {
 754                dev_err(dev, "fail to register misc IRQ#%d\n",
 755                        pcie->irq_misc);
 756                return err;
 757        }
 758
 759        /* Disable all misc interrupts */
 760        nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 761
 762        /* Clear pending misc interrupts */
 763        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
 764                          MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);
 765
 766        /* Enable all misc interrupts */
 767        nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 768
 769        /* Disable all legacy interrupts */
 770        nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 771
 772        /* Clear pending legacy interrupts */
 773        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
 774                          MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
 775
 776        /* Enabling DREG translations */
 777        nwl_bridge_writel(pcie, DREG_DMA_EN, E_DREG_CTRL);
 778        nwl_bridge_writel(pcie, DREG_DMA_BASE_LO, E_DREG_BASE_LO);
 779        /* Enabling Root DMA interrupts */
 780        nwl_bridge_writel(pcie, MSGF_INTR_EN, MSGF_DMA_MASK);
 781
 782        /* Enable all legacy interrupts */
 783        nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 784
 785        /* Enable the bridge config interrupt */
 786        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
 787                          BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);
 788
 789        return 0;
 790}
 791
/*
 * nwl_pcie_parse_dt - Parse DT resources and install the INTx handler.
 *
 * Maps the "breg", "pcireg" and "cfg" (ECAM) register windows, records
 * their physical base addresses (the bridge programs these into its
 * translation registers later), grabs the "intx" IRQ and attaches the
 * chained legacy handler to it.
 */
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
                             struct platform_device *pdev)
{
        struct device *dev = pcie->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        const char *type;

        /* Check for device type */
        type = of_get_property(node, "device_type", NULL);
        if (!type || strcmp(type, "pci")) {
                dev_err(dev, "invalid \"device_type\" %s\n", type);
                return -EINVAL;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
        pcie->breg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pcie->breg_base))
                return PTR_ERR(pcie->breg_base);
        pcie->phys_breg_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
        pcie->pcireg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pcie->pcireg_base))
                return PTR_ERR(pcie->pcireg_base);
        pcie->phys_pcie_reg_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
        pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
        if (IS_ERR(pcie->ecam_base))
                return PTR_ERR(pcie->ecam_base);
        pcie->phys_ecam_base = res->start;

        /* Get intx IRQ number */
        pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
        if (pcie->irq_intx < 0) {
                dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
                return pcie->irq_intx;
        }

        irq_set_chained_handler_and_data(pcie->irq_intx,
                                         nwl_pcie_leg_handler, pcie);

        return 0;
}
 837
/* Device-tree match table; sentinel-terminated (driver is built-in, no module alias needed) */
static const struct of_device_id nwl_pcie_of_match[] = {
	{ .compatible = "xlnx,nwl-pcie-2.11", },
	{}
};
 842
 843static int nwl_pcie_probe(struct platform_device *pdev)
 844{
 845        struct device *dev = &pdev->dev;
 846        struct nwl_pcie *pcie;
 847        struct pci_bus *bus;
 848        struct pci_bus *child;
 849        struct pci_host_bridge *bridge;
 850        int err;
 851        resource_size_t iobase = 0;
 852        LIST_HEAD(res);
 853
 854        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 855        if (!bridge)
 856                return -ENODEV;
 857
 858        pcie = pci_host_bridge_priv(bridge);
 859
 860        pcie->dev = dev;
 861        pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
 862
 863        err = nwl_pcie_parse_dt(pcie, pdev);
 864        if (err) {
 865                dev_err(dev, "Parsing DT failed\n");
 866                return err;
 867        }
 868
 869        pcie->clk = devm_clk_get(dev, NULL);
 870        if (IS_ERR(pcie->clk))
 871                return PTR_ERR(pcie->clk);
 872        clk_prepare_enable(pcie->clk);
 873
 874        err = nwl_pcie_bridge_init(pcie);
 875        if (err) {
 876                dev_err(dev, "HW Initialization failed\n");
 877                return err;
 878        }
 879
 880        err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
 881                                                    &iobase);
 882        if (err) {
 883                dev_err(dev, "Getting bridge resources failed\n");
 884                return err;
 885        }
 886
 887        err = devm_request_pci_bus_resources(dev, &res);
 888        if (err)
 889                goto error;
 890
 891        err = nwl_pcie_init_irq_domain(pcie);
 892        if (err) {
 893                dev_err(dev, "Failed creating IRQ Domain\n");
 894                goto error;
 895        }
 896
 897        list_splice_init(&res, &bridge->windows);
 898        bridge->dev.parent = dev;
 899        bridge->sysdata = pcie;
 900        bridge->busnr = pcie->root_busno;
 901        bridge->ops = &nwl_pcie_ops;
 902        bridge->map_irq = of_irq_parse_and_map_pci;
 903        bridge->swizzle_irq = pci_common_swizzle;
 904
 905        if (IS_ENABLED(CONFIG_PCI_MSI)) {
 906                err = nwl_pcie_enable_msi(pcie);
 907                if (err < 0) {
 908                        dev_err(dev, "failed to enable MSI support: %d\n", err);
 909                        goto error;
 910                }
 911        }
 912
 913        err = pci_scan_root_bus_bridge(bridge);
 914        if (err)
 915                goto error;
 916
 917        bus = bridge->bus;
 918
 919        pci_assign_unassigned_bus_resources(bus);
 920        list_for_each_entry(child, &bus->children, node)
 921                pcie_bus_configure_settings(child);
 922        pci_bus_add_devices(bus);
 923        return 0;
 924
 925error:
 926        pci_free_resource_list(&res);
 927        return err;
 928}
 929
/*
 * Built-in platform driver registration; suppress_bind_attrs prevents
 * manual unbind via sysfs since there is no remove() path.
 */
static struct platform_driver nwl_pcie_driver = {
	.driver = {
		.name = "nwl-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = nwl_pcie_of_match,
	},
	.probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);
 939