/* linux/drivers/pci/host/pcie-xilinx-nwl.c */
/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
  12
  13#include <linux/clk.h>
  14#include <linux/delay.h>
  15#include <linux/interrupt.h>
  16#include <linux/irq.h>
  17#include <linux/irqdomain.h>
  18#include <linux/kernel.h>
  19#include <linux/init.h>
  20#include <linux/msi.h>
  21#include <linux/of_address.h>
  22#include <linux/of_pci.h>
  23#include <linux/of_platform.h>
  24#include <linux/of_irq.h>
  25#include <linux/pci.h>
  26#include <linux/platform_device.h>
  27#include <linux/irqchip/chained_irq.h>
  28
  29/* Bridge core config registers */
  30#define BRCFG_PCIE_RX0                  0x00000000
  31#define BRCFG_INTERRUPT                 0x00000010
  32#define BRCFG_PCIE_RX_MSG_FILTER        0x00000020
  33
  34/* Egress - Bridge translation registers */
  35#define E_BREG_CAPABILITIES             0x00000200
  36#define E_BREG_CONTROL                  0x00000208
  37#define E_BREG_BASE_LO                  0x00000210
  38#define E_BREG_BASE_HI                  0x00000214
  39#define E_ECAM_CAPABILITIES             0x00000220
  40#define E_ECAM_CONTROL                  0x00000228
  41#define E_ECAM_BASE_LO                  0x00000230
  42#define E_ECAM_BASE_HI                  0x00000234
  43
  44/* Ingress - address translations */
  45#define I_MSII_CAPABILITIES             0x00000300
  46#define I_MSII_CONTROL                  0x00000308
  47#define I_MSII_BASE_LO                  0x00000310
  48#define I_MSII_BASE_HI                  0x00000314
  49
  50#define I_ISUB_CONTROL                  0x000003E8
  51#define SET_ISUB_CONTROL                BIT(0)
  52/* Rxed msg fifo  - Interrupt status registers */
  53#define MSGF_MISC_STATUS                0x00000400
  54#define MSGF_MISC_MASK                  0x00000404
  55#define MSGF_LEG_STATUS                 0x00000420
  56#define MSGF_LEG_MASK                   0x00000424
  57#define MSGF_MSI_STATUS_LO              0x00000440
  58#define MSGF_MSI_STATUS_HI              0x00000444
  59#define MSGF_MSI_MASK_LO                0x00000448
  60#define MSGF_MSI_MASK_HI                0x0000044C
  61
  62/* Msg filter mask bits */
  63#define CFG_ENABLE_PM_MSG_FWD           BIT(1)
  64#define CFG_ENABLE_INT_MSG_FWD          BIT(2)
  65#define CFG_ENABLE_ERR_MSG_FWD          BIT(3)
  66#define CFG_ENABLE_MSG_FILTER_MASK      (CFG_ENABLE_PM_MSG_FWD | \
  67                                        CFG_ENABLE_INT_MSG_FWD | \
  68                                        CFG_ENABLE_ERR_MSG_FWD)
  69
  70/* Misc interrupt status mask bits */
  71#define MSGF_MISC_SR_RXMSG_AVAIL        BIT(0)
  72#define MSGF_MISC_SR_RXMSG_OVER         BIT(1)
  73#define MSGF_MISC_SR_SLAVE_ERR          BIT(4)
  74#define MSGF_MISC_SR_MASTER_ERR         BIT(5)
  75#define MSGF_MISC_SR_I_ADDR_ERR         BIT(6)
  76#define MSGF_MISC_SR_E_ADDR_ERR         BIT(7)
  77#define MSGF_MISC_SR_FATAL_AER          BIT(16)
  78#define MSGF_MISC_SR_NON_FATAL_AER      BIT(17)
  79#define MSGF_MISC_SR_CORR_AER           BIT(18)
  80#define MSGF_MISC_SR_UR_DETECT          BIT(20)
  81#define MSGF_MISC_SR_NON_FATAL_DEV      BIT(22)
  82#define MSGF_MISC_SR_FATAL_DEV          BIT(23)
  83#define MSGF_MISC_SR_LINK_DOWN          BIT(24)
  84#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH   BIT(25)
  85#define MSGF_MSIC_SR_LINK_BWIDTH        BIT(26)
  86
  87#define MSGF_MISC_SR_MASKALL            (MSGF_MISC_SR_RXMSG_AVAIL | \
  88                                        MSGF_MISC_SR_RXMSG_OVER | \
  89                                        MSGF_MISC_SR_SLAVE_ERR | \
  90                                        MSGF_MISC_SR_MASTER_ERR | \
  91                                        MSGF_MISC_SR_I_ADDR_ERR | \
  92                                        MSGF_MISC_SR_E_ADDR_ERR | \
  93                                        MSGF_MISC_SR_FATAL_AER | \
  94                                        MSGF_MISC_SR_NON_FATAL_AER | \
  95                                        MSGF_MISC_SR_CORR_AER | \
  96                                        MSGF_MISC_SR_UR_DETECT | \
  97                                        MSGF_MISC_SR_NON_FATAL_DEV | \
  98                                        MSGF_MISC_SR_FATAL_DEV | \
  99                                        MSGF_MISC_SR_LINK_DOWN | \
 100                                        MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
 101                                        MSGF_MSIC_SR_LINK_BWIDTH)
 102
 103/* Legacy interrupt status mask bits */
 104#define MSGF_LEG_SR_INTA                BIT(0)
 105#define MSGF_LEG_SR_INTB                BIT(1)
 106#define MSGF_LEG_SR_INTC                BIT(2)
 107#define MSGF_LEG_SR_INTD                BIT(3)
 108#define MSGF_LEG_SR_MASKALL             (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
 109                                        MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
 110
 111/* MSI interrupt status mask bits */
 112#define MSGF_MSI_SR_LO_MASK             GENMASK(31, 0)
 113#define MSGF_MSI_SR_HI_MASK             GENMASK(31, 0)
 114
 115#define MSII_PRESENT                    BIT(0)
 116#define MSII_ENABLE                     BIT(0)
 117#define MSII_STATUS_ENABLE              BIT(15)
 118
 119/* Bridge config interrupt mask */
 120#define BRCFG_INTERRUPT_MASK            BIT(0)
 121#define BREG_PRESENT                    BIT(0)
 122#define BREG_ENABLE                     BIT(0)
 123#define BREG_ENABLE_FORCE               BIT(1)
 124
 125/* E_ECAM status mask bits */
 126#define E_ECAM_PRESENT                  BIT(0)
 127#define E_ECAM_CR_ENABLE                BIT(0)
 128#define E_ECAM_SIZE_LOC                 GENMASK(20, 16)
 129#define E_ECAM_SIZE_SHIFT               16
 130#define ECAM_BUS_LOC_SHIFT              20
 131#define ECAM_DEV_LOC_SHIFT              12
 132#define NWL_ECAM_VALUE_DEFAULT          12
 133
 134#define CFG_DMA_REG_BAR                 GENMASK(2, 0)
 135
 136#define INT_PCI_MSI_NR                  (2 * 32)
 137#define INTX_NUM                        4
 138
 139/* Readin the PS_LINKUP */
 140#define PS_LINKUP_OFFSET                0x00000238
 141#define PCIE_PHY_LINKUP_BIT             BIT(0)
 142#define PHY_RDY_LINKUP_BIT              BIT(1)
 143
 144/* Parameters for the waiting for link up routine */
 145#define LINK_WAIT_MAX_RETRIES          10
 146#define LINK_WAIT_USLEEP_MIN           90000
 147#define LINK_WAIT_USLEEP_MAX           100000
 148
/* MSI controller state: IRQ domains, hwirq bitmap and chained parent IRQs */
struct nwl_msi {                        /* MSI information */
        struct irq_domain *msi_domain;  /* PCI MSI domain (outer) */
        unsigned long *bitmap;          /* INT_PCI_MSI_NR bits, one per hwirq */
        struct irq_domain *dev_domain;  /* linear hwirq domain (inner) */
        struct mutex lock;              /* protect bitmap variable */
        int irq_msi0;                   /* parent IRQ for low 32 MSI vectors */
        int irq_msi1;                   /* parent IRQ for high 32 MSI vectors */
};
 157
/* Per-controller driver state */
struct nwl_pcie {
        struct device *dev;
        void __iomem *breg_base;        /* mapped "breg" bridge registers */
        void __iomem *pcireg_base;      /* mapped "pcireg" controller registers */
        void __iomem *ecam_base;        /* mapped "cfg" ECAM config space */
        phys_addr_t phys_breg_base;     /* Physical Bridge Register Base */
        phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
        phys_addr_t phys_ecam_base;     /* Physical Configuration Base */
        u32 breg_size;
        u32 pcie_reg_size;
        u32 ecam_size;
        int irq_intx;                   /* chained INTx parent IRQ */
        int irq_misc;                   /* misc status IRQ */
        u32 ecam_value;                 /* bus bits written to E_ECAM size field */
        u8 last_busno;                  /* derived from E_ECAM_CONTROL size */
        u8 root_busno;                  /* root bus number (0 unless set) */
        struct nwl_msi msi;
        struct irq_domain *legacy_irq_domain;   /* INTx domain */
        struct clk *clk;
};
 178
/* Read a 32-bit bridge register at offset @off */
static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
        return readl(pcie->breg_base + off);
}
 183
/* Write @val to the 32-bit bridge register at offset @off */
static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
        writel(val, pcie->breg_base + off);
}
 188
 189static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
 190{
 191        if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
 192                return true;
 193        return false;
 194}
 195
 196static bool nwl_phy_link_up(struct nwl_pcie *pcie)
 197{
 198        if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
 199                return true;
 200        return false;
 201}
 202
 203static int nwl_wait_for_link(struct nwl_pcie *pcie)
 204{
 205        struct device *dev = pcie->dev;
 206        int retries;
 207
 208        /* check if the link is up or not */
 209        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 210                if (nwl_phy_link_up(pcie))
 211                        return 0;
 212                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 213        }
 214
 215        dev_err(dev, "PHY link never came up\n");
 216        return -ETIMEDOUT;
 217}
 218
 219static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
 220{
 221        struct nwl_pcie *pcie = bus->sysdata;
 222
 223        /* Check link before accessing downstream ports */
 224        if (bus->number != pcie->root_busno) {
 225                if (!nwl_pcie_link_up(pcie))
 226                        return false;
 227        }
 228
 229        /* Only one device down on each root port */
 230        if (bus->number == pcie->root_busno && devfn > 0)
 231                return false;
 232
 233        return true;
 234}
 235
/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the configuration space needed to be
 *         accessed.
 */
 246static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 247                                      int where)
 248{
 249        struct nwl_pcie *pcie = bus->sysdata;
 250        int relbus;
 251
 252        if (!nwl_pcie_valid_device(bus, devfn))
 253                return NULL;
 254
 255        relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
 256                        (devfn << ECAM_DEV_LOC_SHIFT);
 257
 258        return pcie->ecam_base + relbus + where;
 259}
 260
 261/* PCIe operations */
/* PCIe operations: ECAM mapping with generic config accessors */
static struct pci_ops nwl_pcie_ops = {
        .map_bus = nwl_pcie_map_bus,
        .read  = pci_generic_config_read,
        .write = pci_generic_config_write,
};
 267
/*
 * Misc interrupt handler: decode MSGF_MISC_STATUS, log every asserted
 * error/status condition, then write-1-clear exactly the bits observed.
 *
 * Return: IRQ_NONE when no enabled misc bit is pending, IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
        struct nwl_pcie *pcie = data;
        struct device *dev = pcie->dev;
        u32 misc_stat;

        /* Checking for misc interrupts */
        misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
                                     MSGF_MISC_SR_MASKALL;
        if (!misc_stat)
                return IRQ_NONE;

        if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
                dev_err(dev, "Received Message FIFO Overflow\n");

        if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
                dev_err(dev, "Slave error\n");

        if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
                dev_err(dev, "Master error\n");

        if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
                dev_err(dev, "In Misc Ingress address translation error\n");

        if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
                dev_err(dev, "In Misc Egress address translation error\n");

        if (misc_stat & MSGF_MISC_SR_FATAL_AER)
                dev_err(dev, "Fatal Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
                dev_err(dev, "Non-Fatal Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_CORR_AER)
                dev_err(dev, "Correctable Error in AER Capability\n");

        if (misc_stat & MSGF_MISC_SR_UR_DETECT)
                dev_err(dev, "Unsupported request Detected\n");

        if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
                dev_err(dev, "Non-Fatal Error Detected\n");

        if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
                dev_err(dev, "Fatal Error Detected\n");

        if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
                dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");

        if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
                dev_info(dev, "Link Bandwidth Management Status bit set\n");

        /* Clear misc interrupt status (write-1-to-clear) */
        nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

        return IRQ_HANDLED;
}
 324
/*
 * Chained handler for INTx (legacy) interrupts: re-read MSGF_LEG_STATUS
 * until no enabled INTA-INTD bit remains set and dispatch each one to
 * the legacy IRQ domain.
 *
 * NOTE(review): status bit N is looked up as hwirq N + 1, but the
 * domain is created with INTX_NUM (4) entries (hwirqs 0-3), so the
 * INTD bit (bit 3 -> hwirq 4) can never resolve to a virq — verify
 * the intended hwirq numbering against the DT interrupt cells.
 */
static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct nwl_pcie *pcie;
        unsigned long status;
        u32 bit;
        u32 virq;

        chained_irq_enter(chip, desc);
        pcie = irq_desc_get_handler_data(desc);

        while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
                                MSGF_LEG_SR_MASKALL) != 0) {
                for_each_set_bit(bit, &status, INTX_NUM) {
                        virq = irq_find_mapping(pcie->legacy_irq_domain,
                                                bit + 1);
                        if (virq)
                                generic_handle_irq(virq);
                }
        }

        chained_irq_exit(chip, desc);
}
 348
/*
 * Drain one 32-bit MSI status register (@status_reg is
 * MSGF_MSI_STATUS_LO or MSGF_MSI_STATUS_HI). Each pending bit is
 * write-1-cleared before its virq is handled, then the register is
 * re-read until it reads zero.
 */
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
        struct nwl_msi *msi;
        unsigned long status;
        u32 bit;
        u32 virq;

        msi = &pcie->msi;

        while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
                for_each_set_bit(bit, &status, 32) {
                        /* Ack this vector before dispatching it */
                        nwl_bridge_writel(pcie, 1 << bit, status_reg);
                        virq = irq_find_mapping(msi->dev_domain, bit);
                        if (virq)
                                generic_handle_irq(virq);
                }
        }
}
 367
 368static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
 369{
 370        struct irq_chip *chip = irq_desc_get_chip(desc);
 371        struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
 372
 373        chained_irq_enter(chip, desc);
 374        nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
 375        chained_irq_exit(chip, desc);
 376}
 377
 378static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
 379{
 380        struct irq_chip *chip = irq_desc_get_chip(desc);
 381        struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
 382
 383        chained_irq_enter(chip, desc);
 384        nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
 385        chained_irq_exit(chip, desc);
 386}
 387
/* .map callback for the INTx domain: simple-irq flow, dummy chip */
static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
                          irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}
 396
/* irq_domain ops for the INTx (legacy) domain */
static const struct irq_domain_ops legacy_domain_ops = {
        .map = nwl_legacy_map,
};
 400
#ifdef CONFIG_PCI_MSI
/*
 * Top-level MSI irq_chip: mask/unmask through the PCI MSI helpers.
 * NOTE(review): unmask_msi_irq/mask_msi_irq were renamed to
 * pci_msi_unmask_irq/pci_msi_mask_irq in later kernels — confirm
 * against the target kernel version.
 */
static struct irq_chip nwl_msi_irq_chip = {
        .name = "nwl_pcie:msi",
        .irq_enable = unmask_msi_irq,
        .irq_disable = mask_msi_irq,
        .irq_mask = mask_msi_irq,
        .irq_unmask = unmask_msi_irq,

};

/* Default MSI domain/chip ops; multi-vector MSI supported */
static struct msi_domain_info nwl_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_MULTI_PCI_MSI),
        .chip = &nwl_msi_irq_chip,
};
#endif
 417
 418static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 419{
 420        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
 421        phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
 422
 423        msg->address_lo = lower_32_bits(msi_addr);
 424        msg->address_hi = upper_32_bits(msi_addr);
 425        msg->data = data->hwirq;
 426}
 427
/* MSI affinity cannot be steered on this controller */
static int nwl_msi_set_affinity(struct irq_data *irq_data,
                                const struct cpumask *mask, bool force)
{
        return -EINVAL;
}
 433
/* Chip for the inner hwirq domain: composes the MSI doorbell message */
static struct irq_chip nwl_irq_chip = {
        .name = "Xilinx MSI",
        .irq_compose_msi_msg = nwl_compose_msi_msg,
        .irq_set_affinity = nwl_msi_set_affinity,
};
 439
 440static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 441                                unsigned int nr_irqs, void *args)
 442{
 443        struct nwl_pcie *pcie = domain->host_data;
 444        struct nwl_msi *msi = &pcie->msi;
 445        int bit;
 446        int i;
 447
 448        mutex_lock(&msi->lock);
 449        bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
 450                                         nr_irqs, 0);
 451        if (bit >= INT_PCI_MSI_NR) {
 452                mutex_unlock(&msi->lock);
 453                return -ENOSPC;
 454        }
 455
 456        bitmap_set(msi->bitmap, bit, nr_irqs);
 457
 458        for (i = 0; i < nr_irqs; i++) {
 459                irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
 460                                domain->host_data, handle_simple_irq,
 461                                NULL, NULL);
 462        }
 463        mutex_unlock(&msi->lock);
 464        return 0;
 465}
 466
/*
 * Release the contiguous hwirq range previously handed out by
 * nwl_irq_domain_alloc; the base hwirq is recovered from the first
 * virq's irq_data.
 */
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                        unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        struct nwl_msi *msi = &pcie->msi;

        mutex_lock(&msi->lock);
        bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
        mutex_unlock(&msi->lock);
}
 478
/* Allocation ops for the inner MSI hwirq domain */
static const struct irq_domain_ops dev_msi_domain_ops = {
        .alloc  = nwl_irq_domain_alloc,
        .free   = nwl_irq_domain_free,
};
 483
/*
 * Create the MSI IRQ domains: a linear inner hwirq domain plus the
 * PCI MSI domain stacked on top of it. Compiles to a no-op success
 * when CONFIG_PCI_MSI is disabled.
 *
 * Return: 0 on success, -ENOMEM when a domain cannot be created.
 */
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
        struct device *dev = pcie->dev;
        struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
        struct nwl_msi *msi = &pcie->msi;

        msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
                                                &dev_msi_domain_ops, pcie);
        if (!msi->dev_domain) {
                dev_err(dev, "failed to create dev IRQ domain\n");
                return -ENOMEM;
        }
        msi->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                    &nwl_msi_domain_info,
                                                    msi->dev_domain);
        if (!msi->msi_domain) {
                dev_err(dev, "failed to create msi IRQ domain\n");
                /* Unwind the inner domain so nothing leaks */
                irq_domain_remove(msi->dev_domain);
                return -ENOMEM;
        }
#endif
        return 0;
}
 508
 509static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 510{
 511        struct device *dev = pcie->dev;
 512        struct device_node *node = dev->of_node;
 513        struct device_node *legacy_intc_node;
 514
 515        legacy_intc_node = of_get_next_child(node, NULL);
 516        if (!legacy_intc_node) {
 517                dev_err(dev, "No legacy intc node found\n");
 518                return -EINVAL;
 519        }
 520
 521        pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
 522                                                        INTX_NUM,
 523                                                        &legacy_domain_ops,
 524                                                        pcie);
 525
 526        if (!pcie->legacy_irq_domain) {
 527                dev_err(dev, "failed to create IRQ domain\n");
 528                return -ENOMEM;
 529        }
 530
 531        nwl_pcie_init_msi_irq_domain(pcie);
 532        return 0;
 533}
 534
/*
 * Enable MSI support: allocate the hwirq bitmap, install the chained
 * handlers for the "msi0"/"msi1" parent interrupts, verify and enable
 * the MSI-II block, point it at the controller doorbell address, then
 * clear and unmask both 32-vector MSI status ranges.
 *
 * @bus is currently unused by the body.
 *
 * Return: 0 on success, negative errno on failure (bitmap is freed).
 */
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct nwl_msi *msi = &pcie->msi;
        unsigned long base;
        int ret;
        int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);

        mutex_init(&msi->lock);

        msi->bitmap = kzalloc(size, GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        /* Get msi_1 IRQ number */
        msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
        if (msi->irq_msi1 < 0) {
                /* NOTE(review): %d prints the errno here, not an IRQ number */
                dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1);
                ret = -EINVAL;
                goto err;
        }

        irq_set_chained_handler_and_data(msi->irq_msi1,
                                         nwl_pcie_msi_handler_high, pcie);

        /* Get msi_0 IRQ number */
        msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
        if (msi->irq_msi0 < 0) {
                dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0);
                ret = -EINVAL;
                goto err;
        }

        irq_set_chained_handler_and_data(msi->irq_msi0,
                                         nwl_pcie_msi_handler_low, pcie);

        /* Check for msii_present bit */
        ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
        if (!ret) {
                dev_err(dev, "MSI not present\n");
                ret = -EIO;
                goto err;
        }

        /* Enable MSII */
        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
                          MSII_ENABLE, I_MSII_CONTROL);

        /* Enable MSII status */
        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
                          MSII_STATUS_ENABLE, I_MSII_CONTROL);

        /* setup AFI/FPCI range: MSI doorbell address = controller base */
        base = pcie->phys_pcie_reg_base;
        nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
        nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

        /*
         * For high range MSI interrupts: disable, clear any pending,
         * and enable
         */
        nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie,  MSGF_MSI_STATUS_HI) &
                          MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);

        nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

        /*
         * For low range MSI interrupts: disable, clear any pending,
         * and enable
         */
        nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
                          MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);

        nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

        return 0;
err:
        kfree(msi->bitmap);
        msi->bitmap = NULL;
        return ret;
}
 621
 622static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
 623{
 624        struct device *dev = pcie->dev;
 625        struct platform_device *pdev = to_platform_device(dev);
 626        u32 breg_val, ecam_val, first_busno = 0;
 627        int err;
 628
 629        breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
 630        if (!breg_val) {
 631                dev_err(dev, "BREG is not present\n");
 632                return breg_val;
 633        }
 634
 635        /* Write bridge_off to breg base */
 636        nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
 637                          E_BREG_BASE_LO);
 638        nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
 639                          E_BREG_BASE_HI);
 640
 641        /* Enable BREG */
 642        nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
 643                          E_BREG_CONTROL);
 644
 645        /* Disable DMA channel registers */
 646        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
 647                          CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);
 648
 649        /* Enable Ingress subtractive decode translation */
 650        nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);
 651
 652        /* Enable msg filtering details */
 653        nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
 654                          BRCFG_PCIE_RX_MSG_FILTER);
 655
 656        err = nwl_wait_for_link(pcie);
 657        if (err)
 658                return err;
 659
 660        ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
 661        if (!ecam_val) {
 662                dev_err(dev, "ECAM is not present\n");
 663                return ecam_val;
 664        }
 665
 666        /* Enable ECAM */
 667        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
 668                          E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
 669
 670        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
 671                          (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
 672                          E_ECAM_CONTROL);
 673
 674        nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
 675                          E_ECAM_BASE_LO);
 676        nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
 677                          E_ECAM_BASE_HI);
 678
 679        /* Get bus range */
 680        ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
 681        pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
 682        /* Write primary, secondary and subordinate bus numbers */
 683        ecam_val = first_busno;
 684        ecam_val |= (first_busno + 1) << 8;
 685        ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
 686        writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
 687
 688        if (nwl_pcie_link_up(pcie))
 689                dev_info(dev, "Link is UP\n");
 690        else
 691                dev_info(dev, "Link is DOWN\n");
 692
 693        /* Get misc IRQ number */
 694        pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
 695        if (pcie->irq_misc < 0) {
 696                dev_err(dev, "failed to get misc IRQ %d\n",
 697                        pcie->irq_misc);
 698                return -EINVAL;
 699        }
 700
 701        err = devm_request_irq(dev, pcie->irq_misc,
 702                               nwl_pcie_misc_handler, IRQF_SHARED,
 703                               "nwl_pcie:misc", pcie);
 704        if (err) {
 705                dev_err(dev, "fail to register misc IRQ#%d\n",
 706                        pcie->irq_misc);
 707                return err;
 708        }
 709
 710        /* Disable all misc interrupts */
 711        nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 712
 713        /* Clear pending misc interrupts */
 714        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
 715                          MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);
 716
 717        /* Enable all misc interrupts */
 718        nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
 719
 720
 721        /* Disable all legacy interrupts */
 722        nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 723
 724        /* Clear pending legacy interrupts */
 725        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
 726                          MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
 727
 728        /* Enable all legacy interrupts */
 729        nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
 730
 731        /* Enable the bridge config interrupt */
 732        nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
 733                          BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);
 734
 735        return 0;
 736}
 737
/*
 * Parse DT resources: verify device_type, map the "breg", "pcireg"
 * and "cfg" register ranges (recording their physical bases), and
 * install the chained INTx handler on the "intx" interrupt.
 *
 * NOTE(review): the chained INTx handler is installed here, before
 * pcie->legacy_irq_domain is created in probe — confirm an early
 * interrupt cannot race the domain setup.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
                             struct platform_device *pdev)
{
        struct device *dev = pcie->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        const char *type;

        /* Check for device type */
        type = of_get_property(node, "device_type", NULL);
        if (!type || strcmp(type, "pci")) {
                dev_err(dev, "invalid \"device_type\" %s\n", type);
                return -EINVAL;
        }

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
        pcie->breg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pcie->breg_base))
                return PTR_ERR(pcie->breg_base);
        pcie->phys_breg_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
        pcie->pcireg_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pcie->pcireg_base))
                return PTR_ERR(pcie->pcireg_base);
        pcie->phys_pcie_reg_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
        pcie->ecam_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(pcie->ecam_base))
                return PTR_ERR(pcie->ecam_base);
        pcie->phys_ecam_base = res->start;

        /* Get intx IRQ number */
        pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
        if (pcie->irq_intx < 0) {
                dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx);
                return -EINVAL;
        }

        irq_set_chained_handler_and_data(pcie->irq_intx,
                                         nwl_pcie_leg_handler, pcie);

        return 0;
}
 783
/* DT match table */
static const struct of_device_id nwl_pcie_of_match[] = {
        { .compatible = "xlnx,nwl-pcie-2.11", },
        {}
};
 788
 789static int nwl_pcie_probe(struct platform_device *pdev)
 790{
 791        struct device *dev = &pdev->dev;
 792        struct device_node *node = dev->of_node;
 793        struct nwl_pcie *pcie;
 794        struct pci_bus *bus;
 795        struct pci_bus *child;
 796        int err;
 797        resource_size_t iobase = 0;
 798        LIST_HEAD(res);
 799
 800        pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
 801        if (!pcie)
 802                return -ENOMEM;
 803
 804        pcie->dev = dev;
 805        pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
 806
 807        err = nwl_pcie_parse_dt(pcie, pdev);
 808        if (err) {
 809                dev_err(dev, "Parsing DT failed\n");
 810                return err;
 811        }
 812
 813        pcie->clk = devm_clk_get(dev, NULL);
 814        if (IS_ERR(pcie->clk))
 815                return PTR_ERR(pcie->clk);
 816        clk_prepare_enable(pcie->clk);
 817
 818        err = nwl_pcie_bridge_init(pcie);
 819        if (err) {
 820                dev_err(dev, "HW Initialization failed\n");
 821                return err;
 822        }
 823
 824        err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
 825        if (err) {
 826                dev_err(dev, "Getting bridge resources failed\n");
 827                return err;
 828        }
 829
 830        err = devm_request_pci_bus_resources(dev, &res);
 831        if (err)
 832                goto error;
 833
 834        err = nwl_pcie_init_irq_domain(pcie);
 835        if (err) {
 836                dev_err(dev, "Failed creating IRQ Domain\n");
 837                goto error;
 838        }
 839
 840        bus = pci_create_root_bus(dev, pcie->root_busno,
 841                                  &nwl_pcie_ops, pcie, &res);
 842        if (!bus) {
 843                err = -ENOMEM;
 844                goto error;
 845        }
 846
 847        if (IS_ENABLED(CONFIG_PCI_MSI)) {
 848                err = nwl_pcie_enable_msi(pcie, bus);
 849                if (err < 0) {
 850                        dev_err(dev, "failed to enable MSI support: %d\n", err);
 851                        goto error;
 852                }
 853        }
 854        pci_scan_child_bus(bus);
 855        pci_assign_unassigned_bus_resources(bus);
 856        list_for_each_entry(child, &bus->children, node)
 857                pcie_bus_configure_settings(child);
 858        pci_bus_add_devices(bus);
 859        return 0;
 860
 861error:
 862        pci_free_resource_list(&res);
 863        return err;
 864}
 865
/* Built-in platform driver (no remove; unbinding suppressed) */
static struct platform_driver nwl_pcie_driver = {
        .driver = {
                .name = "nwl-pcie",
                .suppress_bind_attrs = true,
                .of_match_table = nwl_pcie_of_match,
        },
        .probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);
 875