linux/drivers/pci/controller/pcie-mediatek-gen3.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek PCIe host controller driver.
 *
 * Copyright (c) 2020 MediaTek Inc.
 * Author: Jianjun Wang <jianjun.wang@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "../pci.h"
#define PCIE_SETTING_REG                0x80
#define PCIE_PCI_IDS_1                  0x9c
#define PCI_CLASS(class)                ((class) << 8)
#define PCIE_RC_MODE                    BIT(0)

#define PCIE_CFGNUM_REG                 0x140
#define PCIE_CFG_DEVFN(devfn)           ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)               (((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)         (((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN          BIT(20)
#define PCIE_CFG_OFFSET_ADDR            0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
        (PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG               0x148
#define PCIE_MAC_RSTB                   BIT(0)
#define PCIE_PHY_RSTB                   BIT(1)
#define PCIE_BRG_RSTB                   BIT(2)
#define PCIE_PE_RSTB                    BIT(3)

#define PCIE_LTSSM_STATUS_REG           0x150
#define PCIE_LTSSM_STATE_MASK           GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)           (((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE        0x14

#define PCIE_LINK_STATUS_REG            0x154
#define PCIE_PORT_LINKUP                BIT(8)

#define PCIE_MSI_SET_NUM                8
#define PCIE_MSI_IRQS_PER_SET           32
#define PCIE_MSI_IRQS_NUM \
        (PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG             0x180
#define PCIE_MSI_ENABLE                 GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT                  8
#define PCIE_INTX_SHIFT                 24
#define PCIE_INTX_ENABLE \
        GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG             0x184
#define PCIE_MSI_SET_ENABLE_REG         0x190
#define PCIE_MSI_SET_ENABLE             GENMASK(PCIE_MSI_SET_NUM - 1, 0)

#define PCIE_MSI_SET_BASE_REG           0xc00
#define PCIE_MSI_SET_OFFSET             0x10
#define PCIE_MSI_SET_STATUS_OFFSET      0x04
#define PCIE_MSI_SET_ENABLE_OFFSET      0x08

#define PCIE_MSI_SET_ADDR_HI_BASE       0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET     0x04

#define PCIE_ICMD_PM_REG                0x198
#define PCIE_TURN_OFF_LINK              BIT(4)

#define PCIE_MISC_CTRL_REG              0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ     BIT(1)

#define PCIE_TRANS_TABLE_BASE_REG       0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET    0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET   0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET   0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET      0x10
#define PCIE_ATR_TLB_SET_OFFSET         0x20

#define PCIE_MAX_TRANS_TABLES           8
#define PCIE_ATR_EN                     BIT(0)
#define PCIE_ATR_SIZE(size) \
        (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)                 ((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM               PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO                PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)         (((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM           PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO            PCIE_ATR_TLP_TYPE(2)

/**
 * struct mtk_msi_set - MSI information for each set
 * @base: IO mapped register base
 * @msg_addr: MSI message address
 * @saved_irq_state: IRQ enable state saved at suspend time
 */
struct mtk_msi_set {
        void __iomem *base;
        phys_addr_t msg_addr;
        u32 saved_irq_state;
};

/**
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @base: IO mapped register base
 * @reg_base: physical register base
 * @mac_reset: MAC reset control
 * @phy_reset: PHY reset control
 * @phy: PHY controller block
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @irq: PCIe controller interrupt number
 * @saved_irq_state: IRQ enable state saved at suspend time
 * @irq_lock: lock protecting IRQ register access
 * @intx_domain: legacy INTx IRQ domain
 * @msi_domain: MSI IRQ domain
 * @msi_bottom_domain: MSI IRQ bottom domain
 * @msi_sets: MSI sets information
 * @lock: lock protecting IRQ bit map
 * @msi_irq_in_use: bit map for assigned MSI IRQ
 */
struct mtk_gen3_pcie {
        struct device *dev;
        void __iomem *base;
        phys_addr_t reg_base;
        struct reset_control *mac_reset;
        struct reset_control *phy_reset;
        struct phy *phy;
        struct clk_bulk_data *clks;
        int num_clks;

        int irq;
        u32 saved_irq_state;
        raw_spinlock_t irq_lock;
        struct irq_domain *intx_domain;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_bottom_domain;
        struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
        struct mutex lock;
        DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
};

/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
 */
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
                                        int where, int size)
{
        struct mtk_gen3_pcie *pcie = bus->sysdata;
        int bytes;
        u32 val;

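        /*
         * Build the byte-enable mask: GENMASK(size - 1, 0) yields @size
         * consecutive ones, which are then shifted to the byte lanes
         * selected by the offset within the 32-bit word. For example, a
         * 2-byte access at offset 0x2 yields byte enables 0b1100.
         */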
        bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

        val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
              PCIE_CFG_HEADER(bus->number, devfn);

        writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}

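/*
 * Configuration transactions are issued through a fixed window starting at
 * PCIE_CFG_OFFSET_ADDR in the MAC register space; the target bus and devfn
 * are selected beforehand via PCIE_CFGNUM_REG (see
 * mtk_pcie_config_tlp_header()), so map_bus() only has to add the register
 * offset.
 */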
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
                                      int where)
{
        struct mtk_gen3_pcie *pcie = bus->sysdata;

        return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}

static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 *val)
{
        mtk_pcie_config_tlp_header(bus, devfn, where, size);

        return pci_generic_config_read32(bus, devfn, where, size, val);
}

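/*
 * The hardware performs config writes as full 32-bit accesses qualified by
 * the byte enables programmed in PCIE_CFGNUM_REG, so sub-word data must
 * first be shifted into its byte lane and the generic accessor is always
 * called with size 4.
 */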
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        mtk_pcie_config_tlp_header(bus, devfn, where, size);

        if (size <= 2)
                val <<= (where & 0x3) * 8;

        return pci_generic_config_write32(bus, devfn, where, 4, val);
}

static struct pci_ops mtk_pcie_ops = {
        .map_bus = mtk_pcie_map_bus,
        .read  = mtk_pcie_config_read,
        .write = mtk_pcie_config_write,
};

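/*
 * Program one of the PCIE_MAX_TRANS_TABLES address translation windows to
 * forward a CPU address range to a PCI address range. The size field encodes
 * a power-of-two exponent (derived here as fls(size) - 1), so each window
 * covers a power-of-two sized range.
 */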
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
                                    resource_size_t cpu_addr,
                                    resource_size_t pci_addr,
                                    resource_size_t size,
                                    unsigned long type, int num)
{
        void __iomem *table;
        u32 val;

        if (num >= PCIE_MAX_TRANS_TABLES) {
                dev_err(pcie->dev, "not enough translation tables for addr: %#llx, limited to [%d]\n",
                        (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
                return -ENODEV;
        }

        table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
                num * PCIE_ATR_TLB_SET_OFFSET;

        writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
                       table);
        writel_relaxed(upper_32_bits(cpu_addr),
                       table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
        writel_relaxed(lower_32_bits(pci_addr),
                       table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
        writel_relaxed(upper_32_bits(pci_addr),
                       table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

        if (type == IORESOURCE_IO)
                val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
        else
                val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;

        writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

        return 0;
}

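/*
 * Each of the PCIE_MSI_SET_NUM MSI sets captures inbound writes to its own
 * doorbell address (the physical address of the set's register block); the
 * write data selects one of the 32 vectors in that set. Program the capture
 * addresses and enable all sets here; individual vectors are unmasked later
 * through the per-set enable register.
 */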
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
        int i;
        u32 val;

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
                                i * PCIE_MSI_SET_OFFSET;
                msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
                                    i * PCIE_MSI_SET_OFFSET;

                /* Configure the MSI capture address */
                writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
                writel_relaxed(upper_32_bits(msi_set->msg_addr),
                               pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
                               i * PCIE_MSI_SET_ADDR_HI_OFFSET);
        }

        val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
        val |= PCIE_MSI_SET_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val |= PCIE_MSI_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}

static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
        struct resource_entry *entry;
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
        unsigned int table_index = 0;
        int err;
        u32 val;

        /* Set as RC mode */
        val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
        val |= PCIE_RC_MODE;
        writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

        /* Set class code */
        val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
        val &= ~GENMASK(31, 8);
        val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
        writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

        /* Mask all INTx interrupts */
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val &= ~PCIE_INTX_ENABLE;
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

        /* Disable DVFSRC voltage request */
        val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
        val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
        writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

        /* Assert all reset signals */
        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
        val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        /*
         * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
         * and 2.2.1 (Initial Power-Up (G3 to S0)).
         * The deassertion of PERST# should be delayed 100ms (TPVPERL)
         * for the power and clock to become stable.
         */
        msleep(100);

        /* De-assert reset signals */
        val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        /* Check if the link is up or not */
        err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
                                 !!(val & PCIE_PORT_LINKUP), 20,
                                 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
        if (err) {
                val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
                dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
                return err;
        }

        mtk_pcie_enable_msi(pcie);

        /* Set PCIe translation windows */
        resource_list_for_each_entry(entry, &host->windows) {
                struct resource *res = entry->res;
                unsigned long type = resource_type(res);
                resource_size_t cpu_addr;
                resource_size_t pci_addr;
                resource_size_t size;
                const char *range_type;

                if (type == IORESOURCE_IO) {
                        cpu_addr = pci_pio_to_address(res->start);
                        range_type = "IO";
                } else if (type == IORESOURCE_MEM) {
                        cpu_addr = res->start;
                        range_type = "MEM";
                } else {
                        continue;
                }

                pci_addr = res->start - entry->offset;
                size = resource_size(res);
                err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
                                               type, table_index);
                if (err)
                        return err;

                dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
                        range_type, table_index, (unsigned long long)cpu_addr,
                        (unsigned long long)pci_addr, (unsigned long long)size);

                table_index++;
        }

        return 0;
}

static int mtk_pcie_set_affinity(struct irq_data *data,
                                 const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

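/*
 * Top-level MSI irq_chip used by the PCI/MSI domain: mask and unmask both
 * the MSI capability on the endpoint (pci_msi_mask_irq/pci_msi_unmask_irq)
 * and the corresponding interrupt in the parent bottom domain, which owns
 * the per-set enable registers.
 */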
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
        pci_msi_mask_irq(data);
        irq_chip_mask_parent(data);
}

static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
        pci_msi_unmask_irq(data);
        irq_chip_unmask_parent(data);
}

static struct irq_chip mtk_msi_irq_chip = {
        .irq_ack = irq_chip_ack_parent,
        .irq_mask = mtk_pcie_msi_irq_mask,
        .irq_unmask = mtk_pcie_msi_irq_unmask,
        .name = "MSI",
};

static struct msi_domain_info mtk_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &mtk_msi_irq_chip,
};

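/*
 * The MSI message address is the doorbell (capture) address of the set that
 * owns this vector, and the message data is the vector index within that
 * set (0..PCIE_MSI_IRQS_PER_SET - 1).
 */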
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        msg->address_hi = upper_32_bits(msi_set->msg_addr);
        msg->address_lo = lower_32_bits(msi_set->msg_addr);
        msg->data = hwirq;
        dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
                hwirq, msg->address_hi, msg->address_lo, msg->data);
}

static void mtk_msi_bottom_irq_ack(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        unsigned long hwirq;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
}

static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq, flags;
        u32 val;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        val &= ~BIT(hwirq);
        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
        struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
        struct mtk_gen3_pcie *pcie = data->domain->host_data;
        unsigned long hwirq, flags;
        u32 val;

        hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        val |= BIT(hwirq);
        writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static struct irq_chip mtk_msi_bottom_irq_chip = {
        .irq_ack                = mtk_msi_bottom_irq_ack,
        .irq_mask               = mtk_msi_bottom_irq_mask,
        .irq_unmask             = mtk_msi_bottom_irq_unmask,
        .irq_compose_msi_msg    = mtk_compose_msi_msg,
        .irq_set_affinity       = mtk_pcie_set_affinity,
        .name                   = "MSI",
};

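/*
 * Allocate hwirqs as a naturally aligned, power-of-two sized region of the
 * global bitmap. Since multi-MSI allocations are at most 32 vectors and the
 * region is aligned to its size, all vectors of one allocation fall within a
 * single MSI set, so one chip_data (the owning set) covers them all.
 */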
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
                                       unsigned int virq, unsigned int nr_irqs,
                                       void *arg)
{
        struct mtk_gen3_pcie *pcie = domain->host_data;
        struct mtk_msi_set *msi_set;
        int i, hwirq, set_idx;

        mutex_lock(&pcie->lock);

        hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
                                        order_base_2(nr_irqs));

        mutex_unlock(&pcie->lock);

        if (hwirq < 0)
                return -ENOSPC;

        set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
        msi_set = &pcie->msi_sets[set_idx];

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &mtk_msi_bottom_irq_chip, msi_set,
                                    handle_edge_irq, NULL, NULL);

        return 0;
}

static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
                                       unsigned int virq, unsigned int nr_irqs)
{
        struct mtk_gen3_pcie *pcie = domain->host_data;
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);

        mutex_lock(&pcie->lock);

        bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
                              order_base_2(nr_irqs));

        mutex_unlock(&pcie->lock);

        irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
        .alloc = mtk_msi_bottom_domain_alloc,
        .free = mtk_msi_bottom_domain_free,
};

static void mtk_intx_mask(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

static void mtk_intx_unmask(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&pcie->irq_lock, flags);
        val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
        val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
        writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
        raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}

/**
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * @data: pointer to chip specific data
 *
 * As an emulated level IRQ, its interrupt status will remain asserted
 * until the corresponding de-assert message is received; hence the
 * status can only be cleared after the interrupt has been serviced.
 */
static void mtk_intx_eoi(struct irq_data *data)
{
        struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long hwirq;

        hwirq = data->hwirq + PCIE_INTX_SHIFT;
        writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}

static struct irq_chip mtk_intx_irq_chip = {
        .irq_mask               = mtk_intx_mask,
        .irq_unmask             = mtk_intx_unmask,
        .irq_eoi                = mtk_intx_eoi,
        .irq_set_affinity       = mtk_pcie_set_affinity,
        .name                   = "INTx",
};

static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
                             irq_hw_number_t hwirq)
{
        irq_set_chip_data(irq, domain->host_data);
        irq_set_chip_and_handler_name(irq, &mtk_intx_irq_chip,
                                      handle_fasteoi_irq, "INTx");
        return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
        .map = mtk_pcie_intx_map,
};

static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct device_node *intc_node, *node = dev->of_node;
        int ret;

        raw_spin_lock_init(&pcie->irq_lock);

        /* Setup INTx */
        intc_node = of_get_child_by_name(node, "interrupt-controller");
        if (!intc_node) {
                dev_err(dev, "missing interrupt-controller node\n");
                return -ENODEV;
        }

        pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
                                                  &intx_domain_ops, pcie);
        if (!pcie->intx_domain) {
                dev_err(dev, "failed to create INTx IRQ domain\n");
                return -ENODEV;
        }

        /* Setup MSI */
        mutex_init(&pcie->lock);

        pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
                                  &mtk_msi_bottom_domain_ops, pcie);
        if (!pcie->msi_bottom_domain) {
                dev_err(dev, "failed to create MSI bottom domain\n");
                ret = -ENODEV;
                goto err_msi_bottom_domain;
        }

        pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
                                                     &mtk_msi_domain_info,
                                                     pcie->msi_bottom_domain);
        if (!pcie->msi_domain) {
                dev_err(dev, "failed to create MSI domain\n");
                ret = -ENODEV;
                goto err_msi_domain;
        }

        return 0;

err_msi_domain:
        irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
        irq_domain_remove(pcie->intx_domain);

        return ret;
}

static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
        irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);

        if (pcie->intx_domain)
                irq_domain_remove(pcie->intx_domain);

        if (pcie->msi_domain)
                irq_domain_remove(pcie->msi_domain);

        if (pcie->msi_bottom_domain)
                irq_domain_remove(pcie->msi_bottom_domain);

        irq_dispose_mapping(pcie->irq);
}

static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
        struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
        unsigned long msi_enable, msi_status;
        irq_hw_number_t bit, hwirq;

        msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

        do {
                msi_status = readl_relaxed(msi_set->base +
                                           PCIE_MSI_SET_STATUS_OFFSET);
                msi_status &= msi_enable;
                if (!msi_status)
                        break;

                for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
                        hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
                        generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
                }
        } while (true);
}

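/*
 * Chained handler for the single controller interrupt. The shared status
 * register reports one bit per MSI set (bits 8..15) and one bit per INTx
 * line (bits 24..27); INTx bits are translated into the INTx domain, while
 * each pending MSI set is drained via mtk_pcie_msi_handler() before its
 * summary bit is cleared.
 */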
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
        struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
        struct irq_chip *irqchip = irq_desc_get_chip(desc);
        unsigned long status;
        irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

        chained_irq_enter(irqchip, desc);

        status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
        for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
                              PCIE_INTX_SHIFT)
                generic_handle_domain_irq(pcie->intx_domain,
                                          irq_bit - PCIE_INTX_SHIFT);

        irq_bit = PCIE_MSI_SHIFT;
        for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
                              PCIE_MSI_SHIFT) {
                mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);

                writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
        }

        chained_irq_exit(irqchip, desc);
}

static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int err;

        err = mtk_pcie_init_irq_domains(pcie);
        if (err)
                return err;

        pcie->irq = platform_get_irq(pdev, 0);
        if (pcie->irq < 0)
                return pcie->irq;

        irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);

        return 0;
}

static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource *regs;
        int ret;

        regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
        if (!regs)
                return -EINVAL;
        pcie->base = devm_ioremap_resource(dev, regs);
        if (IS_ERR(pcie->base)) {
                dev_err(dev, "failed to map register base\n");
                return PTR_ERR(pcie->base);
        }

        pcie->reg_base = regs->start;

        pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
        if (IS_ERR(pcie->phy_reset)) {
                ret = PTR_ERR(pcie->phy_reset);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get PHY reset\n");

                return ret;
        }

        pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
        if (IS_ERR(pcie->mac_reset)) {
                ret = PTR_ERR(pcie->mac_reset);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get MAC reset\n");

                return ret;
        }

        pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
        if (IS_ERR(pcie->phy)) {
                ret = PTR_ERR(pcie->phy);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get PHY\n");

                return ret;
        }

        pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
        if (pcie->num_clks < 0) {
                dev_err(dev, "failed to get clocks\n");
                return pcie->num_clks;
        }

        return 0;
}

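/*
 * Bring the port out of reset in dependency order: release the PHY reset and
 * power on the PHY first, then release the MAC reset, enable runtime PM and
 * finally turn on the bus clocks. mtk_pcie_power_down() undoes these steps
 * in reverse.
 */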
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
        struct device *dev = pcie->dev;
        int err;

        /* PHY power on and enable pipe clock */
        reset_control_deassert(pcie->phy_reset);

        err = phy_init(pcie->phy);
        if (err) {
                dev_err(dev, "failed to initialize PHY\n");
                goto err_phy_init;
        }

        err = phy_power_on(pcie->phy);
        if (err) {
                dev_err(dev, "failed to power on PHY\n");
                goto err_phy_on;
        }

        /* MAC power on and enable transaction layer clocks */
        reset_control_deassert(pcie->mac_reset);

        pm_runtime_enable(dev);
        pm_runtime_get_sync(dev);

        err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
        if (err) {
                dev_err(dev, "failed to enable clocks\n");
                goto err_clk_init;
        }

        return 0;

err_clk_init:
        pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
        reset_control_assert(pcie->mac_reset);
        phy_power_off(pcie->phy);
err_phy_on:
        phy_exit(pcie->phy);
err_phy_init:
        reset_control_assert(pcie->phy_reset);

        return err;
}

static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
        clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);

        pm_runtime_put_sync(pcie->dev);
        pm_runtime_disable(pcie->dev);
        reset_control_assert(pcie->mac_reset);

        phy_power_off(pcie->phy);
        phy_exit(pcie->phy);
        reset_control_assert(pcie->phy_reset);
}

static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
        int err;

        err = mtk_pcie_parse_port(pcie);
        if (err)
                return err;

        /*
         * The controller may have been left out of reset by the bootloader,
         * so make sure that we get a clean start by asserting resets here.
         */
        reset_control_assert(pcie->phy_reset);
        reset_control_assert(pcie->mac_reset);
        usleep_range(10, 20);

        /* Don't touch the hardware registers before power up */
        err = mtk_pcie_power_up(pcie);
        if (err)
                return err;

        /* Try link up */
        err = mtk_pcie_startup_port(pcie);
        if (err)
                goto err_setup;

        err = mtk_pcie_setup_irq(pcie);
        if (err)
                goto err_setup;

        return 0;

err_setup:
        mtk_pcie_power_down(pcie);

        return err;
}

static int mtk_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct mtk_gen3_pcie *pcie;
        struct pci_host_bridge *host;
        int err;

        host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
        if (!host)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(host);

        pcie->dev = dev;
        platform_set_drvdata(pdev, pcie);

        err = mtk_pcie_setup(pcie);
        if (err)
                return err;

        host->ops = &mtk_pcie_ops;
        host->sysdata = pcie;

        err = pci_host_probe(host);
        if (err) {
                mtk_pcie_irq_teardown(pcie);
                mtk_pcie_power_down(pcie);
                return err;
        }

        return 0;
}

static int mtk_pcie_remove(struct platform_device *pdev)
{
        struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
        struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);

        pci_lock_rescan_remove();
        pci_stop_root_bus(host->bus);
        pci_remove_root_bus(host->bus);
        pci_unlock_rescan_remove();

        mtk_pcie_irq_teardown(pcie);
        mtk_pcie_power_down(pcie);

        return 0;
}

static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
        int i;

        raw_spin_lock(&pcie->irq_lock);

        pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                msi_set->saved_irq_state = readl_relaxed(msi_set->base +
                                           PCIE_MSI_SET_ENABLE_OFFSET);
        }

        raw_spin_unlock(&pcie->irq_lock);
}

static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
        int i;

        raw_spin_lock(&pcie->irq_lock);

        writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);

        for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
                struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

                writel_relaxed(msi_set->saved_irq_state,
                               msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
        }

        raw_spin_unlock(&pcie->irq_lock);
}

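/*
 * Request the link to enter L2. Setting PCIE_TURN_OFF_LINK in the ICMD_PM
 * register starts the power-management handshake (presumably PME_Turn_Off /
 * PME_TO_Ack); the LTSSM is then polled until it reports the L2 idle state,
 * with a 50ms timeout.
 */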
static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
        u32 val;

        val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
        val |= PCIE_TURN_OFF_LINK;
        writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);

        /* Check that the link is in L2 */
        return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
                                  (PCIE_LTSSM_STATE(val) ==
                                   PCIE_LTSSM_STATE_L2_IDLE), 20,
                                   50 * USEC_PER_MSEC);
}

static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
        struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
        int err;
        u32 val;

        /* Trigger link to L2 state */
        err = mtk_pcie_turn_off_link(pcie);
        if (err) {
                dev_err(pcie->dev, "cannot enter L2 state\n");
                return err;
        }

        /* Pull down the PERST# pin */
        val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
        val |= PCIE_PE_RSTB;
        writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

        dev_dbg(pcie->dev, "entered L2 state successfully\n");

        mtk_pcie_irq_save(pcie);
        mtk_pcie_power_down(pcie);

        return 0;
}

static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
        struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
        int err;

        err = mtk_pcie_power_up(pcie);
        if (err)
                return err;

        err = mtk_pcie_startup_port(pcie);
        if (err) {
                mtk_pcie_power_down(pcie);
                return err;
        }

        mtk_pcie_irq_restore(pcie);

        return 0;
}

static const struct dev_pm_ops mtk_pcie_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
                                      mtk_pcie_resume_noirq)
};

static const struct of_device_id mtk_pcie_of_match[] = {
        { .compatible = "mediatek,mt8192-pcie" },
        {},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);

static struct platform_driver mtk_pcie_driver = {
        .probe = mtk_pcie_probe,
        .remove = mtk_pcie_remove,
        .driver = {
                .name = "mtk-pcie",
                .of_match_table = mtk_pcie_of_match,
                .pm = &mtk_pcie_pm_ops,
        },
};

module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2");