linux/drivers/pci/host/pci-keystone-dw.c
/*
 * DesignWare application register space functions for Keystone PCI controller
 *
 * Copyright (C) 2013-2014 Texas Instruments, Ltd.
 *              http://www.ti.com
 *
 * Author: Murali Karicheri <m-karicheri2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"
#include "pci-keystone.h"

/* Application register defines */
#define LTSSM_EN_VAL                    1
#define LTSSM_STATE_MASK                0x1f
#define LTSSM_STATE_L0                  0x11
#define DBI_CS2_EN_VAL                  0x20
#define OB_XLAT_EN_VAL                  2

/* Application registers */
#define CMD_STATUS                      0x004
#define CFG_SETUP                       0x008
#define OB_SIZE                         0x030
#define CFG_PCIM_WIN_SZ_IDX             3
#define CFG_PCIM_WIN_CNT                32
#define SPACE0_REMOTE_CFG_OFFSET        0x1000
#define OB_OFFSET_INDEX(n)              (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n)                 (0x204 + (8 * (n)))

/* IRQ register defines */
#define IRQ_EOI                         0x050
#define IRQ_STATUS                      0x184
#define IRQ_ENABLE_SET                  0x188
#define IRQ_ENABLE_CLR                  0x18c

#define MSI_IRQ                         0x054
#define MSI0_IRQ_STATUS                 0x104
#define MSI0_IRQ_ENABLE_SET             0x108
#define MSI0_IRQ_ENABLE_CLR             0x10c
#define MSI_IRQ_OFFSET                  4

/* Error IRQ bits */
#define ERR_AER         BIT(5)  /* ECRC error */
#define ERR_AXI         BIT(4)  /* AXI tag lookup fatal error */
#define ERR_CORR        BIT(3)  /* Correctable error */
#define ERR_NONFATAL    BIT(2)  /* Non-fatal error */
#define ERR_FATAL       BIT(1)  /* Fatal error */
#define ERR_SYS         BIT(0)  /* System (fatal, non-fatal, or correctable) */
#define ERR_IRQ_ALL     (ERR_AER | ERR_AXI | ERR_CORR | \
                         ERR_NONFATAL | ERR_FATAL | ERR_SYS)
#define ERR_FATAL_IRQ   (ERR_FATAL | ERR_AXI)
#define ERR_IRQ_STATUS_RAW              0x1c0
#define ERR_IRQ_STATUS                  0x1c4
#define ERR_IRQ_ENABLE_SET              0x1c8
#define ERR_IRQ_ENABLE_CLR              0x1cc

/* Config space registers */
#define DEBUG0                          0x728

#define to_keystone_pcie(x)     container_of(x, struct keystone_pcie, pp)

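/*
 * The MAX_MSI_IRQS MSI vectors are spread across eight MSI_N register
 * sets: vector % 8 selects the register (reg_offset) and vector / 8 the
 * bit within it (bit_pos).  For example, MSI vector 25 is reported in
 * MSI1 at bit position 3.
 */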
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
                                             u32 *bit_pos)
{
        *reg_offset = offset % 8;
        *bit_pos = offset >> 3;
}

phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

        return ks_pcie->app.start + MSI_IRQ;
}

void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
        struct pcie_port *pp = &ks_pcie->pp;
        u32 pending, vector;
        int src, virq;

        pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));

        /*
         * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
         * 0-3 show vectors 1, 9, 17, 25; and so forth.
         */
        for (src = 0; src < 4; src++) {
                if (BIT(src) & pending) {
                        vector = offset + (src << 3);
                        virq = irq_linear_revmap(pp->irq_domain, vector);
                        dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
                                src, vector, virq);
                        generic_handle_irq(virq);
                }
        }
}

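/*
 * Ack an MSI by writing 1 to its (RW1C) bit in the MSI_N status register,
 * then signal EOI for that register's event.  The MSI events appear to
 * follow the four legacy INTx events in the EOI numbering, hence the
 * reg_offset + MSI_IRQ_OFFSET (4) value written to IRQ_EOI.
 */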
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
        u32 offset, reg_offset, bit_pos;
        struct keystone_pcie *ks_pcie;
        struct msi_desc *msi;
        struct pcie_port *pp;

        msi = irq_data_get_msi_desc(d);
        pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        ks_pcie = to_keystone_pcie(pp);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
        update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

        writel(BIT(bit_pos),
               ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
        writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}

void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
        u32 reg_offset, bit_pos;
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

        update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
        writel(BIT(bit_pos),
               ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
}

void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
        u32 reg_offset, bit_pos;
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

        update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
        writel(BIT(bit_pos),
               ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
}

static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
        struct msi_desc *msi;
        struct pcie_port *pp;
        u32 offset;

        msi = irq_data_get_msi_desc(d);
        pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

        /* Mask at the endpoint if per-vector masking (PVM) is implemented */
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                if (msi->msi_attrib.maskbit)
                        pci_msi_mask_irq(d);
        }

        ks_dw_pcie_msi_clear_irq(pp, offset);
}

static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
        struct msi_desc *msi;
        struct pcie_port *pp;
        u32 offset;

        msi = irq_data_get_msi_desc(d);
        pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);

        /* Unmask at the endpoint if per-vector masking (PVM) is implemented */
        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                if (msi->msi_attrib.maskbit)
                        pci_msi_unmask_irq(d);
        }

        ks_dw_pcie_msi_set_irq(pp, offset);
}

static struct irq_chip ks_dw_pcie_msi_irq_chip = {
        .name = "Keystone-PCIe-MSI-IRQ",
        .irq_ack = ks_dw_pcie_msi_irq_ack,
        .irq_mask = ks_dw_pcie_msi_irq_mask,
        .irq_unmask = ks_dw_pcie_msi_irq_unmask,
};

static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, domain->host_data);

        return 0;
}

static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
        .map = ks_dw_pcie_msi_map,
};

int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
        int i;

        pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
                                        MAX_MSI_IRQS,
                                        &ks_dw_pcie_msi_domain_ops,
                                        chip);
        if (!pp->irq_domain) {
                dev_err(pp->dev, "irq domain init failed\n");
                return -ENXIO;
        }

        for (i = 0; i < MAX_MSI_IRQS; i++)
                irq_create_mapping(pp->irq_domain, i);

        return 0;
}

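/*
 * Each of the MAX_LEGACY_IRQS INTx lines has its own bank of IRQ
 * registers spaced 0x10 apart, which is why the enable and status
 * accesses below index by (i << 4) / (offset << 4).
 */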
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
        int i;

        for (i = 0; i < MAX_LEGACY_IRQS; i++)
                writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
}

void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
        struct pcie_port *pp = &ks_pcie->pp;
        u32 pending;
        int virq;

        pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));

        if (BIT(0) & pending) {
                virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
                dev_dbg(pp->dev, "irq: irq_offset %d, virq %d\n", offset,
                        virq);
                generic_handle_irq(virq);
        }

        /* EOI the INTx interrupt */
        writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}

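/*
 * ERR_IRQ_STATUS_RAW presumably latches error events regardless of the
 * enable bits, so the handler masks it with ERR_IRQ_ALL; writing the bits
 * back to ERR_IRQ_STATUS acks them (RW1C, as noted in the handler).
 */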
void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
{
        writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
}

irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
                                        void __iomem *reg_base)
{
        u32 status;

        status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
        if (!status)
                return IRQ_NONE;

        if (status & ERR_FATAL_IRQ)
                dev_err(dev, "fatal error (status %#010x)\n", status);

        /* Ack the IRQ; status bits are RW1C */
        writel(status, reg_base + ERR_IRQ_STATUS);
        return IRQ_HANDLED;
}

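/*
 * The legacy irq_chip callbacks below are empty on purpose: the interrupt
 * is acked at the application registers by the EOI write in
 * ks_dw_pcie_handle_legacy_irq(), so there is nothing to do per-IRQ here.
 */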
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}

static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}

static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
        .name = "Keystone-PCI-Legacy-IRQ",
        .irq_ack = ks_dw_pcie_ack_legacy_irq,
        .irq_mask = ks_dw_pcie_mask_legacy_irq,
        .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};

static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hw_irq)
{
        irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
                                 handle_level_irq);
        irq_set_chip_data(irq, d->host_data);

        return 0;
}

static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
        .map = ks_dw_pcie_init_legacy_irq_map,
        .xlate = irq_domain_xlate_onetwocell,
};

/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 * @reg_virt: application register base address
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
        u32 val;

        writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
               reg_virt + CMD_STATUS);

        do {
                val = readl(reg_virt + CMD_STATUS);
        } while (!(val & DBI_CS2_EN_VAL));
}

/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 * @reg_virt: application register base address
 *
 * Since modification of dbi_cs2 involves a different clock domain, read the
 * status back to ensure the transition is complete.
 */
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
        u32 val;

        writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
               reg_virt + CMD_STATUS);

        do {
                val = readl(reg_virt + CMD_STATUS);
        } while (val & DBI_CS2_EN_VAL);
}

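/*
 * Outbound window math for ks_dw_pcie_setup_rc_app_regs() below:
 * CFG_PCIM_WIN_SZ_IDX of 3 selects 2^3 = 8 MB windows, so the
 * CFG_PCIM_WIN_CNT (32) windows cover up to 256 MB of outbound PCI
 * memory space with a direct 1:1 address mapping.
 */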
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
        struct pcie_port *pp = &ks_pcie->pp;
        u32 start = pp->mem->start, end = pp->mem->end;
        int i, tr_size;

        /* Disable BARs for inbound access */
        ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
        writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
        writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
        ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

        /* Set the outbound translation window size */
        writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);

        tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

        /* Using direct 1:1 mapping of RC <-> PCI memory space */
        for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
                writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
                writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
                start += tr_size;
        }

        /* Enable OB translation */
        writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
               ks_pcie->va_app_base + CMD_STATUS);
}

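/*
 * CFG_SETUP layout as used by ks_pcie_cfg_setup(): bits [23:16] hold the
 * bus number, [12:8] the device, [2:0] the function, and bit 24 selects a
 * TYPE 1 access.  For example, a TYPE 1 config access to bus 2, device 3,
 * function 1 writes the value 0x01020301.
 */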
/**
 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 *
 * @ks_pcie: ptr to keystone_pcie structure
 * @bus: Bus number the device resides on
 * @devfn: encoded device and function number
 *
 * Forms and returns the address of configuration space mapped in PCIESS
 * address space 0.  Also configures CFG_SETUP for remote configuration space
 * access.
 *
 * The address space has two regions for configuration access - local and
 * remote.  The local region is used for bus 0 (the RC itself); the remote
 * region, selected through CFG_SETUP, is used for everything else.  Devices
 * on bus 1 sit on our logical secondary bus and get TYPE 0 accesses, while
 * buses beyond 1 get TYPE 1 accesses.  CFG_SETUP is needed only for remote
 * configuration access.
 */
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
                                       unsigned int devfn)
{
        u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
        struct pcie_port *pp = &ks_pcie->pp;
        u32 regval;

        if (bus == 0)
                return pp->dbi_base;

        regval = (bus << 16) | (device << 8) | function;

        /*
         * Bus 1 is our virtual secondary bus, so it only ever needs TYPE 0
         * accesses; set the TYPE 1 bit for any bus beyond it.
         */
        if (bus != 1)
                regval |= BIT(24);

        writel(regval, ks_pcie->va_app_base + CFG_SETUP);
        return pp->va_cfg0_base;
}

int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                             unsigned int devfn, int where, int size, u32 *val)
{
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
        u8 bus_num = bus->number;
        void __iomem *addr;

        addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

        return dw_pcie_cfg_read(addr + where, size, val);
}

int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                             unsigned int devfn, int where, int size, u32 val)
{
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
        u8 bus_num = bus->number;
        void __iomem *addr;

        addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);

        return dw_pcie_cfg_write(addr + where, size, val);
}

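/*
 * While dbi_cs2 is set, BAR writes land in the overlaid BAR mask
 * registers: in ks_dw_pcie_v3_65_scan_bus() below, writel(1, ...) enables
 * BAR0 and writel(SZ_4K - 1, ...) sets its size to 4K.  Only after
 * clearing dbi_cs2 does the final writel() program the actual BAR0
 * address.
 */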
/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets up BAR0 to enable inbound access to the MSI_IRQ register.
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

        /* Configure BAR0 via the overlaid mask registers */
        ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);

        /* Enable BAR0 and set its size to 4K */
        writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
        writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);

        ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

        /*
         * For BAR0, just setting bus address for inbound writes (MSI) should
         * be sufficient.  Use physical address to avoid any conflicts.
         */
        writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}

/**
 * ks_dw_pcie_link_up() - Check if the link is up
 */
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
        u32 val = readl(pp->dbi_base + DEBUG0);

        return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}

void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
        u32 val;

        /* Disable link training */
        val = readl(ks_pcie->va_app_base + CMD_STATUS);
        val &= ~LTSSM_EN_VAL;
        writel(val, ks_pcie->va_app_base + CMD_STATUS);

        /* Initiate link training */
        val = readl(ks_pcie->va_app_base + CMD_STATUS);
        writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
}

/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize the legacy irq domain
 * and call dw_pcie_host_init() to initialize the Keystone
 * PCI host controller.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
                                struct device_node *msi_intc_np)
{
        struct pcie_port *pp = &ks_pcie->pp;
        struct platform_device *pdev = to_platform_device(pp->dev);
        struct resource *res;

        /* Index 0 is the config reg. space address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->dbi_base = devm_ioremap_resource(pp->dev, res);
        if (IS_ERR(pp->dbi_base))
                return PTR_ERR(pp->dbi_base);

        /*
         * Point both cfg base pointers at the same remote window; they are
         * used by the rd/wr_other_conf functions.
         */
        pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
        pp->va_cfg1_base = pp->va_cfg0_base;

        /* Index 1 is the application reg. space address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
        if (IS_ERR(ks_pcie->va_app_base))
                return PTR_ERR(ks_pcie->va_app_base);

        ks_pcie->app = *res;

        /* Create legacy IRQ domain */
        ks_pcie->legacy_irq_domain =
                        irq_domain_add_linear(ks_pcie->legacy_intc_np,
                                        MAX_LEGACY_IRQS,
                                        &ks_dw_pcie_legacy_irq_domain_ops,
                                        NULL);
        if (!ks_pcie->legacy_irq_domain) {
                dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
                return -EINVAL;
        }

        return dw_pcie_host_init(pp);
}