/* linux/drivers/pci/host/pci-keystone-dw.c */
   1/*
   2 * Designware application register space functions for Keystone PCI controller
   3 *
   4 * Copyright (C) 2013-2014 Texas Instruments., Ltd.
   5 *              http://www.ti.com
   6 *
   7 * Author: Murali Karicheri <m-karicheri2@ti.com>
   8 *
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/irq.h>
  16#include <linux/irqdomain.h>
  17#include <linux/module.h>
  18#include <linux/of.h>
  19#include <linux/of_pci.h>
  20#include <linux/pci.h>
  21#include <linux/platform_device.h>
  22
  23#include "pcie-designware.h"
  24#include "pci-keystone.h"
  25
  26/* Application register defines */
  27#define LTSSM_EN_VAL                    1
  28#define LTSSM_STATE_MASK                0x1f
  29#define LTSSM_STATE_L0                  0x11
  30#define DBI_CS2_EN_VAL                  0x20
  31#define OB_XLAT_EN_VAL                  2
  32
  33/* Application registers */
  34#define CMD_STATUS                      0x004
  35#define CFG_SETUP                       0x008
  36#define OB_SIZE                         0x030
  37#define CFG_PCIM_WIN_SZ_IDX             3
  38#define CFG_PCIM_WIN_CNT                32
  39#define SPACE0_REMOTE_CFG_OFFSET        0x1000
  40#define OB_OFFSET_INDEX(n)              (0x200 + (8 * n))
  41#define OB_OFFSET_HI(n)                 (0x204 + (8 * n))
  42
  43/* IRQ register defines */
  44#define IRQ_EOI                         0x050
  45#define IRQ_STATUS                      0x184
  46#define IRQ_ENABLE_SET                  0x188
  47#define IRQ_ENABLE_CLR                  0x18c
  48
  49#define MSI_IRQ                         0x054
  50#define MSI0_IRQ_STATUS                 0x104
  51#define MSI0_IRQ_ENABLE_SET             0x108
  52#define MSI0_IRQ_ENABLE_CLR             0x10c
  53#define IRQ_STATUS                      0x184
  54#define MSI_IRQ_OFFSET                  4
  55
  56/* Config space registers */
  57#define DEBUG0                          0x728
  58
  59#define to_keystone_pcie(x)     container_of(x, struct keystone_pcie, pp)
  60
  61static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
  62                                             u32 *bit_pos)
  63{
  64        *reg_offset = offset % 8;
  65        *bit_pos = offset >> 3;
  66}
  67
  68phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
  69{
  70        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
  71
  72        return ks_pcie->app.start + MSI_IRQ;
  73}
  74
  75void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
  76{
  77        struct pcie_port *pp = &ks_pcie->pp;
  78        u32 pending, vector;
  79        int src, virq;
  80
  81        pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
  82
  83        /*
  84         * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
  85         * shows 1, 9, 17, 25 and so forth
  86         */
  87        for (src = 0; src < 4; src++) {
  88                if (BIT(src) & pending) {
  89                        vector = offset + (src << 3);
  90                        virq = irq_linear_revmap(pp->irq_domain, vector);
  91                        dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
  92                                src, vector, virq);
  93                        generic_handle_irq(virq);
  94                }
  95        }
  96}
  97
/*
 * ACK an MSI at the host controller: clear (W1C) its status bit in the
 * matching MSI_N status register, then EOI the corresponding host
 * interrupt line so further MSIs can be raised on it.
 */
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
	u32 offset, reg_offset, bit_pos;
	struct keystone_pcie *ks_pcie;
	struct msi_desc *msi;
	struct pcie_port *pp;

	msi = irq_data_get_msi_desc(d);
	pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
	ks_pcie = to_keystone_pcie(pp);
	/* vector number relative to the first virq of this MSI domain */
	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);

	/* status clear must precede the EOI write */
	writel(BIT(bit_pos),
	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}
 115
 116void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 117{
 118        u32 reg_offset, bit_pos;
 119        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 120
 121        update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
 122        writel(BIT(bit_pos),
 123               ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
 124}
 125
 126void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 127{
 128        u32 reg_offset, bit_pos;
 129        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 130
 131        update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
 132        writel(BIT(bit_pos),
 133               ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
 134}
 135
 136static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 137{
 138        struct keystone_pcie *ks_pcie;
 139        struct msi_desc *msi;
 140        struct pcie_port *pp;
 141        u32 offset;
 142
 143        msi = irq_data_get_msi_desc(d);
 144        pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
 145        ks_pcie = to_keystone_pcie(pp);
 146        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 147
 148        /* Mask the end point if PVM implemented */
 149        if (IS_ENABLED(CONFIG_PCI_MSI)) {
 150                if (msi->msi_attrib.maskbit)
 151                        pci_msi_mask_irq(d);
 152        }
 153
 154        ks_dw_pcie_msi_clear_irq(pp, offset);
 155}
 156
 157static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
 158{
 159        struct keystone_pcie *ks_pcie;
 160        struct msi_desc *msi;
 161        struct pcie_port *pp;
 162        u32 offset;
 163
 164        msi = irq_data_get_msi_desc(d);
 165        pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
 166        ks_pcie = to_keystone_pcie(pp);
 167        offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 168
 169        /* Mask the end point if PVM implemented */
 170        if (IS_ENABLED(CONFIG_PCI_MSI)) {
 171                if (msi->msi_attrib.maskbit)
 172                        pci_msi_unmask_irq(d);
 173        }
 174
 175        ks_dw_pcie_msi_set_irq(pp, offset);
 176}
 177
/* irq_chip serving the MSI vectors routed through this host controller */
static struct irq_chip ks_dw_pcie_msi_irq_chip = {
	.name = "Keystone-PCIe-MSI-IRQ",
	.irq_ack = ks_dw_pcie_msi_irq_ack,
	.irq_mask = ks_dw_pcie_msi_irq_mask,
	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};
 184
 185static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
 186                              irq_hw_number_t hwirq)
 187{
 188        irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
 189                                 handle_level_irq);
 190        irq_set_chip_data(irq, domain->host_data);
 191
 192        return 0;
 193}
 194
/* Linear MSI irq domain ops; only .map is needed (no DT translation) */
static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
	.map = ks_dw_pcie_msi_map,
};
 198
 199int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
 200{
 201        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 202        int i;
 203
 204        pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
 205                                        MAX_MSI_IRQS,
 206                                        &ks_dw_pcie_msi_domain_ops,
 207                                        chip);
 208        if (!pp->irq_domain) {
 209                dev_err(pp->dev, "irq domain init failed\n");
 210                return -ENXIO;
 211        }
 212
 213        for (i = 0; i < MAX_MSI_IRQS; i++)
 214                irq_create_mapping(pp->irq_domain, i);
 215
 216        return 0;
 217}
 218
 219void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
 220{
 221        int i;
 222
 223        for (i = 0; i < MAX_LEGACY_IRQS; i++)
 224                writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
 225}
 226
 227void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
 228{
 229        struct pcie_port *pp = &ks_pcie->pp;
 230        u32 pending;
 231        int virq;
 232
 233        pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
 234
 235        if (BIT(0) & pending) {
 236                virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
 237                dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
 238                        virq);
 239                generic_handle_irq(virq);
 240        }
 241
 242        /* EOI the INTx interrupt */
 243        writel(offset, ks_pcie->va_app_base + IRQ_EOI);
 244}
 245
/* Nothing to do: the line is EOI'd in ks_dw_pcie_handle_legacy_irq(). */
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}
 249
/* Per-line masking is intentionally not implemented for legacy INTx. */
static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}
 253
/* Counterpart of the no-op mask above; lines stay enabled once set up. */
static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
 257
/* irq_chip for legacy INTx interrupts; all callbacks are no-ops (above) */
static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
	.name = "Keystone-PCI-Legacy-IRQ",
	.irq_ack = ks_dw_pcie_ack_legacy_irq,
	.irq_mask = ks_dw_pcie_mask_legacy_irq,
	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};
 264
 265static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
 266                                unsigned int irq, irq_hw_number_t hw_irq)
 267{
 268        irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
 269                                 handle_level_irq);
 270        irq_set_chip_data(irq, d->host_data);
 271
 272        return 0;
 273}
 274
/* Legacy INTx domain ops; standard one/two-cell DT interrupt translation */
static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
	.map = ks_dw_pcie_init_legacy_irq_map,
	.xlate = irq_domain_xlate_onetwocell,
};
 279
/**
 * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
 * registers
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 *
 * NOTE(review): the readback loop has no timeout; it assumes the hardware
 * always reflects DBI_CS2_EN_VAL eventually — confirm against the IP spec.
 */
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	/* read-modify-write: assert the dbi_cs2 enable bit in CMD_STATUS */
	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
	       reg_virt + CMD_STATUS);

	/* spin until the bit reads back set */
	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (!(val & DBI_CS2_EN_VAL));
}
 298
/**
 * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
 *
 * Since modification of dbi_cs2 involves different clock domain, read the
 * status back to ensure the transition is complete.
 *
 * NOTE(review): like ks_dw_pcie_set_dbi_mode(), the poll has no timeout.
 */
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
	u32 val;

	/* read-modify-write: clear the dbi_cs2 enable bit in CMD_STATUS */
	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
		     reg_virt + CMD_STATUS);

	/* spin until the bit reads back clear */
	do {
		val = readl(reg_virt + CMD_STATUS);
	} while (val & DBI_CS2_EN_VAL);
}
 316
 317void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 318{
 319        struct pcie_port *pp = &ks_pcie->pp;
 320        u32 start = pp->mem->start, end = pp->mem->end;
 321        int i, tr_size;
 322
 323        /* Disable BARs for inbound access */
 324        ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
 325        writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
 326        writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
 327        ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
 328
 329        /* Set outbound translation size per window division */
 330        writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
 331
 332        tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
 333
 334        /* Using Direct 1:1 mapping of RC <-> PCI memory space */
 335        for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
 336                writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
 337                writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
 338                start += tr_size;
 339        }
 340
 341        /* Enable OB translation */
 342        writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
 343               ks_pcie->va_app_base + CMD_STATUS);
 344}
 345
 346/**
 347 * ks_pcie_cfg_setup() - Set up configuration space address for a device
 348 *
 349 * @ks_pcie: ptr to keystone_pcie structure
 350 * @bus: Bus number the device is residing on
 351 * @devfn: device, function number info
 352 *
 353 * Forms and returns the address of configuration space mapped in PCIESS
 354 * address space 0.  Also configures CFG_SETUP for remote configuration space
 355 * access.
 356 *
 357 * The address space has two regions to access configuration - local and remote.
 358 * We access local region for bus 0 (as RC is attached on bus 0) and remote
 359 * region for others with TYPE 1 access when bus > 1.  As for device on bus = 1,
 360 * we will do TYPE 0 access as it will be on our secondary bus (logical).
 361 * CFG_SETUP is needed only for remote configuration access.
 362 */
 363static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
 364                                       unsigned int devfn)
 365{
 366        u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
 367        struct pcie_port *pp = &ks_pcie->pp;
 368        u32 regval;
 369
 370        if (bus == 0)
 371                return pp->dbi_base;
 372
 373        regval = (bus << 16) | (device << 8) | function;
 374
 375        /*
 376         * Since Bus#1 will be a virtual bus, we need to have TYPE0
 377         * access only.
 378         * TYPE 1
 379         */
 380        if (bus != 1)
 381                regval |= BIT(24);
 382
 383        writel(regval, ks_pcie->va_app_base + CFG_SETUP);
 384        return pp->va_cfg0_base;
 385}
 386
 387int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 388                             unsigned int devfn, int where, int size, u32 *val)
 389{
 390        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 391        u8 bus_num = bus->number;
 392        void __iomem *addr;
 393
 394        addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
 395
 396        return dw_pcie_cfg_read(addr + where, size, val);
 397}
 398
 399int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 400                             unsigned int devfn, int where, int size, u32 val)
 401{
 402        struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
 403        u8 bus_num = bus->number;
 404        void __iomem *addr;
 405
 406        addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
 407
 408        return dw_pcie_cfg_write(addr + where, size, val);
 409}
 410
/**
 * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
 *
 * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);

	/* Configure and set up BAR0 */
	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);

	/*
	 * Enable BAR0.  Both writes land on PCI_BASE_ADDRESS_0 while DBI
	 * CS2 mode is active — presumably the overlaid BAR mask register
	 * (enable bit, then 4K size mask); confirm against the DW spec.
	 */
	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);

	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);

	 /*
	  * For BAR0, just setting bus address for inbound writes (MSI) should
	  * be sufficient.  Use physical address to avoid any conflicts.
	  */
	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}
 435
 436/**
 437 * ks_dw_pcie_link_up() - Check if link up
 438 */
 439int ks_dw_pcie_link_up(struct pcie_port *pp)
 440{
 441        u32 val = readl(pp->dbi_base + DEBUG0);
 442
 443        return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
 444}
 445
 446void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
 447{
 448        u32 val;
 449
 450        /* Disable Link training */
 451        val = readl(ks_pcie->va_app_base + CMD_STATUS);
 452        val &= ~LTSSM_EN_VAL;
 453        writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
 454
 455        /* Initiate Link Training */
 456        val = readl(ks_pcie->va_app_base + CMD_STATUS);
 457        writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
 458}
 459
/**
 * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
 *
 * Ioremap the register resources, initialize legacy irq domain
 * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
 * PCI host controller.
 *
 * Returns 0 on success or a negative errno on any setup failure.
 */
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
				struct device_node *msi_intc_np)
{
	struct pcie_port *pp = &ks_pcie->pp;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct resource *res;

	/* Index 0 is the config reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	/*
	 * We set these same and is used in pcie rd/wr_other_conf
	 * functions
	 */
	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
	pp->va_cfg1_base = pp->va_cfg0_base;

	/* Index 1 is the application reg. space address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
	if (IS_ERR(ks_pcie->va_app_base))
		return PTR_ERR(ks_pcie->va_app_base);

	/* keep a copy of the resource; its start is the MSI target addr */
	ks_pcie->app = *res;

	/* Create legacy IRQ domain */
	ks_pcie->legacy_irq_domain =
			irq_domain_add_linear(ks_pcie->legacy_intc_np,
					MAX_LEGACY_IRQS,
					&ks_dw_pcie_legacy_irq_domain_ops,
					NULL);
	if (!ks_pcie->legacy_irq_domain) {
		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
		return -EINVAL;
	}

	/* generic DesignWare host bring-up (mappings are devm-managed) */
	return dw_pcie_host_init(pp);
}
 508