linux/drivers/pci/dwc/pcie-designware-host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;

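/*
 * "Own" config accessors: reads and writes of the Root Complex's own config
 * space go straight through the DBI register window. SoC glue drivers can
 * override these via the host ops when their DBI accesses need special
 * handling.
 */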
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
                               u32 *val)
{
        struct dw_pcie *pci;

        if (pp->ops->rd_own_conf)
                return pp->ops->rd_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
                               u32 val)
{
        struct dw_pcie *pci;

        if (pp->ops->wr_own_conf)
                return pp->ops->wr_own_conf(pp, where, size, val);

        pci = to_dw_pcie_from_pp(pp);
        return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
        irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
        pci_msi_mask_irq(d);
        irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
        pci_msi_unmask_irq(d);
        irq_chip_unmask_parent(d);
}

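/*
 * Top-level MSI irq_chip and domain info handed to the generic PCI/MSI
 * layer. It stacks on the controller-specific "bottom" chip further down,
 * which is what actually programs the DesignWare MSI registers.
 */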
static struct irq_chip dw_pcie_msi_irq_chip = {
        .name = "PCI-MSI",
        .irq_ack = dw_msi_ack_irq,
        .irq_mask = dw_msi_mask_irq,
        .irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
        .flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
        .chip   = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
        int i, pos, irq;
        u32 val, num_ctrls;
        irqreturn_t ret = IRQ_NONE;

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

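        /*
         * Each MSI controller block services MAX_MSI_IRQS_PER_CTRL (32)
         * vectors, and its ENABLE/MASK/STATUS registers are laid out with a
         * 12-byte stride. Walk every block, dispatch each pending vector and
         * then clear its status bit.
         */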
        for (i = 0; i < num_ctrls; i++) {
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
                                    &val);
                if (!val)
                        continue;

                ret = IRQ_HANDLED;
                pos = 0;
                while ((pos = find_next_bit((unsigned long *) &val, 32,
                                            pos)) != 32) {
                        irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
                        generic_handle_irq(irq);
                        dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
                                            4, 1 << pos);
                        pos++;
                }
        }

        return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct pcie_port *pp;

        chained_irq_enter(chip, desc);

        pp = irq_desc_get_handler_data(desc);
        dw_handle_msi_irq(pp);

        chained_irq_exit(chip, desc);
}

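/*
 * Compose the MSI message for a vector: the address is the DMA address of
 * the page mapped in dw_pcie_msi_init() (or whatever the SoC glue reports
 * through get_msi_addr), and the payload defaults to the hwirq number
 * unless get_msi_data is provided.
 */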
static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        u64 msi_target;

        if (pp->ops->get_msi_addr)
                msi_target = pp->ops->get_msi_addr(pp);
        else
                msi_target = (u64)pp->msi_data;

        msg->address_lo = lower_32_bits(msi_target);
        msg->address_hi = upper_32_bits(msi_target);

        if (pp->ops->get_msi_data)
                msg->data = pp->ops->get_msi_data(pp, data->hwirq);
        else
                msg->data = data->hwirq;

        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
                (int)data->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
                                   const struct cpumask *mask, bool force)
{
        return -EINVAL;
}

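/*
 * Bottom-level mask/unmask: toggle the vector's bit in the per-block
 * PCIE_MSI_INTR0_ENABLE register (one 32-bit register per controller
 * block), with pp->lock protecting the cached irq_status[] copy.
 */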
static void dw_pci_bottom_mask(struct irq_data *data)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        if (pp->ops->msi_clear_irq) {
                pp->ops->msi_clear_irq(pp, data->hwirq);
        } else {
                ctrl = data->hwirq / 32;
                res = ctrl * 12;
                bit = data->hwirq % 32;

                pp->irq_status[ctrl] &= ~(1 << bit);
                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
                                    pp->irq_status[ctrl]);
        }

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *data)
{
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned int res, bit, ctrl;
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);

        if (pp->ops->msi_set_irq) {
                pp->ops->msi_set_irq(pp, data->hwirq);
        } else {
                ctrl = data->hwirq / 32;
                res = ctrl * 12;
                bit = data->hwirq % 32;

                pp->irq_status[ctrl] |= 1 << bit;
                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
                                    pp->irq_status[ctrl]);
        }

        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
        struct msi_desc *msi = irq_data_get_msi_desc(d);
        struct pcie_port *pp;

        pp = msi_desc_to_pci_sysdata(msi);

        if (pp->ops->msi_irq_ack)
                pp->ops->msi_irq_ack(d->hwirq, pp);
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
        .name = "DWPCI-MSI",
        .irq_ack = dw_pci_bottom_ack,
        .irq_compose_msi_msg = dw_pci_setup_msi_msg,
        .irq_set_affinity = dw_pci_msi_set_affinity,
        .irq_mask = dw_pci_bottom_mask,
        .irq_unmask = dw_pci_bottom_unmask,
};

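/*
 * Allocate a naturally aligned, contiguous block of vectors from the
 * bitmap so that Multi-MSI (power-of-two vector counts) works, then bind
 * each virq to the bottom chip with edge handling.
 */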
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs,
                                    void *args)
{
        struct pcie_port *pp = domain->host_data;
        unsigned long flags;
        u32 i;
        int bit;

        raw_spin_lock_irqsave(&pp->lock, flags);

        bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                                      order_base_2(nr_irqs));

        raw_spin_unlock_irqrestore(&pp->lock, flags);

        if (bit < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, bit + i,
                                    &dw_pci_msi_bottom_irq_chip,
                                    pp, handle_edge_irq,
                                    NULL, NULL);

        return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
                                    unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
        unsigned long flags;

        raw_spin_lock_irqsave(&pp->lock, flags);
        bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
                              order_base_2(nr_irqs));
        raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
        .alloc  = dw_pcie_irq_domain_alloc,
        .free   = dw_pcie_irq_domain_free,
};

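/*
 * Create the two-level IRQ domain hierarchy: a linear domain for the
 * controller's MSI vectors, with a PCI/MSI domain layered on top of it for
 * the endpoint drivers.
 */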
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

        pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                                               &dw_pcie_msi_domain_ops, pp);
        if (!pp->irq_domain) {
                dev_err(pci->dev, "failed to create IRQ domain\n");
                return -ENOMEM;
        }

        pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                                                   &dw_pcie_msi_domain_info,
                                                   pp->irq_domain);
        if (!pp->msi_domain) {
                dev_err(pci->dev, "failed to create MSI domain\n");
                irq_domain_remove(pp->irq_domain);
                return -ENOMEM;
        }

        return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
        irq_set_chained_handler(pp->msi_irq, NULL);
        irq_set_handler_data(pp->msi_irq, NULL);

        irq_domain_remove(pp->msi_domain);
        irq_domain_remove(pp->irq_domain);
}

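/*
 * The MSI "doorbell" is simply the DMA address of a page mapped here: the
 * controller matches inbound writes to that address and raises the MSI
 * interrupt, so the page contents are never actually written by the device.
 */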
void dw_pcie_msi_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct page *page;
        u64 msi_target;

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                dev_err(dev, "failed to allocate MSI data page\n");
                return;
        }

        pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, pp->msi_data)) {
                dev_err(dev, "failed to map MSI data\n");
                __free_page(page);
                return;
        }
        msi_target = (u64)pp->msi_data;

        /* program the msi_data */
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
                            lower_32_bits(msi_target));
        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
                            upper_32_bits(msi_target));
}

int dw_pcie_host_init(struct pcie_port *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);
        struct resource_entry *win, *tmp;
        struct pci_bus *bus, *child;
        struct pci_host_bridge *bridge;
        struct resource *cfg_res;
        int ret;

        raw_spin_lock_init(&pci->pp.lock);

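        /*
         * The "config" window is split in half: the first half serves CFG0
         * (type 0) accesses to the bus directly below the root port, the
         * second half CFG1 (type 1) accesses to buses further downstream.
         */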
        cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
        if (cfg_res) {
                pp->cfg0_size = resource_size(cfg_res) / 2;
                pp->cfg1_size = resource_size(cfg_res) / 2;
                pp->cfg0_base = cfg_res->start;
                pp->cfg1_base = cfg_res->start + pp->cfg0_size;
        } else if (!pp->va_cfg0_base) {
                dev_err(dev, "missing *config* reg space\n");
        }

        bridge = pci_alloc_host_bridge(0);
        if (!bridge)
                return -ENOMEM;

        ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
                                        &bridge->windows, &pp->io_base);
        if (ret)
                goto error;
        ret = devm_request_pci_bus_resources(dev, &bridge->windows);
        if (ret)
                goto error;

        /* Get the I/O and memory ranges from DT */
        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
                switch (resource_type(win->res)) {
                case IORESOURCE_IO:
                        ret = pci_remap_iospace(win->res, pp->io_base);
                        if (ret) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         ret, win->res);
                                resource_list_destroy_entry(win);
                        } else {
                                pp->io = win->res;
                                pp->io->name = "I/O";
                                pp->io_size = resource_size(pp->io);
                                pp->io_bus_addr = pp->io->start - win->offset;
                        }
                        break;
                case IORESOURCE_MEM:
                        pp->mem = win->res;
                        pp->mem->name = "MEM";
                        pp->mem_size = resource_size(pp->mem);
                        pp->mem_bus_addr = pp->mem->start - win->offset;
                        break;
                case 0:
                        pp->cfg = win->res;
                        pp->cfg0_size = resource_size(pp->cfg) / 2;
                        pp->cfg1_size = resource_size(pp->cfg) / 2;
                        pp->cfg0_base = pp->cfg->start;
                        pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
                        break;
                case IORESOURCE_BUS:
                        pp->busn = win->res;
                        break;
                }
        }

        if (!pci->dbi_base) {
                pci->dbi_base = devm_pci_remap_cfgspace(dev,
                                                pp->cfg->start,
                                                resource_size(pp->cfg));
                if (!pci->dbi_base) {
                        dev_err(dev, "error with ioremap\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        pp->mem_base = pp->mem->start;

        if (!pp->va_cfg0_base) {
                pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
                                        pp->cfg0_base, pp->cfg0_size);
                if (!pp->va_cfg0_base) {
                        dev_err(dev, "error with ioremap in function\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        if (!pp->va_cfg1_base) {
                pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
                                                pp->cfg1_base,
                                                pp->cfg1_size);
                if (!pp->va_cfg1_base) {
                        dev_err(dev, "error with ioremap\n");
                        ret = -ENOMEM;
                        goto error;
                }
        }

        ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
        if (ret)
                pci->num_viewport = 2;

        if (IS_ENABLED(CONFIG_PCI_MSI)) {
                /*
                 * If a specific SoC driver needs to change the
                 * default number of vectors, it needs to implement
                 * the set_num_vectors callback.
                 */
                if (!pp->ops->set_num_vectors) {
                        pp->num_vectors = MSI_DEF_NUM_VECTORS;
                } else {
                        pp->ops->set_num_vectors(pp);

                        if (pp->num_vectors > MAX_MSI_IRQS ||
                            pp->num_vectors == 0) {
                                dev_err(dev,
                                        "Invalid number of vectors\n");
                                ret = -EINVAL;
                                goto error;
                        }
                }

                if (!pp->ops->msi_host_init) {
                        ret = dw_pcie_allocate_domains(pp);
                        if (ret)
                                goto error;

                        if (pp->msi_irq)
                                irq_set_chained_handler_and_data(pp->msi_irq,
                                                            dw_chained_msi_isr,
                                                            pp);
                } else {
                        ret = pp->ops->msi_host_init(pp);
                        if (ret < 0)
                                goto error;
                }
        }

        if (pp->ops->host_init) {
                ret = pp->ops->host_init(pp);
                if (ret)
                        goto error;
        }

        pp->root_bus_nr = pp->busn->start;

        bridge->dev.parent = dev;
        bridge->sysdata = pp;
        bridge->busnr = pp->root_bus_nr;
        bridge->ops = &dw_pcie_ops;
        bridge->map_irq = of_irq_parse_and_map_pci;
        bridge->swizzle_irq = pci_common_swizzle;

        ret = pci_scan_root_bus_bridge(bridge);
        if (ret)
                goto error;

        bus = bridge->bus;

        if (pp->ops->scan_bus)
                pp->ops->scan_bus(pp);

        pci_bus_size_bridges(bus);
        pci_bus_assign_resources(bus);

        list_for_each_entry(child, &bus->children, node)
                pcie_bus_configure_settings(child);

        pci_bus_add_devices(bus);
        return 0;

error:
        pci_free_host_bridge(bridge);
        return ret;
}

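/*
 * Config accesses to downstream devices are tunneled through an outbound
 * iATU region (index 1) that is re-programmed for every access. On
 * controllers with only two viewports the same region is shared with I/O
 * space, so it is switched back to an I/O mapping once the access is done.
 */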
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 *val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->rd_other_conf)
                return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_read(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
                                 u32 devfn, int where, int size, u32 val)
{
        int ret, type;
        u32 busdev, cfg_size;
        u64 cpu_addr;
        void __iomem *va_cfg_base;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        if (pp->ops->wr_other_conf)
                return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

        busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
                 PCIE_ATU_FUNC(PCI_FUNC(devfn));

        if (bus->parent->number == pp->root_bus_nr) {
                type = PCIE_ATU_TYPE_CFG0;
                cpu_addr = pp->cfg0_base;
                cfg_size = pp->cfg0_size;
                va_cfg_base = pp->va_cfg0_base;
        } else {
                type = PCIE_ATU_TYPE_CFG1;
                cpu_addr = pp->cfg1_base;
                cfg_size = pp->cfg1_size;
                va_cfg_base = pp->va_cfg1_base;
        }

        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                  type, cpu_addr,
                                  busdev, cfg_size);
        ret = dw_pcie_write(va_cfg_base + where, size, val);
        if (pci->num_viewport <= 2)
                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_IO, pp->io_base,
                                          pp->io_bus_addr, pp->io_size);

        return ret;
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
                                int dev)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        /* If there is no link, then there is no device */
        if (bus->number != pp->root_bus_nr) {
                if (!dw_pcie_link_up(pci))
                        return 0;
        }

        /* access only one slot on each root port */
        if (bus->number == pp->root_bus_nr && dev > 0)
                return 0;

        return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
                           int size, u32 *val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_rd_own_conf(pp, where, size, val);

        return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
                           int where, int size, u32 val)
{
        struct pcie_port *pp = bus->sysdata;

        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
                return PCIBIOS_DEVICE_NOT_FOUND;

        if (bus->number == pp->root_bus_nr)
                return dw_pcie_wr_own_conf(pp, where, size, val);

        return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
        .read = dw_pcie_rd_conf,
        .write = dw_pcie_wr_conf,
};

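/*
 * In "unroll" mode the iATU registers are memory-mapped per region and the
 * legacy viewport register does not exist, so reading PCIE_ATU_VIEWPORT
 * returns all ones.
 */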
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xffffffff)
                return 1;

        return 0;
}

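/*
 * One-time Root Complex programming: core/link setup, MSI controller state,
 * RC BARs, bus numbers, command register and the default outbound iATU
 * mappings for memory (and optionally I/O) space.
 */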
void dw_pcie_setup_rc(struct pcie_port *pp)
{
        u32 val, ctrl, num_ctrls;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

        dw_pcie_setup(pci);

        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /* Initialize IRQ Status array */
        for (ctrl = 0; ctrl < num_ctrls; ctrl++)
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
                                    &pp->irq_status[ctrl]);
        /* setup RC BARs */
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

        /* setup interrupt pins */
        dw_pcie_dbi_ro_wr_en(pci);
        val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
        val &= 0xffff00ff;
        val |= 0x00000100;
        dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
        dw_pcie_dbi_ro_wr_dis(pci);

        /* setup bus numbers: primary 0, secondary 1, subordinate 0xff */
        val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
        val &= 0xff000000;
        val |= 0x00ff0100;
        dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

        /* setup command register */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        val &= 0xffff0000;
        val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
        dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

        /*
         * If the platform provides ->rd_other_conf, it means the platform
         * uses its own address translation component rather than ATU, so
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
                /* get iATU unroll support */
                pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
                dev_dbg(pci->dev, "iATU unroll: %s\n",
                        pci->iatu_unroll_enabled ? "enabled" : "disabled");

                dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
                if (pci->num_viewport > 2)
                        dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
                                                  PCIE_ATU_TYPE_IO, pp->io_base,
                                                  pp->io_bus_addr, pp->io_size);
        }

        dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

        /* Enable write permission for the DBI read-only register */
        dw_pcie_dbi_ro_wr_en(pci);
        /* program correct class for RC */
        dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
        /* Better disable write permission right after the update */
        dw_pcie_dbi_ro_wr_dis(pci);

        dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
        val |= PORT_LOGIC_SPEED_CHANGE;
        dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}