/* linux/drivers/pci/controller/dwc/pcie-designware-host.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Synopsys DesignWare PCIe host controller driver
   4 *
   5 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
   6 *              http://www.samsung.com
   7 *
   8 * Author: Jingoo Han <jg1.han@samsung.com>
   9 */
  10
  11#include <linux/irqchip/chained_irq.h>
  12#include <linux/irqdomain.h>
  13#include <linux/of_address.h>
  14#include <linux/of_pci.h>
  15#include <linux/pci_regs.h>
  16#include <linux/platform_device.h>
  17
  18#include "../../pci.h"
  19#include "pcie-designware.h"
  20
/* Forward declaration: the ops table is defined after the accessors it uses. */
static struct pci_ops dw_pcie_ops;
  22
  23static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
  24                               u32 *val)
  25{
  26        struct dw_pcie *pci;
  27
  28        if (pp->ops->rd_own_conf)
  29                return pp->ops->rd_own_conf(pp, where, size, val);
  30
  31        pci = to_dw_pcie_from_pp(pp);
  32        return dw_pcie_read(pci->dbi_base + where, size, val);
  33}
  34
  35static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
  36                               u32 val)
  37{
  38        struct dw_pcie *pci;
  39
  40        if (pp->ops->wr_own_conf)
  41                return pp->ops->wr_own_conf(pp, where, size, val);
  42
  43        pci = to_dw_pcie_from_pp(pp);
  44        return dw_pcie_write(pci->dbi_base + where, size, val);
  45}
  46
/* Top-level MSI chip ack: forward to the parent (bottom) irq_chip. */
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
  51
/* Mask in the PCI MSI capability, then in the parent (controller) chip. */
static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
  57
/* Unmask in the PCI MSI capability, then in the parent (controller) chip. */
static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
  63
/* Top-level irq_chip presented to the PCI MSI layer; delegates to parent. */
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};
  70
/* MSI domain description: supports MSI-X and multi-message MSI. */
static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};
  76
/*
 * MSI int handler.
 *
 * Scan every MSI controller status register and dispatch each pending
 * vector to its mapped Linux IRQ, clearing the status bit afterwards.
 * Returns IRQ_HANDLED if at least one vector was pending, IRQ_NONE
 * otherwise.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	/* Each controller register bank covers MAX_MSI_IRQS_PER_CTRL vectors */
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		/* Walk the set bits; find_next_bit returns the width when done */
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			/* Write-one-to-clear the serviced status bit */
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
						(i * MSI_REG_CTRL_BLOCK_SIZE),
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}
 111
 112/* Chained MSI interrupt service routine */
 113static void dw_chained_msi_isr(struct irq_desc *desc)
 114{
 115        struct irq_chip *chip = irq_desc_get_chip(desc);
 116        struct pcie_port *pp;
 117
 118        chained_irq_enter(chip, desc);
 119
 120        pp = irq_desc_get_handler_data(desc);
 121        dw_handle_msi_irq(pp);
 122
 123        chained_irq_exit(chip, desc);
 124}
 125
 126static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
 127{
 128        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 129        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 130        u64 msi_target;
 131
 132        if (pp->ops->get_msi_addr)
 133                msi_target = pp->ops->get_msi_addr(pp);
 134        else
 135                msi_target = (u64)pp->msi_data;
 136
 137        msg->address_lo = lower_32_bits(msi_target);
 138        msg->address_hi = upper_32_bits(msi_target);
 139
 140        if (pp->ops->get_msi_data)
 141                msg->data = pp->ops->get_msi_data(pp, data->hwirq);
 142        else
 143                msg->data = data->hwirq;
 144
 145        dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
 146                (int)data->hwirq, msg->address_hi, msg->address_lo);
 147}
 148
/*
 * Per-vector affinity is not supported on this controller; the chained
 * parent interrupt determines the CPU, so always refuse.
 */
static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
 154
 155static void dw_pci_bottom_mask(struct irq_data *data)
 156{
 157        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 158        unsigned int res, bit, ctrl;
 159        unsigned long flags;
 160
 161        raw_spin_lock_irqsave(&pp->lock, flags);
 162
 163        if (pp->ops->msi_clear_irq) {
 164                pp->ops->msi_clear_irq(pp, data->hwirq);
 165        } else {
 166                ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
 167                res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
 168                bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 169
 170                pp->irq_status[ctrl] &= ~(1 << bit);
 171                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
 172                                    pp->irq_status[ctrl]);
 173        }
 174
 175        raw_spin_unlock_irqrestore(&pp->lock, flags);
 176}
 177
 178static void dw_pci_bottom_unmask(struct irq_data *data)
 179{
 180        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 181        unsigned int res, bit, ctrl;
 182        unsigned long flags;
 183
 184        raw_spin_lock_irqsave(&pp->lock, flags);
 185
 186        if (pp->ops->msi_set_irq) {
 187                pp->ops->msi_set_irq(pp, data->hwirq);
 188        } else {
 189                ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
 190                res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
 191                bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
 192
 193                pp->irq_status[ctrl] |= 1 << bit;
 194                dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
 195                                    pp->irq_status[ctrl]);
 196        }
 197
 198        raw_spin_unlock_irqrestore(&pp->lock, flags);
 199}
 200
/* Ack a bottom-level MSI vector via the platform's msi_irq_ack hook, if any. */
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);
}
 211
/* Bottom (controller-level) irq_chip backing the MSI parent domain. */
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};
 220
 221static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
 222                                    unsigned int virq, unsigned int nr_irqs,
 223                                    void *args)
 224{
 225        struct pcie_port *pp = domain->host_data;
 226        unsigned long flags;
 227        u32 i;
 228        int bit;
 229
 230        raw_spin_lock_irqsave(&pp->lock, flags);
 231
 232        bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
 233                                      order_base_2(nr_irqs));
 234
 235        raw_spin_unlock_irqrestore(&pp->lock, flags);
 236
 237        if (bit < 0)
 238                return -ENOSPC;
 239
 240        for (i = 0; i < nr_irqs; i++)
 241                irq_domain_set_info(domain, virq + i, bit + i,
 242                                    &dw_pci_msi_bottom_irq_chip,
 243                                    pp, handle_edge_irq,
 244                                    NULL, NULL);
 245
 246        return 0;
 247}
 248
 249static void dw_pcie_irq_domain_free(struct irq_domain *domain,
 250                                    unsigned int virq, unsigned int nr_irqs)
 251{
 252        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
 253        struct pcie_port *pp = irq_data_get_irq_chip_data(data);
 254        unsigned long flags;
 255
 256        raw_spin_lock_irqsave(&pp->lock, flags);
 257
 258        bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
 259                              order_base_2(nr_irqs));
 260
 261        raw_spin_unlock_irqrestore(&pp->lock, flags);
 262}
 263
/* Ops for the parent (hwirq-level) MSI domain. */
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
 268
 269int dw_pcie_allocate_domains(struct pcie_port *pp)
 270{
 271        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 272        struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
 273
 274        pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
 275                                               &dw_pcie_msi_domain_ops, pp);
 276        if (!pp->irq_domain) {
 277                dev_err(pci->dev, "Failed to create IRQ domain\n");
 278                return -ENOMEM;
 279        }
 280
 281        pp->msi_domain = pci_msi_create_irq_domain(fwnode,
 282                                                   &dw_pcie_msi_domain_info,
 283                                                   pp->irq_domain);
 284        if (!pp->msi_domain) {
 285                dev_err(pci->dev, "Failed to create MSI domain\n");
 286                irq_domain_remove(pp->irq_domain);
 287                return -ENOMEM;
 288        }
 289
 290        return 0;
 291}
 292
/*
 * Tear down MSI support: detach the chained handler and its data from
 * the parent interrupt, then destroy both IRQ domains.
 */
void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
 301
 302void dw_pcie_msi_init(struct pcie_port *pp)
 303{
 304        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 305        struct device *dev = pci->dev;
 306        struct page *page;
 307        u64 msi_target;
 308
 309        page = alloc_page(GFP_KERNEL);
 310        pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 311        if (dma_mapping_error(dev, pp->msi_data)) {
 312                dev_err(dev, "Failed to map MSI data\n");
 313                __free_page(page);
 314                return;
 315        }
 316        msi_target = (u64)pp->msi_data;
 317
 318        /* Program the msi_data */
 319        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
 320                            lower_32_bits(msi_target));
 321        dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
 322                            upper_32_bits(msi_target));
 323}
 324
 325int dw_pcie_host_init(struct pcie_port *pp)
 326{
 327        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 328        struct device *dev = pci->dev;
 329        struct device_node *np = dev->of_node;
 330        struct platform_device *pdev = to_platform_device(dev);
 331        struct resource_entry *win, *tmp;
 332        struct pci_bus *bus, *child;
 333        struct pci_host_bridge *bridge;
 334        struct resource *cfg_res;
 335        int ret;
 336
 337        raw_spin_lock_init(&pci->pp.lock);
 338
 339        cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 340        if (cfg_res) {
 341                pp->cfg0_size = resource_size(cfg_res) >> 1;
 342                pp->cfg1_size = resource_size(cfg_res) >> 1;
 343                pp->cfg0_base = cfg_res->start;
 344                pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 345        } else if (!pp->va_cfg0_base) {
 346                dev_err(dev, "Missing *config* reg space\n");
 347        }
 348
 349        bridge = pci_alloc_host_bridge(0);
 350        if (!bridge)
 351                return -ENOMEM;
 352
 353        ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
 354                                        &bridge->windows, &pp->io_base);
 355        if (ret)
 356                return ret;
 357
 358        ret = devm_request_pci_bus_resources(dev, &bridge->windows);
 359        if (ret)
 360                goto error;
 361
 362        /* Get the I/O and memory ranges from DT */
 363        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
 364                switch (resource_type(win->res)) {
 365                case IORESOURCE_IO:
 366                        ret = devm_pci_remap_iospace(dev, win->res,
 367                                                     pp->io_base);
 368                        if (ret) {
 369                                dev_warn(dev, "Error %d: failed to map resource %pR\n",
 370                                         ret, win->res);
 371                                resource_list_destroy_entry(win);
 372                        } else {
 373                                pp->io = win->res;
 374                                pp->io->name = "I/O";
 375                                pp->io_size = resource_size(pp->io);
 376                                pp->io_bus_addr = pp->io->start - win->offset;
 377                        }
 378                        break;
 379                case IORESOURCE_MEM:
 380                        pp->mem = win->res;
 381                        pp->mem->name = "MEM";
 382                        pp->mem_size = resource_size(pp->mem);
 383                        pp->mem_bus_addr = pp->mem->start - win->offset;
 384                        break;
 385                case 0:
 386                        pp->cfg = win->res;
 387                        pp->cfg0_size = resource_size(pp->cfg) >> 1;
 388                        pp->cfg1_size = resource_size(pp->cfg) >> 1;
 389                        pp->cfg0_base = pp->cfg->start;
 390                        pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
 391                        break;
 392                case IORESOURCE_BUS:
 393                        pp->busn = win->res;
 394                        break;
 395                }
 396        }
 397
 398        if (!pci->dbi_base) {
 399                pci->dbi_base = devm_pci_remap_cfgspace(dev,
 400                                                pp->cfg->start,
 401                                                resource_size(pp->cfg));
 402                if (!pci->dbi_base) {
 403                        dev_err(dev, "Error with ioremap\n");
 404                        ret = -ENOMEM;
 405                        goto error;
 406                }
 407        }
 408
 409        pp->mem_base = pp->mem->start;
 410
 411        if (!pp->va_cfg0_base) {
 412                pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
 413                                        pp->cfg0_base, pp->cfg0_size);
 414                if (!pp->va_cfg0_base) {
 415                        dev_err(dev, "Error with ioremap in function\n");
 416                        ret = -ENOMEM;
 417                        goto error;
 418                }
 419        }
 420
 421        if (!pp->va_cfg1_base) {
 422                pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
 423                                                pp->cfg1_base,
 424                                                pp->cfg1_size);
 425                if (!pp->va_cfg1_base) {
 426                        dev_err(dev, "Error with ioremap\n");
 427                        ret = -ENOMEM;
 428                        goto error;
 429                }
 430        }
 431
 432        ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
 433        if (ret)
 434                pci->num_viewport = 2;
 435
 436        if (IS_ENABLED(CONFIG_PCI_MSI)) {
 437                /*
 438                 * If a specific SoC driver needs to change the
 439                 * default number of vectors, it needs to implement
 440                 * the set_num_vectors callback.
 441                 */
 442                if (!pp->ops->set_num_vectors) {
 443                        pp->num_vectors = MSI_DEF_NUM_VECTORS;
 444                } else {
 445                        pp->ops->set_num_vectors(pp);
 446
 447                        if (pp->num_vectors > MAX_MSI_IRQS ||
 448                            pp->num_vectors == 0) {
 449                                dev_err(dev,
 450                                        "Invalid number of vectors\n");
 451                                goto error;
 452                        }
 453                }
 454
 455                if (!pp->ops->msi_host_init) {
 456                        ret = dw_pcie_allocate_domains(pp);
 457                        if (ret)
 458                                goto error;
 459
 460                        if (pp->msi_irq)
 461                                irq_set_chained_handler_and_data(pp->msi_irq,
 462                                                            dw_chained_msi_isr,
 463                                                            pp);
 464                } else {
 465                        ret = pp->ops->msi_host_init(pp);
 466                        if (ret < 0)
 467                                goto error;
 468                }
 469        }
 470
 471        if (pp->ops->host_init) {
 472                ret = pp->ops->host_init(pp);
 473                if (ret)
 474                        goto error;
 475        }
 476
 477        pp->root_bus_nr = pp->busn->start;
 478
 479        bridge->dev.parent = dev;
 480        bridge->sysdata = pp;
 481        bridge->busnr = pp->root_bus_nr;
 482        bridge->ops = &dw_pcie_ops;
 483        bridge->map_irq = of_irq_parse_and_map_pci;
 484        bridge->swizzle_irq = pci_common_swizzle;
 485
 486        ret = pci_scan_root_bus_bridge(bridge);
 487        if (ret)
 488                goto error;
 489
 490        bus = bridge->bus;
 491
 492        if (pp->ops->scan_bus)
 493                pp->ops->scan_bus(pp);
 494
 495        pci_bus_size_bridges(bus);
 496        pci_bus_assign_resources(bus);
 497
 498        list_for_each_entry(child, &bus->children, node)
 499                pcie_bus_configure_settings(child);
 500
 501        pci_bus_add_devices(bus);
 502        return 0;
 503
 504error:
 505        pci_free_host_bridge(bridge);
 506        return ret;
 507}
 508
/*
 * Read config space of a device below the root bus.  ATU viewport 1 is
 * retargeted at the appropriate CFG window for the access; when only
 * two viewports exist it is restored to the I/O window afterwards,
 * since it is shared with I/O transactions.
 */
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the bus directly below the root port, CFG1 further down */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	/* Restore the shared viewport to the I/O window */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
 547
/*
 * Write config space of a device below the root bus.  Mirrors
 * dw_pcie_rd_other_conf(): ATU viewport 1 is pointed at the CFG window
 * for the access and, with only two viewports, restored to the I/O
 * window afterwards.
 */
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	/* CFG0 for the bus directly below the root port, CFG1 further down */
	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	/* Restore the shared viewport to the I/O window */
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
 586
 587static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
 588                                int dev)
 589{
 590        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 591
 592        /* If there is no link, then there is no device */
 593        if (bus->number != pp->root_bus_nr) {
 594                if (!dw_pcie_link_up(pci))
 595                        return 0;
 596        }
 597
 598        /* Access only one slot on each root port */
 599        if (bus->number == pp->root_bus_nr && dev > 0)
 600                return 0;
 601
 602        return 1;
 603}
 604
 605static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
 606                           int size, u32 *val)
 607{
 608        struct pcie_port *pp = bus->sysdata;
 609
 610        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
 611                *val = 0xffffffff;
 612                return PCIBIOS_DEVICE_NOT_FOUND;
 613        }
 614
 615        if (bus->number == pp->root_bus_nr)
 616                return dw_pcie_rd_own_conf(pp, where, size, val);
 617
 618        return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
 619}
 620
 621static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 622                           int where, int size, u32 val)
 623{
 624        struct pcie_port *pp = bus->sysdata;
 625
 626        if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
 627                return PCIBIOS_DEVICE_NOT_FOUND;
 628
 629        if (bus->number == pp->root_bus_nr)
 630                return dw_pcie_wr_own_conf(pp, where, size, val);
 631
 632        return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
 633}
 634
/* Config accessors installed on the host bridge. */
static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
 639
 640static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
 641{
 642        u32 val;
 643
 644        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
 645        if (val == 0xffffffff)
 646                return 1;
 647
 648        return 0;
 649}
 650
/*
 * Program the root complex: core setup, MSI enable-register cache, RC
 * BARs, interrupt pin, bus numbers, command register, outbound ATU
 * windows (unless the platform translates addresses itself) and the
 * bridge class code. Finishes by requesting a link speed change.
 */
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++)
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
					(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &pp->irq_status[ctrl]);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins: INTA in the interrupt-pin byte */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		/* Viewport 0 maps MEM; a third viewport, if any, maps I/O */
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
 724