linux/drivers/pci/controller/dwc/pcie-designware-host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

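/*
 * Top-level MSI irqchip seen by endpoint drivers.  Ack/mask/unmask forward
 * to the parent (controller-level) irqchip below; mask and unmask also
 * toggle the per-device MSI capability mask, so the device and the
 * controller agree on each vector's state.
 */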
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/*
 * MSI interrupt handler: scan the status register of each MSI controller
 * bank and dispatch every pending vector to its mapped Linux IRQ.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

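/*
 * Compose the MSI message for a vector: the address is the DMA address of
 * pp->msi_msg and the payload is the hwirq number.  The controller decodes
 * inbound writes to this address and latches them into the MSI status
 * registers, so the backing buffer exists only to reserve a DMA address
 * that is valid from the device's point of view.
 */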
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

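/*
 * Controller-level mask/unmask/ack.  Vectors are grouped into banks of
 * MAX_MSI_IRQS_PER_CTRL (32): "ctrl" selects the bank, "res" is its byte
 * offset from the first MSI register block, and "bit" is the vector's
 * position within the bank's 32-bit mask/status registers.
 */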
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

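/*
 * Allocate nr_irqs consecutive vectors from the bitmap.  The region is
 * rounded up to a power of two and naturally aligned, as PCI multi-MSI
 * requires: a device gets a block of vectors distinguished only by the
 * low bits of the message data.
 */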
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};

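/*
 * Create the two-level IRQ domain hierarchy: a linear domain mapping
 * hwirqs onto the controller's vectors, with a PCI/MSI domain stacked on
 * top of it for the PCI core to allocate from.
 */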
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
					       &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

static void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}

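/* Program the DMA address of pp->msi_msg as the controller's MSI target. */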
static void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

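/*
 * Host bring-up: map the DBI and config spaces, parse the bridge windows,
 * set up MSI (either the platform's msi_host_init hook, an external MSI
 * parent, or the controller's built-in MSI block), configure the Root
 * Complex, start the link and enumerate the bus.
 */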
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	if (!pci->dbi_base) {
		struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");

		pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
		if (IS_ERR(pci->dbi_base))
			return PTR_ERR(pci->dbi_base);
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
								pp->cfg0_base,
								pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

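	/*
	 * Use the controller's built-in MSI block only when the platform
	 * neither provides its own msi_host_init hook nor routes MSIs to
	 * an external interrupt controller via DT ("msi-parent"/"msi-map").
	 */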
	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		if (!pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			return -EINVAL;
		}

		if (pp->ops->msi_host_init) {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		} else if (pp->has_msi_ctrl) {
			if (!pp->msi_irq) {
				pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
				if (pp->msi_irq < 0) {
					pp->msi_irq = platform_get_irq(pdev, 0);
					if (pp->msi_irq < 0)
						return pp->msi_irq;
				}
			}

			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq > 0)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
			if (ret)
				dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			if (dma_mapping_error(pci->dev, pp->msi_data)) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				goto err_free_msi;
			}
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	dw_pcie_setup_rc(pp);
	dw_pcie_msi_init(pp);

	if (!dw_pcie_link_up(pci) && pci->ops->start_link) {
		ret = pci->ops->start_link(pci);
		if (ret)
			goto err_free_msi;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

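/*
 * Config accesses to devices below the root bus reuse outbound ATU window
 * 0: before each access it is re-programmed for a CFG0 transaction (for
 * the root port's immediate child bus) or CFG1 (for buses further
 * downstream), targeting the shared pp->va_cfg0_base mapping.
 */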
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

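/*
 * When ATU windows are scarce (io_cfg_atu_shared), window 0 doubles as
 * both the config and the I/O window, so it must be restored to an I/O
 * translation after every config access.
 */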
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && pci->io_cfg_atu_shared)
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

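/*
 * Config space of the Root Complex itself (device 0 on the root bus) is
 * accessed directly through the memory-mapped DBI registers, with no ATU
 * translation involved.
 */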
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

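/*
 * One-time Root Complex setup: BARs, bus numbers, interrupt pin, command
 * register, MSI controller state and the outbound ATU windows for the
 * bridge's MEM and I/O ranges.
 */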
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	int i;
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/* Ensure all outbound windows are disabled so there are no multiple matches */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Program an outbound window per memory resource, keeping window 0 for config */
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

			if (pci->num_ob_windows <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_ob_windows > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				pci->io_cfg_atu_shared = true;
		}

		if (pci->num_ob_windows <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
				 pci->num_ob_windows);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);