linux/drivers/pci/controller/dwc/pcie-designware.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
                                  u8 cap)
{
        u8 cap_id, next_cap_ptr;
        u16 reg;

        if (!cap_ptr)
                return 0;

        reg = dw_pcie_readw_dbi(pci, cap_ptr);
        cap_id = (reg & 0x00ff);

        if (cap_id > PCI_CAP_ID_MAX)
                return 0;

        if (cap_id == cap)
                return cap_ptr;

        next_cap_ptr = (reg & 0xff00) >> 8;
        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
        u8 next_cap_ptr;
        u16 reg;

        reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
        next_cap_ptr = (reg & 0x00ff);

        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
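
/*
 * Illustrative use (assumed caller, not part of this file): code in this
 * driver looks a capability up once and then reaches its registers through
 * the DBI helpers, e.g.:
 *
 *      u8 exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 *      u32 lnkcap = dw_pcie_readl_dbi(pci, exp + PCI_EXP_LNKCAP);
 *
 * A return value of 0 means the capability is not present.
 */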

static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
                                            u8 cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (start)
                pos = start;

        header = dw_pcie_readl_dbi(pci, pos);
        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                header = dw_pcie_readl_dbi(pci, pos);
        }

        return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
        return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
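
/*
 * Illustrative use (assumed caller, not part of this file): extended
 * capabilities live above the first 256 bytes of config space, e.g.:
 *
 *      u16 aer = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_ERR);
 *      if (aer)
 *              val = dw_pcie_readl_dbi(pci, aer + PCI_ERR_UNCOR_STATUS);
 */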

int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size)) {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        if (size == 4) {
                *val = readl(addr);
        } else if (size == 2) {
                *val = readw(addr);
        } else if (size == 1) {
                *val = readb(addr);
        } else {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (size == 4)
                writel(val, addr);
        else if (size == 2)
                writew(val, addr);
        else if (size == 1)
                writeb(val, addr);
        else
                return PCIBIOS_BAD_REGISTER_NUMBER;

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);
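
/*
 * dw_pcie_read()/dw_pcie_write() are the raw, size-checked MMIO accessors.
 * They require the address to be naturally aligned to the access size and
 * only accept sizes of 1, 2 or 4 bytes; anything else is reported as
 * PCIBIOS_BAD_REGISTER_NUMBER so config accessors can propagate the error
 * to the PCI core.
 */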

u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
        int ret;
        u32 val;

        if (pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

        ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
        if (ret)
                dev_err(pci->dev, "Read DBI address failed\n");

        return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
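
/*
 * The sized helpers used throughout this file (dw_pcie_readl_dbi(),
 * dw_pcie_writel_dbi(), dw_pcie_readw_dbi(), ...) are thin inline wrappers
 * around dw_pcie_read_dbi()/dw_pcie_write_dbi() declared in
 * pcie-designware.h; they simply pass the access size (4, 2 or 1 bytes).
 */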

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops->write_dbi2) {
                pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI2 address failed\n");
}

static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
        int ret;
        u32 val;

        if (pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

        ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
        if (ret)
                dev_err(pci->dev, "Read ATU address failed\n");

        return val;
}

static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
        int ret;

        if (pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
                return;
        }

        ret = dw_pcie_write(pci->atu_base + reg, 4, val);
        if (ret)
                dev_err(pci->dev, "Write ATU address failed\n");
}

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
        u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

        return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
                                     u32 val)
{
        u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

        dw_pcie_writel_atu(pci, offset + reg, val);
}

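/*
 * In "unroll" mode the iATU registers are not accessed through the shared
 * viewport; every region has its own register block in the ATU address
 * space instead.  dw_pcie_iatu_detect_regions_unroll() below relies on each
 * region occupying a 512-byte slot, with the inbound registers in the upper
 * half of that slot (see the PCIE_GET_ATU_*_UNR_REG_OFFSET() macros in
 * pcie-designware.h).
 */
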
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
                                             int index, int type,
                                             u64 cpu_addr, u64 pci_addr,
                                             u64 size)
{
        u32 retries, val;
        u64 limit_addr = cpu_addr + size - 1;

        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
                                 lower_32_bits(cpu_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
                                 upper_32_bits(cpu_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
                                 lower_32_bits(limit_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
                                 upper_32_bits(limit_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
                                 lower_32_bits(pci_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
                                 upper_32_bits(pci_addr));
        val = type | PCIE_ATU_FUNC_NUM(func_no);
        val = upper_32_bits(size - 1) ?
                val | PCIE_ATU_INCREASE_REGION_SIZE : val;
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
                                 PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_ob_unroll(pci, index,
                                              PCIE_ATU_UNR_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
                                        int index, int type, u64 cpu_addr,
                                        u64 pci_addr, u64 size)
{
        u32 retries, val;

        if (pci->ops->cpu_addr_fixup)
                cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

        if (pci->iatu_unroll_enabled) {
                dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
                                                 cpu_addr, pci_addr, size);
                return;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
                           PCIE_ATU_REGION_OUTBOUND | index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
                           lower_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
                           upper_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
                           lower_32_bits(cpu_addr + size - 1));
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
                           lower_32_bits(pci_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
                           upper_32_bits(pci_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
                           PCIE_ATU_FUNC_NUM(func_no));
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
                if (val & PCIE_ATU_ENABLE)
                        return;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                               u64 cpu_addr, u64 pci_addr, u64 size)
{
        __dw_pcie_prog_outbound_atu(pci, 0, index, type,
                                    cpu_addr, pci_addr, size);
}
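
/*
 * Illustrative use (assumed caller, not part of this file): the host bridge
 * code programs one outbound window per resource type, for example a MEM
 * window that forwards CPU accesses to PCI bus addresses:
 *
 *      dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
 *                                PCIE_ATU_TYPE_MEM, cpu_addr,
 *                                pci_addr, size);
 *
 * where cpu_addr/pci_addr/size are placeholders for the window taken from
 * the bridge's MEM resource.
 */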

void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                                  int type, u64 cpu_addr, u64 pci_addr,
                                  u32 size)
{
        __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
                                    cpu_addr, pci_addr, size);
}

static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
        u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

        return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
                                     u32 val)
{
        u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

        dw_pcie_writel_atu(pci, offset + reg, val);
}

static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
                                           int index, int bar, u64 cpu_addr,
                                           enum dw_pcie_as_type as_type)
{
        int type;
        u32 retries, val;

        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
                                 lower_32_bits(cpu_addr));
        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
                                 upper_32_bits(cpu_addr));

        switch (as_type) {
        case DW_PCIE_AS_MEM:
                type = PCIE_ATU_TYPE_MEM;
                break;
        case DW_PCIE_AS_IO:
                type = PCIE_ATU_TYPE_IO;
                break;
        default:
                return -EINVAL;
        }

        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
                                 PCIE_ATU_FUNC_NUM(func_no));
        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
                                 PCIE_ATU_FUNC_NUM_MATCH_EN |
                                 PCIE_ATU_ENABLE |
                                 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_ib_unroll(pci, index,
                                              PCIE_ATU_UNR_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -EBUSY;
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
                             int bar, u64 cpu_addr,
                             enum dw_pcie_as_type as_type)
{
        int type;
        u32 retries, val;

        if (pci->iatu_unroll_enabled)
                return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
                                                       cpu_addr, as_type);

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
                           index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

        switch (as_type) {
        case DW_PCIE_AS_MEM:
                type = PCIE_ATU_TYPE_MEM;
                break;
        case DW_PCIE_AS_IO:
                type = PCIE_ATU_TYPE_IO;
                break;
        default:
                return -EINVAL;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
                           PCIE_ATU_FUNC_NUM(func_no));
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
                           PCIE_ATU_FUNC_NUM_MATCH_EN |
                           PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -EBUSY;
}
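
/*
 * Note that inbound windows are programmed in BAR-match mode
 * (PCIE_ATU_BAR_MODE_ENABLE with the BAR number shifted into the control
 * register) and also match on the function number
 * (PCIE_ATU_FUNC_NUM_MATCH_EN), so incoming TLPs that hit the given BAR of
 * the given function are translated to cpu_addr.
 */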

void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
                         enum dw_pcie_region_type type)
{
        int region;

        switch (type) {
        case DW_PCIE_REGION_INBOUND:
                region = PCIE_ATU_REGION_INBOUND;
                break;
        case DW_PCIE_REGION_OUTBOUND:
                region = PCIE_ATU_REGION_OUTBOUND;
                break;
        default:
                return;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
        int retries;

        /* Check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (dw_pcie_link_up(pci)) {
                        dev_info(pci->dev, "Link up\n");
                        return 0;
                }
                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        dev_info(pci->dev, "Phy link never came up\n");

        return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
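
/*
 * dw_pcie_wait_for_link() is a simple poll loop: it checks
 * dw_pcie_link_up() up to LINK_WAIT_MAX_RETRIES times, sleeping between
 * LINK_WAIT_USLEEP_MIN and LINK_WAIT_USLEEP_MAX microseconds between
 * attempts, and returns -ETIMEDOUT if the link never trains.
 */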

int dw_pcie_link_up(struct dw_pcie *pci)
{
        u32 val;

        if (pci->ops->link_up)
                return pci->ops->link_up(pci);

        val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
        return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
                (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
        val |= PORT_MLTI_UPCFG_SUPPORT;
        dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);

static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
        u32 cap, ctrl2, link_speed;
        u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

        cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
        ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
        ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

        switch (pcie_link_speed[link_gen]) {
        case PCIE_SPEED_2_5GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
                break;
        case PCIE_SPEED_5_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
                break;
        case PCIE_SPEED_8_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
                break;
        case PCIE_SPEED_16_0GT:
                link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
                break;
        default:
                /* Use hardware capability */
                link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
                ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
                break;
        }

        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

        cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
}

static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xffffffff)
                return 1;

        return 0;
}
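
/*
 * On cores where the iATU registers are "unrolled" into their own address
 * space the legacy viewport register is not implemented and reads back as
 * all ones, which is what dw_pcie_iatu_unroll_enabled() uses to detect the
 * register layout when the IP version is unknown.
 */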

static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
{
        int max_region, i, ob = 0, ib = 0;
        u32 val;

        max_region = min((int)pci->atu_size / 512, 256);

        for (i = 0; i < max_region; i++) {
                dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
                                         0x11110000);

                val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
                if (val == 0x11110000)
                        ob++;
                else
                        break;
        }

        for (i = 0; i < max_region; i++) {
                dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
                                         0x11110000);

                val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
                if (val == 0x11110000)
                        ib++;
                else
                        break;
        }

        pci->num_ib_windows = ib;
        pci->num_ob_windows = ob;
}

static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
        int max_region, i, ob = 0, ib = 0;
        u32 val;

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
        max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;

        for (i = 0; i < max_region; i++) {
                dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
                dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
                if (val == 0x11110000)
                        ob++;
                else
                        break;
        }

        for (i = 0; i < max_region; i++) {
                dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
                dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
                if (val == 0x11110000)
                        ib++;
                else
                        break;
        }

        pci->num_ib_windows = ib;
        pci->num_ob_windows = ob;
}
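
/*
 * Both detection helpers above use the same probe-and-check trick: write a
 * recognizable value (0x11110000) to the lower target register of each
 * successive region and count how many regions read the value back.  The
 * first region that does not implement the register ends the scan, and the
 * counts are stored in num_ob_windows/num_ib_windows for the rest of the
 * driver to honour.
 */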

void dw_pcie_setup(struct dw_pcie *pci)
{
        u32 val;
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;
        struct platform_device *pdev = to_platform_device(dev);

        if (pci->version >= 0x480A || (!pci->version &&
                                       dw_pcie_iatu_unroll_enabled(pci))) {
                pci->iatu_unroll_enabled = true;
                if (!pci->atu_base) {
                        struct resource *res =
                                platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
                        if (res)
                                pci->atu_size = resource_size(res);
                        pci->atu_base = devm_ioremap_resource(dev, res);
                        if (IS_ERR(pci->atu_base))
                                pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
                }

                if (!pci->atu_size)
                        /* Pick a minimal default, enough for 8 in and 8 out windows */
                        pci->atu_size = SZ_4K;

                dw_pcie_iatu_detect_regions_unroll(pci);
        } else {
                dw_pcie_iatu_detect_regions(pci);
        }

        dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
                "enabled" : "disabled");

        dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound\n",
                 pci->num_ob_windows, pci->num_ib_windows);

        if (pci->link_gen > 0)
                dw_pcie_link_set_max_speed(pci, pci->link_gen);

        /* Configure Gen1 N_FTS */
        if (pci->n_fts[0]) {
                val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
                val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
                val |= PORT_AFR_N_FTS(pci->n_fts[0]);
                val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
                dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
        }

        /* Configure Gen2+ N_FTS (n_fts[] holds only Gen1 and Gen2+ entries) */
        if (pci->n_fts[1]) {
                val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
                val &= ~PORT_LOGIC_N_FTS_MASK;
                val |= pci->n_fts[1];
                dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
        }

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_FAST_LINK_MODE;
        val |= PORT_LINK_DLL_LINK_EN;
        dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

        of_property_read_u32(np, "num-lanes", &pci->num_lanes);
        if (!pci->num_lanes) {
                dev_dbg(pci->dev, "Using h/w default number of lanes\n");
                return;
        }

        /* Set the number of lanes */
        val &= ~PORT_LINK_FAST_LINK_MODE;
        val &= ~PORT_LINK_MODE_MASK;
        switch (pci->num_lanes) {
        case 1:
                val |= PORT_LINK_MODE_1_LANES;
                break;
        case 2:
                val |= PORT_LINK_MODE_2_LANES;
                break;
        case 4:
                val |= PORT_LINK_MODE_4_LANES;
                break;
        case 8:
                val |= PORT_LINK_MODE_8_LANES;
                break;
        default:
                dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
                return;
        }
        dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

        /* Set link width speed control register */
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
        switch (pci->num_lanes) {
        case 1:
                val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
                break;
        case 2:
                val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
                break;
        case 4:
                val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
                break;
        case 8:
                val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
                break;
        }
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

        if (of_property_read_bool(np, "snps,enable-cdm-check")) {
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
                val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
                       PCIE_PL_CHK_REG_CHK_REG_START;
                dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
        }
}