linux/drivers/soc/fsl/qbman/qman_ccsr.c
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);

/* Register offsets */
#define REG_QCSP_LIO_CFG(n)     (0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)      (0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)      (0x000c + ((n) * 0x10))
#define REG_DD_CFG              0x0200
#define REG_DCP_CFG(n)          (0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)       (0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)      (0x030c + ((n) * 0x10))
#define REG_PFDR_FPC            0x0400
#define REG_PFDR_FP_HEAD        0x0404
#define REG_PFDR_FP_TAIL        0x0408
#define REG_PFDR_FP_LWIT        0x0410
#define REG_PFDR_CFG            0x0414
#define REG_SFDR_CFG            0x0500
#define REG_SFDR_IN_USE         0x0504
#define REG_WQ_CS_CFG(n)        (0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID     0x0630
#define REG_WQ_SC_DD_CFG(n)     (0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)     (0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)    (0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)    (0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)    (0x6c0 + ((n) * 0x40)) /* n=2,3 */
#define REG_CM_CFG              0x0800
#define REG_ECSR                0x0a00
#define REG_ECIR                0x0a04
#define REG_EADR                0x0a08
#define REG_ECIR2               0x0a0c
#define REG_EDATA(n)            (0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)             (0x0a80 + ((n) * 0x04))
#define REG_MCR                 0x0b00
#define REG_MCP(n)              (0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG            0x0be0
#define REG_HID_CFG             0x0bf0
#define REG_IDLE_STAT           0x0bf4
#define REG_IP_REV_1            0x0bf8
#define REG_IP_REV_2            0x0bfc
#define REG_FQD_BARE            0x0c00
#define REG_PFDR_BARE           0x0c20
#define REG_offset_BAR          0x0004  /* relative to REG_[FQD|PFDR]_BARE */
#define REG_offset_AR           0x0010  /* relative to REG_[FQD|PFDR]_BARE */
#define REG_QCSP_BARE           0x0c80
#define REG_QCSP_BAR            0x0c84
#define REG_CI_SCHED_CFG        0x0d00
#define REG_SRCIDR              0x0d04
#define REG_LIODNR              0x0d08
#define REG_CI_RLM_AVG          0x0d14
#define REG_ERR_ISR             0x0e00
#define REG_ERR_IER             0x0e04
#define REG_REV3_QCSP_LIO_CFG(n)        (0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))

/* Assists for QMAN_MCR */
#define MCR_INIT_PFDR           0x01000000
#define MCR_get_rslt(v)         (u8)((v) >> 24)
#define MCR_rslt_idle(r)        (!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)          ((r) == 0xf0)
#define MCR_rslt_eaccess(r)     ((r) == 0xf8)
#define MCR_rslt_inval(r)       ((r) == 0xff)

/*
 * Corenet initiator settings. Stash request queues are 4-deep to match the
 * cores' ability to snarf. Stash priority is 3, other priorities are 2.
 */
#define QM_CI_SCHED_CFG_SRCCIV          4
#define QM_CI_SCHED_CFG_SRQ_W           3
#define QM_CI_SCHED_CFG_RW_W            2
#define QM_CI_SCHED_CFG_BMAN_W          2
/* write SRCCIV enable */
#define QM_CI_SCHED_CFG_SRCCIV_EN       BIT(31)

/* Follows WQ_CS_CFG0-5 */
enum qm_wq_class {
        qm_wq_portal = 0,
        qm_wq_pool = 1,
        qm_wq_fman0 = 2,
        qm_wq_fman1 = 3,
        qm_wq_caam = 4,
        qm_wq_pme = 5,
        qm_wq_first = qm_wq_portal,
        qm_wq_last = qm_wq_pme
};

/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
enum qm_memory {
        qm_memory_fqd,
        qm_memory_pfdr
};

/* Used by all error interrupt registers except 'inhibit' */
#define QM_EIRQ_CIDE    0x20000000      /* Corenet Initiator Data Error */
#define QM_EIRQ_CTDE    0x10000000      /* Corenet Target Data Error */
#define QM_EIRQ_CITT    0x08000000      /* Corenet Invalid Target Transaction */
#define QM_EIRQ_PLWI    0x04000000      /* PFDR Low Watermark */
#define QM_EIRQ_MBEI    0x02000000      /* Multi-bit ECC Error */
#define QM_EIRQ_SBEI    0x01000000      /* Single-bit ECC Error */
#define QM_EIRQ_PEBI    0x00800000      /* PFDR Enqueues Blocked Interrupt */
#define QM_EIRQ_IFSI    0x00020000      /* Invalid FQ Flow Control State */
#define QM_EIRQ_ICVI    0x00010000      /* Invalid Command Verb */
#define QM_EIRQ_IDDI    0x00000800      /* Invalid Dequeue (Direct-connect) */
#define QM_EIRQ_IDFI    0x00000400      /* Invalid Dequeue FQ */
#define QM_EIRQ_IDSI    0x00000200      /* Invalid Dequeue Source */
#define QM_EIRQ_IDQI    0x00000100      /* Invalid Dequeue Queue */
#define QM_EIRQ_IECE    0x00000010      /* Invalid Enqueue Configuration */
#define QM_EIRQ_IEOI    0x00000008      /* Invalid Enqueue Overflow */
#define QM_EIRQ_IESI    0x00000004      /* Invalid Enqueue State */
#define QM_EIRQ_IECI    0x00000002      /* Invalid Enqueue Channel */
#define QM_EIRQ_IEQI    0x00000001      /* Invalid Enqueue Queue */

/* QMAN_ECIR valid error bit */
#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
                         QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
                         QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR   (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
                         QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
                         QM_EIRQ_IFSI)

struct qm_ecir {
        u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
};

static bool qm_ecir_is_dcp(const struct qm_ecir *p)
{
        return p->info & BIT(29);
}

static int qm_ecir_get_pnum(const struct qm_ecir *p)
{
        return (p->info >> 24) & 0x1f;
}

static int qm_ecir_get_fqid(const struct qm_ecir *p)
{
        return p->info & (BIT(24) - 1);
}

struct qm_ecir2 {
        u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
};

static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
{
        return p->info & BIT(31);
}

static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
{
        return p->info & (BIT(10) - 1);
}

struct qm_eadr {
        u32 info; /* memid[24-27], eadr[0-11] */
                  /* v3: memid[24-28], eadr[0-15] */
};

static int qm_eadr_get_memid(const struct qm_eadr *p)
{
        return (p->info >> 24) & 0xf;
}

static int qm_eadr_get_eadr(const struct qm_eadr *p)
{
        return p->info & (BIT(12) - 1);
}

static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
{
        return (p->info >> 24) & 0x1f;
}

static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
{
        return p->info & (BIT(16) - 1);
}

struct qman_hwerr_txt {
        u32 mask;
        const char *txt;
};

static const struct qman_hwerr_txt qman_hwerr_txts[] = {
        { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
        { QM_EIRQ_CTDE, "Corenet Target Data Error" },
        { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
        { QM_EIRQ_PLWI, "PFDR Low Watermark" },
        { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
        { QM_EIRQ_SBEI, "Single-bit ECC Error" },
        { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
        { QM_EIRQ_ICVI, "Invalid Command Verb" },
        { QM_EIRQ_IFSI, "Invalid Flow Control State" },
        { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
        { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
        { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
        { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
        { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
        { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
        { QM_EIRQ_IESI, "Invalid Enqueue State" },
        { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
        { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};

struct qman_error_info_mdata {
        u16 addr_mask;
        u16 bits;
        const char *txt;
};

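/* ECC error-capture metadata, indexed by the memory id (memid) from EADR */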
static const struct qman_error_info_mdata error_mdata[] = {
        { 0x01FF, 24, "FQD cache tag memory 0" },
        { 0x01FF, 24, "FQD cache tag memory 1" },
        { 0x01FF, 24, "FQD cache tag memory 2" },
        { 0x01FF, 24, "FQD cache tag memory 3" },
        { 0x0FFF, 512, "FQD cache memory" },
        { 0x07FF, 128, "SFDR memory" },
        { 0x01FF, 72, "WQ context memory" },
        { 0x00FF, 240, "CGR memory" },
        { 0x00FF, 302, "Internal Order Restoration List memory" },
        { 0x01FF, 256, "SW portal ring memory" },
};

#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/*
 * TODO: unimplemented registers
 *
 * Keeping a list here of QMan registers I have not yet covered;
 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
 */

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
/* An SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
static int __qman_probed;

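/*
 * CCSR accessors. Register offsets are byte offsets while qm_ccsr_start is a
 * u32 pointer, hence the offset/4 arithmetic; QMan's CCSR registers are
 * big-endian.
 */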
static inline u32 qm_ccsr_in(u32 offset)
{
        return ioread32be(qm_ccsr_start + offset/4);
}

static inline void qm_ccsr_out(u32 offset, u32 val)
{
        iowrite32be(val, qm_ccsr_start + offset/4);
}

u32 qm_get_pools_sdqcr(void)
{
        return qm_pools_sdqcr;
}

enum qm_dc_portal {
        qm_dc_portal_fman0 = 0,
        qm_dc_portal_fman1 = 1
};

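/*
 * Configure a direct-connect portal. qman_init_ccsr() below sets 'ed' on
 * both FMan portals because we are not prepared to accept ERNs for hardware
 * enqueues.
 */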
static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
        DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
                    portal == qm_dc_portal_fman1);
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                qm_ccsr_out(REG_DCP_CFG(portal),
                            (ed ? 0x1000 : 0) | (sernd & 0x3ff));
        else
                qm_ccsr_out(REG_DCP_CFG(portal),
                            (ed ? 0x100 : 0) | (sernd & 0x1f));
}

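/*
 * Program one work-queue class-scheduler configuration register: the class
 * scheduler elevation threshold plus the scheduling weights for work queues
 * 2 through 7.
 */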
static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
                                 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
                                 u8 csw5, u8 csw6, u8 csw7)
{
        qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
                    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
                    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
                    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
}

static void qm_set_hid(void)
{
        qm_ccsr_out(REG_HID_CFG, 0);
}

static void qm_set_corenet_initiator(void)
{
        qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
                    (QM_CI_SCHED_CFG_SRCCIV << 24) |
                    (QM_CI_SCHED_CFG_SRQ_W << 8) |
                    (QM_CI_SCHED_CFG_RW_W << 4) |
                    QM_CI_SCHED_CFG_BMAN_W);
}

static void qm_get_version(u16 *id, u8 *major, u8 *minor)
{
        u32 v = qm_ccsr_in(REG_IP_REV_1);
        *id = (v >> 16);
        *major = (v >> 8) & 0xff;
        *minor = v & 0xff;
}

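/*
 * Program the base address and size of one of QMan's private memories. The
 * base is split across BARE/BAR as its upper/lower 32 bits; the AR register
 * encodes the size as a power-of-2 exponent (ilog2(size) - 1), with bit 31
 * enabling the region.
 */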
#define PFDR_AR_EN              BIT(31)
static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
        u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
        u32 exp = ilog2(size);

        /* choke if size isn't within range */
        DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
                    is_power_of_2(size));
        /* choke if 'ba' is not aligned to 'size' */
        DPAA_ASSERT(!(ba & (size - 1)));
        qm_ccsr_out(offset, upper_32_bits(ba));
        qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
        qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
}

static void qm_set_pfdr_threshold(u32 th, u8 k)
{
        qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
        qm_ccsr_out(REG_PFDR_CFG, k);
}

static void qm_set_sfdr_threshold(u16 th)
{
        qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}

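/*
 * Seed the PFDR free pool through QMan's management command interface: write
 * the command parameters to the MCP registers, then the INIT_PFDR verb to
 * MCR, and poll MCR until the result code reports completion.
 */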
static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
        u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

        DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
        /* Make sure the command interface is 'idle' */
        if (!MCR_rslt_idle(rslt)) {
                dev_crit(dev, "QMAN_MCR isn't idle");
                WARN_ON(1);
        }

        /* Write the MCR command params then the verb */
        qm_ccsr_out(REG_MCP(0), pfdr_start);
        /*
         * TODO: remove this - it's a workaround for a model bug that is
         * corrected in more recent versions. We use the workaround until
         * everyone has upgraded.
         */
        qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
        dma_wmb();
        qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
        /* Poll for the result */
        do {
                rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
        } while (!MCR_rslt_idle(rslt));
        if (MCR_rslt_ok(rslt))
                return 0;
        if (MCR_rslt_eaccess(rslt))
                return -EACCES;
        if (MCR_rslt_inval(rslt))
                return -EINVAL;
        dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
        return -ENODEV;
}

/*
 * QMan needs two global memory areas initialized at boot time:
 *  1) FQD: Frame Queue Descriptors used to manage frame queues
 *  2) PFDR: Packed Frame Descriptor Records used to store frames
 * Both areas are reserved using the device tree reserved memory framework,
 * and the addresses and sizes are initialized when the QMan device is probed.
 */
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

#ifdef CONFIG_PPC
/*
 * Support for PPC Device Tree backward compatibility when the compatible
 * strings are set to fsl-qman-fqd and fsl-qman-pfdr
 */
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
        /* map as cacheable, non-guarded */
        void __iomem *tmpp = ioremap_cache(addr, sz);

        if (!tmpp)
                return -ENOMEM;

        memset_io(tmpp, 0, sz);
        flush_dcache_range((unsigned long)tmpp,
                           (unsigned long)tmpp + sz);
        iounmap(tmpp);

        return 0;
}

static int qman_fqd(struct reserved_mem *rmem)
{
        fqd_a = rmem->base;
        fqd_sz = rmem->size;

        WARN_ON(!(fqd_a && fqd_sz));
        return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);

static int qman_pfdr(struct reserved_mem *rmem)
{
        pfdr_a = rmem->base;
        pfdr_sz = rmem->size;

        WARN_ON(!(pfdr_a && pfdr_sz));

        return 0;
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);

#endif

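/* Each frame queue descriptor occupies 64 bytes of the FQD region */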
static unsigned int qm_get_fqid_maxcnt(void)
{
        return fqd_sz / 64;
}

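/*
 * Dump the EDATA error-capture words. Only the words that hold the reported
 * number of bits are printed, with the most significant word masked down to
 * the residual bits.
 */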
static void log_edata_bits(struct device *dev, u32 bit_count)
{
        u32 i, j, mask = 0xffffffff;

        dev_warn(dev, "ErrInt, EDATA:\n");
        i = bit_count / 32;
        if (bit_count % 32) {
                i++;
                mask = ~(mask << bit_count % 32);
        }
        j = 16 - i;
        dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
        j++;
        for (; j < 16; j++)
                dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}

static void log_additional_error_info(struct device *dev, u32 isr_val,
                                      u32 ecsr_val)
{
        struct qm_ecir ecir_val;
        struct qm_eadr eadr_val;
        int memid;

        ecir_val.info = qm_ccsr_in(REG_ECIR);
        /* Is the portal info valid? */
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
                struct qm_ecir2 ecir2_val;

                ecir2_val.info = qm_ccsr_in(REG_ECIR2);
                if (ecsr_val & PORTAL_ECSR_ERR) {
                        dev_warn(dev, "ErrInt: %s id %d\n",
                                 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
                                 qm_ecir2_get_pnum(&ecir2_val));
                }
                if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
                        dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
                                 qm_ecir_get_fqid(&ecir_val));

                if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
                        eadr_val.info = qm_ccsr_in(REG_EADR);
                        memid = qm_eadr_v3_get_memid(&eadr_val);
                        dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
                                 error_mdata[memid].txt,
                                 error_mdata[memid].addr_mask
                                        & qm_eadr_v3_get_eadr(&eadr_val));
                        log_edata_bits(dev, error_mdata[memid].bits);
                }
        } else {
                if (ecsr_val & PORTAL_ECSR_ERR) {
                        dev_warn(dev, "ErrInt: %s id %d\n",
                                 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
                                 qm_ecir_get_pnum(&ecir_val));
                }
                if (ecsr_val & FQID_ECSR_ERR)
                        dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
                                 qm_ecir_get_fqid(&ecir_val));

                if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
                        eadr_val.info = qm_ccsr_in(REG_EADR);
                        memid = qm_eadr_get_memid(&eadr_val);
                        dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
                                 error_mdata[memid].txt,
                                 error_mdata[memid].addr_mask
                                        & qm_eadr_get_eadr(&eadr_val));
                        log_edata_bits(dev, error_mdata[memid].bits);
                }
        }
}

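/*
 * Global error interrupt handler: decode ERR_ISR against the enabled
 * sources, log any captured error details, and disable reporting of the
 * "benign" PFDR low-watermark and enqueues-blocked conditions after their
 * first occurrence so they cannot cause an interrupt storm.
 */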
static irqreturn_t qman_isr(int irq, void *ptr)
{
        u32 isr_val, ier_val, ecsr_val, isr_mask, i;
        struct device *dev = ptr;

        ier_val = qm_ccsr_in(REG_ERR_IER);
        isr_val = qm_ccsr_in(REG_ERR_ISR);
        ecsr_val = qm_ccsr_in(REG_ECSR);
        isr_mask = isr_val & ier_val;

        if (!isr_mask)
                return IRQ_NONE;

        for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
                if (qman_hwerr_txts[i].mask & isr_mask) {
                        dev_err_ratelimited(dev, "ErrInt: %s\n",
                                            qman_hwerr_txts[i].txt);
                        if (qman_hwerr_txts[i].mask & ecsr_val) {
                                log_additional_error_info(dev, isr_mask,
                                                          ecsr_val);
                                /* Re-arm error capture registers */
                                qm_ccsr_out(REG_ECSR, ecsr_val);
                        }
                        if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
                                dev_dbg(dev, "Disabling error 0x%x\n",
                                        qman_hwerr_txts[i].mask);
                                ier_val &= ~qman_hwerr_txts[i].mask;
                                qm_ccsr_out(REG_ERR_IER, ier_val);
                        }
                }
        }
        qm_ccsr_out(REG_ERR_ISR, isr_val);

        return IRQ_HANDLED;
}

static int qman_init_ccsr(struct device *dev)
{
        int i, err;

        /* FQD memory */
        qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
        /* PFDR memory */
        qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
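        /* seed the PFDR free pool: pfdr_sz/64 64-byte PFDRs, minus the first 8 */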
        err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
        if (err)
                return err;
        /* thresholds */
        qm_set_pfdr_threshold(512, 64);
        qm_set_sfdr_threshold(128);
        /* clear stale PEBI bit from interrupt status register */
        qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
        /* corenet initiator settings */
        qm_set_corenet_initiator();
        /* HID settings */
        qm_set_hid();
        /* Set scheduling weights to defaults */
        for (i = qm_wq_first; i <= qm_wq_last; i++)
                qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
        /* We are not prepared to accept ERNs for hardware enqueues */
        qm_set_dc(qm_dc_portal_fman0, 1, 0);
        qm_set_dc(qm_dc_portal_fman1, 1, 0);
        return 0;
}

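/*
 * The first portal probed records the LIODN offset from its QCSP_LIO_CFG
 * register; subsequent calls rewrite that field so every software portal
 * shares the same offset.
 */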
#define LIO_CFG_LIODN_MASK 0x0fff0000
void __qman_liodn_fixup(u16 channel)
{
        static int done;
        static u32 liodn_offset;
        u32 before, after;
        int idx = channel - QM_CHANNEL_SWPORTAL0;

        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
        else
                before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
        if (!done) {
                liodn_offset = before & LIO_CFG_LIODN_MASK;
                done = 1;
                return;
        }
        after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
        else
                qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}

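/*
 * Program the stash destination (SDEST) for a software portal, i.e. which
 * core's cache the portal's stashing transactions target.
 */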
#define IO_CFG_SDEST_MASK 0x00ff0000
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
        int idx = channel - QM_CHANNEL_SWPORTAL0;
        u32 before, after;

        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
                before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
                /* Each pair of vCPUs shares the same SRQ (SDEST) */
                cpu_idx /= 2;
                after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
                qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
        } else {
                before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
                after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
                qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
        }
}

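/*
 * Seed the FQID, pool-channel and CGRID allocators with the ranges this
 * QMan revision supports.
 */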
static int qman_resource_init(struct device *dev)
{
        int pool_chan_num, cgrid_num;
        int ret, i;

        switch (qman_ip_rev >> 8) {
        case 1:
                pool_chan_num = 15;
                cgrid_num = 256;
                break;
        case 2:
                pool_chan_num = 3;
                cgrid_num = 64;
                break;
        case 3:
                pool_chan_num = 15;
                cgrid_num = 256;
                break;
        default:
                return -ENODEV;
        }

        ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
                           pool_chan_num, -1);
        if (ret) {
                dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
                return ret;
        }

        ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
        if (ret) {
                dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
                return ret;
        }

        /* parse pool channels into the SDQCR mask */
        for (i = 0; i < pool_chan_num; i++)
                qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

        ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
                           qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
        if (ret) {
                dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
                return ret;
        }

        return 0;
}

int qman_is_probed(void)
{
        return __qman_probed;
}
EXPORT_SYMBOL_GPL(qman_is_probed);

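/*
 * Probe: map the CCSR region, identify the IP revision, set up the FQD and
 * PFDR private memories, program CCSR, hook the error interrupt, then seed
 * the FQID/pool-channel/CGRID allocators.
 */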
static int fsl_qman_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        int ret, err_irq;
        u16 id;
        u8 major, minor;

        __qman_probed = -1;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
                        node);
                return -ENXIO;
        }
        qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
        if (!qm_ccsr_start)
                return -ENXIO;

        qm_get_version(&id, &major, &minor);
        if (major == 1 && minor == 0) {
                dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
                return -ENODEV;
        } else if (major == 1 && minor == 1)
                qman_ip_rev = QMAN_REV11;
        else if (major == 1 && minor == 2)
                qman_ip_rev = QMAN_REV12;
        else if (major == 2 && minor == 0)
                qman_ip_rev = QMAN_REV20;
        else if (major == 3 && minor == 0)
                qman_ip_rev = QMAN_REV30;
        else if (major == 3 && minor == 1)
                qman_ip_rev = QMAN_REV31;
        else if (major == 3 && minor == 2)
                qman_ip_rev = QMAN_REV32;
        else {
                dev_err(dev, "Unknown QMan version\n");
                return -ENODEV;
        }

        if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
                qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
                qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
        }

        if (fqd_a) {
#ifdef CONFIG_PPC
                /*
                 * For PPC backward DT compatibility
                 * FQD memory MUST be zero'd by software
                 */
                zero_priv_mem(fqd_a, fqd_sz);
#else
                WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
        } else {
                /*
                 * The order of the memory regions is assumed to be FQD
                 * followed by PFDR; to ensure allocations come from the
                 * correct regions, the driver initializes and then allocates
                 * each piece in order.
                 */
                ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
                if (ret) {
                        dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
                                ret);
                        return -ENODEV;
                }
        }
        dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);

        if (!pfdr_a) {
                /* Setup PFDR memory */
                ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
                if (ret) {
                        dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
                                ret);
                        return -ENODEV;
                }
        }
        dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);

        ret = qman_init_ccsr(dev);
        if (ret) {
                dev_err(dev, "CCSR setup failed\n");
                return ret;
        }

        err_irq = platform_get_irq(pdev, 0);
        if (err_irq <= 0) {
                dev_info(dev, "Can't get %pOF property 'interrupts'\n",
                         node);
                return -ENODEV;
        }
        ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
                               dev);
        if (ret) {
                dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
                        ret, node);
                return ret;
        }

        /*
         * Write-to-clear any stale bits (e.g. starvation being asserted prior
         * to resource allocation during driver init).
         */
        qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
        /* Enable Error Interrupts */
        qm_ccsr_out(REG_ERR_IER, 0xffffffff);

        qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
        if (IS_ERR(qm_fqalloc)) {
                ret = PTR_ERR(qm_fqalloc);
                dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
                return ret;
        }

        qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
        if (IS_ERR(qm_qpalloc)) {
                ret = PTR_ERR(qm_qpalloc);
                dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
                return ret;
        }

        qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
        if (IS_ERR(qm_cgralloc)) {
                ret = PTR_ERR(qm_cgralloc);
                dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
                return ret;
        }

        ret = qman_resource_init(dev);
        if (ret)
                return ret;

        ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
        if (ret)
                return ret;

        ret = qman_wq_alloc();
        if (ret)
                return ret;

        __qman_probed = 1;

        return 0;
}

static const struct of_device_id fsl_qman_ids[] = {
        {
                .compatible = "fsl,qman",
        },
        {}
};

static struct platform_driver fsl_qman_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = fsl_qman_ids,
                .suppress_bind_attrs = true,
        },
        .probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);