linux/drivers/edac/armada_xp_edac.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Pengutronix, Jan Luebbe <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/edac.h>
#include <linux/of_platform.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-aurora-l2.h>

#include "edac_mc.h"
#include "edac_device.h"
#include "edac_module.h"

/************************ EDAC MC (DDR RAM) ********************************/

#define SDRAM_NUM_CS 4

#define SDRAM_CONFIG_REG        0x0
#define SDRAM_CONFIG_ECC_MASK         BIT(18)
#define SDRAM_CONFIG_REGISTERED_MASK  BIT(17)
#define SDRAM_CONFIG_BUS_WIDTH_MASK   BIT(15)

#define SDRAM_ADDR_CTRL_REG     0x10
#define SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs) (20+cs)
#define SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(cs)   (0x1 << SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(cs))
#define SDRAM_ADDR_CTRL_ADDR_SEL_MASK(cs)    BIT(16+cs)
#define SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs)  (cs*4+2)
#define SDRAM_ADDR_CTRL_SIZE_LOW_MASK(cs)    (0x3 << SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(cs))
#define SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs)    (cs*4)
#define SDRAM_ADDR_CTRL_STRUCT_MASK(cs)      (0x3 << SDRAM_ADDR_CTRL_STRUCT_OFFSET(cs))

#define SDRAM_ERR_DATA_H_REG    0x40
#define SDRAM_ERR_DATA_L_REG    0x44

#define SDRAM_ERR_RECV_ECC_REG  0x48
#define SDRAM_ERR_RECV_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_CALC_ECC_REG  0x4c
#define SDRAM_ERR_CALC_ECC_ROW_OFFSET 8
#define SDRAM_ERR_CALC_ECC_ROW_MASK   (0xffff << SDRAM_ERR_CALC_ECC_ROW_OFFSET)
#define SDRAM_ERR_CALC_ECC_VALUE_MASK 0xff

#define SDRAM_ERR_ADDR_REG      0x50
#define SDRAM_ERR_ADDR_BANK_OFFSET    23
#define SDRAM_ERR_ADDR_BANK_MASK      (0x7 << SDRAM_ERR_ADDR_BANK_OFFSET)
#define SDRAM_ERR_ADDR_COL_OFFSET     8
#define SDRAM_ERR_ADDR_COL_MASK       (0x7fff << SDRAM_ERR_ADDR_COL_OFFSET)
#define SDRAM_ERR_ADDR_CS_OFFSET      1
#define SDRAM_ERR_ADDR_CS_MASK        (0x3 << SDRAM_ERR_ADDR_CS_OFFSET)
#define SDRAM_ERR_ADDR_TYPE_MASK      BIT(0)

#define SDRAM_ERR_CTRL_REG      0x54
#define SDRAM_ERR_CTRL_THR_OFFSET     16
#define SDRAM_ERR_CTRL_THR_MASK       (0xff << SDRAM_ERR_CTRL_THR_OFFSET)
#define SDRAM_ERR_CTRL_PROP_MASK      BIT(9)

#define SDRAM_ERR_SBE_COUNT_REG 0x58
#define SDRAM_ERR_DBE_COUNT_REG 0x5c

#define SDRAM_ERR_CAUSE_ERR_REG 0xd0
#define SDRAM_ERR_CAUSE_MSG_REG 0xd8
#define SDRAM_ERR_CAUSE_DBE_MASK      BIT(1)
#define SDRAM_ERR_CAUSE_SBE_MASK      BIT(0)

#define SDRAM_RANK_CTRL_REG 0x1e0
#define SDRAM_RANK_CTRL_EXIST_MASK(cs) BIT(cs)

struct axp_mc_drvdata {
        void __iomem *base;
        /* width in bytes */
        unsigned int width;
        /* bank interleaving */
        bool cs_addr_sel[SDRAM_NUM_CS];

        char msg[128];
};

/* derived from "DRAM Address Multiplexing" in the ARMADA XP Functional Spec */
static uint32_t axp_mc_calc_address(struct axp_mc_drvdata *drvdata,
                                    uint8_t cs, uint8_t bank, uint16_t row,
                                    uint16_t col)
{
        if (drvdata->width == 8) {
                /* 64 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff8) << 16) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
                else
                        return (((row & 0xffff) << 16) |
                                ((bank & 0x7) << 13) |
                                ((col & 0x3ff) << 3));
        } else if (drvdata->width == 4) {
                /* 32 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xfff0) << 15) |
                                ((bank & 0x7) << 16) |
                                ((row & 0xf) << 12) |
                                ((col & 0x3ff) << 2));
                else
                        return (((row & 0xffff) << 15) |
                                ((bank & 0x7) << 12) |
                                ((col & 0x3ff) << 2));
        } else {
                /* 16 bit */
                if (drvdata->cs_addr_sel[cs])
                        /* bank interleaved */
                        return (((row & 0xffe0) << 14) |
                                ((bank & 0x7) << 16) |
                                ((row & 0x1f) << 11) |
                                ((col & 0x3ff) << 1));
                else
                        return (((row & 0xffff) << 14) |
                                ((bank & 0x7) << 11) |
                                ((col & 0x3ff) << 1));
        }
}

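/*
 * Polled check routine for the SDRAM controller: snapshot the error capture
 * registers, clear the cause and counter registers, and report the counted
 * single-bit (corrected) and double-bit (uncorrected) errors to the EDAC
 * core. Only the most recently captured error carries full address details;
 * any earlier ones are reported without them.
 */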
static void axp_mc_check(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t data_h, data_l, recv_ecc, calc_ecc, addr;
        uint32_t cnt_sbe, cnt_dbe, cause_err, cause_msg;
        uint32_t row_val, col_val, bank_val, addr_val;
        uint8_t syndrome_val, cs_val;
        char *msg = drvdata->msg;

        data_h    = readl(drvdata->base + SDRAM_ERR_DATA_H_REG);
        data_l    = readl(drvdata->base + SDRAM_ERR_DATA_L_REG);
        recv_ecc  = readl(drvdata->base + SDRAM_ERR_RECV_ECC_REG);
        calc_ecc  = readl(drvdata->base + SDRAM_ERR_CALC_ECC_REG);
        addr      = readl(drvdata->base + SDRAM_ERR_ADDR_REG);
        cnt_sbe   = readl(drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        cnt_dbe   = readl(drvdata->base + SDRAM_ERR_DBE_COUNT_REG);
        cause_err = readl(drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        cause_msg = readl(drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK),
               drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear error counter registers */
        if (cnt_sbe)
                writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        if (cnt_dbe)
                writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (!cnt_sbe && !cnt_dbe)
                return;

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                if (cnt_sbe)
                        cnt_sbe--;
                else
                        dev_warn(mci->pdev, "inconsistent SBE count detected\n");
        } else {
                if (cnt_dbe)
                        cnt_dbe--;
                else
                        dev_warn(mci->pdev, "inconsistent DBE count detected\n");
        }

        /* report earlier errors */
        if (cnt_sbe)
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     cnt_sbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");
        if (cnt_dbe)
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     cnt_dbe, /* error count */
                                     0, 0, 0, /* pfn, offset, syndrome */
                                     -1, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name,
                                     "details unavailable (multiple errors)");

        /* report details for most recent error */
        cs_val   = (addr & SDRAM_ERR_ADDR_CS_MASK) >> SDRAM_ERR_ADDR_CS_OFFSET;
        bank_val = (addr & SDRAM_ERR_ADDR_BANK_MASK) >> SDRAM_ERR_ADDR_BANK_OFFSET;
        row_val  = (calc_ecc & SDRAM_ERR_CALC_ECC_ROW_MASK) >> SDRAM_ERR_CALC_ECC_ROW_OFFSET;
        col_val  = (addr & SDRAM_ERR_ADDR_COL_MASK) >> SDRAM_ERR_ADDR_COL_OFFSET;
        syndrome_val = (recv_ecc ^ calc_ecc) & 0xff;
        addr_val = axp_mc_calc_address(drvdata, cs_val, bank_val, row_val,
                                       col_val);
        msg += sprintf(msg, "row=0x%04x ", row_val); /* 11 chars */
        msg += sprintf(msg, "bank=0x%x ", bank_val); /*  9 chars */
        msg += sprintf(msg, "col=0x%04x ", col_val); /* 11 chars */
        msg += sprintf(msg, "cs=%d", cs_val);        /*  4 chars */

        if (!(addr & SDRAM_ERR_ADDR_TYPE_MASK)) {
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        } else {
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                                     1, /* error count */
                                     addr_val >> PAGE_SHIFT,
                                     addr_val & ~PAGE_MASK,
                                     syndrome_val,
                                     cs_val, -1, -1, /* top, mid, low layer */
                                     mci->ctl_name, drvdata->msg);
        }
}

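/*
 * Derive the static controller configuration from the SDRAM registers: bus
 * width (64 vs. 32 bit), per chip-select bank interleaving, device structure
 * (x8/x16), rank size and plain vs. registered DDR3, and fill in the
 * corresponding dimm_info entries.
 */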
static void axp_mc_read_config(struct mem_ctl_info *mci)
{
        struct axp_mc_drvdata *drvdata = mci->pvt_info;
        uint32_t config, addr_ctrl, rank_ctrl;
        unsigned int i, cs_struct, cs_size;
        struct dimm_info *dimm;

        config = readl(drvdata->base + SDRAM_CONFIG_REG);
        if (config & SDRAM_CONFIG_BUS_WIDTH_MASK)
                /* 64 bit */
                drvdata->width = 8;
        else
                /* 32 bit */
                drvdata->width = 4;

        addr_ctrl = readl(drvdata->base + SDRAM_ADDR_CTRL_REG);
        rank_ctrl = readl(drvdata->base + SDRAM_RANK_CTRL_REG);
        for (i = 0; i < SDRAM_NUM_CS; i++) {
                dimm = mci->dimms[i];

                if (!(rank_ctrl & SDRAM_RANK_CTRL_EXIST_MASK(i)))
                        continue;

                drvdata->cs_addr_sel[i] =
                        !!(addr_ctrl & SDRAM_ADDR_CTRL_ADDR_SEL_MASK(i));

                cs_struct = (addr_ctrl & SDRAM_ADDR_CTRL_STRUCT_MASK(i)) >> SDRAM_ADDR_CTRL_STRUCT_OFFSET(i);
                cs_size   = ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_HIGH_MASK(i)) >> (SDRAM_ADDR_CTRL_SIZE_HIGH_OFFSET(i) - 2) |
                            ((addr_ctrl & SDRAM_ADDR_CTRL_SIZE_LOW_MASK(i)) >> SDRAM_ADDR_CTRL_SIZE_LOW_OFFSET(i)));

                switch (cs_size) {
                case 0: /* 2GBit */
                        dimm->nr_pages = 524288;
                        break;
                case 1: /* 256MBit */
                        dimm->nr_pages = 65536;
                        break;
                case 2: /* 512MBit */
                        dimm->nr_pages = 131072;
                        break;
                case 3: /* 1GBit */
                        dimm->nr_pages = 262144;
                        break;
                case 4: /* 4GBit */
                        dimm->nr_pages = 1048576;
                        break;
                case 5: /* 8GBit */
                        dimm->nr_pages = 2097152;
                        break;
                }
                dimm->grain = 8;
                dimm->dtype = cs_struct ? DEV_X16 : DEV_X8;
                dimm->mtype = (config & SDRAM_CONFIG_REGISTERED_MASK) ?
                        MEM_RDDR3 : MEM_DDR3;
                dimm->edac_mode = EDAC_SECDED;
        }
}

static const struct of_device_id axp_mc_of_match[] = {
        {.compatible = "marvell,armada-xp-sdram-controller",},
        {},
};
MODULE_DEVICE_TABLE(of, axp_mc_of_match);

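/*
 * Bind to the SDRAM controller: map its registers, bail out if ECC is not
 * enabled in hardware, allocate one mem_ctl_info with a single chip-select
 * layer, read the static configuration, program the single-bit error
 * threshold, clear any stale cause/counter state and register the controller
 * for polling.
 */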
static int axp_mc_probe(struct platform_device *pdev)
{
        struct axp_mc_drvdata *drvdata;
        struct edac_mc_layer layers[1];
        const struct of_device_id *id;
        struct mem_ctl_info *mci;
        struct resource *r;
        void __iomem *base;
        uint32_t config;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "Unable to get mem resource\n");
                return -ENODEV;
        }

        base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        config = readl(base + SDRAM_CONFIG_REG);
        if (!(config & SDRAM_CONFIG_ECC_MASK)) {
                dev_warn(&pdev->dev, "SDRAM ECC is not enabled\n");
                return -EINVAL;
        }

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = SDRAM_NUM_CS;
        layers[0].is_virt_csrow = true;

        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*drvdata));
        if (!mci)
                return -ENOMEM;

        drvdata = mci->pvt_info;
        drvdata->base = base;
        mci->pdev = &pdev->dev;
        platform_set_drvdata(pdev, mci);

        id = of_match_device(axp_mc_of_match, &pdev->dev);
        mci->edac_check = axp_mc_check;
        mci->mtype_cap = MEM_FLAG_DDR3;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = pdev->dev.driver->name;
        mci->ctl_name = id ? id->compatible : "unknown";
        mci->dev_name = dev_name(&pdev->dev);
        mci->scrub_mode = SCRUB_NONE;

        axp_mc_read_config(mci);

        /* These SoCs have a reduced width bus */
        if (of_machine_is_compatible("marvell,armada380") ||
            of_machine_is_compatible("marvell,armadaxp-98dx3236"))
                drvdata->width /= 2;

        /* configure SBE threshold */
        /* it seems that SBEs are not captured otherwise */
        writel(1 << SDRAM_ERR_CTRL_THR_OFFSET, drvdata->base + SDRAM_ERR_CTRL_REG);

        /* clear cause registers */
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_ERR_REG);
        writel(~(SDRAM_ERR_CAUSE_DBE_MASK | SDRAM_ERR_CAUSE_SBE_MASK), drvdata->base + SDRAM_ERR_CAUSE_MSG_REG);

        /* clear counter registers */
        writel(0, drvdata->base + SDRAM_ERR_SBE_COUNT_REG);
        writel(0, drvdata->base + SDRAM_ERR_DBE_COUNT_REG);

        if (edac_mc_add_mc(mci)) {
                edac_mc_free(mci);
                return -EINVAL;
        }
        edac_op_state = EDAC_OPSTATE_POLL;

        return 0;
}

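/* Tear down in reverse: unregister the controller, then free the mem_ctl_info. */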
static int axp_mc_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = platform_get_drvdata(pdev);

        edac_mc_del_mc(&pdev->dev);
        edac_mc_free(mci);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver axp_mc_driver = {
        .probe = axp_mc_probe,
        .remove = axp_mc_remove,
        .driver = {
                .name = "armada_xp_mc_edac",
                .of_match_table = of_match_ptr(axp_mc_of_match),
        },
};

/************************ EDAC Device (L2 Cache) ***************************/

struct aurora_l2_drvdata {
        void __iomem *base;

        char msg[128];

        /* error injection via debugfs */
        uint32_t inject_addr;
        uint32_t inject_mask;
        uint8_t inject_ctl;

        struct dentry *debugfs;
};

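/*
 * Error injection (CONFIG_EDAC_DEBUG only): probe exposes inject_addr,
 * inject_mask and inject_ctl in this device's EDAC debugfs directory
 * (typically /sys/kernel/debug/edac/<dev name>/), and aurora_l2_inject()
 * rewrites the Aurora injection registers from those values on every poll.
 * A rough usage sketch (values are purely illustrative), assuming debugfs is
 * mounted at the usual location:
 *
 *   echo 0x12340020 > inject_addr    # address bits, masked by CTL_ADDR_MASK
 *   echo 0x00000001 > inject_mask    # written to the INJECT_MASK register
 *   echo 1          > inject_ctl     # enable bit(s), masked by CTL_EN_MASK
 */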
#ifdef CONFIG_EDAC_DEBUG
static void aurora_l2_inject(struct aurora_l2_drvdata *drvdata)
{
        drvdata->inject_addr &= AURORA_ERR_INJECT_CTL_ADDR_MASK;
        drvdata->inject_ctl &= AURORA_ERR_INJECT_CTL_EN_MASK;
        writel(0, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
        writel(drvdata->inject_mask, drvdata->base + AURORA_ERR_INJECT_MASK_REG);
        writel(drvdata->inject_addr | drvdata->inject_ctl, drvdata->base + AURORA_ERR_INJECT_CTL_REG);
}
#endif

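/*
 * Polled check routine for the Aurora L2: read and clear the CE/UE counters,
 * then, if the attribute capture register holds a valid record, decode the
 * source, transaction type, error type, address, index and way into
 * drvdata->msg and report one CE or UE with those details. Any remaining
 * counted errors are reported without details.
 */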
static void aurora_l2_check(struct edac_device_ctl_info *dci)
{
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
        uint32_t cnt, src, txn, err, attr_cap, addr_cap, way_cap;
        unsigned int cnt_ce, cnt_ue;
        char *msg = drvdata->msg;
        size_t size = sizeof(drvdata->msg);
        size_t len = 0;

        cnt = readl(drvdata->base + AURORA_ERR_CNT_REG);
        attr_cap = readl(drvdata->base + AURORA_ERR_ATTR_CAP_REG);
        addr_cap = readl(drvdata->base + AURORA_ERR_ADDR_CAP_REG);
        way_cap = readl(drvdata->base + AURORA_ERR_WAY_CAP_REG);

        cnt_ce = (cnt & AURORA_ERR_CNT_CE_MASK) >> AURORA_ERR_CNT_CE_OFFSET;
        cnt_ue = (cnt & AURORA_ERR_CNT_UE_MASK) >> AURORA_ERR_CNT_UE_OFFSET;
        /* clear error counter registers */
        if (cnt_ce || cnt_ue)
                writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);

        if (!(attr_cap & AURORA_ERR_ATTR_CAP_VALID))
                goto clear_remaining;

        src = (attr_cap & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF;
        if (src <= 3)
                len += scnprintf(msg+len, size-len, "src=CPU%d ", src);
        else
                len += scnprintf(msg+len, size-len, "src=IO ");

        txn = (attr_cap & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF;
        switch (txn) {
        case 0:
                len += scnprintf(msg+len, size-len, "txn=Data-Read ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "txn=Insn-Read ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "txn=Clean-Flush ");
                break;
        case 3:
                len += scnprintf(msg+len, size-len, "txn=Eviction ");
                break;
        case 4:
                len += scnprintf(msg+len, size-len,
                                "txn=Read-Modify-Write ");
                break;
        }

        err = (attr_cap & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF;
        switch (err) {
        case 0:
                len += scnprintf(msg+len, size-len, "err=CorrECC ");
                break;
        case 1:
                len += scnprintf(msg+len, size-len, "err=UnCorrECC ");
                break;
        case 2:
                len += scnprintf(msg+len, size-len, "err=TagParity ");
                break;
        }

        len += scnprintf(msg+len, size-len, "addr=0x%x ", addr_cap & AURORA_ERR_ADDR_CAP_ADDR_MASK);
        len += scnprintf(msg+len, size-len, "index=0x%x ", (way_cap & AURORA_ERR_WAY_IDX_MSK) >> AURORA_ERR_WAY_IDX_OFF);
        len += scnprintf(msg+len, size-len, "way=0x%x", (way_cap & AURORA_ERR_WAY_CAP_WAY_MASK) >> AURORA_ERR_WAY_CAP_WAY_OFFSET);

        /* clear error capture registers */
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);
        if (err) {
                /* UnCorrECC or TagParity */
                if (cnt_ue)
                        cnt_ue--;
                edac_device_handle_ue(dci, 0, 0, drvdata->msg);
        } else {
                if (cnt_ce)
                        cnt_ce--;
                edac_device_handle_ce(dci, 0, 0, drvdata->msg);
        }

clear_remaining:
        /* report remaining errors */
        while (cnt_ue--)
                edac_device_handle_ue(dci, 0, 0, "details unavailable (multiple errors)");
        while (cnt_ce--)
                edac_device_handle_ce(dci, 0, 0, "details unavailable (multiple errors)");
}

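/*
 * Poll hook registered with the EDAC device core: run the error check and,
 * when CONFIG_EDAC_DEBUG is enabled, rewrite the injection registers from the
 * current debugfs values.
 */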
static void aurora_l2_poll(struct edac_device_ctl_info *dci)
{
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;
#endif

        aurora_l2_check(dci);
#ifdef CONFIG_EDAC_DEBUG
        aurora_l2_inject(drvdata);
#endif
}

static const struct of_device_id aurora_l2_of_match[] = {
        {.compatible = "marvell,aurora-system-cache",},
        {},
};
MODULE_DEVICE_TABLE(of, aurora_l2_of_match);

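/*
 * Bind to the Aurora system cache: map its registers, warn if tag parity or
 * data ECC is disabled in the auxiliary control register, allocate an EDAC
 * device (one "cpu" instance with a single "L2" block), clear stale
 * counter/capture state, register it for polling and, with
 * CONFIG_EDAC_DEBUG, create the error injection debugfs files.
 */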
static int aurora_l2_probe(struct platform_device *pdev)
{
        struct aurora_l2_drvdata *drvdata;
        struct edac_device_ctl_info *dci;
        const struct of_device_id *id;
        uint32_t l2x0_aux_ctrl;
        void __iomem *base;
        struct resource *r;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "Unable to get mem resource\n");
                return -ENODEV;
        }

        base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(base)) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                return PTR_ERR(base);
        }

        l2x0_aux_ctrl = readl(base + L2X0_AUX_CTRL);
        if (!(l2x0_aux_ctrl & AURORA_ACR_PARITY_EN))
                dev_warn(&pdev->dev, "tag parity is not enabled\n");
        if (!(l2x0_aux_ctrl & AURORA_ACR_ECC_EN))
                dev_warn(&pdev->dev, "data ECC is not enabled\n");

        dci = edac_device_alloc_ctl_info(sizeof(*drvdata),
                                         "cpu", 1, "L", 1, 2, NULL, 0, 0);
        if (!dci)
                return -ENOMEM;

        drvdata = dci->pvt_info;
        drvdata->base = base;
        dci->dev = &pdev->dev;
        platform_set_drvdata(pdev, dci);

        id = of_match_device(aurora_l2_of_match, &pdev->dev);
        dci->edac_check = aurora_l2_poll;
        dci->mod_name = pdev->dev.driver->name;
        dci->ctl_name = id ? id->compatible : "unknown";
        dci->dev_name = dev_name(&pdev->dev);

        /* clear registers */
        writel(AURORA_ERR_CNT_CLR, drvdata->base + AURORA_ERR_CNT_REG);
        writel(AURORA_ERR_ATTR_CAP_VALID, drvdata->base + AURORA_ERR_ATTR_CAP_REG);

        if (edac_device_add_device(dci)) {
                edac_device_free_ctl_info(dci);
                return -EINVAL;
        }

#ifdef CONFIG_EDAC_DEBUG
        drvdata->debugfs = edac_debugfs_create_dir(dev_name(&pdev->dev));
        if (drvdata->debugfs) {
                edac_debugfs_create_x32("inject_addr", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_addr);
                edac_debugfs_create_x32("inject_mask", 0644,
                                        drvdata->debugfs,
                                        &drvdata->inject_mask);
                edac_debugfs_create_x8("inject_ctl", 0644,
                                       drvdata->debugfs, &drvdata->inject_ctl);
        }
#endif

        return 0;
}

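/*
 * Tear down in reverse: remove the debugfs entries (if any), unregister the
 * EDAC device and free its control structure.
 */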
static int aurora_l2_remove(struct platform_device *pdev)
{
        struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
#ifdef CONFIG_EDAC_DEBUG
        struct aurora_l2_drvdata *drvdata = dci->pvt_info;

        edac_debugfs_remove_recursive(drvdata->debugfs);
#endif
        edac_device_del_device(&pdev->dev);
        edac_device_free_ctl_info(dci);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver aurora_l2_driver = {
        .probe = aurora_l2_probe,
        .remove = aurora_l2_remove,
        .driver = {
                .name = "aurora_l2_edac",
                .of_match_table = of_match_ptr(aurora_l2_of_match),
        },
};

/************************ Driver registration ******************************/

static struct platform_driver * const drivers[] = {
        &axp_mc_driver,
        &aurora_l2_driver,
};

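/*
 * Module init/exit: these controllers only support polling, so set the EDAC
 * operation state accordingly and register both platform drivers.
 */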
static int __init armada_xp_edac_init(void)
{
        int res;

        /* only polling is supported */
        edac_op_state = EDAC_OPSTATE_POLL;

        res = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (res)
                pr_warn("Armada XP EDAC drivers failed to register\n");

        return 0;
}
module_init(armada_xp_edac_init);

static void __exit armada_xp_edac_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(armada_xp_edac_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pengutronix");
MODULE_DESCRIPTION("EDAC Drivers for Marvell Armada XP SDRAM and L2 Cache Controller");