linux/drivers/edac/highbank_mc_edac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011-2012 Calxeda, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include "edac_module.h"

/* DDR Ctrlr Error Registers */

#define HB_DDR_ECC_ERR_BASE             0x128
#define MW_DDR_ECC_ERR_BASE             0x1b4

#define HB_DDR_ECC_OPT                  0x00
#define HB_DDR_ECC_U_ERR_ADDR           0x08
#define HB_DDR_ECC_U_ERR_STAT           0x0c
#define HB_DDR_ECC_U_ERR_DATAL          0x10
#define HB_DDR_ECC_U_ERR_DATAH          0x14
#define HB_DDR_ECC_C_ERR_ADDR           0x18
#define HB_DDR_ECC_C_ERR_STAT           0x1c
#define HB_DDR_ECC_C_ERR_DATAL          0x20
#define HB_DDR_ECC_C_ERR_DATAH          0x24

#define HB_DDR_ECC_OPT_MODE_MASK        0x3
#define HB_DDR_ECC_OPT_FWC              0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT        16
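/*
 * Layout of the ECC_OPT register as used by this driver (inferred from the
 * accesses below, not from controller documentation): bits [1:0] select the
 * ECC mode, bit 8 (FWC) forces a write-check cycle for error injection, and
 * the syndrome XOR pattern used for injection starts at bit 16.
 */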

/* DDR Ctrlr Interrupt Registers */

#define HB_DDR_ECC_INT_BASE             0x180
#define MW_DDR_ECC_INT_BASE             0x218

#define HB_DDR_ECC_INT_STATUS           0x00
#define HB_DDR_ECC_INT_ACK              0x04

#define HB_DDR_ECC_INT_STAT_CE          0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE   0x10
#define HB_DDR_ECC_INT_STAT_UE          0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE   0x40

struct hb_mc_drvdata {
        void __iomem *mc_err_base;
        void __iomem *mc_int_base;
};

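/*
 * Interrupt handler: read the latched interrupt status, report any
 * uncorrectable and correctable errors to the EDAC core (splitting the
 * error address into a page frame number and an offset within the page),
 * then acknowledge the status bits to clear the interrupt.
 */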
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
        struct mem_ctl_info *mci = dev_id;
        struct hb_mc_drvdata *drvdata = mci->pvt_info;
        u32 status, err_addr;

        /* Read the interrupt status register */
        status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

        if (status & HB_DDR_ECC_INT_STAT_UE) {
                err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
                edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
                                     err_addr >> PAGE_SHIFT,
                                     err_addr & ~PAGE_MASK, 0,
                                     0, 0, -1,
                                     mci->ctl_name, "");
        }
        if (status & HB_DDR_ECC_INT_STAT_CE) {
                u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);
                syndrome = (syndrome >> 8) & 0xff;
                err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
                edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                     err_addr >> PAGE_SHIFT,
                                     err_addr & ~PAGE_MASK, syndrome,
                                     0, 0, -1,
                                     mci->ctl_name, "");
        }

        /* clear the error, clears the interrupt */
        writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
        return IRQ_HANDLED;
}

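/*
 * Error injection helper. Writing a syndrome pattern at bit 16 together
 * with the FWC (force write check) bit appears to make the controller
 * corrupt the ECC check bits on a subsequent write, so a later read of
 * that location raises an ECC error for testing. This description is
 * inferred from the register usage here, not from documentation.
 */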
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
        struct hb_mc_drvdata *pdata = mci->pvt_info;
        u32 reg;

        reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
        reg &= HB_DDR_ECC_OPT_MODE_MASK;
        reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
        writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t highbank_mc_inject_ctrl(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct mem_ctl_info *mci = to_mci(dev);
        u8 synd;

        if (kstrtou8(buf, 16, &synd))
                return -EINVAL;

        highbank_mc_err_inject(mci, synd);

        return count;
}

static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);
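/*
 * inject_ctrl is a write-only sysfs attribute attached to the mem_ctl_info
 * device, so it appears under the EDAC sysfs tree once the controller is
 * registered. A minimal usage sketch (the "mc0" index is an assumption and
 * depends on registration order):
 *
 *   echo 0xa5 > /sys/devices/system/edac/mc/mc0/inject_ctrl
 *
 * The value is parsed as a hexadecimal syndrome byte and passed to
 * highbank_mc_err_inject().
 */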

static struct attribute *highbank_dev_attrs[] = {
        &dev_attr_inject_ctrl.attr,
        NULL
};

ATTRIBUTE_GROUPS(highbank_dev);

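/*
 * Per-SoC offsets of the ECC error and interrupt register blocks within the
 * memory controller's register space: one set for Highbank (HB_) and one for
 * the ECX-2000 (MW_, presumably "Midway", the ECX-2000's codename).
 */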
struct hb_mc_settings {
        int     err_offset;
        int     int_offset;
};

static struct hb_mc_settings hb_settings = {
        .err_offset = HB_DDR_ECC_ERR_BASE,
        .int_offset = HB_DDR_ECC_INT_BASE,
};

static struct hb_mc_settings mw_settings = {
        .err_offset = MW_DDR_ECC_ERR_BASE,
        .int_offset = MW_DDR_ECC_INT_BASE,
};

static const struct of_device_id hb_ddr_ctrl_of_match[] = {
        { .compatible = "calxeda,hb-ddr-ctrl",          .data = &hb_settings },
        { .compatible = "calxeda,ecx-2000-ddr-ctrl",    .data = &mw_settings },
        {},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
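/*
 * The driver binds against a device tree node along the lines of the
 * following sketch (register address, size and interrupt specifier are
 * illustrative, not taken from a real board file):
 *
 *   memory-controller@fff00000 {
 *           compatible = "calxeda,hb-ddr-ctrl";
 *           reg = <0xfff00000 0x1000>;
 *           interrupts = <0 91 4>;
 *   };
 */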

static int highbank_mc_probe(struct platform_device *pdev)
{
        const struct of_device_id *id;
        const struct hb_mc_settings *settings;
        struct edac_mc_layer layers[2];
        struct mem_ctl_info *mci;
        struct hb_mc_drvdata *drvdata;
        struct dimm_info *dimm;
        struct resource *r;
        void __iomem *base;
        u32 control;
        int irq;
        int res = 0;

        id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
        if (!id)
                return -ENODEV;

        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = 1;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = 1;
        layers[1].is_virt_csrow = false;
        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
                            sizeof(struct hb_mc_drvdata));
        if (!mci)
                return -ENOMEM;

        mci->pdev = &pdev->dev;
        drvdata = mci->pvt_info;
        platform_set_drvdata(pdev, mci);

        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
                return -ENOMEM;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "Unable to get mem resource\n");
                res = -ENODEV;
                goto err;
        }

        if (!devm_request_mem_region(&pdev->dev, r->start,
                                     resource_size(r), dev_name(&pdev->dev))) {
                dev_err(&pdev->dev, "Error while requesting mem region\n");
                res = -EBUSY;
                goto err;
        }

        base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (!base) {
                dev_err(&pdev->dev, "Unable to map regs\n");
                res = -ENOMEM;
                goto err;
        }

        settings = id->data;
        drvdata->mc_err_base = base + settings->err_offset;
        drvdata->mc_int_base = base + settings->int_offset;

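        /*
         * Bits [1:0] of ECC_OPT hold the ECC mode; values 0 and 2 appear to
         * mean ECC is either absent or disabled (matching the error message
         * below), so bail out rather than registering a controller that
         * cannot report errors.
         */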
        control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
        if (!control || (control == 0x2)) {
                dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
                res = -ENODEV;
                goto err;
        }

        mci->mtype_cap = MEM_FLAG_DDR3;
        mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
        mci->edac_cap = EDAC_FLAG_SECDED;
        mci->mod_name = pdev->dev.driver->name;
        mci->ctl_name = id->compatible;
        mci->dev_name = dev_name(&pdev->dev);
        mci->scrub_mode = SCRUB_SW_SRC;

        /* Only a single 4GB DIMM is supported */
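        /*
         * (~0UL >> PAGE_SHIFT) + 1 covers the whole 32-bit physical address
         * space in pages (4 GiB with 4 KiB pages on a 32-bit kernel), i.e.
         * the entire range the single DIMM can occupy.
         */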
        dimm = *mci->dimms;
        dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
        dimm->grain = 8;
        dimm->dtype = DEV_X8;
        dimm->mtype = MEM_DDR3;
        dimm->edac_mode = EDAC_SECDED;

        res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
        if (res < 0)
                goto err;

        irq = platform_get_irq(pdev, 0);
        res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
                               0, dev_name(&pdev->dev), mci);
        if (res < 0) {
                dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
                goto err2;
        }

        devres_close_group(&pdev->dev, NULL);
        return 0;
err2:
        edac_mc_del_mc(&pdev->dev);
err:
        devres_release_group(&pdev->dev, NULL);
        edac_mc_free(mci);
        return res;
}

static int highbank_mc_remove(struct platform_device *pdev)
{
        struct mem_ctl_info *mci = platform_get_drvdata(pdev);

        edac_mc_del_mc(&pdev->dev);
        edac_mc_free(mci);
        return 0;
}

static struct platform_driver highbank_mc_edac_driver = {
        .probe = highbank_mc_probe,
        .remove = highbank_mc_remove,
        .driver = {
                .name = "hb_mc_edac",
                .of_match_table = hb_ddr_ctrl_of_match,
        },
};

module_platform_driver(highbank_mc_edac_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");