linux/drivers/edac/e7xxx_edac.c
/*
 * Intel e7xxx Memory Controller kernel module
 * (C) 2003 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * See "enum e7xxx_chips" below for supported chipsets
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *      http://www.anime.net/~goemon/linux-ecc/
 *
 * Datasheet:
 *      http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
 *
 * Contributors:
 *      Eric Biederman (Linux Networx)
 *      Tom Zimmerman (Linux Networx)
 *      Jim Garlick (Lawrence Livermore National Labs)
 *      Dave Peterson (Lawrence Livermore National Labs)
 *      That One Guy (Some other place)
 *      Wang Zhenyu (intel.com)
 *
 * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"

#define E7XXX_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR    "e7xxx_edac"

#define e7xxx_printk(level, fmt, arg...) \
        edac_printk(level, "e7xxx", fmt, ##arg)

#define e7xxx_mc_printk(mci, level, fmt, arg...) \
        edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)

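/*
 * Usage sketch for the wrappers above: a call such as
 *
 *      e7xxx_printk(KERN_INFO, "probe succeeded\n");
 *
 * expands to edac_printk(KERN_INFO, "e7xxx", "probe succeeded\n"), so
 * every message from this module carries a consistent "e7xxx" prefix.
 */
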
#ifndef PCI_DEVICE_ID_INTEL_7205_0
#define PCI_DEVICE_ID_INTEL_7205_0      0x255d
#endif                          /* PCI_DEVICE_ID_INTEL_7205_0 */

#ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR
#define PCI_DEVICE_ID_INTEL_7205_1_ERR  0x2551
#endif                          /* PCI_DEVICE_ID_INTEL_7205_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7500_0
#define PCI_DEVICE_ID_INTEL_7500_0      0x2540
#endif                          /* PCI_DEVICE_ID_INTEL_7500_0 */

#ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR
#define PCI_DEVICE_ID_INTEL_7500_1_ERR  0x2541
#endif                          /* PCI_DEVICE_ID_INTEL_7500_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7501_0
#define PCI_DEVICE_ID_INTEL_7501_0      0x254c
#endif                          /* PCI_DEVICE_ID_INTEL_7501_0 */

#ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR
#define PCI_DEVICE_ID_INTEL_7501_1_ERR  0x2541
#endif                          /* PCI_DEVICE_ID_INTEL_7501_1_ERR */

#ifndef PCI_DEVICE_ID_INTEL_7505_0
#define PCI_DEVICE_ID_INTEL_7505_0      0x2550
#endif                          /* PCI_DEVICE_ID_INTEL_7505_0 */

#ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR
#define PCI_DEVICE_ID_INTEL_7505_1_ERR  0x2551
#endif                          /* PCI_DEVICE_ID_INTEL_7505_1_ERR */

#define E7XXX_NR_CSROWS         8       /* number of csrows */
#define E7XXX_NR_DIMMS          8       /* 2 channels, 4 dimms/channel */

/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB               0x60    /* DRAM row boundary register (8b) */
#define E7XXX_DRA               0x70    /* DRAM row attribute register (8b) */
                                        /*
                                         * 31   Device width row 7 0=x8 1=x4
                                         * 27   Device width row 6
                                         * 23   Device width row 5
                                         * 19   Device width row 4
                                         * 15   Device width row 3
                                         * 11   Device width row 2
                                         *  7   Device width row 1
                                         *  3   Device width row 0
                                         */
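/*
 * Worked example for E7XXX_DRA (values are illustrative only): if the
 * register reads 0x00000008, only bit 3 is set, so row 0 is built from
 * x4 devices while rows 1-7 report x8.  e7xxx_init_csrows() below pulls
 * this out with (dra >> (index * 4 + 3)) & 0x1.
 */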
#define E7XXX_DRC               0x7C    /* DRAM controller mode reg (32b) */
                                        /*
                                         * 22    Number channels 0=1,1=2
                                         * 19:18 DRB Granularity 32/64MB
                                         */
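/*
 * Example decode of E7XXX_DRC (illustrative value): with drc = 0x00440000,
 * bit 22 is set, so two channels are active, and bits 19:18 = 0x1 select
 * the larger (64MB) DRB granularity.  Bits 21:20 ("drc_ddim" in
 * e7xxx_init_csrows()) are 0 here, which the driver treats as ECC disabled.
 */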
#define E7XXX_TOLM              0xC4    /* DRAM top of low memory reg (16b) */
#define E7XXX_REMAPBASE         0xC6    /* DRAM remap base address reg (16b) */
#define E7XXX_REMAPLIMIT        0xC8    /* DRAM remap limit address reg (16b) */

/* E7XXX register addresses - device 0 function 1 */
#define E7XXX_DRAM_FERR         0x80    /* DRAM first error register (8b) */
#define E7XXX_DRAM_NERR         0x82    /* DRAM next error register (8b) */
#define E7XXX_DRAM_CELOG_ADD    0xA0    /* DRAM first correctable memory */
                                        /*     error address register (32b) */
                                        /*
                                         * 31:28 Reserved
                                         * 27:6  CE address (4k block 33:12)
                                         *  5:0  Reserved
                                         */
#define E7XXX_DRAM_UELOG_ADD    0xB0    /* DRAM first uncorrectable memory */
                                        /*     error address register (32b) */
                                        /*
                                         * 31:28 Reserved
                                         * 27:6  UE address (4k block 33:12)
                                         *  5:0  Reserved
                                         */
#define E7XXX_DRAM_CELOG_SYNDROME 0xD0  /* DRAM first correctable memory */
                                        /*     error syndrome register (16b) */

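/*
 * Decoding sketch for E7XXX_DRAM_CELOG_ADD (made-up value): the register
 * holds address bits 33:12 in bits 27:6, so shifting the raw value right
 * by 6 yields a 4KiB block number.  A value of 0x00001240, for instance,
 * gives block 0x49, i.e. an error somewhere in the page at physical
 * address 0x49000.  process_ce() below relies on exactly this shift.
 */
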
enum e7xxx_chips {
        E7500 = 0,
        E7501,
        E7505,
        E7205,
};

struct e7xxx_pvt {
        struct pci_dev *bridge_ck;
        u32 tolm;
        u32 remapbase;
        u32 remaplimit;
        const struct e7xxx_dev_info *dev_info;
};

struct e7xxx_dev_info {
        u16 err_dev;
        const char *ctl_name;
};

struct e7xxx_error_info {
        u8 dram_ferr;
        u8 dram_nerr;
        u32 dram_celog_add;
        u16 dram_celog_syndrome;
        u32 dram_uelog_add;
};

static struct edac_pci_ctl_info *e7xxx_pci;

static const struct e7xxx_dev_info e7xxx_devs[] = {
        [E7500] = {
                .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
                .ctl_name = "E7500"},
        [E7501] = {
                .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
                .ctl_name = "E7501"},
        [E7505] = {
                .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
                .ctl_name = "E7505"},
        [E7205] = {
                .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
                .ctl_name = "E7205"},
};

/* FIXME - is this valid for both SECDED and S4ECD4ED? */
static inline int e7xxx_find_channel(u16 syndrome)
{
        edac_dbg(3, "\n");

        if ((syndrome & 0xff00) == 0)
                return 0;

        if ((syndrome & 0x00ff) == 0)
                return 1;

        if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
                return 0;

        return 1;
}
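
/*
 * Example of the decode above (syndrome values are illustrative): 0x0041
 * has a clear upper byte, so it is attributed to channel 0; 0x4100 has a
 * clear lower byte and is attributed to channel 1.  The last two tests
 * are only a best-effort guess for syndromes with bits set in both bytes,
 * hence the FIXME above about S4ECD4ED.
 */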

static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
                                unsigned long page)
{
        u32 remap;
        struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;

        edac_dbg(3, "\n");

        if ((page < pvt->tolm) ||
                ((page >= 0x100000) && (page < pvt->remapbase)))
                return page;

        remap = (page - pvt->tolm) + pvt->remapbase;

        if (remap < pvt->remaplimit)
                return remap;

        e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
        return pvt->tolm - 1;
}
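
/*
 * Worked example for ctl_page_to_phys() (all numbers illustrative): with
 * tolm = 0xf0000 pages (3.75GB), remapbase = 0x100000 pages (4GB) and
 * remaplimit = 0x110000 pages, a page number of 0xf8000 falls in the
 * reclaimed hole below 4GB and is remapped to
 * (0xf8000 - 0xf0000) + 0x100000 = 0x108000, which is below remaplimit
 * and is what the function returns.
 */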

static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
        u32 error_1b, page;
        u16 syndrome;
        int row;
        int channel;

        edac_dbg(3, "\n");
        /* read the error address */
        error_1b = info->dram_celog_add;
        /* FIXME - should use PAGE_SHIFT */
        page = error_1b >> 6;   /* convert the address to 4k page */
        /* read the syndrome */
        syndrome = info->dram_celog_syndrome;
        /* FIXME - check for -1 */
        row = edac_mc_find_csrow_by_page(mci, page);
        /* convert syndrome to channel */
        channel = e7xxx_find_channel(syndrome);
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
                             row, channel, -1, "e7xxx CE", "");
}

static void process_ce_no_info(struct mem_ctl_info *mci)
{
        edac_dbg(3, "\n");
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
                             "e7xxx CE log register overflow", "");
}

static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
{
        u32 error_2b, block_page;
        int row;

        edac_dbg(3, "\n");
        /* read the error address */
        error_2b = info->dram_uelog_add;
        /* FIXME - should use PAGE_SHIFT */
        block_page = error_2b >> 6;     /* convert to 4k address */
        row = edac_mc_find_csrow_by_page(mci, block_page);

        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0,
                             row, -1, -1, "e7xxx UE", "");
}

static void process_ue_no_info(struct mem_ctl_info *mci)
{
        edac_dbg(3, "\n");

        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
                             "e7xxx UE log register overflow", "");
}

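/*
 * e7xxx_get_error_info() snapshots the FERR/NERR status, reads whichever
 * log registers those bits mark as valid, and then rewrites bits 1:0 of
 * FERR/NERR via pci_write_bits8() to clear the handled events.  The
 * clearing step assumes write-one-to-clear semantics for those bits;
 * consult the chipset datasheet before relying on that detail.
 */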
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
                                 struct e7xxx_error_info *info)
{
        struct e7xxx_pvt *pvt;

        pvt = (struct e7xxx_pvt *)mci->pvt_info;
        pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr);
        pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr);

        if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
                pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
                                &info->dram_celog_add);
                pci_read_config_word(pvt->bridge_ck,
                                E7XXX_DRAM_CELOG_SYNDROME,
                                &info->dram_celog_syndrome);
        }

        if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
                pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
                                &info->dram_uelog_add);

        if (info->dram_ferr & 3)
                pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);

        if (info->dram_nerr & 3)
                pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
}

static int e7xxx_process_error_info(struct mem_ctl_info *mci,
                                struct e7xxx_error_info *info,
                                int handle_errors)
{
        int error_found;

        error_found = 0;

        /* decode and report errors */
        if (info->dram_ferr & 1) {      /* check first error correctable */
                error_found = 1;

                if (handle_errors)
                        process_ce(mci, info);
        }

        if (info->dram_ferr & 2) {      /* check first error uncorrectable */
                error_found = 1;

                if (handle_errors)
                        process_ue(mci, info);
        }

        if (info->dram_nerr & 1) {      /* check next error correctable */
                error_found = 1;

                if (handle_errors) {
                        if (info->dram_ferr & 1)
                                process_ce_no_info(mci);
                        else
                                process_ce(mci, info);
                }
        }

        if (info->dram_nerr & 2) {      /* check next error uncorrectable */
                error_found = 1;

                if (handle_errors) {
                        if (info->dram_ferr & 2)
                                process_ue_no_info(mci);
                        else
                                process_ue(mci, info);
                }
        }

        return error_found;
}

static void e7xxx_check(struct mem_ctl_info *mci)
{
        struct e7xxx_error_info info;

        edac_dbg(3, "\n");
        e7xxx_get_error_info(mci, &info);
        e7xxx_process_error_info(mci, &info, 1);
}

/* Return 1 if dual channel mode is active.  Else return 0. */
static inline int dual_channel_active(u32 drc, int dev_idx)
{
        return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
}

/* Return DRB granularity (0=32MB, 1=64MB). */
static inline int drb_granularity(u32 drc, int dev_idx)
{
        /* only e7501 can be single channel */
        return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
}

static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
                        int dev_idx, u32 drc)
{
        unsigned long last_cumul_size;
        int index, j;
        u8 value;
        u32 dra, cumul_size, nr_pages;
        int drc_chan, drc_drbg, drc_ddim, mem_dev;
        struct csrow_info *csrow;
        struct dimm_info *dimm;
        enum edac_type edac_mode;

        pci_read_config_dword(pdev, E7XXX_DRA, &dra);
        drc_chan = dual_channel_active(drc, dev_idx);
        drc_drbg = drb_granularity(drc, dev_idx);
        drc_ddim = (drc >> 20) & 0x3;
        last_cumul_size = 0;

        /* The DRAM row boundary (DRB) reg values are the boundary addresses
         * for each DRAM row, with a granularity of 32 or 64MB (single/dual
         * channel operation).  DRB regs are cumulative; therefore DRB7 will
         * contain the total memory contained in all eight rows.
         */
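        /*
         * Numerical sketch (illustrative): with 64MB granularity
         * (drc_drbg == 1) and a DRB value of 0x08, cumul_size becomes
         * 0x08 << (25 + 1 - PAGE_SHIFT) = 0x08 << 14 = 0x20000 pages on a
         * 4KiB-page system, i.e. 512MB of cumulative memory through that
         * row.
         */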
        for (index = 0; index < mci->nr_csrows; index++) {
                /* mem_dev 0=x8, 1=x4 */
                mem_dev = (dra >> (index * 4 + 3)) & 0x1;
                csrow = mci->csrows[index];

                pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
                /* convert a 64 or 32 MiB DRB to a page size. */
                cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
                edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
                if (cumul_size == last_cumul_size)
                        continue;       /* not populated */

                csrow->first_page = last_cumul_size;
                csrow->last_page = cumul_size - 1;
                nr_pages = cumul_size - last_cumul_size;
                last_cumul_size = cumul_size;

                /*
                 * if single channel or x8 devices then SECDED
                 * if dual channel and x4 then S4ECD4ED
                 */
                if (drc_ddim) {
                        if (drc_chan && mem_dev) {
                                edac_mode = EDAC_S4ECD4ED;
                                mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
                        } else {
                                edac_mode = EDAC_SECDED;
                                mci->edac_cap |= EDAC_FLAG_SECDED;
                        }
                } else
                        edac_mode = EDAC_NONE;

                for (j = 0; j < drc_chan + 1; j++) {
                        dimm = csrow->channels[j]->dimm;

                        dimm->nr_pages = nr_pages / (drc_chan + 1);
                        dimm->grain = 1 << 12;  /* 4KiB - resolution of CELOG */
                        dimm->mtype = MEM_RDDR; /* only one type supported */
                        dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
                        dimm->edac_mode = edac_mode;
                }
        }
}

static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
        u16 pci_data;
        struct mem_ctl_info *mci = NULL;
        struct edac_mc_layer layers[2];
        struct e7xxx_pvt *pvt = NULL;
        u32 drc;
        int drc_chan;
        struct e7xxx_error_info discard;

        edac_dbg(0, "mci\n");

        pci_read_config_dword(pdev, E7XXX_DRC, &drc);

        drc_chan = dual_channel_active(drc, dev_idx);
        /*
         * According to the datasheet, this device has a maximum of
         * 4 DIMMs per channel, either single-rank or dual-rank. So the
         * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
         * That means that DIMMs are mapped as csrows and the channel
         * maps the rank, so an error on either channel should be
         * attributed to the same DIMM.
         */
        layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
        layers[0].size = E7XXX_NR_CSROWS;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
        layers[1].size = drc_chan + 1;
        layers[1].is_virt_csrow = false;
        mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
        if (mci == NULL)
                return -ENOMEM;

        edac_dbg(3, "init mci\n");
        mci->mtype_cap = MEM_FLAG_RDDR;
        mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
                EDAC_FLAG_S4ECD4ED;
        /* FIXME - what if different memory types are in different csrows? */
        mci->mod_name = EDAC_MOD_STR;
        mci->mod_ver = E7XXX_REVISION;
        mci->pdev = &pdev->dev;
        edac_dbg(3, "init pvt\n");
        pvt = (struct e7xxx_pvt *)mci->pvt_info;
        pvt->dev_info = &e7xxx_devs[dev_idx];
        pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
                                        pvt->dev_info->err_dev, pvt->bridge_ck);

        if (!pvt->bridge_ck) {
                e7xxx_printk(KERN_ERR, "error reporting device not found: "
                        "vendor %x device 0x%x (broken BIOS?)\n",
                        PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
                goto fail0;
        }

        edac_dbg(3, "more mci init\n");
        mci->ctl_name = pvt->dev_info->ctl_name;
        mci->dev_name = pci_name(pdev);
        mci->edac_check = e7xxx_check;
        mci->ctl_page_to_phys = ctl_page_to_phys;
        e7xxx_init_csrows(mci, pdev, dev_idx, drc);
        mci->edac_cap |= EDAC_FLAG_NONE;
        edac_dbg(3, "tolm, remapbase, remaplimit\n");
        /* load the top of low memory, remap base, and remap limit vars */
        pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
        pvt->tolm = ((u32) pci_data) << 4;
        pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
        pvt->remapbase = ((u32) pci_data) << 14;
        pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
        pvt->remaplimit = ((u32) pci_data) << 14;
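        /*
         * Scaling sketch (illustrative numbers): a TOLM reading of 0xC000
         * becomes 0xC0000 pages (3GB) after the << 4, and a REMAPBASE
         * reading of 0x0040 becomes 0x100000 pages (4GB) after the << 14.
         * Keeping all three values in 4KiB-page units lets
         * ctl_page_to_phys() compare them directly against page numbers.
         */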
        e7xxx_printk(KERN_INFO,
                "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
                pvt->remapbase, pvt->remaplimit);

        /* clear any pending errors, or initial state bits */
        e7xxx_get_error_info(mci, &discard);

        /* Here we assume that we will never see multiple instances of this
         * type of memory controller.  The ID is therefore hardcoded to 0.
         */
        if (edac_mc_add_mc(mci)) {
                edac_dbg(3, "failed edac_mc_add_mc()\n");
                goto fail1;
        }

        /* allocating generic PCI control info */
        e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
        if (!e7xxx_pci) {
                printk(KERN_WARNING
                        "%s(): Unable to create PCI control\n",
                        __func__);
                printk(KERN_WARNING
                        "%s(): PCI error report via EDAC not setup\n",
                        __func__);
        }

        /* get this far and it's successful */
        edac_dbg(3, "success\n");
        return 0;

fail1:
        pci_dev_put(pvt->bridge_ck);

fail0:
        edac_mc_free(mci);

        return -ENODEV;
}

/* returns count (>= 0), or negative on error */
static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        edac_dbg(0, "\n");

        /* wake up and enable device */
        return pci_enable_device(pdev) ?
                -EIO : e7xxx_probe1(pdev, ent->driver_data);
}

static void e7xxx_remove_one(struct pci_dev *pdev)
{
        struct mem_ctl_info *mci;
        struct e7xxx_pvt *pvt;

        edac_dbg(0, "\n");

        if (e7xxx_pci)
                edac_pci_release_generic_ctl(e7xxx_pci);

        if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
                return;

        pvt = (struct e7xxx_pvt *)mci->pvt_info;
        pci_dev_put(pvt->bridge_ck);
        edac_mc_free(mci);
}

static const struct pci_device_id e7xxx_pci_tbl[] = {
        {
         PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
         E7205},
        {
         PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
         E7500},
        {
         PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
         E7501},
        {
         PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
         E7505},
        {
         0,
         }                      /* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);

static struct pci_driver e7xxx_driver = {
        .name = EDAC_MOD_STR,
        .probe = e7xxx_init_one,
        .remove = e7xxx_remove_one,
        .id_table = e7xxx_pci_tbl,
};

static int __init e7xxx_init(void)
{
        /* Ensure that the OPSTATE is set correctly for POLL or NMI */
        opstate_init();

        return pci_register_driver(&e7xxx_driver);
}

static void __exit e7xxx_exit(void)
{
        pci_unregister_driver(&e7xxx_driver);
}

module_init(e7xxx_init);
module_exit(e7xxx_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
                "Based on work by Dan Hollis et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
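
/*
 * Loading example (illustrative): "modprobe e7xxx_edac edac_op_state=1"
 * requests NMI-based error reporting instead of the default polling;
 * opstate_init() in e7xxx_init() picks the value up before the PCI
 * driver is registered.
 */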
 607