linux/drivers/edac/sb_edac.c
   1/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
   2 *
   3 * This driver supports the memory controllers found on the Intel
   4 * processor family Sandy Bridge.
   5 *
   6 * This file may be distributed under the terms of the
   7 * GNU General Public License version 2 only.
   8 *
   9 * Copyright (c) 2011 by:
  10 *       Mauro Carvalho Chehab <mchehab@redhat.com>
  11 */
  12
  13#include <linux/module.h>
  14#include <linux/init.h>
  15#include <linux/pci.h>
  16#include <linux/pci_ids.h>
  17#include <linux/slab.h>
  18#include <linux/delay.h>
  19#include <linux/edac.h>
  20#include <linux/mmzone.h>
  21#include <linux/smp.h>
  22#include <linux/bitmap.h>
  23#include <linux/math64.h>
  24#include <linux/mod_devicetable.h>
  25#include <asm/cpu_device_id.h>
  26#include <asm/intel-family.h>
  27#include <asm/processor.h>
  28#include <asm/mce.h>
  29
  30#include "edac_core.h"
  31
  32/* Static vars */
  33static LIST_HEAD(sbridge_edac_list);
  34
  35/*
  36 * Alter this version for the module when modifications are made
  37 */
  38#define SBRIDGE_REVISION    " Ver: 1.1.1 "
  39#define EDAC_MOD_STR      "sbridge_edac"
  40
  41/*
  42 * Debug macros
  43 */
  44#define sbridge_printk(level, fmt, arg...)                      \
  45        edac_printk(level, "sbridge", fmt, ##arg)
  46
  47#define sbridge_mc_printk(mci, level, fmt, arg...)              \
  48        edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
  49
  50/*
  51 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
  52 */
  53#define GET_BITFIELD(v, lo, hi) \
  54        (((v) & GENMASK_ULL(hi, lo)) >> (lo))
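
/*
 * Worked example (illustrative only, not used by the driver): with
 * v = 0xdeadbeef, GENMASK_ULL(15, 8) is 0xff00, so GET_BITFIELD(v, 8, 15)
 * extracts bits 15:8 of the value. The hypothetical helper below exists
 * purely as documentation of the macro's semantics.
 */
static inline u64 get_bitfield_example(void)
{
        return GET_BITFIELD(0xdeadbeefULL, 8, 15);      /* == 0xbe */
}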
  55
  56/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
  57static const u32 sbridge_dram_rule[] = {
  58        0x80, 0x88, 0x90, 0x98, 0xa0,
  59        0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
  60};
  61
  62static const u32 ibridge_dram_rule[] = {
  63        0x60, 0x68, 0x70, 0x78, 0x80,
  64        0x88, 0x90, 0x98, 0xa0, 0xa8,
  65        0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
  66        0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
  67};
  68
  69static const u32 knl_dram_rule[] = {
  70        0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
  71        0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
  72        0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
  73        0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
  74        0x100, 0x108, 0x110, 0x118,   /* 20-23 */
  75};
  76
  77#define DRAM_RULE_ENABLE(reg)   GET_BITFIELD(reg, 0,  0)
  78#define A7MODE(reg)             GET_BITFIELD(reg, 26, 26)
  79
  80static char *show_dram_attr(u32 attr)
  81{
  82        switch (attr) {
  83                case 0:
  84                        return "DRAM";
  85                case 1:
  86                        return "MMCFG";
  87                case 2:
  88                        return "NXM";
  89                default:
  90                        return "unknown";
  91        }
  92}
  93
  94static const u32 sbridge_interleave_list[] = {
  95        0x84, 0x8c, 0x94, 0x9c, 0xa4,
  96        0xac, 0xb4, 0xbc, 0xc4, 0xcc,
  97};
  98
  99static const u32 ibridge_interleave_list[] = {
 100        0x64, 0x6c, 0x74, 0x7c, 0x84,
 101        0x8c, 0x94, 0x9c, 0xa4, 0xac,
 102        0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
 103        0xdc, 0xe4, 0xec, 0xf4, 0xfc,
 104};
 105
 106static const u32 knl_interleave_list[] = {
 107        0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
 108        0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
 109        0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
 110        0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
 111        0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
 112};
 113
 114struct interleave_pkg {
 115        unsigned char start;
 116        unsigned char end;
 117};
 118
 119static const struct interleave_pkg sbridge_interleave_pkg[] = {
 120        { 0, 2 },
 121        { 3, 5 },
 122        { 8, 10 },
 123        { 11, 13 },
 124        { 16, 18 },
 125        { 19, 21 },
 126        { 24, 26 },
 127        { 27, 29 },
 128};
 129
 130static const struct interleave_pkg ibridge_interleave_pkg[] = {
 131        { 0, 3 },
 132        { 4, 7 },
 133        { 8, 11 },
 134        { 12, 15 },
 135        { 16, 19 },
 136        { 20, 23 },
 137        { 24, 27 },
 138        { 28, 31 },
 139};
 140
 141static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
 142                          int interleave)
 143{
 144        return GET_BITFIELD(reg, table[interleave].start,
 145                            table[interleave].end);
 146}
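
/*
 * Usage sketch (illustrative, not part of the driver flow): dump all
 * eight interleave targets of a SAD interleave register, assuming the
 * Sandy Bridge bit layout from sbridge_interleave_pkg above.
 */
static inline void example_dump_interleave_targets(u32 reg)
{
        int i;

        for (i = 0; i < 8; i++)
                edac_dbg(0, "interleave target %d -> package %d\n",
                         i, sad_pkg(sbridge_interleave_pkg, reg, i));
}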
 147
 148/* Devices 12 Function 7 */
 149
 150#define TOLM            0x80
 151#define TOHM            0x84
 152#define HASWELL_TOLM    0xd0
 153#define HASWELL_TOHM_0  0xd4
 154#define HASWELL_TOHM_1  0xd8
 155#define KNL_TOLM        0xd0
 156#define KNL_TOHM_0      0xd4
 157#define KNL_TOHM_1      0xd8
 158
 159#define GET_TOLM(reg)           ((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
 160#define GET_TOHM(reg)           ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
 161
 162/* Device 13 Function 6 */
 163
 164#define SAD_TARGET      0xf0
 165
 166#define SOURCE_ID(reg)          GET_BITFIELD(reg, 9, 11)
 167
 168#define SOURCE_ID_KNL(reg)      GET_BITFIELD(reg, 12, 14)
 169
 170#define SAD_CONTROL     0xf4
 171
 172/* Device 14 function 0 */
 173
 174static const u32 tad_dram_rule[] = {
 175        0x40, 0x44, 0x48, 0x4c,
 176        0x50, 0x54, 0x58, 0x5c,
 177        0x60, 0x64, 0x68, 0x6c,
 178};
 179#define MAX_TAD ARRAY_SIZE(tad_dram_rule)
 180
 181#define TAD_LIMIT(reg)          ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
 182#define TAD_SOCK(reg)           GET_BITFIELD(reg, 10, 11)
 183#define TAD_CH(reg)             GET_BITFIELD(reg,  8,  9)
 184#define TAD_TGT3(reg)           GET_BITFIELD(reg,  6,  7)
 185#define TAD_TGT2(reg)           GET_BITFIELD(reg,  4,  5)
 186#define TAD_TGT1(reg)           GET_BITFIELD(reg,  2,  3)
 187#define TAD_TGT0(reg)           GET_BITFIELD(reg,  0,  1)
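
/*
 * Illustrative sketch (hypothetical helper, not in the driver): the four
 * 2-bit TAD target fields select a channel for each interleave position,
 * the kind of selection the error-decode path performs with
 * TAD_TGT0..TAD_TGT3.
 */
static inline u32 example_tad_target(u32 reg, int position)
{
        switch (position & 3) {
        case 0:
                return TAD_TGT0(reg);
        case 1:
                return TAD_TGT1(reg);
        case 2:
                return TAD_TGT2(reg);
        default:
                return TAD_TGT3(reg);
        }
}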
 188
 189/* Device 15, function 0 */
 190
 191#define MCMTR                   0x7c
 192#define KNL_MCMTR               0x624
 193
 194#define IS_ECC_ENABLED(mcmtr)           GET_BITFIELD(mcmtr, 2, 2)
 195#define IS_LOCKSTEP_ENABLED(mcmtr)      GET_BITFIELD(mcmtr, 1, 1)
 196#define IS_CLOSE_PG(mcmtr)              GET_BITFIELD(mcmtr, 0, 0)
 197
 198/* Device 15, function 1 */
 199
 200#define RASENABLES              0xac
 201#define IS_MIRROR_ENABLED(reg)          GET_BITFIELD(reg, 0, 0)
 202
 203/* Device 15, functions 2-5 */
 204
 205static const int mtr_regs[] = {
 206        0x80, 0x84, 0x88,
 207};
 208
 209static const int knl_mtr_reg = 0xb60;
 210
 211#define RANK_DISABLE(mtr)               GET_BITFIELD(mtr, 16, 19)
 212#define IS_DIMM_PRESENT(mtr)            GET_BITFIELD(mtr, 14, 14)
 213#define RANK_CNT_BITS(mtr)              GET_BITFIELD(mtr, 12, 13)
 214#define RANK_WIDTH_BITS(mtr)            GET_BITFIELD(mtr, 2, 4)
 215#define COL_WIDTH_BITS(mtr)             GET_BITFIELD(mtr, 0, 1)
 216
 217static const u32 tad_ch_nilv_offset[] = {
 218        0x90, 0x94, 0x98, 0x9c,
 219        0xa0, 0xa4, 0xa8, 0xac,
 220        0xb0, 0xb4, 0xb8, 0xbc,
 221};
 222#define CHN_IDX_OFFSET(reg)             GET_BITFIELD(reg, 28, 29)
 223#define TAD_OFFSET(reg)                 (GET_BITFIELD(reg,  6, 25) << 26)
 224
 225static const u32 rir_way_limit[] = {
 226        0x108, 0x10c, 0x110, 0x114, 0x118,
 227};
 228#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
 229
 230#define IS_RIR_VALID(reg)       GET_BITFIELD(reg, 31, 31)
 231#define RIR_WAY(reg)            GET_BITFIELD(reg, 28, 29)
 232
 233#define MAX_RIR_WAY     8
 234
 235static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
 236        { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
 237        { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
 238        { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
 239        { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
 240        { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 241};
 242
 243#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
 244        GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
 245
 246#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
 247        GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 248
 249/* Device 16, functions 2-7 */
 250
 251/*
 252 * FIXME: Implement the error count reads directly
 253 */
 254
 255static const u32 correrrcnt[] = {
 256        0x104, 0x108, 0x10c, 0x110,
 257};
 258
 259#define RANK_ODD_OV(reg)                GET_BITFIELD(reg, 31, 31)
 260#define RANK_ODD_ERR_CNT(reg)           GET_BITFIELD(reg, 16, 30)
 261#define RANK_EVEN_OV(reg)               GET_BITFIELD(reg, 15, 15)
 262#define RANK_EVEN_ERR_CNT(reg)          GET_BITFIELD(reg,  0, 14)
 263
 264static const u32 correrrthrsld[] = {
 265        0x11c, 0x120, 0x124, 0x128,
 266};
 267
 268#define RANK_ODD_ERR_THRSLD(reg)        GET_BITFIELD(reg, 16, 30)
 269#define RANK_EVEN_ERR_THRSLD(reg)       GET_BITFIELD(reg,  0, 14)
 270
 271
 272/* Device 17, function 0 */
 273
 274#define SB_RANK_CFG_A           0x0328
 275
 276#define IB_RANK_CFG_A           0x0320
 277
 278/*
 279 * sbridge structs
 280 */
 281
#define NUM_CHANNELS            8       /* 2 MCs per socket, 4 channels per MC */
 283#define MAX_DIMMS               3       /* Max DIMMS per channel */
 284#define KNL_MAX_CHAS            38      /* KNL max num. of Cache Home Agents */
 285#define KNL_MAX_CHANNELS        6       /* KNL max num. of PCI channels */
 286#define KNL_MAX_EDCS            8       /* Embedded DRAM controllers */
 287#define CHANNEL_UNSPECIFIED     0xf     /* Intel IA32 SDM 15-14 */
 288
 289enum type {
 290        SANDY_BRIDGE,
 291        IVY_BRIDGE,
 292        HASWELL,
 293        BROADWELL,
 294        KNIGHTS_LANDING,
 295};
 296
 297struct sbridge_pvt;
 298struct sbridge_info {
 299        enum type       type;
 300        u32             mcmtr;
 301        u32             rankcfgr;
 302        u64             (*get_tolm)(struct sbridge_pvt *pvt);
 303        u64             (*get_tohm)(struct sbridge_pvt *pvt);
 304        u64             (*rir_limit)(u32 reg);
 305        u64             (*sad_limit)(u32 reg);
 306        u32             (*interleave_mode)(u32 reg);
 307        char*           (*show_interleave_mode)(u32 reg);
 308        u32             (*dram_attr)(u32 reg);
 309        const u32       *dram_rule;
 310        const u32       *interleave_list;
 311        const struct interleave_pkg *interleave_pkg;
 312        u8              max_sad;
 313        u8              max_interleave;
 314        u8              (*get_node_id)(struct sbridge_pvt *pvt);
 315        enum mem_type   (*get_memory_type)(struct sbridge_pvt *pvt);
 316        enum dev_type   (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
 317        struct pci_dev  *pci_vtd;
 318};
 319
 320struct sbridge_channel {
 321        u32             ranks;
 322        u32             dimms;
 323};
 324
 325struct pci_id_descr {
 326        int                     dev_id;
 327        int                     optional;
 328};
 329
 330struct pci_id_table {
 331        const struct pci_id_descr       *descr;
 332        int                             n_devs;
 333        enum type                       type;
 334};
 335
 336struct sbridge_dev {
 337        struct list_head        list;
 338        u8                      bus, mc;
 339        u8                      node_id, source_id;
 340        struct pci_dev          **pdev;
 341        int                     n_devs;
 342        struct mem_ctl_info     *mci;
 343};
 344
 345struct knl_pvt {
 346        struct pci_dev          *pci_cha[KNL_MAX_CHAS];
 347        struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
 348        struct pci_dev          *pci_mc0;
 349        struct pci_dev          *pci_mc1;
 350        struct pci_dev          *pci_mc0_misc;
 351        struct pci_dev          *pci_mc1_misc;
 352        struct pci_dev          *pci_mc_info; /* tolm, tohm */
 353};
 354
 355struct sbridge_pvt {
 356        struct pci_dev          *pci_ta, *pci_ddrio, *pci_ras;
 357        struct pci_dev          *pci_sad0, *pci_sad1;
 358        struct pci_dev          *pci_ha0, *pci_ha1;
 359        struct pci_dev          *pci_br0, *pci_br1;
 360        struct pci_dev          *pci_ha1_ta;
 361        struct pci_dev          *pci_tad[NUM_CHANNELS];
 362
 363        struct sbridge_dev      *sbridge_dev;
 364
 365        struct sbridge_info     info;
 366        struct sbridge_channel  channel[NUM_CHANNELS];
 367
 368        /* Memory type detection */
 369        bool                    is_mirrored, is_lockstep, is_close_pg;
 370        bool                    is_chan_hash;
 371
 372        /* Fifo double buffers */
 373        struct mce              mce_entry[MCE_LOG_LEN];
 374        struct mce              mce_outentry[MCE_LOG_LEN];
 375
 376        /* Fifo in/out counters */
 377        unsigned                mce_in, mce_out;
 378
        /* Count of errors that were dropped because the FIFO overran */
 380        unsigned                mce_overrun;
 381
 382        /* Memory description */
 383        u64                     tolm, tohm;
 384        struct knl_pvt knl;
 385};
 386
 387#define PCI_DESCR(device_id, opt)       \
 388        .dev_id = (device_id),          \
 389        .optional = opt
 390
 391static const struct pci_id_descr pci_dev_descr_sbridge[] = {
 392                /* Processor Home Agent */
 393        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0)     },
 394
 395                /* Memory controller */
 396        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0)      },
 397        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0)     },
 398        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0)    },
 399        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0)    },
 400        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0)    },
 401        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0)    },
 402        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1)   },
 403
 404                /* System Address Decoder */
 405        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0)        },
 406        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0)        },
 407
 408                /* Broadcast Registers */
 409        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)          },
 410};
 411
 412#define PCI_ID_TABLE_ENTRY(A, T) {      \
 413        .descr = A,                     \
 414        .n_devs = ARRAY_SIZE(A),        \
 415        .type = T                       \
 416}
 417
 418static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
 419        PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
 420        {0,}                    /* 0 terminated list. */
 421};
 422
/* This changes depending on whether the part has one or two home agents (1HA or 2HA):
 424 * 1HA:
 425 *      0x0eb8 (17.0) is DDRIO0
 426 * 2HA:
 427 *      0x0ebc (17.4) is DDRIO0
 428 */
 429#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0      0x0eb8
 430#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0      0x0ebc
 431
 432/* pci ids */
 433#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0             0x0ea0
 434#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA          0x0ea8
 435#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS         0x0e71
 436#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0        0x0eaa
 437#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1        0x0eab
 438#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2        0x0eac
 439#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3        0x0ead
 440#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD                 0x0ec8
 441#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0                 0x0ec9
 442#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1                 0x0eca
 443#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1             0x0e60
 444#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA          0x0e68
 445#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS         0x0e79
 446#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0        0x0e6a
 447#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1        0x0e6b
 448#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2        0x0e6c
 449#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3        0x0e6d
 450
 451static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 452                /* Processor Home Agent */
 453        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0)             },
 454
 455                /* Memory controller */
 456        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0)          },
 457        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0)         },
 458        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0)        },
 459        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0)        },
 460        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0)        },
 461        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0)        },
 462
 463                /* System Address Decoder */
 464        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0)                 },
 465
 466                /* Broadcast Registers */
 467        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1)                 },
 468        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0)                 },
 469
 470                /* Optional, mode 2HA */
 471        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1)             },
 472#if 0
 473        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1)  },
 474        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
 475#endif
 476        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1)        },
 477        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1)        },
 478        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1)        },
 479        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1)        },
 480
 481        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1)      },
 482        { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1)      },
 483};
 484
 485static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
 486        PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
 487        {0,}                    /* 0 terminated list. */
 488};
 489
 490/* Haswell support */
 491/* EN processor:
 492 *      - 1 IMC
 493 *      - 3 DDR3 channels, 2 DPC per channel
 494 * EP processor:
 495 *      - 1 or 2 IMC
 496 *      - 4 DDR4 channels, 3 DPC per channel
 497 * EP 4S processor:
 498 *      - 2 IMC
 499 *      - 4 DDR4 channels, 3 DPC per channel
 500 * EX processor:
 501 *      - 2 IMC
 502 *      - each IMC interfaces with a SMI 2 channel
 503 *      - each SMI channel interfaces with a scalable memory buffer
 504 *      - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 505 */
 506#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
 507#define HASWELL_HASYSDEFEATURE2 0x84
 508#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
 509#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0     0x2fa0
 510#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1     0x2f60
 511#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA  0x2fa8
 512#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
 513#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA  0x2f68
 514#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
 515#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
 516#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
 517#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
 518#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
 519#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
 520#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
 521#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
 522#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
 523#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
 524#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
 525#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
 526#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
 527#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
 528#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
 529static const struct pci_id_descr pci_dev_descr_haswell[] = {
 530        /* first item must be the HA */
 531        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0)             },
 532
 533        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0)        },
 534        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0)        },
 535
 536        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1)             },
 537
 538        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0)          },
 539        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0)     },
 540        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0)        },
 541        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0)        },
 542        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1)        },
 543        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1)        },
 544
 545        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1)          },
 546        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1)          },
 547        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1)          },
 548        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1)          },
 549
 550        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1)          },
 551        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1)     },
 552        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1)        },
 553        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1)        },
 554        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1)        },
 555        { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1)        },
 556};
 557
 558static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 559        PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
 560        {0,}                    /* 0 terminated list. */
 561};
 562
/* Knights Landing Support */
 564/*
 565 * KNL's memory channels are swizzled between memory controllers.
 566 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 567 */
 568#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
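
/*
 * Worked example (illustrative): knl_channel_remap(0, 1) yields physical
 * channel 4 (MC0 owns CH3..5), while knl_channel_remap(1, 1) yields
 * physical channel 1 (MC1 owns CH0..2).
 */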
 569
 570/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 571#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
 572/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
 573#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL  0x7843
 574/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
 575#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
 576/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
 577#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
 578/* SAD target - 1-29-1 (1 of these) */
 579#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
 580/* Caching / Home Agent */
 581#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
 582/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
 583#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
 584
 585/*
 586 * KNL differs from SB, IB, and Haswell in that it has multiple
 587 * instances of the same device with the same device ID, so we handle that
 588 * by creating as many copies in the table as we expect to find.
 * (Devices with the same device ID must be grouped together.)
 590 */
 591
 592static const struct pci_id_descr pci_dev_descr_knl[] = {
 593        [0]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) },
 594        [1]         = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) },
 595        [2 ... 3]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)},
 596        [4 ... 41]  = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) },
 597        [42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) },
 598        [48]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) },
 599        [49]        = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) },
 600};
 601
 602static const struct pci_id_table pci_dev_descr_knl_table[] = {
 603        PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
 604        {0,}
 605};
 606
 607/*
 608 * Broadwell support
 609 *
 610 * DE processor:
 611 *      - 1 IMC
 612 *      - 2 DDR3 channels, 2 DPC per channel
 613 * EP processor:
 614 *      - 1 or 2 IMC
 615 *      - 4 DDR4 channels, 3 DPC per channel
 616 * EP 4S processor:
 617 *      - 2 IMC
 618 *      - 4 DDR4 channels, 3 DPC per channel
 619 * EX processor:
 620 *      - 2 IMC
 621 *      - each IMC interfaces with a SMI 2 channel
 622 *      - each SMI channel interfaces with a scalable memory buffer
 623 *      - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 624 */
 625#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
 626#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0   0x6fa0
 627#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1   0x6f60
 628#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA        0x6fa8
 629#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
 630#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA        0x6f68
 631#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79
 632#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
 633#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
 634#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
 635#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
 636#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
 637#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
 638#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
 639#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
 640#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
 641#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
 642#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
 643
 644static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 645        /* first item must be the HA */
 646        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0)           },
 647
 648        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0)      },
 649        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0)      },
 650
 651        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1)           },
 652
 653        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0)        },
 654        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0)   },
 655        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0)      },
 656        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0)      },
 657        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1)      },
 658        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1)      },
 659
 660        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1)        },
 661
 662        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1)        },
 663        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1)   },
 664        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1)      },
 665        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1)      },
 666        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1)      },
 667        { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1)      },
 668};
 669
 670static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
 671        PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
 672        {0,}                    /* 0 terminated list. */
 673};
 674
 675
 676/****************************************************************************
 677                        Ancillary status routines
 678 ****************************************************************************/
 679
 680static inline int numrank(enum type type, u32 mtr)
 681{
 682        int ranks = (1 << RANK_CNT_BITS(mtr));
 683        int max = 4;
 684
 685        if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
 686                max = 8;
 687
 688        if (ranks > max) {
 689                edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
 690                         ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
 691                return -EINVAL;
 692        }
 693
 694        return ranks;
 695}
 696
 697static inline int numrow(u32 mtr)
 698{
 699        int rows = (RANK_WIDTH_BITS(mtr) + 12);
 700
 701        if (rows < 13 || rows > 18) {
                edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
 703                         rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
 704                return -EINVAL;
 705        }
 706
 707        return 1 << rows;
 708}
 709
 710static inline int numcol(u32 mtr)
 711{
 712        int cols = (COL_WIDTH_BITS(mtr) + 10);
 713
 714        if (cols > 12) {
                edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
 716                         cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
 717                return -EINVAL;
 718        }
 719
 720        return 1 << cols;
 721}
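
/*
 * Worked example (illustrative, hypothetical register value): for
 * mtr = 0x500e, IS_DIMM_PRESENT() is set and the helpers above decode
 * 1 << 1 = 2 ranks, 1 << (3 + 12) = 32768 rows and 1 << (2 + 10) = 4096
 * columns.
 */
static inline void example_decode_mtr(void)
{
        u32 mtr = 0x500e;

        edac_dbg(0, "present=%d ranks=%d rows=%d cols=%d\n",
                 (int)IS_DIMM_PRESENT(mtr), numrank(SANDY_BRIDGE, mtr),
                 numrow(mtr), numcol(mtr));
}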
 722
 723static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
 724{
 725        struct sbridge_dev *sbridge_dev;
 726
 727        /*
 728         * If we have devices scattered across several busses that pertain
 729         * to the same memory controller, we'll lump them all together.
 730         */
 731        if (multi_bus) {
 732                return list_first_entry_or_null(&sbridge_edac_list,
 733                                struct sbridge_dev, list);
 734        }
 735
 736        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
 737                if (sbridge_dev->bus == bus)
 738                        return sbridge_dev;
 739        }
 740
 741        return NULL;
 742}
 743
 744static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
 745                                           const struct pci_id_table *table)
 746{
 747        struct sbridge_dev *sbridge_dev;
 748
 749        sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
 750        if (!sbridge_dev)
 751                return NULL;
 752
        sbridge_dev->pdev = kcalloc(table->n_devs, sizeof(*sbridge_dev->pdev),
                                    GFP_KERNEL);
 755        if (!sbridge_dev->pdev) {
 756                kfree(sbridge_dev);
 757                return NULL;
 758        }
 759
 760        sbridge_dev->bus = bus;
 761        sbridge_dev->n_devs = table->n_devs;
 762        list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
 763
 764        return sbridge_dev;
 765}
 766
 767static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
 768{
 769        list_del(&sbridge_dev->list);
 770        kfree(sbridge_dev->pdev);
 771        kfree(sbridge_dev);
 772}
 773
 774static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
 775{
 776        u32 reg;
 777
 778        /* Address range is 32:28 */
 779        pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
 780        return GET_TOLM(reg);
 781}
 782
 783static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
 784{
 785        u32 reg;
 786
 787        pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
 788        return GET_TOHM(reg);
 789}
 790
 791static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
 792{
 793        u32 reg;
 794
 795        pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
 796
 797        return GET_TOLM(reg);
 798}
 799
 800static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
 801{
 802        u32 reg;
 803
 804        pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
 805
 806        return GET_TOHM(reg);
 807}
 808
 809static u64 rir_limit(u32 reg)
 810{
 811        return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
 812}
 813
 814static u64 sad_limit(u32 reg)
 815{
 816        return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
 817}
 818
 819static u32 interleave_mode(u32 reg)
 820{
 821        return GET_BITFIELD(reg, 1, 1);
 822}
 823
static char *show_interleave_mode(u32 reg)
 825{
 826        return interleave_mode(reg) ? "8:6" : "[8:6]XOR[18:16]";
 827}
 828
 829static u32 dram_attr(u32 reg)
 830{
 831        return GET_BITFIELD(reg, 2, 3);
 832}
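
/*
 * Usage sketch (illustrative, not part of the driver flow): decoding one
 * Sandy Bridge SAD DRAM rule with the helpers above.
 */
static inline void example_decode_sad_rule(u32 reg)
{
        if (!DRAM_RULE_ENABLE(reg))
                return;

        edac_dbg(0, "SAD rule: %s, limit 0x%016llx, interleave %s\n",
                 show_dram_attr(dram_attr(reg)), sad_limit(reg),
                 show_interleave_mode(reg));
}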
 833
 834static u64 knl_sad_limit(u32 reg)
 835{
 836        return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
 837}
 838
 839static u32 knl_interleave_mode(u32 reg)
 840{
 841        return GET_BITFIELD(reg, 1, 2);
 842}
 843
 844static char *knl_show_interleave_mode(u32 reg)
 845{
        char *s = NULL;
 847
 848        switch (knl_interleave_mode(reg)) {
 849        case 0:
 850                s = "use address bits [8:6]";
 851                break;
 852        case 1:
 853                s = "use address bits [10:8]";
 854                break;
 855        case 2:
 856                s = "use address bits [14:12]";
 857                break;
 858        case 3:
 859                s = "use address bits [32:30]";
 860                break;
 861        default:
 862                WARN_ON(1);
 863                break;
 864        }
 865
 866        return s;
 867}
 868
 869static u32 dram_attr_knl(u32 reg)
 870{
 871        return GET_BITFIELD(reg, 3, 4);
 872}
 873
 874
 875static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
 876{
 877        u32 reg;
 878        enum mem_type mtype;
 879
 880        if (pvt->pci_ddrio) {
 881                pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
 882                                      &reg);
 883                if (GET_BITFIELD(reg, 11, 11))
 884                        /* FIXME: Can also be LRDIMM */
 885                        mtype = MEM_RDDR3;
 886                else
 887                        mtype = MEM_DDR3;
 888        } else
 889                mtype = MEM_UNKNOWN;
 890
 891        return mtype;
 892}
 893
 894static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
 895{
 896        u32 reg;
 897        bool registered = false;
 898        enum mem_type mtype = MEM_UNKNOWN;
 899
 900        if (!pvt->pci_ddrio)
 901                goto out;
 902
 903        pci_read_config_dword(pvt->pci_ddrio,
 904                              HASWELL_DDRCRCLKCONTROLS, &reg);
 905        /* Is_Rdimm */
 906        if (GET_BITFIELD(reg, 16, 16))
 907                registered = true;
 908
 909        pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
 910        if (GET_BITFIELD(reg, 14, 14)) {
 911                if (registered)
 912                        mtype = MEM_RDDR4;
 913                else
 914                        mtype = MEM_DDR4;
 915        } else {
 916                if (registered)
 917                        mtype = MEM_RDDR3;
 918                else
 919                        mtype = MEM_DDR3;
 920        }
 921
 922out:
 923        return mtype;
 924}
 925
 926static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
 927{
 928        /* for KNL value is fixed */
 929        return DEV_X16;
 930}
 931
 932static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 933{
        /* there's no way to figure it out */
 935        return DEV_UNKNOWN;
 936}
 937
 938static enum dev_type __ibridge_get_width(u32 mtr)
 939{
 940        enum dev_type type;
 941
 942        switch (mtr) {
        case 3:
        default:
                type = DEV_UNKNOWN;
 945                break;
 946        case 2:
 947                type = DEV_X16;
 948                break;
 949        case 1:
 950                type = DEV_X8;
 951                break;
 952        case 0:
 953                type = DEV_X4;
 954                break;
 955        }
 956
 957        return type;
 958}
 959
 960static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
 961{
        /*
         * ddr3_width in the documentation, but the field is also valid
         * for DDR4 on Haswell
         */
 966        return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
 967}
 968
 969static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
 970{
        /* ddr3_width in the documentation, but also valid for DDR4 */
 972        return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
 973}
 974
 975static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
 976{
 977        /* DDR4 RDIMMS and LRDIMMS are supported */
 978        return MEM_RDDR4;
 979}
 980
 981static u8 get_node_id(struct sbridge_pvt *pvt)
 982{
 983        u32 reg;
 984        pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
 985        return GET_BITFIELD(reg, 0, 2);
 986}
 987
 988static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
 989{
 990        u32 reg;
 991
 992        pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
 993        return GET_BITFIELD(reg, 0, 3);
 994}
 995
 996static u8 knl_get_node_id(struct sbridge_pvt *pvt)
 997{
 998        u32 reg;
 999
1000        pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1001        return GET_BITFIELD(reg, 0, 2);
1002}
1003
1004
1005static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1006{
1007        u32 reg;
1008
1009        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1010        return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1011}
1012
1013static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1014{
1015        u64 rc;
1016        u32 reg;
1017
1018        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1019        rc = GET_BITFIELD(reg, 26, 31);
1020        pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1021        rc = ((reg << 6) | rc) << 26;
1022
        /* TOHM is 64 MB granular; fill the unimplemented low 26 bits */
        return rc | 0x3ffffff;
1024}
1025
1026static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1027{
1028        u32 reg;
1029
1030        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1031        return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1032}
1033
1034static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1035{
1036        u64 rc;
1037        u32 reg_lo, reg_hi;
1038
1039        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1040        pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1041        rc = ((u64)reg_hi << 32) | reg_lo;
1042        return rc | 0x3ffffff;
1043}
1044
1045
1046static u64 haswell_rir_limit(u32 reg)
1047{
1048        return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
1049}
1050
1051static inline u8 sad_pkg_socket(u8 pkg)
1052{
1053        /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
1054        return ((pkg >> 3) << 2) | (pkg & 0x3);
1055}
1056
1057static inline u8 sad_pkg_ha(u8 pkg)
1058{
1059        return (pkg >> 2) & 0x1;
1060}
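
/*
 * Worked example (illustrative): pkg = 5 (0b101) decodes as
 * sad_pkg_ha(5) = 1 and sad_pkg_socket(5) = 1, i.e. home agent 1 of
 * socket 1.
 */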
1061
1062static int haswell_chan_hash(int idx, u64 addr)
1063{
1064        int i;
1065
1066        /*
1067         * XOR even bits from 12:26 to bit0 of idx,
1068         *     odd bits from 13:27 to bit1
1069         */
1070        for (i = 12; i < 28; i += 2)
1071                idx ^= (addr >> i) & 3;
1072
1073        return idx;
1074}
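
/*
 * Worked example (illustrative): haswell_chan_hash(0, 0x3000) returns 3;
 * only address bits 13:12 are set, and the first loop iteration XORs
 * them into bits 1:0 of idx.
 */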
1075
1076/****************************************************************************
1077                        Memory check routines
1078 ****************************************************************************/
1079static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
1080{
1081        struct pci_dev *pdev = NULL;
1082
1083        do {
1084                pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
1085                if (pdev && pdev->bus->number == bus)
1086                        break;
1087        } while (pdev);
1088
1089        return pdev;
1090}
1091
1092/**
1093 * check_if_ecc_is_active() - Checks if ECC is active
1094 * @bus:        Device bus
1095 * @type:       Memory controller type
1096 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
1097 *          disabled
1098 */
1099static int check_if_ecc_is_active(const u8 bus, enum type type)
1100{
1101        struct pci_dev *pdev = NULL;
1102        u32 mcmtr, id;
1103
1104        switch (type) {
1105        case IVY_BRIDGE:
1106                id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
1107                break;
1108        case HASWELL:
1109                id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
1110                break;
1111        case SANDY_BRIDGE:
1112                id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
1113                break;
1114        case BROADWELL:
1115                id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
1116                break;
1117        case KNIGHTS_LANDING:
1118                /*
1119                 * KNL doesn't group things by bus the same way
1120                 * SB/IB/Haswell does.
1121                 */
1122                id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA;
1123                break;
1124        default:
1125                return -ENODEV;
1126        }
1127
1128        if (type != KNIGHTS_LANDING)
1129                pdev = get_pdev_same_bus(bus, id);
1130        else
                pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, NULL);
1132
1133        if (!pdev) {
1134                sbridge_printk(KERN_ERR, "Couldn't find PCI device "
1135                                        "%04x:%04x! on bus %02d\n",
1136                                        PCI_VENDOR_ID_INTEL, id, bus);
1137                return -ENODEV;
1138        }
1139
1140        pci_read_config_dword(pdev,
1141                        type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr);
1142        if (!IS_ECC_ENABLED(mcmtr)) {
1143                sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
1144                return -ENODEV;
1145        }
1146        return 0;
1147}
1148
1149/* Low bits of TAD limit, and some metadata. */
1150static const u32 knl_tad_dram_limit_lo[] = {
1151        0x400, 0x500, 0x600, 0x700,
1152        0x800, 0x900, 0xa00, 0xb00,
1153};
1154
1155/* Low bits of TAD offset. */
1156static const u32 knl_tad_dram_offset_lo[] = {
1157        0x404, 0x504, 0x604, 0x704,
1158        0x804, 0x904, 0xa04, 0xb04,
1159};
1160
1161/* High 16 bits of TAD limit and offset. */
1162static const u32 knl_tad_dram_hi[] = {
1163        0x408, 0x508, 0x608, 0x708,
1164        0x808, 0x908, 0xa08, 0xb08,
1165};
1166
1167/* Number of ways a tad entry is interleaved. */
1168static const u32 knl_tad_ways[] = {
1169        8, 6, 4, 3, 2, 1,
1170};
1171
1172/*
1173 * Retrieve the n'th Target Address Decode table entry
1174 * from the memory controller's TAD table.
1175 *
1176 * @pvt:        driver private data
1177 * @entry:      which entry you want to retrieve
1178 * @mc:         which memory controller (0 or 1)
1179 * @offset:     output tad range offset
1180 * @limit:      output address of first byte above tad range
1181 * @ways:       output number of interleave ways
1182 *
1183 * The offset value has curious semantics.  It's a sort of running total
1184 * of the sizes of all the memory regions that aren't mapped in this
1185 * tad table.
1186 */
1187static int knl_get_tad(const struct sbridge_pvt *pvt,
1188                const int entry,
1189                const int mc,
1190                u64 *offset,
1191                u64 *limit,
1192                int *ways)
1193{
1194        u32 reg_limit_lo, reg_offset_lo, reg_hi;
1195        struct pci_dev *pci_mc;
1196        int way_id;
1197
1198        switch (mc) {
1199        case 0:
1200                pci_mc = pvt->knl.pci_mc0;
1201                break;
1202        case 1:
1203                pci_mc = pvt->knl.pci_mc1;
1204                break;
1205        default:
1206                WARN_ON(1);
1207                return -EINVAL;
1208        }
1209
1210        pci_read_config_dword(pci_mc,
1211                        knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1212        pci_read_config_dword(pci_mc,
1213                        knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1214        pci_read_config_dword(pci_mc,
1215                        knl_tad_dram_hi[entry], &reg_hi);
1216
1217        /* Is this TAD entry enabled? */
1218        if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1219                return -ENODEV;
1220
1221        way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1222
1223        if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1224                *ways = knl_tad_ways[way_id];
1225        } else {
1226                *ways = 0;
1227                sbridge_printk(KERN_ERR,
1228                                "Unexpected value %d in mc_tad_limit_lo wayness field\n",
1229                                way_id);
1230                return -ENODEV;
1231        }
1232
1233        /*
1234         * The least significant 6 bits of base and limit are truncated.
1235         * For limit, we fill the missing bits with 1s.
1236         */
1237        *offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1238                                ((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1239        *limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1240                                ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1241
1242        return 0;
1243}
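
/*
 * Usage sketch (illustrative, hypothetical helper): walk MC0's TAD table
 * until the first disabled entry, the same pattern knl_get_dimm_capacity()
 * uses below.
 */
static inline void example_walk_knl_tad(struct sbridge_pvt *pvt)
{
        u64 offset, limit;
        int ways, i;

        for (i = 0; i < ARRAY_SIZE(knl_tad_dram_limit_lo); i++) {
                if (knl_get_tad(pvt, i, 0, &offset, &limit, &ways))
                        break;
                edac_dbg(0, "TAD %d: limit 0x%llx, offset 0x%llx, %d ways\n",
                         i, limit, offset, ways);
        }
}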
1244
1245/* Determine which memory controller is responsible for a given channel. */
1246static int knl_channel_mc(int channel)
1247{
1248        WARN_ON(channel < 0 || channel >= 6);
1249
1250        return channel < 3 ? 1 : 0;
1251}
1252
1253/*
1254 * Get the Nth entry from EDC_ROUTE_TABLE register.
1255 * (This is the per-tile mapping of logical interleave targets to
1256 *  physical EDC modules.)
1257 *
1258 * entry 0: 0:2
1259 *       1: 3:5
1260 *       2: 6:8
1261 *       3: 9:11
1262 *       4: 12:14
1263 *       5: 15:17
1264 *       6: 18:20
1265 *       7: 21:23
1266 * reserved: 24:31
1267 */
1268static u32 knl_get_edc_route(int entry, u32 reg)
1269{
1270        WARN_ON(entry >= KNL_MAX_EDCS);
1271        return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1272}
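
/*
 * Worked example (illustrative): for reg = 0x100, entry 2 occupies
 * bits 8:6, so knl_get_edc_route(2, 0x100) returns EDC 4.
 */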
1273
1274/*
1275 * Get the Nth entry from MC_ROUTE_TABLE register.
1276 * (This is the per-tile mapping of logical interleave targets to
 *  physical DRAM channels.)
1278 *
1279 * entry 0: mc 0:2   channel 18:19
1280 *       1: mc 3:5   channel 20:21
1281 *       2: mc 6:8   channel 22:23
1282 *       3: mc 9:11  channel 24:25
1283 *       4: mc 12:14 channel 26:27
1284 *       5: mc 15:17 channel 28:29
1285 * reserved: 30:31
1286 *
1287 * Though we have 3 bits to identify the MC, we should only see
1288 * the values 0 or 1.
1289 */
1290
1291static u32 knl_get_mc_route(int entry, u32 reg)
1292{
1293        int mc, chan;
1294
1295        WARN_ON(entry >= KNL_MAX_CHANNELS);
1296
1297        mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1298        chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1299
1300        return knl_channel_remap(mc, chan);
1301}
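
/*
 * Worked example (illustrative): for reg = 0x200008, entry 1 has
 * mc = 1 (bits 5:3) and channel = 2 (bits 21:20), so
 * knl_get_mc_route(1, 0x200008) returns physical channel 2.
 */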
1302
1303/*
1304 * Render the EDC_ROUTE register in human-readable form.
1305 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1306 */
1307static void knl_show_edc_route(u32 reg, char *s)
1308{
1309        int i;
1310
1311        for (i = 0; i < KNL_MAX_EDCS; i++) {
1312                s[i*2] = knl_get_edc_route(i, reg) + '0';
1313                s[i*2+1] = '-';
1314        }
1315
1316        s[KNL_MAX_EDCS*2 - 1] = '\0';
1317}
1318
1319/*
1320 * Render the MC_ROUTE register in human-readable form.
1321 * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1322 */
1323static void knl_show_mc_route(u32 reg, char *s)
1324{
1325        int i;
1326
1327        for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1328                s[i*2] = knl_get_mc_route(i, reg) + '0';
1329                s[i*2+1] = '-';
1330        }
1331
1332        s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1333}
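
/*
 * Example output (illustrative): an all-zero MC_ROUTE register renders as
 * "3-3-3-3-3-3", since every entry then decodes to mc 0 / channel 0,
 * which knl_channel_remap() maps to physical channel 3.
 */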
1334
1335#define KNL_EDC_ROUTE 0xb8
1336#define KNL_MC_ROUTE 0xb4
1337
/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)

/* Is this rule backed by DRAM, cacheable in EDRAM? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)

/* Is this rule backed by EDC only? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1349
1350/* Is this rule mod3? */
1351#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1352
1353/*
1354 * Figure out how big our RAM modules are.
1355 *
1356 * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1357 * have to figure this out from the SAD rules, interleave lists, route tables,
1358 * and TAD rules.
1359 *
1360 * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1361 * inspect the TAD rules to figure out how large the SAD regions really are.
1362 *
1363 * When we know the real size of a SAD region and how many ways it's
1364 * interleaved, we know the individual contribution of each channel to
1365 * TAD is size/ways.
1366 *
1367 * Finally, we have to check whether each channel participates in each SAD
1368 * region.
1369 *
1370 * Fortunately, KNL only supports one DIMM per channel, so once we know how
1371 * much memory the channel uses, we know the DIMM is at least that large.
1372 * (The BIOS might possibly choose not to map all available memory, in which
1373 * case we will underreport the size of the DIMM.)
1374 *
1375 * In theory, we could try to determine the EDC sizes as well, but that would
1376 * only work in flat mode, not in cache mode.
1377 *
1378 * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1379 *            elements)
 *            elements)
 */
1381static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1382{
1383        u64 sad_base, sad_size, sad_limit = 0;
1384        u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1385        int sad_rule = 0;
1386        int tad_rule = 0;
1387        int intrlv_ways, tad_ways;
1388        u32 first_pkg, pkg;
1389        int i;
1390        u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1391        u32 dram_rule, interleave_reg;
1392        u32 mc_route_reg[KNL_MAX_CHAS];
1393        u32 edc_route_reg[KNL_MAX_CHAS];
1394        int edram_only;
1395        char edc_route_string[KNL_MAX_EDCS*2];
1396        char mc_route_string[KNL_MAX_CHANNELS*2];
1397        int cur_reg_start;
1398        int mc;
1399        int channel;
1400        int participants[KNL_MAX_CHANNELS];
1401
1402        for (i = 0; i < KNL_MAX_CHANNELS; i++)
1403                mc_sizes[i] = 0;
1404
1405        /* Read the EDC route table in each CHA. */
1406        cur_reg_start = 0;
1407        for (i = 0; i < KNL_MAX_CHAS; i++) {
1408                pci_read_config_dword(pvt->knl.pci_cha[i],
1409                                KNL_EDC_ROUTE, &edc_route_reg[i]);
1410
1411                if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1412                        knl_show_edc_route(edc_route_reg[i-1],
1413                                        edc_route_string);
1414                        if (cur_reg_start == i-1)
1415                                edac_dbg(0, "edc route table for CHA %d: %s\n",
1416                                        cur_reg_start, edc_route_string);
1417                        else
1418                                edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1419                                        cur_reg_start, i-1, edc_route_string);
1420                        cur_reg_start = i;
1421                }
1422        }
1423        knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1424        if (cur_reg_start == i-1)
1425                edac_dbg(0, "edc route table for CHA %d: %s\n",
1426                        cur_reg_start, edc_route_string);
1427        else
1428                edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1429                        cur_reg_start, i-1, edc_route_string);
1430
1431        /* Read the MC route table in each CHA. */
1432        cur_reg_start = 0;
1433        for (i = 0; i < KNL_MAX_CHAS; i++) {
1434                pci_read_config_dword(pvt->knl.pci_cha[i],
1435                        KNL_MC_ROUTE, &mc_route_reg[i]);
1436
1437                if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1438                        knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1439                        if (cur_reg_start == i-1)
1440                                edac_dbg(0, "mc route table for CHA %d: %s\n",
1441                                        cur_reg_start, mc_route_string);
1442                        else
1443                                edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1444                                        cur_reg_start, i-1, mc_route_string);
1445                        cur_reg_start = i;
1446                }
1447        }
1448        knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1449        if (cur_reg_start == i-1)
1450                edac_dbg(0, "mc route table for CHA %d: %s\n",
1451                        cur_reg_start, mc_route_string);
1452        else
1453                edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1454                        cur_reg_start, i-1, mc_route_string);
1455
1456        /* Process DRAM rules */
1457        for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1458                /* previous limit becomes the new base */
1459                sad_base = sad_limit;
1460
1461                pci_read_config_dword(pvt->pci_sad0,
1462                        pvt->info.dram_rule[sad_rule], &dram_rule);
1463
1464                if (!DRAM_RULE_ENABLE(dram_rule))
1465                        break;
1466
1467                edram_only = KNL_EDRAM_ONLY(dram_rule);
1468
1469                sad_limit = pvt->info.sad_limit(dram_rule)+1;
1470                sad_size = sad_limit - sad_base;
1471
1472                pci_read_config_dword(pvt->pci_sad0,
1473                        pvt->info.interleave_list[sad_rule], &interleave_reg);
1474
1475                /*
1476                 * Find out how many ways this dram rule is interleaved.
1477                 * We stop when we see the first channel again.
1478                 */
1479                first_pkg = sad_pkg(pvt->info.interleave_pkg,
1480                                                interleave_reg, 0);
1481                for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1482                        pkg = sad_pkg(pvt->info.interleave_pkg,
1483                                                interleave_reg, intrlv_ways);
1484
1485                        if ((pkg & 0x8) == 0) {
1486                                /*
1487                                 * 0 bit means memory is non-local,
1488                                 * which KNL doesn't support
1489                                 */
1490                                edac_dbg(0, "Unexpected interleave target %d\n",
1491                                        pkg);
1492                                return -1;
1493                        }
1494
1495                        if (pkg == first_pkg)
1496                                break;
1497                }
1498                if (KNL_MOD3(dram_rule))
1499                        intrlv_ways *= 3;
1500
1501                edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1502                        sad_rule,
1503                        sad_base,
1504                        sad_limit,
1505                        intrlv_ways,
1506                        edram_only ? ", EDRAM" : "");
1507
1508                /*
1509                 * Find out how big the SAD region really is by iterating
1510                 * over TAD tables (SAD regions may contain holes).
1511                 * Each memory controller might have a different TAD table, so
1512                 * we have to look at both.
1513                 *
1514                 * Livespace is the memory that's mapped in this TAD table,
1515                 * deadspace is the holes (this could be the MMIO hole, or it
1516                 * could be memory that's mapped by the other TAD table but
1517                 * not this one).
1518                 */
1519                for (mc = 0; mc < 2; mc++) {
1520                        sad_actual_size[mc] = 0;
1521                        tad_livespace = 0;
1522                        for (tad_rule = 0;
1523                                        tad_rule < ARRAY_SIZE(
1524                                                knl_tad_dram_limit_lo);
1525                                        tad_rule++) {
1526                                if (knl_get_tad(pvt,
1527                                                tad_rule,
1528                                                mc,
1529                                                &tad_deadspace,
1530                                                &tad_limit,
1531                                                &tad_ways))
1532                                        break;
1533
1534                                tad_size = (tad_limit+1) -
1535                                        (tad_livespace + tad_deadspace);
1536                                tad_livespace += tad_size;
1537                                tad_base = (tad_limit+1) - tad_size;
1538
1539                                if (tad_base < sad_base) {
1540                                        if (tad_limit > sad_base)
1541                                                edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1542                                } else if (tad_base < sad_limit) {
1543                                        if (tad_limit+1 > sad_limit) {
1544                                                edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1545                                        } else {
1546                                                /* TAD region is completely inside SAD region */
1547                                                edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1548                                                        tad_rule, tad_base,
1549                                                        tad_limit, tad_size,
1550                                                        mc);
1551                                                sad_actual_size[mc] += tad_size;
1552                                        }
1553                                }
1554                                tad_base = tad_limit+1;
1555                        }
1556                }
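                /*
                 * Worked example of the arithmetic above (hypothetical
                 * values): rule 0 reports tad_limit+1 = 4 GiB with no
                 * deadspace, so it maps 4 GiB at base 0; rule 1 reports
                 * tad_limit+1 = 10 GiB with 2 GiB of accumulated deadspace
                 * (e.g. the MMIO hole), so tad_size = 10 - (4 + 2) = 4 GiB
                 * at tad_base = 6 GiB.
                 */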
1557
1558                for (mc = 0; mc < 2; mc++) {
1559                        edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1560                                mc, sad_actual_size[mc], sad_actual_size[mc]);
1561                }
1562
1563                /* Ignore EDRAM rule */
1564                if (edram_only)
1565                        continue;
1566
1567                /* Figure out which channels participate in interleave. */
1568                for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1569                        participants[channel] = 0;
1570
1571                /* For each channel, check whether at least one CHA
1572                 * routes some target to that channel.
1573                 */
1574                for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1575                        int target;
1576                        int cha;
1577
1578                        for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1579                                for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1580                                        if (knl_get_mc_route(target,
1581                                                mc_route_reg[cha]) == channel
1582                                                && !participants[channel]) {
1583                                                participants[channel] = 1;
1584                                                break;
1585                                        }
1586                                }
1587                        }
1588                }
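                /*
                 * Example of the route check above: if any CHA's MC route
                 * register maps, say, target 2 to channel 4, then
                 * participants[4] is set and channel 4 receives a share of
                 * this SAD rule below.
                 */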
1589
1590                for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1591                        mc = knl_channel_mc(channel);
1592                        if (participants[channel]) {
1593                                edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1594                                        channel,
1595                                        sad_actual_size[mc]/intrlv_ways,
1596                                        sad_rule);
1597                                mc_sizes[channel] +=
1598                                        sad_actual_size[mc]/intrlv_ways;
1599                        }
1600                }
1601        }
1602
1603        return 0;
1604}
1605
1606static int get_dimm_config(struct mem_ctl_info *mci)
1607{
1608        struct sbridge_pvt *pvt = mci->pvt_info;
1609        struct dimm_info *dimm;
1610        unsigned i, j, banks, ranks, rows, cols, npages;
1611        u64 size;
1612        u32 reg;
1613        enum edac_type mode;
1614        enum mem_type mtype;
1615        int channels = pvt->info.type == KNIGHTS_LANDING ?
1616                KNL_MAX_CHANNELS : NUM_CHANNELS;
1617        u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1618
1619        if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1620                pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
1621                pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1622        }
1623        if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1624                        pvt->info.type == KNIGHTS_LANDING)
1625                pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1626        else
1627                pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1628
1629        if (pvt->info.type == KNIGHTS_LANDING)
1630                pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1631        else
1632                pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1633
1634        pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1635        edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1636                 pvt->sbridge_dev->mc,
1637                 pvt->sbridge_dev->node_id,
1638                 pvt->sbridge_dev->source_id);
1639
1640        /* KNL doesn't support mirroring or lockstep,
1641         * and is always closed page
1642         */
1643        if (pvt->info.type == KNIGHTS_LANDING) {
1644                mode = EDAC_S4ECD4ED;
1645                pvt->is_mirrored = false;
1646
1647                if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1648                        return -1;
1649        } else {
1650                pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
1651                if (IS_MIRROR_ENABLED(reg)) {
1652                        edac_dbg(0, "Memory mirror is enabled\n");
1653                        pvt->is_mirrored = true;
1654                } else {
1655                        edac_dbg(0, "Memory mirror is disabled\n");
1656                        pvt->is_mirrored = false;
1657                }
1658
1659                pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
1660                if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1661                        edac_dbg(0, "Lockstep is enabled\n");
1662                        mode = EDAC_S8ECD8ED;
1663                        pvt->is_lockstep = true;
1664                } else {
1665                        edac_dbg(0, "Lockstep is disabled\n");
1666                        mode = EDAC_S4ECD4ED;
1667                        pvt->is_lockstep = false;
1668                }
1669                if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1670                        edac_dbg(0, "address map is on closed page mode\n");
1671                        pvt->is_close_pg = true;
1672                } else {
1673                        edac_dbg(0, "address map is on open page mode\n");
1674                        pvt->is_close_pg = false;
1675                }
1676        }
1677
1678        mtype = pvt->info.get_memory_type(pvt);
1679        if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1680                edac_dbg(0, "Memory is registered\n");
1681        else if (mtype == MEM_UNKNOWN)
1682                edac_dbg(0, "Cannot determine memory type\n");
1683        else
1684                edac_dbg(0, "Memory is unregistered\n");
1685
1686        if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1687                banks = 16;
1688        else
1689                banks = 8;
1690
1691        for (i = 0; i < channels; i++) {
1692                u32 mtr;
1693
1694                int max_dimms_per_channel;
1695
1696                if (pvt->info.type == KNIGHTS_LANDING) {
1697                        max_dimms_per_channel = 1;
1698                        if (!pvt->knl.pci_channel[i])
1699                                continue;
1700                } else {
1701                        max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1702                        if (!pvt->pci_tad[i])
1703                                continue;
1704                }
1705
1706                for (j = 0; j < max_dimms_per_channel; j++) {
1707                        dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1708                                       i, j, 0);
1709                        if (pvt->info.type == KNIGHTS_LANDING) {
1710                                pci_read_config_dword(pvt->knl.pci_channel[i],
1711                                        knl_mtr_reg, &mtr);
1712                        } else {
1713                                pci_read_config_dword(pvt->pci_tad[i],
1714                                        mtr_regs[j], &mtr);
1715                        }
1716                        edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1717                        if (IS_DIMM_PRESENT(mtr)) {
1718                                pvt->channel[i].dimms++;
1719
1720                                ranks = numrank(pvt->info.type, mtr);
1721
1722                                if (pvt->info.type == KNIGHTS_LANDING) {
1723                                        /* For DDR4, this is fixed. */
1724                                        cols = 1 << 10;
1725                                        rows = knl_mc_sizes[i] /
1726                                                ((u64) cols * ranks * banks * 8);
1727                                } else {
1728                                        rows = numrow(mtr);
1729                                        cols = numcol(mtr);
1730                                }
1731
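                                /*
                                 * Sketch of the size arithmetic below: each
                                 * (row, col, bank, rank) address holds 8
                                 * bytes (64-bit bus), hence the >> (20 - 3)
                                 * to convert to MiB. E.g. rows = 1 << 16,
                                 * cols = 1 << 10, banks = 16, ranks = 2
                                 * gives (1 << 31) * 8 bytes = 16384 MiB.
                                 * The KNL branch above inverts the same
                                 * formula to derive rows from the channel
                                 * size.
                                 */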
1732                                size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1733                                npages = MiB_TO_PAGES(size);
1734
1735                                edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1736                                         pvt->sbridge_dev->mc, i/4, i%4, j,
1737                                         size, npages,
1738                                         banks, ranks, rows, cols);
1739
1740                                dimm->nr_pages = npages;
1741                                dimm->grain = 32;
1742                                dimm->dtype = pvt->info.get_width(pvt, mtr);
1743                                dimm->mtype = mtype;
1744                                dimm->edac_mode = mode;
1745                                snprintf(dimm->label, sizeof(dimm->label),
1746                                         "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1747                                         pvt->sbridge_dev->source_id, i/4, i%4, j);
1748                        }
1749                }
1750        }
1751
1752        return 0;
1753}
1754
1755static void get_memory_layout(const struct mem_ctl_info *mci)
1756{
1757        struct sbridge_pvt *pvt = mci->pvt_info;
1758        int i, j, k, n_sads, n_tads, sad_interl;
1759        u32 reg;
1760        u64 limit, prv = 0;
1761        u64 tmp_mb;
1762        u32 gb, mb;
1763        u32 rir_way;
1764
1765        /*
1766         * Step 1) Get TOLM/TOHM ranges
1767         */
1768
1769        pvt->tolm = pvt->info.get_tolm(pvt);
1770        tmp_mb = (1 + pvt->tolm) >> 20;
1771
1772        gb = div_u64_rem(tmp_mb, 1024, &mb);
1773        edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1774                gb, (mb*1000)/1024, (u64)pvt->tolm);
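        /*
         * Illustration of the pretty-printing above: tmp_mb = 1536 yields
         * gb = 1 and mb = 512, and (512 * 1000) / 1024 = 500, printed as
         * "1.500 GB".
         */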
1775
1776        /* Address range is already 45:25 */
1777        pvt->tohm = pvt->info.get_tohm(pvt);
1778        tmp_mb = (1 + pvt->tohm) >> 20;
1779
1780        gb = div_u64_rem(tmp_mb, 1024, &mb);
1781        edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1782                gb, (mb*1000)/1024, (u64)pvt->tohm);
1783
1784        /*
1785         * Step 2) Get SAD range and SAD Interleave list
1786         * TAD registers contain the interleave wayness. However, it
1787         * seems simpler to just discover it indirectly, with the
1788         * algorithm below.
1789         */
1790        prv = 0;
1791        for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1792                /* SAD_LIMIT Address range is 45:26 */
1793                pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1794                                      &reg);
1795                limit = pvt->info.sad_limit(reg);
1796
1797                if (!DRAM_RULE_ENABLE(reg))
1798                        continue;
1799
1800                if (limit <= prv)
1801                        break;
1802
1803                tmp_mb = (limit + 1) >> 20;
1804                gb = div_u64_rem(tmp_mb, 1024, &mb);
1805                edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1806                         n_sads,
1807                         show_dram_attr(pvt->info.dram_attr(reg)),
1808                         gb, (mb*1000)/1024,
1809                         ((u64)tmp_mb) << 20L,
1810                         pvt->info.show_interleave_mode(reg),
1811                         reg);
1812                prv = limit;
1813
1814                pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1815                                      &reg);
1816                sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1817                for (j = 0; j < 8; j++) {
1818                        u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1819                        if (j > 0 && sad_interl == pkg)
1820                                break;
1821
1822                        edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1823                                 n_sads, j, pkg);
1824                }
1825        }
1826
1827        if (pvt->info.type == KNIGHTS_LANDING)
1828                return;
1829
1830        /*
1831         * Step 3) Get TAD range
1832         */
1833        prv = 0;
1834        for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1835                pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
1836                                      &reg);
1837                limit = TAD_LIMIT(reg);
1838                if (limit <= prv)
1839                        break;
1840                tmp_mb = (limit + 1) >> 20;
1841
1842                gb = div_u64_rem(tmp_mb, 1024, &mb);
1843                edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1844                         n_tads, gb, (mb*1000)/1024,
1845                         ((u64)tmp_mb) << 20L,
1846                         (u32)(1 << TAD_SOCK(reg)),
1847                         (u32)TAD_CH(reg) + 1,
1848                         (u32)TAD_TGT0(reg),
1849                         (u32)TAD_TGT1(reg),
1850                         (u32)TAD_TGT2(reg),
1851                         (u32)TAD_TGT3(reg),
1852                         reg);
1853                prv = limit;
1854        }
1855
1856        /*
1857         * Step 4) Get TAD offsets, for each channel
1858         */
1859        for (i = 0; i < NUM_CHANNELS; i++) {
1860                if (!pvt->channel[i].dimms)
1861                        continue;
1862                for (j = 0; j < n_tads; j++) {
1863                        pci_read_config_dword(pvt->pci_tad[i],
1864                                              tad_ch_nilv_offset[j],
1865                                              &reg);
1866                        tmp_mb = TAD_OFFSET(reg) >> 20;
1867                        gb = div_u64_rem(tmp_mb, 1024, &mb);
1868                        edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1869                                 i, j,
1870                                 gb, (mb*1000)/1024,
1871                                 ((u64)tmp_mb) << 20L,
1872                                 reg);
1873                }
1874        }
1875
1876        /*
1877         * Step 5) Get RIR wayness/limit, for each channel
1878         */
1879        for (i = 0; i < NUM_CHANNELS; i++) {
1880                if (!pvt->channel[i].dimms)
1881                        continue;
1882                for (j = 0; j < MAX_RIR_RANGES; j++) {
1883                        pci_read_config_dword(pvt->pci_tad[i],
1884                                              rir_way_limit[j],
1885                                              &reg);
1886
1887                        if (!IS_RIR_VALID(reg))
1888                                continue;
1889
1890                        tmp_mb = pvt->info.rir_limit(reg) >> 20;
1891                        rir_way = 1 << RIR_WAY(reg);
1892                        gb = div_u64_rem(tmp_mb, 1024, &mb);
1893                        edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1894                                 i, j,
1895                                 gb, (mb*1000)/1024,
1896                                 ((u64)tmp_mb) << 20L,
1897                                 rir_way,
1898                                 reg);
1899
1900                        for (k = 0; k < rir_way; k++) {
1901                                pci_read_config_dword(pvt->pci_tad[i],
1902                                                      rir_offset[j][k],
1903                                                      &reg);
1904                                tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1905
1906                                gb = div_u64_rem(tmp_mb, 1024, &mb);
1907                                edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1908                                         i, j, k,
1909                                         gb, (mb*1000)/1024,
1910                                         ((u64)tmp_mb) << 20L,
1911                                         (u32)RIR_RNK_TGT(pvt->info.type, reg),
1912                                         reg);
1913                        }
1914                }
1915        }
1916}
1917
1918static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
1919{
1920        struct sbridge_dev *sbridge_dev;
1921
1922        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1923                if (sbridge_dev->node_id == node_id)
1924                        return sbridge_dev->mci;
1925        }
1926        return NULL;
1927}
1928
1929static int get_memory_error_data(struct mem_ctl_info *mci,
1930                                 u64 addr,
1931                                 u8 *socket, u8 *ha,
1932                                 long *channel_mask,
1933                                 u8 *rank,
1934                                 char **area_type, char *msg)
1935{
1936        struct mem_ctl_info     *new_mci;
1937        struct sbridge_pvt *pvt = mci->pvt_info;
1938        struct pci_dev          *pci_ha;
1939        int                     n_rir, n_sads, n_tads, sad_way, sck_xch;
1940        int                     sad_interl, idx, base_ch;
1941        int                     interleave_mode, shiftup = 0;
1942        unsigned                sad_interleave[pvt->info.max_interleave];
1943        u32                     reg, dram_rule;
1944        u8                      ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0;
1945        u32                     tad_offset;
1946        u32                     rir_way;
1947        u32                     mb, gb;
1948        u64                     ch_addr, offset, limit = 0, prv = 0;
1949
1950
1951        /*
1952         * Step 0) Check if the address is at special memory ranges
1953         * The check below is probably enough to cover all cases where
1954         * the error is not inside memory, except for the legacy
1955         * range (e.g. VGA addresses). It is unlikely, however, that the
1956         * memory controller would generate an error on that range.
1957         */
1958        if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1959                sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1960                return -EINVAL;
1961        }
1962        if (addr >= (u64)pvt->tohm) {
1963                sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1964                return -EINVAL;
1965        }
1966
1967        /*
1968         * Step 1) Get socket
1969         */
1970        for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1971                pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1972                                      &reg);
1973
1974                if (!DRAM_RULE_ENABLE(reg))
1975                        continue;
1976
1977                limit = pvt->info.sad_limit(reg);
1978                if (limit <= prv) {
1979                        sprintf(msg, "Can't discover the memory socket");
1980                        return -EINVAL;
1981                }
1982                if  (addr <= limit)
1983                        break;
1984                prv = limit;
1985        }
1986        if (n_sads == pvt->info.max_sad) {
1987                sprintf(msg, "Can't discover the memory socket");
1988                return -EINVAL;
1989        }
1990        dram_rule = reg;
1991        *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1992        interleave_mode = pvt->info.interleave_mode(dram_rule);
1993
1994        pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1995                              &reg);
1996
1997        if (pvt->info.type == SANDY_BRIDGE) {
1998                sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1999                for (sad_way = 0; sad_way < 8; sad_way++) {
2000                        u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
2001                        if (sad_way > 0 && sad_interl == pkg)
2002                                break;
2003                        sad_interleave[sad_way] = pkg;
2004                        edac_dbg(0, "SAD interleave #%d: %d\n",
2005                                 sad_way, sad_interleave[sad_way]);
2006                }
2007                edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2008                         pvt->sbridge_dev->mc,
2009                         n_sads,
2010                         addr,
2011                         limit,
2012                         sad_way + 7,
2013                         !interleave_mode ? "" : "XOR[18:16]");
2014                if (interleave_mode)
2015                        idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2016                else
2017                        idx = (addr >> 6) & 7;
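                /*
                 * Example (hypothetical address, XOR mode off): addr = 0x140
                 * gives idx = (0x140 >> 6) & 7 = 5; with sad_way = 4 the
                 * switch below masks it to 5 & 3 = 1, selecting
                 * sad_interleave[1].
                 */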
2018                switch (sad_way) {
2019                case 1:
2020                        idx = 0;
2021                        break;
2022                case 2:
2023                        idx = idx & 1;
2024                        break;
2025                case 4:
2026                        idx = idx & 3;
2027                        break;
2028                case 8:
2029                        break;
2030                default:
2031                        sprintf(msg, "Can't discover socket interleave");
2032                        return -EINVAL;
2033                }
2034                *socket = sad_interleave[idx];
2035                edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2036                         idx, sad_way, *socket);
2037        } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2038                int bits, a7mode = A7MODE(dram_rule);
2039
2040                if (a7mode) {
2041                        /* A7 mode swaps P9 with P6 */
2042                        bits = GET_BITFIELD(addr, 7, 8) << 1;
2043                        bits |= GET_BITFIELD(addr, 9, 9);
2044                } else
2045                        bits = GET_BITFIELD(addr, 6, 8);
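                /*
                 * Example of the A7 swap (hypothetical address): for
                 * addr = 0x280 (bit 9 = 1, bit 8 = 0, bit 7 = 1), a7mode
                 * yields bits = (0b01 << 1) | 1 = 3, while plain mode
                 * would read addr[8:6] = 2. Bit 9 stands in for bit 6.
                 */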
2046
2047                if (interleave_mode == 0) {
2048                        /* interleave mode will XOR {8,7,6} with {18,17,16} */
2049                        idx = GET_BITFIELD(addr, 16, 18);
2050                        idx ^= bits;
2051                } else
2052                        idx = bits;
2053
2054                pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2055                *socket = sad_pkg_socket(pkg);
2056                sad_ha = sad_pkg_ha(pkg);
2057                if (sad_ha)
2058                        ch_add = 4;
2059
2060                if (a7mode) {
2061                        /* MCChanShiftUpEnable */
2062                        pci_read_config_dword(pvt->pci_ha0,
2063                                              HASWELL_HASYSDEFEATURE2, &reg);
2064                        shiftup = GET_BITFIELD(reg, 22, 22);
2065                }
2066
2067                edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2068                         idx, *socket, sad_ha, shiftup);
2069        } else {
2070                /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2071                idx = (addr >> 6) & 7;
2072                pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2073                *socket = sad_pkg_socket(pkg);
2074                sad_ha = sad_pkg_ha(pkg);
2075                if (sad_ha)
2076                        ch_add = 4;
2077                edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2078                         idx, *socket, sad_ha);
2079        }
2080
2081        *ha = sad_ha;
2082
2083        /*
2084         * Move to the proper node structure, in order to access the
2085         * right PCI registers
2086         */
2087        new_mci = get_mci_for_node_id(*socket);
2088        if (!new_mci) {
2089                sprintf(msg, "Struct for socket #%u wasn't initialized",
2090                        *socket);
2091                return -EINVAL;
2092        }
2093        mci = new_mci;
2094        pvt = mci->pvt_info;
2095
2096        /*
2097         * Step 2) Get memory channel
2098         */
2099        prv = 0;
2100        if (pvt->info.type == SANDY_BRIDGE)
2101                pci_ha = pvt->pci_ha0;
2102        else {
2103                if (sad_ha)
2104                        pci_ha = pvt->pci_ha1;
2105                else
2106                        pci_ha = pvt->pci_ha0;
2107        }
2108        for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2109                pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2110                limit = TAD_LIMIT(reg);
2111                if (limit <= prv) {
2112                        sprintf(msg, "Can't discover the memory channel");
2113                        return -EINVAL;
2114                }
2115                if  (addr <= limit)
2116                        break;
2117                prv = limit;
2118        }
2119        if (n_tads == MAX_TAD) {
2120                sprintf(msg, "Can't discover the memory channel");
2121                return -EINVAL;
2122        }
2123
2124        ch_way = TAD_CH(reg) + 1;
2125        sck_way = TAD_SOCK(reg);
2126
2127        if (ch_way == 3)
2128                idx = addr >> 6;
2129        else {
2130                idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2131                if (pvt->is_chan_hash)
2132                        idx = haswell_chan_hash(idx, addr);
2133        }
2134        idx = idx % ch_way;
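        /*
         * Example (no channel hashing): with sck_way = 1, shiftup = 0 and
         * ch_way = 2, idx = (addr >> 7) & 0x3 reduced mod 2 picks TGT0 or
         * TGT1 in the switch below.
         */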
2135
2136        /*
2137         * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2138         */
2139        switch (idx) {
2140        case 0:
2141                base_ch = TAD_TGT0(reg);
2142                break;
2143        case 1:
2144                base_ch = TAD_TGT1(reg);
2145                break;
2146        case 2:
2147                base_ch = TAD_TGT2(reg);
2148                break;
2149        case 3:
2150                base_ch = TAD_TGT3(reg);
2151                break;
2152        default:
2153                sprintf(msg, "Can't discover the TAD target");
2154                return -EINVAL;
2155        }
2156        *channel_mask = 1 << base_ch;
2157
2158        pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
2159                                tad_ch_nilv_offset[n_tads],
2160                                &tad_offset);
2161
2162        if (pvt->is_mirrored) {
2163                *channel_mask |= 1 << ((base_ch + 2) % 4);
2164                switch (ch_way) {
2165                case 2:
2166                case 4:
2167                        sck_xch = (1 << sck_way) * (ch_way >> 1);
2168                        break;
2169                default:
2170                        sprintf(msg, "Invalid mirror set. Can't decode addr");
2171                        return -EINVAL;
2172                }
2173        } else
2174                sck_xch = (1 << sck_way) * ch_way;
2175
2176        if (pvt->is_lockstep)
2177                *channel_mask |= 1 << ((base_ch + 1) % 4);
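        /*
         * Example mask (hypothetical): base_ch = 1 sets channel_mask to
         * 0x02; mirroring adds channel (1 + 2) % 4 = 3 (mask 0x0a), and
         * lockstep would add channel 2 on top (mask 0x0e).
         */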
2178
2179        offset = TAD_OFFSET(tad_offset);
2180
2181        edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2182                 n_tads,
2183                 addr,
2184                 limit,
2185                 sck_way,
2186                 ch_way,
2187                 offset,
2188                 idx,
2189                 base_ch,
2190                 *channel_mask);
2191
2192        /* Calculate channel address */
2193        /* Remove the TAD offset */
2194
2195        if (offset > addr) {
2196                sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2197                        offset, addr);
2198                return -EINVAL;
2199        }
2200
2201        ch_addr = addr - offset;
2202        ch_addr >>= (6 + shiftup);
2203        ch_addr /= sck_xch;
2204        ch_addr <<= (6 + shiftup);
2205        ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
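        /*
         * Worked example (hypothetical values): addr = 0x40001e5,
         * offset = 0, shiftup = 0, sck_xch = 2: the address is shifted
         * right by 6, divided by 2, shifted back and recombined with the
         * low 6 bits, giving ch_addr = 0x20000e5. The socket/channel
         * interleave stride is divided out at cache-line granularity.
         */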
2206
2207        /*
2208         * Step 3) Decode rank
2209         */
2210        for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2211                pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
2212                                      rir_way_limit[n_rir],
2213                                      &reg);
2214
2215                if (!IS_RIR_VALID(reg))
2216                        continue;
2217
2218                limit = pvt->info.rir_limit(reg);
2219                gb = div_u64_rem(limit >> 20, 1024, &mb);
2220                edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2221                         n_rir,
2222                         gb, (mb*1000)/1024,
2223                         limit,
2224                         1 << RIR_WAY(reg));
2225                if  (ch_addr <= limit)
2226                        break;
2227        }
2228        if (n_rir == MAX_RIR_RANGES) {
2229                sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2230                        ch_addr);
2231                return -EINVAL;
2232        }
2233        rir_way = RIR_WAY(reg);
2234
2235        if (pvt->is_close_pg)
2236                idx = (ch_addr >> 6);
2237        else
2238                idx = (ch_addr >> 13);  /* FIXME: Datasheet says to shift by 15 */
2239        idx %= 1 << rir_way;
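        /*
         * Continuing the example above in closed-page mode:
         * idx = 0x20000e5 >> 6 = 0x80003; with rir_way = 2 this is
         * reduced mod 4 to 3, selecting rir_offset[n_rir][3].
         */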
2240
2241        pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
2242                              rir_offset[n_rir][idx],
2243                              &reg);
2244        *rank = RIR_RNK_TGT(pvt->info.type, reg);
2245
2246        edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2247                 n_rir,
2248                 ch_addr,
2249                 limit,
2250                 rir_way,
2251                 idx);
2252
2253        return 0;
2254}
2255
2256/****************************************************************************
2257        Device initialization routines: put/get, init/exit
2258 ****************************************************************************/
2259
2260/*
2261 *      sbridge_put_all_devices - 'put' all the devices that we have
2262 *                                reserved via 'get'
2263 */
2264static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2265{
2266        int i;
2267
2268        edac_dbg(0, "\n");
2269        for (i = 0; i < sbridge_dev->n_devs; i++) {
2270                struct pci_dev *pdev = sbridge_dev->pdev[i];
2271                if (!pdev)
2272                        continue;
2273                edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2274                         pdev->bus->number,
2275                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2276                pci_dev_put(pdev);
2277        }
2278}
2279
2280static void sbridge_put_all_devices(void)
2281{
2282        struct sbridge_dev *sbridge_dev, *tmp;
2283
2284        list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2285                sbridge_put_devices(sbridge_dev);
2286                free_sbridge_dev(sbridge_dev);
2287        }
2288}
2289
2290static int sbridge_get_onedevice(struct pci_dev **prev,
2291                                 u8 *num_mc,
2292                                 const struct pci_id_table *table,
2293                                 const unsigned devno,
2294                                 const int multi_bus)
2295{
2296        struct sbridge_dev *sbridge_dev;
2297        const struct pci_id_descr *dev_descr = &table->descr[devno];
2298        struct pci_dev *pdev = NULL;
2299        u8 bus = 0;
2300
2301        sbridge_printk(KERN_DEBUG,
2302                "Looking for PCI ID %04x:%04x\n",
2303                PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2304
2305        pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2306                              dev_descr->dev_id, *prev);
2307
2308        if (!pdev) {
2309                if (*prev) {
2310                        *prev = pdev;
2311                        return 0;
2312                }
2313
2314                if (dev_descr->optional)
2315                        return 0;
2316
2317                /* if the HA wasn't found */
2318                if (devno == 0)
2319                        return -ENODEV;
2320
2321                sbridge_printk(KERN_INFO,
2322                        "Device not found: %04x:%04x\n",
2323                        PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2324
2325                /* End of list, leave */
2326                return -ENODEV;
2327        }
2328        bus = pdev->bus->number;
2329
2330        sbridge_dev = get_sbridge_dev(bus, multi_bus);
2331        if (!sbridge_dev) {
2332                sbridge_dev = alloc_sbridge_dev(bus, table);
2333                if (!sbridge_dev) {
2334                        pci_dev_put(pdev);
2335                        return -ENOMEM;
2336                }
2337                (*num_mc)++;
2338        }
2339
2340        if (sbridge_dev->pdev[devno]) {
2341                sbridge_printk(KERN_ERR,
2342                        "Duplicated device for %04x:%04x\n",
2343                        PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2344                pci_dev_put(pdev);
2345                return -ENODEV;
2346        }
2347
2348        sbridge_dev->pdev[devno] = pdev;
2349
2350        /* Be sure that the device is enabled */
2351        if (unlikely(pci_enable_device(pdev) < 0)) {
2352                sbridge_printk(KERN_ERR,
2353                        "Couldn't enable %04x:%04x\n",
2354                        PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2355                return -ENODEV;
2356        }
2357
2358        edac_dbg(0, "Detected %04x:%04x\n",
2359                 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2360
2361        /*
2362         * As stated in drivers/pci/search.c, the reference count for
2363         * @from is always decremented if it is not %NULL. Since we need
2364         * to walk all devices up to NULL, take an extra reference here.
2365         */
2366        pci_dev_get(pdev);
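        /*
         * Illustration of that pairing: pci_get_device(vendor, id, NULL)
         * returns the first match with a reference held; passing the
         * result back as @from drops that reference while taking one on
         * the next match. The pci_dev_get() above keeps each device
         * pinned until sbridge_put_devices() releases it.
         */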
2367
2368        *prev = pdev;
2369
2370        return 0;
2371}
2372
2373/*
2374 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2375 *                           devices we want to reference for this driver.
2376 * @num_mc: pointer to the memory controllers count, to be incremented in case
2377 *          of success.
2378 * @table: model specific table
2379 *
2380 * Returns 0 in case of success, or an error code otherwise.
2381 */
2382static int sbridge_get_all_devices(u8 *num_mc,
2383                                        const struct pci_id_table *table)
2384{
2385        int i, rc;
2386        struct pci_dev *pdev = NULL;
2387        int allow_dups = 0;
2388        int multi_bus = 0;
2389
2390        if (table->type == KNIGHTS_LANDING)
2391                allow_dups = multi_bus = 1;
2392        while (table && table->descr) {
2393                for (i = 0; i < table->n_devs; i++) {
2394                        if (!allow_dups || i == 0 ||
2395                                        table->descr[i].dev_id !=
2396                                                table->descr[i-1].dev_id) {
2397                                pdev = NULL;
2398                        }
2399                        do {
2400                                rc = sbridge_get_onedevice(&pdev, num_mc,
2401                                                           table, i, multi_bus);
2402                                if (rc < 0) {
2403                                        if (i == 0) {
2404                                                i = table->n_devs;
2405                                                break;
2406                                        }
2407                                        sbridge_put_all_devices();
2408                                        return -ENODEV;
2409                                }
2410                        } while (pdev && !allow_dups);
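                        /*
                         * Example: the KNL table repeats the CHA dev_id for
                         * many entries; since pdev is not reset between
                         * duplicates, each call resumes the search from the
                         * previous instance rather than re-finding the
                         * first one.
                         */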
2411                }
2412                table++;
2413        }
2414
2415        return 0;
2416}
2417
2418static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2419                                 struct sbridge_dev *sbridge_dev)
2420{
2421        struct sbridge_pvt *pvt = mci->pvt_info;
2422        struct pci_dev *pdev;
2423        u8 saw_chan_mask = 0;
2424        int i;
2425
2426        for (i = 0; i < sbridge_dev->n_devs; i++) {
2427                pdev = sbridge_dev->pdev[i];
2428                if (!pdev)
2429                        continue;
2430
2431                switch (pdev->device) {
2432                case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2433                        pvt->pci_sad0 = pdev;
2434                        break;
2435                case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2436                        pvt->pci_sad1 = pdev;
2437                        break;
2438                case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2439                        pvt->pci_br0 = pdev;
2440                        break;
2441                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2442                        pvt->pci_ha0 = pdev;
2443                        break;
2444                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2445                        pvt->pci_ta = pdev;
2446                        break;
2447                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2448                        pvt->pci_ras = pdev;
2449                        break;
2450                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2451                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2452                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2453                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2454                {
2455                        int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
2456                        pvt->pci_tad[id] = pdev;
2457                        saw_chan_mask |= 1 << id;
2458                }
2459                        break;
2460                case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2461                        pvt->pci_ddrio = pdev;
2462                        break;
2463                default:
2464                        goto error;
2465                }
2466
2467                edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2468                         pdev->vendor, pdev->device,
2469                         sbridge_dev->bus,
2470                         pdev);
2471        }
2472
2473        /* Check if everything was registered */
2474        if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
2475            !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta)
2476                goto enodev;
2477
2478        if (saw_chan_mask != 0x0f)
2479                goto enodev;
2480        return 0;
2481
2482enodev:
2483        sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2484        return -ENODEV;
2485
2486error:
2487        sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2488                       PCI_VENDOR_ID_INTEL, pdev->device);
2489        return -EINVAL;
2490}
2491
2492static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2493                                 struct sbridge_dev *sbridge_dev)
2494{
2495        struct sbridge_pvt *pvt = mci->pvt_info;
2496        struct pci_dev *pdev;
2497        u8 saw_chan_mask = 0;
2498        int i;
2499
2500        for (i = 0; i < sbridge_dev->n_devs; i++) {
2501                pdev = sbridge_dev->pdev[i];
2502                if (!pdev)
2503                        continue;
2504
2505                switch (pdev->device) {
2506                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2507                        pvt->pci_ha0 = pdev;
2508                        break;
2509                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2510                        pvt->pci_ta = pdev;
                        break;
2511                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2512                        pvt->pci_ras = pdev;
2513                        break;
2514                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2515                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2516                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2517                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2518                {
2519                        int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
2520                        pvt->pci_tad[id] = pdev;
2521                        saw_chan_mask |= 1 << id;
2522                }
2523                        break;
2524                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2525                        pvt->pci_ddrio = pdev;
2526                        break;
2527                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2528                        pvt->pci_ddrio = pdev;
2529                        break;
2530                case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2531                        pvt->pci_sad0 = pdev;
2532                        break;
2533                case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2534                        pvt->pci_br0 = pdev;
2535                        break;
2536                case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2537                        pvt->pci_br1 = pdev;
2538                        break;
2539                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2540                        pvt->pci_ha1 = pdev;
2541                        break;
2542                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2543                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2544                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2545                case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2546                {
2547                        int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4;
2548                        pvt->pci_tad[id] = pdev;
2549                        saw_chan_mask |= 1 << id;
2550                }
2551                        break;
2552                default:
2553                        goto error;
2554                }
2555
2556                edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2557                         sbridge_dev->bus,
2558                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2559                         pdev);
2560        }
2561
2562        /* Check if everything was registered */
2563        if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
2564            !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras  ||
2565            !pvt->pci_ta)
2566                goto enodev;
2567
2568        if (saw_chan_mask != 0x0f && /* -EN */
2569            saw_chan_mask != 0x33 && /* -EP */
2570            saw_chan_mask != 0xff)   /* -EX */
2571                goto enodev;
2572        return 0;
2573
2574enodev:
2575        sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2576        return -ENODEV;
2577
2578error:
2579        sbridge_printk(KERN_ERR,
2580                       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2581                        pdev->device);
2582        return -EINVAL;
2583}
2584
2585static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2586                                 struct sbridge_dev *sbridge_dev)
2587{
2588        struct sbridge_pvt *pvt = mci->pvt_info;
2589        struct pci_dev *pdev;
2590        u8 saw_chan_mask = 0;
2591        int i;
2592
2593        /* there's only one device per system; not tied to any bus */
2594        if (pvt->info.pci_vtd == NULL)
2595                /* result will be checked later */
2596                pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2597                                                   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2598                                                   NULL);
2599
2600        for (i = 0; i < sbridge_dev->n_devs; i++) {
2601                pdev = sbridge_dev->pdev[i];
2602                if (!pdev)
2603                        continue;
2604
2605                switch (pdev->device) {
2606                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2607                        pvt->pci_sad0 = pdev;
2608                        break;
2609                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2610                        pvt->pci_sad1 = pdev;
2611                        break;
2612                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2613                        pvt->pci_ha0 = pdev;
2614                        break;
2615                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2616                        pvt->pci_ta = pdev;
2617                        break;
2618                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
2619                        pvt->pci_ras = pdev;
2620                        break;
2621                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2622                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2623                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2624                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2625                {
2626                        int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0;
2627
2628                        pvt->pci_tad[id] = pdev;
2629                        saw_chan_mask |= 1 << id;
2630                }
2631                        break;
2632                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2633                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2634                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2635                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2636                {
2637                        int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4;
2638
2639                        pvt->pci_tad[id] = pdev;
2640                        saw_chan_mask |= 1 << id;
2641                }
2642                        break;
2643                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2644                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2645                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2646                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2647                        if (!pvt->pci_ddrio)
2648                                pvt->pci_ddrio = pdev;
2649                        break;
2650                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2651                        pvt->pci_ha1 = pdev;
2652                        break;
2653                case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2654                        pvt->pci_ha1_ta = pdev;
2655                        break;
2656                default:
2657                        break;
2658                }
2659
2660                edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2661                         sbridge_dev->bus,
2662                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2663                         pdev);
2664        }
2665
2666        /* Check if everything was registered */
2667        if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
2668            !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2669                goto enodev;
2670
2671        if (saw_chan_mask != 0x0f && /* -EN */
2672            saw_chan_mask != 0x33 && /* -EP */
2673            saw_chan_mask != 0xff)   /* -EX */
2674                goto enodev;
2675        return 0;
2676
2677enodev:
2678        sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2679        return -ENODEV;
2680}
2681
2682static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2683                                 struct sbridge_dev *sbridge_dev)
2684{
2685        struct sbridge_pvt *pvt = mci->pvt_info;
2686        struct pci_dev *pdev;
2687        u8 saw_chan_mask = 0;
2688        int i;
2689
2690        /* there's only one device per system; not tied to any bus */
2691        if (pvt->info.pci_vtd == NULL)
2692                /* result will be checked later */
2693                pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2694                                                   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2695                                                   NULL);
2696
2697        for (i = 0; i < sbridge_dev->n_devs; i++) {
2698                pdev = sbridge_dev->pdev[i];
2699                if (!pdev)
2700                        continue;
2701
2702                switch (pdev->device) {
2703                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2704                        pvt->pci_sad0 = pdev;
2705                        break;
2706                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2707                        pvt->pci_sad1 = pdev;
2708                        break;
2709                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2710                        pvt->pci_ha0 = pdev;
2711                        break;
2712                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2713                        pvt->pci_ta = pdev;
2714                        break;
2715                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
2716                        pvt->pci_ras = pdev;
2717                        break;
2718                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2719                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2720                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2721                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2722                {
2723                        int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0;
2724                        pvt->pci_tad[id] = pdev;
2725                        saw_chan_mask |= 1 << id;
2726                }
2727                        break;
2728                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2729                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2730                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2731                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2732                {
2733                        int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4;
2734                        pvt->pci_tad[id] = pdev;
2735                        saw_chan_mask |= 1 << id;
2736                }
2737                        break;
2738                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2739                        pvt->pci_ddrio = pdev;
2740                        break;
2741                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2742                        pvt->pci_ha1 = pdev;
2743                        break;
2744                case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2745                        pvt->pci_ha1_ta = pdev;
2746                        break;
2747                default:
2748                        break;
2749                }
2750
2751                edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2752                         sbridge_dev->bus,
2753                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2754                         pdev);
2755        }
2756
2757        /* Check if everything was registered */
2758        if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
2759            !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2760                goto enodev;
2761
2762        if (saw_chan_mask != 0x0f && /* -EN */
2763            saw_chan_mask != 0x33 && /* -EP */
2764            saw_chan_mask != 0xff)   /* -EX */
2765                goto enodev;
2766        return 0;
2767
2768enodev:
2769        sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2770        return -ENODEV;
2771}
2772
2773static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2774                        struct sbridge_dev *sbridge_dev)
2775{
2776        struct sbridge_pvt *pvt = mci->pvt_info;
2777        struct pci_dev *pdev;
2778        int dev, func;
2779
2780        int i;
2781        int devidx;
2782
2783        for (i = 0; i < sbridge_dev->n_devs; i++) {
2784                pdev = sbridge_dev->pdev[i];
2785                if (!pdev)
2786                        continue;
2787
2788                /* Extract PCI device and function. */
2789                dev = (pdev->devfn >> 3) & 0x1f;
2790                func = pdev->devfn & 0x7;
2791
2792                switch (pdev->device) {
2793                case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2794                        if (dev == 8)
2795                                pvt->knl.pci_mc0 = pdev;
2796                        else if (dev == 9)
2797                                pvt->knl.pci_mc1 = pdev;
2798                        else {
2799                                sbridge_printk(KERN_ERR,
2800                                        "Memory controller in unexpected place! (dev %d, fn %d)\n",
2801                                        dev, func);
2802                                continue;
2803                        }
2804                        break;
2805
2806                case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2807                        pvt->pci_sad0 = pdev;
2808                        break;
2809
2810                case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2811                        pvt->pci_sad1 = pdev;
2812                        break;
2813
2814                case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2815                        /* There is one of these per tile, and they range
2816                         * from 1.14.0 to 1.18.5.
2817                         */
2818                        devidx = ((dev-14)*8)+func;
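                        /* E.g. the CHA at 1.15.3 yields
                         * devidx = (15 - 14) * 8 + 3 = 11.
                         */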
2819
2820                        if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2821                                sbridge_printk(KERN_ERR,
2822                                        "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2823                                        dev, func);
2824                                continue;
2825                        }
2826
2827                        WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2828
2829                        pvt->knl.pci_cha[devidx] = pdev;
2830                        break;
2831
2832                case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL:
2833                        devidx = -1;
2834
2835                        /*
2836                         *  MC0 channels 0-2 are device 9 function 2-4,
2837                         *  MC1 channels 3-5 are device 8 function 2-4.
2838                         */
2839
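                            /*
                             * E.g. device 9 function 2 is MC0 channel 0,
                             * and device 8 function 4 is MC1 channel 5.
                             */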
2840                        if (dev == 9)
2841                                devidx = func - 2;
2842                        else if (dev == 8)
2843                                devidx = 3 + (func - 2);
2844
2845                        if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2846                                sbridge_printk(KERN_ERR,
2847                                        "DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2848                                        dev, func);
2849                                continue;
2850                        }
2851
2852                        WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2853                        pvt->knl.pci_channel[devidx] = pdev;
2854                        break;
2855
2856                case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2857                        pvt->knl.pci_mc_info = pdev;
2858                        break;
2859
2860                case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2861                        pvt->pci_ta = pdev;
2862                        break;
2863
2864                default:
2865                        sbridge_printk(KERN_ERR, "Unexpected device %04x\n",
2866                                pdev->device);
2867                        break;
2868                }
2869        }
2870
2871        if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
2872            !pvt->pci_sad0     || !pvt->pci_sad1    ||
2873            !pvt->pci_ta) {
2874                goto enodev;
2875        }
2876
2877        for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2878                if (!pvt->knl.pci_channel[i]) {
2879                        sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2880                        goto enodev;
2881                }
2882        }
2883
2884        for (i = 0; i < KNL_MAX_CHAS; i++) {
2885                if (!pvt->knl.pci_cha[i]) {
2886                        sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2887                        goto enodev;
2888                }
2889        }
2890
2891        return 0;
2892
2893enodev:
2894        sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2895        return -ENODEV;
2896}
2897
2898/****************************************************************************
2899                        Error check routines
2900 ****************************************************************************/
2901
2902/*
2903 * While Sandy Bridge has error count registers, the SMI BIOS reads
2904 * their values and resets the counters, so they are not reliable for
2905 * the OS to read. We therefore have no option but to trust whatever
2906 * the MCE is telling us about the errors.
2907 */
2908static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2909                                    const struct mce *m)
2910{
2911        struct mem_ctl_info *new_mci;
2912        struct sbridge_pvt *pvt = mci->pvt_info;
2913        enum hw_event_mc_err_type tp_event;
2914        char *type, *optype, msg[256];
2915        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);     /* MCG_STATUS.RIPV */
2916        bool overflow = GET_BITFIELD(m->status, 62, 62);  /* MCi_STATUS.OVER */
2917        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); /* .UC */
2918        bool recoverable;
2919        u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); /* corrected count */
2920        u32 mscod = GET_BITFIELD(m->status, 16, 31);      /* model-specific code */
2921        u32 errcode = GET_BITFIELD(m->status, 0, 15);     /* MCA error code */
2922        u32 channel = GET_BITFIELD(m->status, 0, 3);
2923        u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2924        long channel_mask, first_channel;
2925        u8  rank, socket, ha;
2926        int rc, dimm;
2927        char *area_type = NULL;
2928
2929        if (pvt->info.type != SANDY_BRIDGE)
2930                recoverable = true;
2931        else
2932                recoverable = GET_BITFIELD(m->status, 56, 56);
2933
2934        if (uncorrected_error) {
2935                if (ripv) {
2936                        type = "FATAL";
2937                        tp_event = HW_EVENT_ERR_FATAL;
2938                } else {
2939                        type = "NON_FATAL";
2940                        tp_event = HW_EVENT_ERR_UNCORRECTED;
2941                }
2942        } else {
2943                type = "CORRECTED";
2944                tp_event = HW_EVENT_ERR_CORRECTED;
2945        }
2946
2947        /*
2948         * According to Table 15-9 of the Intel Architecture spec vol 3A,
2949         * memory errors should fit in this mask:
2950         *      000f 0000 1mmm cccc (binary)
2951         * where:
2952         *      f = Correction Report Filtering Bit. If 1, subsequent errors
2953         *          won't be shown
2954         *      mmm = error type
2955         *      cccc = channel
2956         * If the mask doesn't match, report an error to the parsing logic
2957         */
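            /*
             * Worked example: errcode 0x0091 (binary 0000 0000 1001 0001)
             * passes the mask check and decodes as a memory read error
             * (mmm = 001) on channel 1 (cccc = 0001).
             */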
2958        if ((errcode & 0xef80) != 0x80) {
2959                optype = "Can't parse: it is not a memory error";
2960        } else {
2961                switch (optypenum) {
2962                case 0:
2963                        optype = "generic undef request error";
2964                        break;
2965                case 1:
2966                        optype = "memory read error";
2967                        break;
2968                case 2:
2969                        optype = "memory write error";
2970                        break;
2971                case 3:
2972                        optype = "addr/cmd error";
2973                        break;
2974                case 4:
2975                        optype = "memory scrubbing error";
2976                        break;
2977                default:
2978                        optype = "reserved";
2979                        break;
2980                }
2981        }
2982
2983        /* Only decode errors with a valid address (ADDRV) */
2984        if (!GET_BITFIELD(m->status, 58, 58))
2985                return;
2986
2987        if (pvt->info.type == KNIGHTS_LANDING) {
2988                if (channel == 14) {
2989                        edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
2990                                overflow ? " OVERFLOW" : "",
2991                                (uncorrected_error && recoverable)
2992                                ? " recoverable" : "",
2993                                mscod, errcode,
2994                                m->bank);
2995                } else {
2996                        char A = 'A';
2997
2998                        /*
2999                         * The reported channel is in the range 0-2, so we
3000                         * can't map it back to the MC directly. To figure out
3001                         * the MC, check which machine check bank reported the
3002                         * error: bank 15 means mc0 and bank 16 means mc1.
3003                         */
3004                        channel = knl_channel_remap(m->bank == 16, channel);
3005                        channel_mask = 1 << channel;
3006
3007                        snprintf(msg, sizeof(msg),
3008                                "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3009                                overflow ? " OVERFLOW" : "",
3010                                (uncorrected_error && recoverable)
3011                                ? " recoverable" : "",
3012                                mscod, errcode, channel, A + channel);
3013                        edac_mc_handle_error(tp_event, mci, core_err_cnt,
3014                                m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3015                                channel, 0, -1,
3016                                optype, msg);
3017                }
3018                return;
3019        } else {
3020                rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3021                                &channel_mask, &rank, &area_type, msg);
3022        }
3023
3024        if (rc < 0)
3025                goto err_parsing;
3026        new_mci = get_mci_for_node_id(socket);
3027        if (!new_mci) {
3028                strcpy(msg, "Error: socket got corrupted!");
3029                goto err_parsing;
3030        }
3031        mci = new_mci;
3032        pvt = mci->pvt_info;
3033
3034        first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3035
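            /*
             * Ranks appear to be numbered four per DIMM slot, so the
             * reported rank selects the DIMM within the channel.
             */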
3036        if (rank < 4)
3037                dimm = 0;
3038        else if (rank < 8)
3039                dimm = 1;
3040        else
3041                dimm = 2;
3043
3044        /*
3045         * FIXME: On some memory configurations (mirror, lockstep), the
3046         * Memory Controller can't point the error to a single DIMM. The
3047         * EDAC core should be handling the channel mask, in order to point
3048         * to the group of DIMMs where the error may be happening.
3049         */
3050        if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
3051                channel = first_channel;
3052
3053        snprintf(msg, sizeof(msg),
3054                 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3055                 overflow ? " OVERFLOW" : "",
3056                 (uncorrected_error && recoverable) ? " recoverable" : "",
3057                 area_type,
3058                 mscod, errcode,
3059                 socket, ha,
3060                 channel_mask,
3061                 rank);
3062
3063        edac_dbg(0, "%s\n", msg);
3064
3065        /* FIXME: need support for channel mask */
3066
3067        if (channel == CHANNEL_UNSPECIFIED)
3068                channel = -1;
3069
3070        /* Call the helper to output message */
3071        edac_mc_handle_error(tp_event, mci, core_err_cnt,
3072                             m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3073                             4 * ha + channel, dimm, -1,
3074                             optype, msg);
3075        return;
3076err_parsing:
3077        edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3078                             -1, -1, -1,
3079                             msg, "");
3080
3081}
3082
3083/*
3084 *      sbridge_check_error     Retrieve and process errors reported by the
3085 *                              hardware. Called by the Core module.
3086 */
3087static void sbridge_check_error(struct mem_ctl_info *mci)
3088{
3089        struct sbridge_pvt *pvt = mci->pvt_info;
3090        int i;
3091        unsigned count = 0;
3092        struct mce *m;
3093
3094        /*
3095         * MCE first step: Copy all mce errors into a temporary buffer
3096         * We use double buffering here to reduce the risk of
3097         * losing an error.
3098         */
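            /*
             * The in/out indexes wrap modulo MCE_LOG_LEN, so
             * (out + LEN - in) % LEN counts the queued entries even
             * across a wraparound: e.g. out = 2, in = LEN - 4 gives 6.
             */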
3099        smp_rmb();
3100        count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
3101                % MCE_LOG_LEN;
3102        if (!count)
3103                return;
3104
3105        m = pvt->mce_outentry;
3106        if (pvt->mce_in + count > MCE_LOG_LEN) {
3107                unsigned l = MCE_LOG_LEN - pvt->mce_in;
3108
3109                memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
3110                smp_wmb();
3111                pvt->mce_in = 0;
3112                count -= l;
3113                m += l;
3114        }
3115        memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
3116        smp_wmb();
3117        pvt->mce_in += count;
3118
3119        smp_rmb();
3120        if (pvt->mce_overrun) {
3121                sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
3122                              pvt->mce_overrun);
3123                smp_wmb();
3124                pvt->mce_overrun = 0;
3125        }
3126
3127        /*
3128         * MCE second step: parse errors and display
3129         */
3130        for (i = 0; i < count; i++)
3131                sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
3132}
3133
3134/*
3135 * sbridge_mce_check_error      Replicates the mcelog routine to get errors.
3136 *                              This routine simply queues mcelog errors and
3137 *                              returns. The error itself is handled later by
3138 *                              sbridge_check_error.
3139 * WARNING: As this routine may be called at NMI time, extra care must be
3140 * taken to avoid deadlocks and to be as fast as possible.
3141 */
3142static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3143                                   void *data)
3144{
3145        struct mce *mce = (struct mce *)data;
3146        struct mem_ctl_info *mci;
3147        struct sbridge_pvt *pvt;
3148        char *type;
3149
3150        if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
3151                return NOTIFY_DONE;
3152
3153        mci = get_mci_for_node_id(mce->socketid);
3154        if (!mci)
3155                return NOTIFY_BAD;
3156        pvt = mci->pvt_info;
3157
3158        /*
3159         * Just let mcelog handle it if the error is
3160         * outside the memory controller. A memory error
3161         * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3162         * Bit 12 has a special meaning.
3163         */
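            /*
             * E.g. a status low word of 0x009f survives the 0xefff mask
             * and yields 1 when shifted right by 7, so it is decoded here.
             */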
3164        if ((mce->status & 0xefff) >> 7 != 1)
3165                return NOTIFY_DONE;
3166
3167        if (mce->mcgstatus & MCG_STATUS_MCIP)
3168                type = "Exception";
3169        else
3170                type = "Event";
3171
3172        sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3173
3174        sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3175                          "Bank %d: %016Lx\n", mce->extcpu, type,
3176                          mce->mcgstatus, mce->bank, mce->status);
3177        sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3178        sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3179        sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3180
3181        sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3182                          "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3183                          mce->time, mce->socketid, mce->apicid);
3184
3185        smp_rmb();
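            /*
             * One slot is deliberately kept empty: (out + 1) % LEN == in
             * means the ring is full, which keeps "full" distinguishable
             * from "empty" (out == in) without a separate counter.
             */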
3186        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
3187                smp_wmb();
3188                pvt->mce_overrun++;
3189                return NOTIFY_DONE;
3190        }
3191
3192        /* Copy the memory error into the ring buffer */
3193        memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
3194        smp_wmb();
3195        pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
3196
3197        /* Handle fatal errors immediately */
3198        if (mce->mcgstatus & MCG_STATUS_RIPV)
3199                sbridge_check_error(mci);
3200
3201        /* Advise mcelog that the error was handled */
3202        return NOTIFY_STOP;
3203}
3204
3205static struct notifier_block sbridge_mce_dec = {
3206        .notifier_call  = sbridge_mce_check_error,
3207        .priority       = MCE_PRIO_EDAC,
3208};
3209
3210/****************************************************************************
3211                        EDAC register/unregister logic
3212 ****************************************************************************/
3213
3214static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3215{
3216        struct mem_ctl_info *mci = sbridge_dev->mci;
3217        struct sbridge_pvt *pvt;
3218
3219        if (unlikely(!mci || !mci->pvt_info)) {
3220                edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3221
3222                sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3223                return;
3224        }
3225
3226        pvt = mci->pvt_info;
3227
3228        edac_dbg(0, "MC: mci = %p, dev = %p\n",
3229                 mci, &sbridge_dev->pdev[0]->dev);
3230
3231        /* Remove MC sysfs nodes */
3232        edac_mc_del_mc(mci->pdev);
3233
3234        edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3235        kfree(mci->ctl_name);
3236        edac_mc_free(mci);
3237        sbridge_dev->mci = NULL;
3238}
3239
3240static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3241{
3242        struct mem_ctl_info *mci;
3243        struct edac_mc_layer layers[2];
3244        struct sbridge_pvt *pvt;
3245        struct pci_dev *pdev = sbridge_dev->pdev[0];
3246        int rc;
3247
3248        /* Check the number of active and not disabled channels */
3249        rc = check_if_ecc_is_active(sbridge_dev->bus, type);
3250        if (unlikely(rc < 0))
3251                return rc;
3252
3253        /* allocate a new MC control structure */
3254        layers[0].type = EDAC_MC_LAYER_CHANNEL;
3255        layers[0].size = type == KNIGHTS_LANDING ?
3256                KNL_MAX_CHANNELS : NUM_CHANNELS;
3257        layers[0].is_virt_csrow = false;
3258        layers[1].type = EDAC_MC_LAYER_SLOT;
3259        layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3260        layers[1].is_virt_csrow = true;
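            /*
             * The two layers describe each controller as a channel x
             * DIMM-slot grid; Knights Landing is modelled as one slot
             * per channel.
             */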
3261        mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3262                            sizeof(*pvt));
3263
3264        if (unlikely(!mci))
3265                return -ENOMEM;
3266
3267        edac_dbg(0, "MC: mci = %p, dev = %p\n",
3268                 mci, &pdev->dev);
3269
3270        pvt = mci->pvt_info;
3271        memset(pvt, 0, sizeof(*pvt));
3272
3273        /* Associate sbridge_dev and mci for future usage */
3274        pvt->sbridge_dev = sbridge_dev;
3275        sbridge_dev->mci = mci;
3276
3277        mci->mtype_cap = type == KNIGHTS_LANDING ?
3278                MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3279        mci->edac_ctl_cap = EDAC_FLAG_NONE;
3280        mci->edac_cap = EDAC_FLAG_NONE;
3281        mci->mod_name = "sbridge_edac.c";
3282        mci->mod_ver = SBRIDGE_REVISION;
3283        mci->dev_name = pci_name(pdev);
3284        mci->ctl_page_to_phys = NULL;
3285
3286        /* Set the function pointer to an actual operation function */
3287        mci->edac_check = sbridge_check_error;
3288
3289        pvt->info.type = type;
3290        switch (type) {
3291        case IVY_BRIDGE:
3292                pvt->info.rankcfgr = IB_RANK_CFG_A;
3293                pvt->info.get_tolm = ibridge_get_tolm;
3294                pvt->info.get_tohm = ibridge_get_tohm;
3295                pvt->info.dram_rule = ibridge_dram_rule;
3296                pvt->info.get_memory_type = get_memory_type;
3297                pvt->info.get_node_id = get_node_id;
3298                pvt->info.rir_limit = rir_limit;
3299                pvt->info.sad_limit = sad_limit;
3300                pvt->info.interleave_mode = interleave_mode;
3301                pvt->info.show_interleave_mode = show_interleave_mode;
3302                pvt->info.dram_attr = dram_attr;
3303                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3304                pvt->info.interleave_list = ibridge_interleave_list;
3305                pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3306                pvt->info.interleave_pkg = ibridge_interleave_pkg;
3307                pvt->info.get_width = ibridge_get_width;
3308                mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
3309
3310                /* Store pci devices at mci for faster access */
3311                rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3312                if (unlikely(rc < 0))
3313                        goto fail0;
3314                break;
3315        case SANDY_BRIDGE:
3316                pvt->info.rankcfgr = SB_RANK_CFG_A;
3317                pvt->info.get_tolm = sbridge_get_tolm;
3318                pvt->info.get_tohm = sbridge_get_tohm;
3319                pvt->info.dram_rule = sbridge_dram_rule;
3320                pvt->info.get_memory_type = get_memory_type;
3321                pvt->info.get_node_id = get_node_id;
3322                pvt->info.rir_limit = rir_limit;
3323                pvt->info.sad_limit = sad_limit;
3324                pvt->info.interleave_mode = interleave_mode;
3325                pvt->info.show_interleave_mode = show_interleave_mode;
3326                pvt->info.dram_attr = dram_attr;
3327                pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3328                pvt->info.interleave_list = sbridge_interleave_list;
3329                pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
3330                pvt->info.interleave_pkg = sbridge_interleave_pkg;
3331                pvt->info.get_width = sbridge_get_width;
3332                mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
3333
3334                /* Store pci devices at mci for faster access */
3335                rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3336                if (unlikely(rc < 0))
3337                        goto fail0;
3338                break;
3339        case HASWELL:
3340                /* rankcfgr isn't used */
3341                pvt->info.get_tolm = haswell_get_tolm;
3342                pvt->info.get_tohm = haswell_get_tohm;
3343                pvt->info.dram_rule = ibridge_dram_rule;
3344                pvt->info.get_memory_type = haswell_get_memory_type;
3345                pvt->info.get_node_id = haswell_get_node_id;
3346                pvt->info.rir_limit = haswell_rir_limit;
3347                pvt->info.sad_limit = sad_limit;
3348                pvt->info.interleave_mode = interleave_mode;
3349                pvt->info.show_interleave_mode = show_interleave_mode;
3350                pvt->info.dram_attr = dram_attr;
3351                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3352                pvt->info.interleave_list = ibridge_interleave_list;
3353                pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3354                pvt->info.interleave_pkg = ibridge_interleave_pkg;
3355                pvt->info.get_width = ibridge_get_width;
3356                mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
3357
3358                /* Store pci devices at mci for faster access */
3359                rc = haswell_mci_bind_devs(mci, sbridge_dev);
3360                if (unlikely(rc < 0))
3361                        goto fail0;
3362                break;
3363        case BROADWELL:
3364                /* rankcfgr isn't used */
3365                pvt->info.get_tolm = haswell_get_tolm;
3366                pvt->info.get_tohm = haswell_get_tohm;
3367                pvt->info.dram_rule = ibridge_dram_rule;
3368                pvt->info.get_memory_type = haswell_get_memory_type;
3369                pvt->info.get_node_id = haswell_get_node_id;
3370                pvt->info.rir_limit = haswell_rir_limit;
3371                pvt->info.sad_limit = sad_limit;
3372                pvt->info.interleave_mode = interleave_mode;
3373                pvt->info.show_interleave_mode = show_interleave_mode;
3374                pvt->info.dram_attr = dram_attr;
3375                pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3376                pvt->info.interleave_list = ibridge_interleave_list;
3377                pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
3378                pvt->info.interleave_pkg = ibridge_interleave_pkg;
3379                pvt->info.get_width = broadwell_get_width;
3380                mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
3381
3382                /* Store pci devices at mci for faster access */
3383                rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3384                if (unlikely(rc < 0))
3385                        goto fail0;
3386                break;
3387        case KNIGHTS_LANDING:
3388                /* pvt->info.rankcfgr == ??? */
3389                pvt->info.get_tolm = knl_get_tolm;
3390                pvt->info.get_tohm = knl_get_tohm;
3391                pvt->info.dram_rule = knl_dram_rule;
3392                pvt->info.get_memory_type = knl_get_memory_type;
3393                pvt->info.get_node_id = knl_get_node_id;
3394                pvt->info.rir_limit = NULL;
3395                pvt->info.sad_limit = knl_sad_limit;
3396                pvt->info.interleave_mode = knl_interleave_mode;
3397                pvt->info.show_interleave_mode = knl_show_interleave_mode;
3398                pvt->info.dram_attr = dram_attr_knl;
3399                pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3400                pvt->info.interleave_list = knl_interleave_list;
3401                pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
3402                pvt->info.interleave_pkg = ibridge_interleave_pkg;
3403                pvt->info.get_width = knl_get_width;
3404                mci->ctl_name = kasprintf(GFP_KERNEL,
3405                        "Knights Landing Socket#%d", mci->mc_idx);
3406
3407                rc = knl_mci_bind_devs(mci, sbridge_dev);
3408                if (unlikely(rc < 0))
3409                        goto fail0;
3410                break;
3411        }
3412
3413        /* Get dimm basic config and the memory layout */
3414        get_dimm_config(mci);
3415        get_memory_layout(mci);
3416
3417        /* record ptr to the generic device */
3418        mci->pdev = &pdev->dev;
3419
3420        /* add this new MC control structure to EDAC's list of MCs */
3421        if (unlikely(edac_mc_add_mc(mci))) {
3422                edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3423                rc = -EINVAL;
3424                goto fail0;
3425        }
3426
3427        return 0;
3428
3429fail0:
3430        kfree(mci->ctl_name);
3431        edac_mc_free(mci);
3432        sbridge_dev->mci = NULL;
3433        return rc;
3434}
3435
3436#define ICPU(model, table) \
3437        { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
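    /*
     * The initializer fields are { vendor, family, model, feature,
     * driver_data }: match any stepping of the given Intel family-6
     * model (feature 0 is X86_FEATURE_ANY) and stash the matching PCI
     * ID table in driver_data for sbridge_probe() to pick up.
     */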
3438
3439static const struct x86_cpu_id sbridge_cpuids[] = {
3440        ICPU(INTEL_FAM6_SANDYBRIDGE_X,    pci_dev_descr_sbridge_table),
3441        ICPU(INTEL_FAM6_IVYBRIDGE_X,      pci_dev_descr_ibridge_table),
3442        ICPU(INTEL_FAM6_HASWELL_X,        pci_dev_descr_haswell_table),
3443        ICPU(INTEL_FAM6_BROADWELL_X,      pci_dev_descr_broadwell_table),
3444        ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
3445        ICPU(INTEL_FAM6_XEON_PHI_KNL,     pci_dev_descr_knl_table),
3446        ICPU(INTEL_FAM6_XEON_PHI_KNM,     pci_dev_descr_knl_table),
3447        { }
3448};
3449MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3450
3451/*
3452 *      sbridge_probe   Get all devices and register memory controllers
3453 *                      present.
3454 *      return:
3455 *              0 when a device was found
3456 *              < 0 for an error code
3457 */
3458
3459static int sbridge_probe(const struct x86_cpu_id *id)
3460{
3461        int rc = -ENODEV;
3462        u8 mc, num_mc = 0;
3463        struct sbridge_dev *sbridge_dev;
3464        struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3465
3466        /* get the pci devices we want to reserve for our use */
3467        rc = sbridge_get_all_devices(&num_mc, ptable);
3468
3469        if (unlikely(rc < 0)) {
3470                edac_dbg(0, "couldn't get all devices\n");
3471                goto fail0;
3472        }
3473
3474        mc = 0;
3475
3476        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3477                edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3478                         mc, mc + 1, num_mc);
3479
3480                sbridge_dev->mc = mc++;
3481                rc = sbridge_register_mci(sbridge_dev, ptable->type);
3482                if (unlikely(rc < 0))
3483                        goto fail1;
3484        }
3485
3486        sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3487
3488        return 0;
3489
3490fail1:
3491        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3492                sbridge_unregister_mci(sbridge_dev);
3493
3494        sbridge_put_all_devices();
3495fail0:
3496        return rc;
3497}
3498
3499/*
3500 *      sbridge_remove  Unregister all memory controllers and release
3501 *                      the reserved PCI devices.
3502 */
3503static void sbridge_remove(void)
3504{
3505        struct sbridge_dev *sbridge_dev;
3506
3507        edac_dbg(0, "\n");
3508
3509        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3510                sbridge_unregister_mci(sbridge_dev);
3511
3512        /* Release PCI resources */
3513        sbridge_put_all_devices();
3514}
3515
3516/*
3517 *      sbridge_init            Module entry function
3518 *                      Try to initialize this module for its devices
3519 */
3520static int __init sbridge_init(void)
3521{
3522        const struct x86_cpu_id *id;
3523        int rc;
3524
3525        edac_dbg(2, "\n");
3526
3527        id = x86_match_cpu(sbridge_cpuids);
3528        if (!id)
3529                return -ENODEV;
3530
3531        /* Ensure that the OPSTATE is set correctly for POLL or NMI */
3532        opstate_init();
3533
3534        rc = sbridge_probe(id);
3535
3536        if (rc >= 0) {
3537                mce_register_decode_chain(&sbridge_mce_dec);
3538                if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
3539                        sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
3540                return 0;
3541        }
3542
3543        sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3544                      rc);
3545
3546        return rc;
3547}
3548
3549/*
3550 *      sbridge_exit()  Module exit function
3551 *                      Unregister the driver
3552 */
3553static void __exit sbridge_exit(void)
3554{
3555        edac_dbg(2, "\n");
3556        sbridge_remove();
3557        mce_unregister_decode_chain(&sbridge_mce_dec);
3558}
3559
3560module_init(sbridge_init);
3561module_exit(sbridge_exit);
3562
3563module_param(edac_op_state, int, 0444);
3564MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3565
3566MODULE_LICENSE("GPL");
3567MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
3568MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
3569MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge, Ivy Bridge, Haswell, Broadwell and Knights Landing memory controllers - "
3570                   SBRIDGE_REVISION);
3571