// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.4"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

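/*
 * Decoder register access helpers: the socket MMIO base and per-IMC BARs
 * are read from the URACU PCI device's config space, while the per-channel
 * DIMMMTR, MCDDRTCFG, MCMTR and AMAP registers are read from the
 * ioremap()'d memory controller MMIO window.
 */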
#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + 0x2080c + (i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i, j)	\
	readl((m)->mbase + 0x20970 + (i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCMTR(m, i)	\
	readl((m)->mbase + 0x20ef8 + (i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i)	\
	readl((m)->mbase + 0x20814 + (i) * (m)->chan_mmio_sz)

#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)

static struct list_head *i10nm_edac_list;

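/*
 * Look up a PCI device by segment/bus/device/function, enable it and take
 * a reference so it stays around while the driver uses it.
 */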
static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	pci_dev_get(pdev);

	return pdev;
}

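/*
 * For each socket on the i10nm_edac_list, pick up the utility and URACU
 * PCI devices, read the socket MMIO base, then locate every memory
 * controller device and ioremap() its register window for later decoding.
 */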
static int i10nm_get_all_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (i = 0; i < I10NM_NUM_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i, 0);
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[i].mdev = mdev;

			if (I10NM_GET_IMC_BAR(d, i, reg)) {
				i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
				return -ENODEV;
			}

			off = I10NM_GET_IMC_MMIO_OFFSET(reg);
			size = I10NM_GET_IMC_MMIO_SIZE(reg);
			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[i].mbase = mbase;
		}
	}

	return 0;
}

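/*
 * Per-platform resource configuration: i10nm_cfg0 and i10nm_cfg1 differ
 * only in the bus-number config-space offset, while spr_cfg (Sapphire
 * Rapids) uses a larger per-channel MMIO window and supports DDR5.
 */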
static struct res_config i10nm_cfg0 = {
	.type = I10NM,
	.decs_did = 0x3452,
	.busno_cfg_offset = 0xcc,
	.ddr_chan_mmio_sz = 0x4000,
};

static struct res_config i10nm_cfg1 = {
	.type = I10NM,
	.decs_did = 0x3452,
	.busno_cfg_offset = 0xd0,
	.ddr_chan_mmio_sz = 0x4000,
};

static struct res_config spr_cfg = {
	.type = SPR,
	.decs_did = 0x3252,
	.busno_cfg_offset = 0xd0,
	.ddr_chan_mmio_sz = 0x8000,
	.support_ddr5 = true,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

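/* Report ECC as enabled for a channel when bit 2 of its MCMTR register is set. */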
static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
}

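/*
 * Walk every channel and DIMM slot of this memory controller, filling in
 * EDAC dimm_info for populated DDR DIMMs and DDR-T (NVDIMM) devices, and
 * fail if any populated channel has ECC disabled.
 */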
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, amap, mcddrtcfg;
	struct dimm_info *dimm;
	int i, j, ndimms;

	for (i = 0; i < I10NM_NUM_CHANNELS; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;
		amap = I10NM_GET_AMAP(imc, i);
		for (j = 0; j < I10NM_NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, amap, dimm,
							    imc, i, j, cfg);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}

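/* Decode MCEs through skx_mce_check_error() at EDAC notifier priority. */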
static struct notifier_block i10nm_mce_dec = {
	.notifier_call = skx_mce_check_error,
	.priority = MCE_PRIO_EDAC,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature.
 * Exercise the address decode logic by writing an address to
 * /sys/kernel/debug/edac/i10nm_test/addr.
 */
static struct dentry *i10nm_test;

static int debugfs_u64_set(void *data, u64 val)
{
	struct mce m;

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* One corrected error */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
	m.addr = val;
	skx_mce_check_error(NULL, 0, &m);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_i10nm_debug(void)
{
	i10nm_test = edac_debugfs_create_dir("i10nm_test");
	if (!i10nm_test)
		return;

	if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(i10nm_test);
		i10nm_test = NULL;
	}
}

static void teardown_i10nm_debug(void)
{
	debugfs_remove_recursive(i10nm_test);
}
#else
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */

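/*
 * Module init: match the CPU model to a res_config, read TOLM/TOHM and the
 * bus mappings, map all memory controllers, register an EDAC MCI per IMC,
 * then hook into the MCE decode chain (and debugfs error injection when
 * CONFIG_EDAC_DEBUG is set).
 */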
static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_all_munits();
	if (rc < 0)
		goto fail;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	setup_i10nm_debug();

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");
	teardown_i10nm_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");