/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "intel.h"
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

static bool no_init_ars;
module_param(no_init_ars, bool, 0644);
MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");

static bool force_labels;
module_param(force_labels, bool, 0444);
MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

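/*
 * Translate the 32-bit firmware status of a bus-scope command (ARS and
 * error clearing) into a Linux errno.  The low 16 bits hold the command
 * completion status, the upper 16 bits hold command-specific data such
 * as the extended ARS status or the supported scrub types.
 */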
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer format of acpi_nfit_ctl */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

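/*
 * Evaluate the _LSW (Label Storage Write) method for a DIMM and convert
 * its integer status result to the buffer format acpi_nfit_ctl() expects;
 * acpi_label_read() and acpi_label_info() below do the same for _LSR and
 * _LSI package results.
 */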
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

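/*
 * Lookup the revision-id to pass to _DSM for a given family/function
 * pair.  Functions not listed in the table default to revision 1;
 * out-of-range family or function numbers return 0 (invalid).
 */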
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_GET_FWINFO] = 2,
			[NVDIMM_INTEL_START_FWUPDATE] = 2,
			[NVDIMM_INTEL_SEND_FWUPDATE] = 2,
			[NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
			[NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
			[NVDIMM_INTEL_SET_THRESHOLD] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
			[NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
			[NVDIMM_INTEL_SET_PASSPHRASE] = 2,
			[NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
			[NVDIMM_INTEL_UNLOCK_UNIT] = 2,
			[NVDIMM_INTEL_FREEZE_LOCK] = 2,
			[NVDIMM_INTEL_SECURE_ERASE] = 2,
			[NVDIMM_INTEL_OVERWRITE] = 2,
			[NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
			[NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
			[NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
		},
	};
	u8 id;

	if (family > NVDIMM_FAMILY_MAX)
		return 0;
	if (func > 31)
		return 0;
	id = revid_table[family][func];
	if (id == 0)
		return 1; /* default */
	return id;
}

static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg)
{
	if (call_pkg) {
		int i;

		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}

static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
	return true;
}

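/*
 * Central command dispatch for the bus and for DIMMs: route label
 * commands to the _LS{I,R,W} methods when available, everything else to
 * _DSM, then unpack the result back into the libnvdimm buffer format.
 */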
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg);
	if (func < 0)
		return func;

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command.  For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
			dimm_name, cmd, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);
	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

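/*
 * Each add_* helper below dedupes against the tables found in a prior
 * enumeration (@prev) so that an unchanged NFIT re-evaluation reuses the
 * existing entries; add_spa() handles System Physical Address ranges.
 */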
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "spa index: %d type: %s\n",
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

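/*
 * Given an NFIT device handle, find the corresponding memory-map entry
 * and report its SMBIOS physical-id (and memdev flags) to interested
 * callers, e.g. EDAC drivers mapping errors back to a DIMM.
 */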
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nfit_mem *nfit_mem;
	u16 physical_id;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			memdev = __to_nfit_memdev(nfit_mem);
			if (memdev->device_handle == device_handle) {
				*flags = memdev->flags;
				physical_id = memdev->physical_id;
				mutex_unlock(&acpi_desc->init_mutex);
				mutex_unlock(&acpi_desc_lock);
				return physical_id;
			}
		}
		mutex_unlock(&acpi_desc->init_mutex);
	}
	mutex_unlock(&acpi_desc_lock);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

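/*
 * Add a DIMM Control Region (DCR) table, comparing only the bytes that
 * sizeof_dcr() says are valid so that truncated control regions still
 * dedupe correctly against a prior enumeration.
 */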
static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "dcr index: %d windows: %d\n",
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
			bdw->region_index, bdw->windows);
	return true;
}

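/*
 * Interleave tables end in a variable-length line-offset array; one u32
 * slot is already accounted for in struct acpi_nfit_interleave, hence
 * the "line_count - 1" below.  sizeof_flush() applies the same logic to
 * the flush-hint address array.
 */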
static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "idt index: %d num_lines: %d\n",
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
			flush->device_handle, flush->hint_count);
	return true;
}

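/*
 * Record the platform capabilities (e.g. CPU-cache or memory-controller
 * flush-on-power-loss), masked to the bits the table declares valid via
 * highest_capability.
 */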
static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_capabilities *pcap)
{
	struct device *dev = acpi_desc->dev;
	u32 mask;

	mask = (1 << (pcap->highest_capability + 1)) - 1;
	acpi_desc->platform_cap = pcap->capabilities & mask;
	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
	return true;
}

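/*
 * Parse one NFIT sub-table and dispatch it to the appropriate add_*
 * helper; returns the next table on success, NULL at the end of the
 * buffer (or on a zero-length table), or ERR_PTR(-ENOMEM) on allocation
 * failure.
 */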
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "smbios\n");
		break;
	case ACPI_NFIT_TYPE_CAPABILITIES:
		if (!add_platform_cap(acpi_desc, table))
			return err;
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

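/*
 * Find the SPA-BDW (block-data-window address range) associated with a
 * DIMM's control region by matching range_index, device handle, and
 * control-region index across the memdev entries.
 */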
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

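/* keep the dimm list sorted by device handle for stable enumeration */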
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, try to find a SPA-BDW and a
	 * corresponding BDW that references the DCR.  Throw it all into
	 * an nfit_mem struct.  Note, that BDWs are shared between
	 * dimms but dedicated to a single SPA.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute allows one to set/unset the full
 * volume scrub mode, where the HW error is handled:
 *    0 - Switch to the default mode where an exception will only insert
 *        the address of the memory error into the poison and badblocks
 *        lists.
 *    1 - Enable a full scrub to happen if an exception for a memory error is
 *        received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	ssize_t rc = -ENXIO;
	bool busy;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (!nd_desc) {
		device_unlock(dev);
		return rc;
	}
	acpi_desc = to_acpi_desc(nd_desc);

	mutex_lock(&acpi_desc->init_mutex);
	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
	/* Allow an admin to poll the busy state at a higher rate */
	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
				&acpi_desc->scrub_flags)) {
		acpi_desc->scrub_tmo = 1;
		mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
	}

	mutex_unlock(&acpi_desc->init_mutex);
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* -ENXIO means no second format found yet; keep scanning */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	u16 flags = __to_nfit_memdev(nfit_mem)->flags;

	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%s\n", nfit_mem->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t dirty_shutdown_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
}
static DEVICE_ATTR_RO(dirty_shutdown);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	&dev_attr_dirty_shutdown.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;

	if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
			&& a == &dev_attr_dirty_shutdown.attr)
		return 0;

	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(adev->handle, method, &handle);

	if (ACPI_SUCCESS(status))
		return true;
	return false;
}

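/*
 * Declared __weak so that test infrastructure can override the DSM call
 * with a stub when exercising the driver without real hardware.
 */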
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
	struct device *dev = &nfit_mem->adev->dev;
	struct nd_intel_smart smart = { 0 };
	union acpi_object in_buf = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = 0,
	};
	union acpi_object in_obj = {
		.package.type = ACPI_TYPE_PACKAGE,
		.package.count = 1,
		.package.elements = &in_buf,
	};
	const u8 func = ND_INTEL_SMART;
	const guid_t *guid = to_nfit_uuid(nfit_mem->family);
	u8 revid = nfit_dsm_revid(nfit_mem->family, func);
	struct acpi_device *adev = nfit_mem->adev;
	acpi_handle handle = adev->handle;
	union acpi_object *out_obj;

	if ((nfit_mem->dsm_mask & (1 << func)) == 0)
		return;

	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
			|| out_obj->buffer.length < sizeof(smart)) {
		dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
				dev_name(dev));
		ACPI_FREE(out_obj);
		return;
	}
	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
	ACPI_FREE(out_obj);

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
		if (smart.shutdown_state)
			set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
	}

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
		nfit_mem->dirty_shutdown = smart.shutdown_count;
	}
}

static void populate_shutdown_status(struct nfit_mem *nfit_mem)
{
	/*
	 * For DIMMs that provide a dynamic facility to retrieve a
	 * dirty-shutdown status and/or a dirty-shutdown count, cache
	 * these values in nfit_mem.
	 */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		nfit_intel_shutdown_status(nfit_mem);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask, label_mask;
	const guid_t *guid;
	int i;
	int family = -1;
	struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		sprintf(nfit_mem->id, "%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));

	adev = to_acpi_dev(acpi_desc);
	if (!adev) {
		/* unit test case */
		populate_shutdown_status(nfit_mem);
		return 0;
	}

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
			ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * There are 4 "legacy" NVDIMM command sets
	 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
	 * an EFI working group was established to constrain this
	 * proliferation. The nfit driver probes for the supported command
	 * set by GUID. Note, if you're a platform developer looking to add
	 * a new command set to this probe, consider using an existing set,
	 * or otherwise seek approval to publish the command set at
	 * http://www.uefi.org/RFIC_LIST.
	 *
	 * Note, only acpi_nfit_ctl() can test for which command sets were
	 * supported by the platform firmware.
	 */
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
		dsm_mask = 0x1f;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	/*
	 * Function 0 is the command interrogation function, don't
	 * export it to potential userspace use, and enable it to be
	 * used as an error value in acpi_nfit_ctl().
	 */
	dsm_mask &= ~1UL;

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	/*
	 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
	 * due to their better semantics handling locked capacity.
	 */
	label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
		| 1 << ND_CMD_SET_CONFIG_DATA;
	if (family == NVDIMM_FAMILY_INTEL
			&& (dsm_mask & label_mask) == label_mask)
		/* skip _LS{I,R,W} enabling */;
	else {
		if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
				&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
			dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		}

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
			dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
		}

		/*
		 * Quirk read-only label configurations to preserve
		 * access to label-less dimms by default.
		 */
		if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& !force_labels) {
			dev_dbg(dev, "%s: No _LSW, disable labels\n",
					dev_name(&adev_dimm->dev));
			clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		} else
			dev_dbg(dev, "%s: Force enable labels\n",
					dev_name(&adev_dimm->dev));
	}

	populate_shutdown_status(nfit_mem);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm) {
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
			dev_set_drvdata(&adev_dimm->dev, NULL);
		}
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
{
	switch (family) {
	case NVDIMM_FAMILY_INTEL:
		return intel_security_ops;
	default:
		return NULL;
	}
}

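/*
 * Register an nvdimm device for each unique DIMM found in the NFIT,
 * collating flags across all of a DIMM's memdev entries, and then hook
 * up sysfs notification for health ("flags") events.
 */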
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
		if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
			set_bit(NDD_NOBLK, &flags);

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq, &nfit_mem->id[0],
				acpi_nfit_get_security_ops(nfit_mem->family));
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
				nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
			mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}
2142
2143
2144
2145
2146
2147enum nfit_aux_cmds {
2148 NFIT_CMD_TRANSLATE_SPA = 5,
2149 NFIT_CMD_ARS_INJECT_SET = 7,
2150 NFIT_CMD_ARS_INJECT_CLEAR = 8,
2151 NFIT_CMD_ARS_INJECT_GET = 9,
2152};
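
/*
 * There is deliberately no in-kernel wrapper for these functions: they are
 * reachable only as a passthrough, i.e. wrapped in a struct nd_cmd_pkg and
 * submitted via the ND_CMD_CALL ioctl (this is how userspace tools such as
 * ndctl reach them). Illustrative sketch only; the payload sizes are
 * command-specific and not spelled out here:
 *
 *	struct nd_cmd_pkg pkg = {
 *		.nd_command = NFIT_CMD_TRANSLATE_SPA,
 *		.nd_size_in = ...,
 *		.nd_size_out = ...,
 *	};
 */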

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	unsigned long dsm_mask;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);

	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->bus_dsm_mask);
}
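
/*
 * Note: the same _DSM probe (revision 1 of the bus GUID) feeds two masks
 * above. cmd_mask covers commands with a native libnvdimm ioctl
 * (ND_CMD_ARS_CAP..ND_CMD_CLEAR_ERROR, plus ND_CMD_CALL which is always
 * set), while bus_dsm_mask covers raw function numbers that are only
 * reachable through the ND_CMD_CALL passthrough, including the private
 * NFIT_CMD_* functions defined above.
 */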

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16 vendor_id;
		u16 manufacturing_date;
		u8 manufacturing_location;
		u8 reserved[31];
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
	return sizeof(struct nfit_set_info2)
		+ num_mappings * sizeof(struct nfit_set_info_map2);
}
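
/*
 * Worked example, assuming the natural layout of the structs above (a
 * 16-byte nfit_set_info_map, a 48-byte nfit_set_info_map2, and zero-sized
 * headers since both structs contain only a flexible array):
 *
 *	sizeof_nfit_set_info(4)  == 4 * 16 == 64 bytes
 *	sizeof_nfit_set_info2(4) == 4 * 48 == 192 bytes
 *
 * i.e. the buffers checksummed by nd_fletcher64() below grow linearly with
 * the number of DIMM mappings in the interleave set.
 */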

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}
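
/*
 * Note: cmp_map_compat() compares the raw bytes of a host-endian u64, so on
 * little-endian systems its ordering differs from the numeric ordering used
 * by cmp_map()/cmp_map2(). It survives only to regenerate the "altcookie"
 * for v1.1 namespaces that were created while the byte-wise sort was in
 * effect; see acpi_nfit_init_interleave_set() below.
 */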

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
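
/*
 * Worked example with hypothetical parameters: line_size = 256,
 * num_lines = 2, table_size = 1024 (2-way interleave, see
 * nfit_blk_init_interleave() below), idt->line_offset[] = { 0, 2 },
 * base_offset = 0, offset = 600:
 *
 *	line_no = 600 / 256 = 2,   sub_line_offset = 88
 *	table_skip_count = 2 / 2 = 1,   line_index = 0
 *	line_offset = 0 * 256 = 0,   table_offset = 1 * 1024 = 1024
 *	result = 0 + 0 + 1024 + 88 = 1112
 */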

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48) - 1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
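
/*
 * The control word packs three fields: bits 0-47 carry the DPA in
 * cache-line units, bits 48-55 the length in cache-line units, and bit 56
 * the write flag. Worked example assuming a 64-byte cache line
 * (L1_CACHE_SHIFT == 6), dpa = 0x2000, len = 256, write = 1:
 *
 *	cmd = (0x2000 >> 6)              // 0x80
 *	    | ((256 >> 6) & 0xff) << 48  // 4 cache lines
 *	    | 1ULL << 56                 // write
 *	    = 0x0104000000000080
 */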

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
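
/*
 * Note: an interleave table spans num_lines * interleave_ways lines, so
 * with the hypothetical values used in the example above (num_lines = 2,
 * interleave_ways = 2, line_size = 256) table_size works out to
 * 2 * 2 * 256 = 1024 bytes. A zero interleave_ways from a malformed memdev
 * is rejected up front rather than producing a zero-sized table.
 */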

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_start(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (req_type == ARS_REQ_SHORT)
		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	if (cmd_rc < 0)
		return cmd_rc;
	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	return 0;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	ars_start = (struct nd_cmd_ars_start) {
		.address = ars_status->restart_address,
		.length = ars_status->restart_length,
		.type = ars_status->type,
	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->max_ars, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
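
/*
 * Convention for the ars_*() helpers above: the ->ndctl() return value (rc)
 * reports transport / ioctl-level failures, while cmd_rc carries the
 * translated firmware status for the command itself. Both are checked, and
 * each helper folds them into a single return code for its caller.
 */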

static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
			/* complete */;
		else
			return;
	} else
		return;

	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;
	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}

static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;

	/*
	 * Ignore potentially stale results that are only refreshed
	 * after a start-ARS event.
	 */
	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
				ars_status->num_records);
		return 0;
	}

	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_badrange(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}
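
/*
 * Layout behind the magic numbers above: the ars_status payload carries a
 * 44-byte header before the first record, followed by 24-byte nd_ars_record
 * entries. For example, an out_length of 92 covers exactly
 * (92 - 44) / 24 == 2 complete records; a third record would fail the
 * "only process full records" check and trigger the truncation warning.
 */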

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
		ndr_desc->target_node = acpi_map_pxm_to_node(
				spa->proximity_domain);
	} else {
		ndr_desc->numa_node = NUMA_NO_NODE;
		ndr_desc->target_node = NUMA_NO_NODE;
	}

	/*
	 * Persistence domain bits are hierarchical, if
	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
	 */
	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status) {
		memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
		return 0;
	}

	ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	return 0;
}

static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
{
	int rc;

	if (ars_status_alloc(acpi_desc))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);

	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc))
		dev_err(acpi_desc->dev, "Failed to process ARS records\n");

	return rc;
}

static int ars_register(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc;

	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);

	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
	if (!no_init_ars)
		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);

	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
	case -ENOSPC:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
		if (rc == -EBUSY)
			break;
		if (rc) {
			set_bit(ARS_FAILED, &nfit_spa->ars_state);
			break;
		}
		clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
		rc = acpi_nfit_query_poison(acpi_desc);
		if (rc)
			break;
		acpi_desc->scrub_spa = nfit_spa;
		ars_complete(acpi_desc, nfit_spa);
		/*
		 * If ars_complete() says we didn't complete the
		 * short scrub, we'll try again with a long
		 * request.
		 */
		acpi_desc->scrub_spa = NULL;
		break;
	case -EBUSY:
	case -ENOMEM:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
		 * own scrubs.
		 */
		break;
	default:
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
		break;
	}

	return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
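
/*
 * In short: ars_register() always ends in acpi_nfit_register_region(); the
 * switch above only decides how much scrubbing happens first. A successful
 * short ARS seeds the badrange list before the region becomes visible,
 * while any failure simply degrades to registration without initial scrub
 * results (with ARS_FAILED blocking future requests where appropriate).
 */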

static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;
		ars_complete(acpi_desc, nfit_spa);
	}
}

static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
		int query_rc)
{
	unsigned int tmo = acpi_desc->scrub_tmo;
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	lockdep_assert_held(&acpi_desc->init_mutex);

	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;

	if (query_rc == -EBUSY) {
		dev_dbg(dev, "ARS: ARS busy\n");
		return min(30U * 60U, tmo * 2);
	}
	if (query_rc == -ENOSPC) {
		dev_dbg(dev, "ARS: ARS continue\n");
		ars_continue(acpi_desc);
		return 1;
	}
	if (query_rc && query_rc != -EAGAIN) {
		unsigned long long addr, end;

		addr = acpi_desc->ars_status->address;
		end = addr + acpi_desc->ars_status->length;
		dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
				query_rc);
	}

	ars_complete_all(acpi_desc);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		enum nfit_ars_state req_type;
		int rc;

		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		/* prefer short ARS requests first */
		if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
			req_type = ARS_REQ_SHORT;
		else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
			req_type = ARS_REQ_LONG;
		else
			continue;
		rc = ars_start(acpi_desc, nfit_spa, req_type);

		dev = nd_region_dev(nfit_spa->nd_region);
		dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
				nfit_spa->spa->range_index,
				req_type == ARS_REQ_SHORT ? "short" : "long",
				rc);
		/*
		 * Hmm, we raced someone else starting ARS? Try again in
		 * a bit.
		 */
		if (rc == -EBUSY)
			return 1;
		if (rc == 0) {
			dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
					"scrub start while range %d active\n",
					acpi_desc->scrub_spa->spa->range_index);
			clear_bit(req_type, &nfit_spa->ars_state);
			acpi_desc->scrub_spa = nfit_spa;
			/*
			 * Consider this spa last for future scrub
			 * requests
			 */
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return 1;
		}

		dev_err(dev, "ARS: range %d ARS failed (%d)\n",
				nfit_spa->spa->range_index, rc);
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
	}
	return 0;
}

static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);

	if (tmo)
		acpi_desc->scrub_tmo = tmo;
	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
}
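
/*
 * Note: the tmo argument is in seconds (scaled by HZ above). Passing 0, as
 * sched_ars() does below, queues the work immediately and leaves the stored
 * backoff (acpi_desc->scrub_tmo) untouched. When the platform reports ARS
 * busy, __acpi_nfit_scrub() returns a doubled timeout capped at 30 minutes,
 * which the next __sched_ars() call then stores.
 */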

static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}

static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
{
	lockdep_assert_held(&acpi_desc->init_mutex);

	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo;
	int query_rc;

	acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
	mutex_lock(&acpi_desc->init_mutex);
	query_rc = acpi_nfit_query_poison(acpi_desc);
	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
	if (tmo)
		__sched_ars(acpi_desc, tmo);
	else
		notify_ars_done(acpi_desc);
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
}

static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int type = nfit_spa_type(nfit_spa->spa);
	struct nd_cmd_ars_cap ars_cap;
	int rc;

	set_bit(ARS_FAILED, &nfit_spa->ars_state);
	memset(&ars_cap, 0, sizeof(ars_cap));
	rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
	if (rc < 0)
		return;

	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
				& ND_ARS_VOLATILE) == 0)
		return;
	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
				& ND_ARS_PERSISTENT) == 0)
		return;

	nfit_spa->max_ars = ars_cap.max_ars_out;
	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}

	sched_ars(acpi_desc);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
	    !list_empty(&prev->memdevs) ||
	    !list_empty(&prev->dcrs) ||
	    !list_empty(&prev->bdws) ||
	    !list_empty(&prev->idts) ||
	    !list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}

static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
			acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
			acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
			acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
			acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
			acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
			acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ARS it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->dwork.work))
		return -EBUSY;

	return 0;
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
{
	struct nd_cmd_pkg *call_pkg = buf;
	unsigned int func;

	if (nvdimm && cmd == ND_CMD_CALL &&
	    call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
		func = call_pkg->nd_command;
		if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
			return -EOPNOTSUPP;
	}

	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
}

int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
		enum nfit_ars_state req_type)
{
	struct device *dev = acpi_desc->dev;
	int scheduled = 0, busy = 0;
	struct nfit_spa *nfit_spa;

	mutex_lock(&acpi_desc->init_mutex);
	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int type = nfit_spa_type(nfit_spa->spa);

		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
			continue;
		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
			busy++;
		else
			scheduled++;
	}
	if (scheduled) {
		sched_ars(acpi_desc);
		dev_dbg(dev, "ars_scan triggered\n");
	}
	mutex_unlock(&acpi_desc->init_mutex);

	if (scheduled)
		return 0;
	if (busy)
		return -EBUSY;
	return -ENOTTY;
}
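
/*
 * Return convention for acpi_nfit_ars_rescan(): 0 when at least one range
 * had a new scrub request queued (or teardown is in flight), -EBUSY when
 * every eligible range already has the requested scrub pending, and
 * -ENOTTY when there are no ARS-capable ranges at all.
 */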

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	acpi_desc->scrub_tmo = 1;
	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
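
/*
 * Usage sketch, mirroring acpi_nfit_add() below ('nfit_data'/'nfit_size'
 * stand in for any stream of NFIT structures, e.g. a _FIT buffer):
 *
 *	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
 *	if (!acpi_desc)
 *		return -ENOMEM;
 *	acpi_nfit_desc_init(acpi_desc, dev);
 *	rc = acpi_nfit_init(acpi_desc, nfit_data, nfit_size);
 */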

static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* The NVDIMM root device allows OS to trigger enumeration of
		 * NVDIMMs through NFIT at boot time and re-enumeration at
		 * root level via the _FIT method during runtime.
		 * This is ok to return 0 here, we could have an nvdimm
		 * hotplugged later and evaluate _FIT method which returns
		 * data in the format of a series of NFIT Structures.
		 */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
					(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}

static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
	else
		acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}

void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");