/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

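/*
 * Fallback no-op stubs for architectures that do not provide
 * set_mce_nospec()/clear_mce_nospec() (x86 supplies them to unmap /
 * remap pages with machine-check errors); they let the poison-clearing
 * path below compile everywhere.
 */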
#ifndef set_mce_nospec
static inline int set_mce_nospec(unsigned long pfn)
{
	return 0;
}
#endif

#ifndef clear_mce_nospec
static inline int clear_mce_nospec(unsigned long pfn)
{
	return 0;
}
#endif

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record
	 * the device at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests can't be released as this
		 * driver is shut down.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	int rc = 0;

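	/*
	 * 'offset' is relative to the start of the device, while the
	 * badblocks list is indexed by 512-byte sectors relative to the
	 * start of the data area, hence the data_offset adjustment.
	 */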
	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = -EIO;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);
	return rc;
}

static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

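	/*
	 * memcpy_flushcache() copies the data and ensures it is written
	 * through to the persistent media rather than left dirty in the
	 * CPU cache.
	 */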
	memcpy_flushcache(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned long rem;
	void *mem = kmap_atomic(page);

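	/*
	 * memcpy_mcsafe() returns the number of bytes left uncopied when
	 * a machine-check (media error) is encountered, so a non-zero
	 * result means the read consumed poison.
	 */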
	rem = memcpy_mcsafe(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	if (rem)
		return -EIO;
	return 0;
}

static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
		unsigned int len, unsigned int off, int rw,
		sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can still read back
		 * the data which is now valid after the clear poison
		 * operation.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

static void pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct pmem_device *pmem = q->queuedata;
	struct bio_vec *bvec;
	sector_t sector;
	int i;
	struct nd_region *nd_region = to_region(pmem);

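	/*
	 * A REQ_FLUSH bio requires that previously completed writes be
	 * durable before any data in this bio is transferred, so flush
	 * the nvdimm region up front.
	 */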
	if (bio->bi_rw & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	sector = bio->bi_sector;
	bio_for_each_segment(bvec, bio, i) {
		rc = pmem_do_bvec(pmem, bvec->bv_page, bvec->bv_len,
				bvec->bv_offset, bio_data_dir(bio), sector);
		if (rc)
			break;
		sector += bvec->bv_len >> 9;
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_rw & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio, rc);
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core retries
	 * on any error, so we can only invoke page_endio() in the
	 * successful completion case.  Otherwise, we'll see crashes caused
	 * by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static int pmem_memcpy_fromiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, const struct iovec *iov, int offset, int len)
{
	return memcpy_fromiovecend_partial_flushcache(addr, iov, offset, len);
}

static int pmem_memcpy_toiovecend(struct dax_device *dax_dev, pgoff_t pgoff,
		const struct iovec *iov, void *addr, int offset, int len)
{
	return memcpy_toiovecend_partial_mcsafe(iov, addr, offset, len);
}

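/*
 * DAX copies into pmem use the flushcache variant so writes are pushed
 * out of the CPU cache, while copies out of pmem use the mcsafe variant
 * so consumed poison is reported as a copy shortfall instead of raising
 * a fatal machine check.
 */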
static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.memcpy_fromiovecend = pmem_memcpy_fromiovecend,
	.memcpy_toiovecend = pmem_memcpy_toiovecend,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static void fsdax_pagefree(struct page *page, void *data)
{
	wake_up_var(&page->_refcount);
}

static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_FS_DAX;
	pgmap->page_free = fsdax_pagefree;

	return 0;
}

static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	unsigned flush_flags = REQ_FLUSH;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	if (fua)
		flush_flags |= REQ_FUA;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
	if (!q)
		return -ENOMEM;

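	/*
	 * Tie the lifetime of the device page map to the request queue's
	 * usage counter so pages are not released while I/O is still in
	 * flight.
	 */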
	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
	}

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

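	/*
	 * Configure the queue: pmem has no seek penalty and no practical
	 * transfer-size limit, and DAX is advertised only when struct
	 * pages are mapped for the device.
	 */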
	blk_queue_flush(q, flush_flags);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

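	/*
	 * Register a dax_device on top of the same backing pages; the
	 * write-cache flag tells the fs/dax core whether CPU-cache
	 * flushing is required after writes through the DAX mapping.
	 */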
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	add_disk(disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

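	/*
	 * Cache the sysfs dirent for the 'badblocks' attribute so that
	 * poll(2) waiters can be notified when the list changes.
	 */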
	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
			"badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block, re-probe as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise attach as a raw pmem block device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

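	/*
	 * Resolve which namespace, badblocks list, and sysfs dirent to
	 * update based on the personality (btt, pfn, or raw) sitting on
	 * top of the namespace.
	 */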
	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

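	/*
	 * Trim the resource to the range actually used for data before
	 * repopulating the badblocks list from the region's poison
	 * records.
	 */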
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");