// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the
	 * device at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding dma requests are guaranteed to be idle,
		 * i.e. this is not racing memunmap_pages().
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

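/*
 * Copy data from a (possibly highmem) page into pmem, one kmap window at
 * a time, using cache-flushing stores so that written data is pushed out
 * of the CPU cache toward the media.
 */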
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

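/*
 * Copy data from pmem into a (possibly highmem) page using the
 * machine-check-safe copy helper; any uncopied remainder means poison was
 * encountered and the read fails with BLK_STS_IOERR.
 */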
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

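/*
 * Sector-granularity read: fail immediately if the range intersects known
 * badblocks, otherwise copy from media and keep the destination page's
 * dcache coherent.
 */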
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

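/*
 * Sector-granularity write; known poison in the target range is cleared
 * as a side effect (see the comment below on the write/clear/write
 * ordering).
 */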
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * If the driver clears the poison before writing we
	 * would lose the written data.
	 *
	 * While the write after clear poison is conceptually a
	 * no-op, it is required to maintain the sector
	 * consistency of cleared poison across both writes when
	 * a single write straddles both poisoned and good data.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

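/*
 * Bio-based I/O path: honor REQ_PREFLUSH with an nvdimm flush before
 * touching data, copy each segment to or from media, and honor REQ_FUA
 * with a trailing flush.
 */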
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

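/*
 * dax_operations.zero_page_range: zero the page at @pgoff by writing
 * ZERO_PAGE(0) through pmem_do_write(), which also clears any known
 * poison in the process.
 */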
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor().
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

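/*
 * The dev_pagemap percpu ref is the request queue's q_usage_counter, so
 * pagemap kill/cleanup are implemented by freezing and cleaning up the
 * queue.
 */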
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

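/* devm action: unwind alloc_dax() and device_add_disk() on driver detach */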
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill = pmem_pagemap_kill,
	.cleanup = pmem_pagemap_cleanup,
};

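/*
 * Common disk setup for all pmem personalities. The namespace memory is
 * mapped in one of three ways: via a pfn info block (memmap space
 * reserved out of the namespace), via devm_memremap_pages() over the raw
 * namespace, or via plain devm_memremap() with no struct pages (and thus
 * no PFN_MAP/DAX support).
 */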
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue(dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev)) {
		put_disk(disk);
		return PTR_ERR(dax_dev);
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

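/*
 * Probe order: explicit btt and pfn devices attach directly; otherwise
 * probe the raw namespace for btt, pfn, and dax info blocks in turn
 * (returning -ENXIO lets the claiming personality device attach instead),
 * and finally fall back to a raw pmem disk.
 */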
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider the namespace as a raw pmem
	 * namespace and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify().
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

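/*
 * NVDIMM_REVALIDATE_POISON handler: re-derive the relevant resource range
 * for the btt/pfn/raw configuration, repopulate badblocks from the
 * region, and notify the sysfs 'badblocks' attribute if one is published.
 */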
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");