/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

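/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */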
#include <asm-generic/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

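		/* check if flush hints share a page */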
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

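		/* at least one null hint slot per-dimm for the "no-hint" case */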
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

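	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * extra flushings.
	 */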
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
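		/* ignore if NULL already */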
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

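/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */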
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

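	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */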
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

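	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are not labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */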
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

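		/* if a dimm is disabled the available capacity is zero */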
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = (phys_addr_t)ULLONG_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

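	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */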
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_nd_pmem(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

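/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */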
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

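/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */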
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

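/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For NVDIMM
 * Blk-Apertures there is a 1:1 relationship between lane and DPA range.
 *
 * In the general case, 'num_lanes' < nr_cpu_ids, multiple cpus hash to
 * the same lane, so a per-lane spinlock arbitrates access while a per-cpu
 * reference count makes the lock recursive for re-acquisition on the same
 * cpu.  Typical usage (sketch):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	// ... issue I/O through the lane's aperture / BTT log slot ...
 *	nd_region_release_lane(nd_region, lane);
 */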
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev,
					"%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

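/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */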
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

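	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */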
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

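	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache().  The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */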
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

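/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */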
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

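	/* no nvdimm or pmem api == flushing capability unknown */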
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

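		/* flush hints present / available */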
		if (nvdimm->num_flush)
			return 1;
	}

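	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */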
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}