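// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */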
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	bool			root_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	const struct nvmem_keepout *keepout;
	unsigned int		nkeepout;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	struct gpio_desc	*wp_gpio;
	void			*priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		/* Deassert the write-protect GPIO for the duration of the write. */
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

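	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */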
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * Now we're left with the last part of the buffer.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
158
159static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
160 void *val, size_t bytes)
161{
162 if (!nvmem->nkeepout)
163 return __nvmem_reg_read(nvmem, offset, val, bytes);
164
165 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
166}
167
168static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
169 void *val, size_t bytes)
170{
171 if (!nvmem->nkeepout)
172 return __nvmem_reg_write(nvmem, offset, val, bytes);
173
174 return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
175}
176
177#ifdef CONFIG_NVMEM_SYSFS
178static const char * const nvmem_type_str[] = {
179 [NVMEM_TYPE_UNKNOWN] = "Unknown",
180 [NVMEM_TYPE_EEPROM] = "EEPROM",
181 [NVMEM_TYPE_OTP] = "OTP",
182 [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
183 [NVMEM_TYPE_FRAM] = "FRAM",
184};
185
186#ifdef CONFIG_DEBUG_LOCK_ALLOC
187static struct lock_class_key eeprom_lock_key;
188#endif
189
190static ssize_t type_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct nvmem_device *nvmem = to_nvmem_device(dev);
194
195 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
196}
197
198static DEVICE_ATTR_RO(type);
199
200static struct attribute *nvmem_attrs[] = {
201 &dev_attr_type.attr,
202 NULL,
203};
204
205static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
206 struct bin_attribute *attr, char *buf,
207 loff_t pos, size_t count)
208{
209 struct device *dev;
210 struct nvmem_device *nvmem;
211 int rc;
212
213 if (attr->private)
214 dev = attr->private;
215 else
216 dev = kobj_to_dev(kobj);
217 nvmem = to_nvmem_device(dev);
218
219
220 if (pos >= nvmem->size)
221 return 0;
222
223 if (!IS_ALIGNED(pos, nvmem->stride))
224 return -EINVAL;
225
226 if (count < nvmem->word_size)
227 return -EINVAL;
228
229 if (pos + count > nvmem->size)
230 count = nvmem->size - pos;
231
232 count = round_down(count, nvmem->word_size);
233
234 if (!nvmem->reg_read)
235 return -EPERM;
236
237 rc = nvmem_reg_read(nvmem, pos, buf, count);
238
239 if (rc)
240 return rc;
241
242 return count;
243}
244
245static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
246 struct bin_attribute *attr, char *buf,
247 loff_t pos, size_t count)
248{
249 struct device *dev;
250 struct nvmem_device *nvmem;
251 int rc;
252
253 if (attr->private)
254 dev = attr->private;
255 else
256 dev = kobj_to_dev(kobj);
257 nvmem = to_nvmem_device(dev);
258
259
260 if (pos >= nvmem->size)
261 return -EFBIG;
262
263 if (!IS_ALIGNED(pos, nvmem->stride))
264 return -EINVAL;
265
266 if (count < nvmem->word_size)
267 return -EINVAL;
268
269 if (pos + count > nvmem->size)
270 count = nvmem->size - pos;
271
272 count = round_down(count, nvmem->word_size);
273
274 if (!nvmem->reg_write)
275 return -EPERM;
276
277 rc = nvmem_reg_write(nvmem, pos, buf, count);
278
279 if (rc)
280 return rc;
281
282 return count;
283}
284
285static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
286{
287 umode_t mode = 0400;
288
289 if (!nvmem->root_only)
290 mode |= 0044;
291
292 if (!nvmem->read_only)
293 mode |= 0200;
294
295 if (!nvmem->reg_write)
296 mode &= ~0200;
297
298 if (!nvmem->reg_read)
299 mode &= ~0444;
300
301 return mode;
302}
303
304static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
305 struct bin_attribute *attr, int i)
306{
307 struct device *dev = kobj_to_dev(kobj);
308 struct nvmem_device *nvmem = to_nvmem_device(dev);
309
310 return nvmem_bin_attr_get_umode(nvmem);
311}
312
313
314static struct bin_attribute bin_attr_rw_nvmem = {
315 .attr = {
316 .name = "nvmem",
317 .mode = 0644,
318 },
319 .read = bin_attr_nvmem_read,
320 .write = bin_attr_nvmem_write,
321};
322
323static struct bin_attribute *nvmem_bin_attributes[] = {
324 &bin_attr_rw_nvmem,
325 NULL,
326};
327
328static const struct attribute_group nvmem_bin_group = {
329 .bin_attrs = nvmem_bin_attributes,
330 .attrs = nvmem_attrs,
331 .is_bin_visible = nvmem_bin_attr_is_visible,
332};
333
334static const struct attribute_group *nvmem_dev_groups[] = {
335 &nvmem_bin_group,
336 NULL,
337};
338
339static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
340 .attr = {
341 .name = "eeprom",
342 },
343 .read = bin_attr_nvmem_read,
344 .write = bin_attr_nvmem_write,
345};
346
347
348
349
350
351
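/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the device's sysfs directory, to be backwards compatible with the older
 * drivers and ecosystem.
 */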
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

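/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */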
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

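/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */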
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

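/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */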
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
						   BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);

			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_add(cell);
	}

	return 0;
}

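/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */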
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Record the id now so the error paths below free the right one. */
	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

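/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */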
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

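/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */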
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

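/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */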
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
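/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */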
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

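/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */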
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

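/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */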
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

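/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */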
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

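/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */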
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

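/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * requesting device is freed.
 */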
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

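/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */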
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

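/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */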
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

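/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the requesting device is freed.
 */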
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

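/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */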
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

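/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */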
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Fold the low bits of the next byte into the
			 * high bits of the current one. */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

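/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */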
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

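/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */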
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* buf was allocated by nvmem_cell_prepare_write_buffer() */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

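/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */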
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

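/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */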
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

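/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */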
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

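/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */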
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

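/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little
 *				       endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */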
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

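/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little
 *				       endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */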
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

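/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */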
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

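/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */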
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

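/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */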
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

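/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */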
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

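/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */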
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

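/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */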
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

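/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */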
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

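/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *			      entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */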
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

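/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */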
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");