// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, const void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	/* Cell names may live in .rodata; kfree_const() handles both cases. */
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			info->name, nvmem->stride);
		return -EINVAL;
	}

	/*
	 * Duplicate the name: nvmem_cell_drop() frees cell names, while
	 * info->name is typically a string literal owned by the caller.
	 */
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
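
/*
 * Usage sketch (illustrative, not part of this file): a listener that wants
 * to react to cells appearing can hook the chain above. The event codes
 * (NVMEM_ADD, NVMEM_CELL_ADD, ...) come from <linux/nvmem-consumer.h>; the
 * foo_* names below are invented for the example.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_CELL_ADD)
 *			pr_debug("nvmem cell added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	err = nvmem_register_notifier(&foo_nb);
 *	...
 *	nvmem_unregister_notifier(&foo_nb);
 */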

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			/* drop the reference taken by the iterator */
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
		if (!cell->name) {
			of_node_put(cell->np);
			kfree(cell);
			of_node_put(child);
			return -ENOMEM;
		}

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			of_node_put(cell->np);
			kfree(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
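
/*
 * Provider-side sketch (illustrative; the foo_* names and sizes are
 * invented): a driver fills in a struct nvmem_config from
 * <linux/nvmem-provider.h> and registers it. reg_read()/reg_write()
 * receive the priv pointer set below.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-efuse",
 *		.id = -1,
 *		.size = 256,
 *		.word_size = 1,
 *		.stride = 4,
 *		.priv = foo,
 *		.reg_read = foo_reg_read,
 *		.read_only = true,
 *	};
 *
 *	nvmem = nvmem_register(&config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */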

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
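
/*
 * With the managed variant the provider needs no explicit nvmem_unregister()
 * in its remove path; devres drops the device when @dev is unbound. Sketch
 * (config as in the nvmem_register() example above):
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */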

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for device %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem device will be released automatically once the
 * consumer device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
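
/*
 * Consumer-side sketch (illustrative; the "foo-efuse" name is an
 * assumption): grab a whole nvmem device by the name it was registered
 * under and read raw bytes from it. The reference is dropped automatically
 * when the consumer device is unbound.
 *
 *	struct nvmem_device *nvmem;
 *	u8 id[4];
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo-efuse");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(id), id);
 */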

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
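
/*
 * Worked example for the helper below (values invented): a cell with
 * bit_offset = 2 and nbits = 8 spans two raw bytes (DIV_ROUND_UP(10, 8)).
 * Each byte is shifted right by two and topped up with the low bits of the
 * following byte; after that the value fits in one byte, so the trailing
 * byte is zeroed. Because nbits here is a whole number of bytes, the final
 * mask must be skipped (GENMASK(-1, 0) would be undefined), hence the
 * nbits % BITS_PER_BYTE guard.
 */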

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards lsb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
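
/*
 * Consumer sketch (illustrative; the cell name "mac-address" is an
 * assumption): look the cell up, read it, and release it. The returned
 * buffer is allocated by the core and owned by the caller.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *	...
 *	kfree(mac);
 */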

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the temporary buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
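
/*
 * Write-side counterpart of the read sketch above (illustrative): for a
 * byte-aligned cell @len must equal the cell size; for packed cells
 * (bit_offset/nbits set) the helper above merges the value with the
 * surrounding bits read back from the device.
 *
 *	u32 calib = 0xcafe;
 *
 *	rc = nvmem_cell_write(cell, &calib, sizeof(calib));
 *	if (rc != sizeof(calib))
 *		return rc < 0 ? rc : -EIO;
 */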

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
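
/*
 * Typical use of the u16/u32 helpers (illustrative cell name): they wrap
 * the get/read/put sequence and fail with -EINVAL unless the cell is
 * exactly two/four bytes wide.
 *
 *	u32 speed_bin;
 *
 *	err = nvmem_cell_read_u32(dev, "speed-bin", &speed_bin);
 */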

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	/* the temporary cell owns a copy of the name */
	kfree_const(cell.name);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = nvmem_cell_write(&cell, buf, cell.bytes);
	/* the temporary cell owns a copy of the name */
	kfree_const(cell.name);

	return rc;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
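
/*
 * Raw access sketch (illustrative offsets/sizes): unlike the cell API,
 * nvmem_device_read()/nvmem_device_write() address the device by plain
 * byte offset, with no stride or bit-level adjustment.
 *
 *	u8 serial[16];
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *	if (ret != sizeof(serial))
 *		return ret < 0 ? ret : -EIO;
 */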

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
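
/*
 * Sketch of a cell table (illustrative names/offsets): boards without
 * device tree can describe cells in code and tie them to a provider by its
 * device name; nvmem_add_cells_from_table() above picks them up when that
 * provider registers.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-efuse",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */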

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
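
/*
 * Sketch of a lookup entry (illustrative names): a lookup connects a
 * consumer device/con_id pair to a named cell on a named provider, which
 * is how nvmem_cell_get() resolves cells on non-DT systems.
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name	= "foo-efuse",
 *		.cell_name	= "mac-address",
 *		.dev_id		= "foo-eth.0",
 *		.con_id		= "mac-address",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */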

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");