/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};

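/*
 * Build the reg_defaults table for a map registered with raw defaults
 * (or none at all): count the cacheable registers, then create one
 * reg_default entry per readable, non-volatile register, reading the
 * values back from the hardware when no raw defaults were supplied.
 */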
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;

		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = 1;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to take
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

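/*
 * Decide whether a cached value must be written back during a sync.
 * Unless regcache_mark_dirty() has told us the hardware was reset to
 * its defaults (no_sync_defaults), every register is synced; otherwise
 * registers still holding their default value can be skipped.
 */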
static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

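/*
 * Fallback sync implementation used when the cache backend does not
 * provide its own sync() operation: walk the region one register at a
 * time and write out every cached value that still needs syncing.
 */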
static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause any
 * hardware changes.  This is useful for allowing portions of drivers
 * to act as though the device were functioning as normal when it is
 * disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any registers
 * altered by do_write.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the bypass option, writes to the
 * register map API will only update the hardware and not the cache
 * directly.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}

	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

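/*
 * Look up @reg in map->reg_defaults and return its index, or -ENOENT if
 * the register has no default.  bsearch() assumes the table is sorted
 * by register address in ascending order.
 */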
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

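/* With no presence bitmap every register is assumed to be cached. */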
static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

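/*
 * Sync a block one register at a time; used when the bus cannot do raw
 * block writes or is restricted to single-register writes (see
 * regcache_sync_block() below).
 */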
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

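/*
 * Flush the run of values accumulated by regcache_sync_block_raw() as a
 * single raw write covering registers [base, cur), then clear *data so
 * a new run can be started.  A NULL *data means there is nothing to
 * flush.
 */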
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

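/*
 * Sync a block by accumulating runs of adjacent registers that need
 * writing and flushing each run as one raw write.  A register that is
 * absent, not writeable or already holding its default value ends the
 * current run.
 */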
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

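/*
 * Sync a block of cached values, batching adjacent registers into raw
 * block writes when the bus supports them and falling back to single
 * register writes otherwise.
 */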
int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}