// SPDX-License-Identifier: GPL-2.0
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
7#include <linux/atomic.h>
8#include <linux/device.h>
9#include <linux/err.h>
10#include <linux/export.h>
11#include <linux/kernel.h>
12#include <linux/kref.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/reset.h>
16#include <linux/reset-controller.h>
17#include <linux/slab.h>
18
19static DEFINE_MUTEX(reset_list_mutex);
20static LIST_HEAD(reset_controller_list);
21
22static DEFINE_MUTEX(reset_lookup_mutex);
23static LIST_HEAD(reset_lookup_list);
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this reset control part of an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};
52
53
54
55
56
57
58
/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[];
};
64
65static const char *rcdev_name(struct reset_controller_dev *rcdev)
66{
67 if (rcdev->dev)
68 return dev_name(rcdev->dev);
69
70 if (rcdev->of_node)
71 return rcdev->of_node->full_name;
72
73 return NULL;
74}
75
76
77
78
79
80
81
82
83
84
85
86static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
87 const struct of_phandle_args *reset_spec)
88{
89 if (reset_spec->args[0] >= rcdev->nr_resets)
90 return -EINVAL;
91
92 return reset_spec->args[0];
93}
94
95
96
97
98
/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Installs the default 1:1 of_xlate when the driver did not provide one,
 * then adds the controller to the global list under reset_list_mutex.
 *
 * Returns: 0 (currently cannot fail).
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
115
116
117
118
119
/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
127
/* devres release callback: unregister the controller stored in @res. */
static void devm_reset_controller_release(struct device *dev, void *res)
{
	struct reset_controller_dev **rcdevp = res;

	reset_controller_unregister(*rcdevp);
}
132
133
134
135
136
137
138
139
140
141
/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 *
 * Returns: 0 on success, -ENOMEM if the devres allocation fails, or the
 * error from reset_controller_register().
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
165
166
167
168
169
170
/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 *
 * Entries missing a dev_id or provider name are skipped with a warning;
 * valid entries are appended to the global lookup list.
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
192
/* Convert an array's base reset_control back to the enclosing array. */
static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
	return container_of(rstc, struct reset_control_array, base);
}
197
198static int reset_control_array_reset(struct reset_control_array *resets)
199{
200 int ret, i;
201
202 for (i = 0; i < resets->num_rstcs; i++) {
203 ret = reset_control_reset(resets->rstc[i]);
204 if (ret)
205 return ret;
206 }
207
208 return 0;
209}
210
/*
 * Undo the trigger bookkeeping for every member of the array so they can be
 * reset again. Done in two passes: validate every member first, and only then
 * decrement the shared members' triggered_count, so a failing member leaves
 * no counter modified.
 */
static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	/* Pass 1: validate all members before touching any counters. */
	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			/* A deasserted shared line must not be rearmed. */
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			/* Exclusive resets must be acquired to be rearmed. */
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	/* Pass 2: all checks passed, undo the trigger count on shared lines. */
	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}
243
/*
 * Assert every member of the array. On failure, deassert the members that
 * were already asserted (in reverse order) so the array is left in its
 * previous state.
 */
static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back members [0, i). */
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}
261
/*
 * Deassert every member of the array. On failure, re-assert the members
 * that were already deasserted (in reverse order) to restore the previous
 * state.
 */
static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back members [0, i). */
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}
279
/*
 * Acquire every member of the array. On failure, release the members that
 * were already acquired (in reverse order) so the operation is all-or-nothing.
 */
static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	/* Undo acquisitions [0, i). */
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}
299
300static void reset_control_array_release(struct reset_control_array *resets)
301{
302 unsigned int i;
303
304 for (i = 0; i < resets->num_rstcs; i++)
305 reset_control_release(resets->rstc[i]);
306}
307
/* True iff @rstc is the base member of a struct reset_control_array. */
static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}
312
313
314
315
316
317
318
319
320
321
322
323
324
325
/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for
 * the lifetime of the reset_control instance: for all but the first caller
 * this is a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		/* Mixing reset() with (de)assert() on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		/* Only the first user actually triggers the reset. */
		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		/* Undo the count so a later caller may retry. */
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
360
361
362
363
364
365
366
367
368
369
370int reset_control_bulk_reset(int num_rstcs,
371 struct reset_control_bulk_data *rstcs)
372{
373 int ret, i;
374
375 for (i = 0; i < num_rstcs; i++) {
376 ret = reset_control_reset(rstcs[i].rstc);
377 if (ret)
378 return ret;
379 }
380
381 return 0;
382}
383EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line, this decrements the trigger count so that a
 * subsequent reset_control_reset() will actually pulse the line again.
 * For exclusive (acquired) resets this is a no-op beyond the permission
 * check. If rstc is NULL the reset is optional and 0 is returned.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing rearm with (de)assert on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		/* Going negative means rearm without a prior reset. */
		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing assert() with reset() on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		/* Assert without a matching deassert is a bug. */
		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		/* Other users still hold the line deasserted. */
		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);
487
488
489
490
491
492
493
494
495
496
497
/**
 * reset_control_bulk_assert - asserts the reset lines in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Assert the reset lines in order. If an assertion fails, the already
 * asserted lines are deasserted again in reverse order.
 */
int reset_control_bulk_assert(int num_rstcs,
			      struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_assert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back entries [0, i). */
	while (i--)
		reset_control_deassert(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);
517
518
519
520
521
522
523
524
525
526
527
528
/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		/* Mixing deassert() with reset() on a shared line is a bug. */
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		/* Only the first user actually deasserts the line. */
		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * the reset line is deasserted by default. If that is not the case
	 * (the reset line is asserted at power-on), the driver has to
	 * implement .deassert() anyway.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
567
568
569
570
571
572
573
574
575
576
577
/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deassert the reset lines in reverse order (mirroring bulk_assert). If a
 * deassertion fails, the already deasserted lines are asserted again.
 */
int reset_control_bulk_deassert(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = num_rstcs - 1; i >= 0; i--) {
		ret = reset_control_deassert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back entries (i, num_rstcs), which were already deasserted. */
	while (i < num_rstcs)
		reset_control_assert(rstcs[i++].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
597
598
599
600
601
602
603
604int reset_control_status(struct reset_control *rstc)
605{
606 if (!rstc)
607 return 0;
608
609 if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
610 return -EINVAL;
611
612 if (rstc->rcdev->ops->status)
613 return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
614
615 return -ENOTSUPP;
616}
617EXPORT_SYMBOL_GPL(reset_control_status);
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
/**
 * reset_control_acquire - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * Only one exclusive reset control for a given rcdev/id pair may be acquired
 * at a time; acquiring fails with -EBUSY while another instance holds it.
 * Acquiring an already-acquired control is a no-op returning 0.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	/* reset_list_mutex also protects the rcdev's reset_control_head. */
	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	/* Check no other instance for the same line is currently acquired. */
	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);
674
675
676
677
678
679
680
681
682
683
684
/**
 * reset_control_bulk_acquire - acquires reset controls for exclusive use
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Acquire all reset controls in order. If any acquisition fails, the
 * already acquired controls are released again in reverse order.
 */
int reset_control_bulk_acquire(int num_rstcs,
			       struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_acquire(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	/* Roll back entries [0, i). */
	while (i--)
		reset_control_release(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);
704
705
706
707
708
709
710
711
712
713
714
715void reset_control_release(struct reset_control *rstc)
716{
717 if (!rstc || WARN_ON(IS_ERR(rstc)))
718 return;
719
720 if (reset_control_is_array(rstc))
721 reset_control_array_release(rstc_to_array(rstc));
722 else
723 rstc->acquired = false;
724}
725EXPORT_SYMBOL_GPL(reset_control_release);
726
727
728
729
730
731
732
733
734
735
736
737void reset_control_bulk_release(int num_rstcs,
738 struct reset_control_bulk_data *rstcs)
739{
740 int i;
741
742 for (i = 0; i < num_rstcs; i++)
743 reset_control_release(rstcs[i].rstc);
744}
745EXPORT_SYMBOL_GPL(reset_control_bulk_release);
746
747static struct reset_control *__reset_control_get_internal(
748 struct reset_controller_dev *rcdev,
749 unsigned int index, bool shared, bool acquired)
750{
751 struct reset_control *rstc;
752
753 lockdep_assert_held(&reset_list_mutex);
754
755 list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
756 if (rstc->id == index) {
757
758
759
760
761
762 if (!rstc->shared && !shared && !acquired)
763 break;
764
765 if (WARN_ON(!rstc->shared || !shared))
766 return ERR_PTR(-EBUSY);
767
768 kref_get(&rstc->refcnt);
769 return rstc;
770 }
771 }
772
773 rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
774 if (!rstc)
775 return ERR_PTR(-ENOMEM);
776
777 try_module_get(rcdev->owner);
778
779 rstc->rcdev = rcdev;
780 list_add(&rstc->list, &rcdev->reset_control_head);
781 rstc->id = index;
782 kref_init(&rstc->refcnt);
783 rstc->acquired = acquired;
784 rstc->shared = shared;
785
786 return rstc;
787}
788
/*
 * kref release callback: drop the module reference taken at creation,
 * unlink the control from its rcdev's list and free it.
 * Caller must hold reset_list_mutex (via __reset_control_put_internal).
 */
static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}
801
/* Drop one reference; frees the control when it was the last one. */
static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	kref_put(&rstc->refcnt, __reset_control_release);
}
808
/*
 * Resolve a reset control from device tree: optionally translate @id via
 * the "reset-names" property, parse the "resets" phandle at @index, find
 * the matching registered controller, translate the specifier to a line
 * number via of_xlate, and get the reset_control instance.
 *
 * Returns NULL for missing optional resets, or an ERR_PTR on failure
 * (-EPROBE_DEFER when the provider is not registered yet).
 */
struct reset_control *__of_reset_control_get(struct device_node *node,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control *rstc;
	struct reset_controller_dev *r, *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		/* -EILSEQ means the property itself is malformed. */
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	/* -EINVAL indicates a malformed "resets" property, even for optional. */
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret)
		return optional ? NULL : ERR_PTR(ret);

	mutex_lock(&reset_list_mutex);
	rcdev = NULL;
	list_for_each_entry(r, &reset_controller_list, list) {
		if (args.np == r->of_node) {
			rcdev = r;
			break;
		}
	}

	if (!rcdev) {
		/* Provider not registered (yet): let the caller retry later. */
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list. */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
	mutex_unlock(&reset_list_mutex);
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
873
874static struct reset_controller_dev *
875__reset_controller_by_name(const char *name)
876{
877 struct reset_controller_dev *rcdev;
878
879 lockdep_assert_held(&reset_list_mutex);
880
881 list_for_each_entry(rcdev, &reset_controller_list, list) {
882 if (!rcdev->dev)
883 continue;
884
885 if (!strcmp(name, dev_name(rcdev->dev)))
886 return rcdev;
887 }
888
889 return NULL;
890}
891
/*
 * Resolve a reset control from the board-file lookup table (non-DT path):
 * match on the consumer device name and connection id, find the provider
 * controller by name, and get the reset_control instance.
 *
 * Returns NULL for a missing optional reset, -EPROBE_DEFER when the named
 * provider is not registered yet, or -ENOENT when no lookup entry matches.
 */
static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		/* Match when both con_ids are NULL or both compare equal. */
		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				/* Drop both locks before the early return. */
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Provider not registered yet: retry later. */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}
934
/*
 * Common entry point for the reset_control_get* family: reject the invalid
 * shared+acquired combination, then dispatch to the DT path when the device
 * has an of_node, or to the lookup-table path otherwise.
 */
struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);
950
/*
 * Get all reset controls named in @rstcs (index 0 of each id). On failure,
 * put the controls obtained so far, in reverse order, under reset_list_mutex.
 */
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
			     struct reset_control_bulk_data *rstcs,
			     bool shared, bool optional, bool acquired)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
						    shared, optional, acquired);
		if (IS_ERR(rstcs[i].rstc)) {
			ret = PTR_ERR(rstcs[i].rstc);
			goto err;
		}
	}

	return 0;

err:
	/* Roll back entries [0, i). */
	mutex_lock(&reset_list_mutex);
	while (i--)
		__reset_control_put_internal(rstcs[i].rstc);
	mutex_unlock(&reset_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);
976
977static void reset_control_array_put(struct reset_control_array *resets)
978{
979 int i;
980
981 mutex_lock(&reset_list_mutex);
982 for (i = 0; i < resets->num_rstcs; i++)
983 __reset_control_put_internal(resets->rstc[i]);
984 mutex_unlock(&reset_list_mutex);
985 kfree(resets);
986}
987
988
989
990
991
/**
 * reset_control_put - free the reset control
 * @rstc: reset control
 *
 * NULL and error pointers are ignored.
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
1007
1008
1009
1010
1011
1012
/**
 * reset_control_bulk_put - free the reset controls, in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * NULL and error-pointer entries are skipped.
 */
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
	mutex_lock(&reset_list_mutex);
	while (num_rstcs--) {
		if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
			continue;
		__reset_control_put_internal(rstcs[num_rstcs].rstc);
	}
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);
1024
/* devres release callback: put the reset control stored in @res. */
static void devm_reset_control_release(struct device *dev, void *res)
{
	struct reset_control **rstcp = res;

	reset_control_put(*rstcp);
}
1029
/*
 * Resource-managed __reset_control_get(): the obtained reset control is
 * automatically put on driver detach. Returns the control, NULL (optional
 * reset not found), or an ERR_PTR.
 */
struct reset_control *__devm_reset_control_get(struct device *dev,
				     const char *id, int index, bool shared,
				     bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	/* NULL (optional, absent) needs no devres either. */
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
1053
/*
 * Devres payload for managed bulk gets: remembers the caller's array so it
 * can be put on driver detach.
 */
struct reset_control_bulk_devres {
	int num_rstcs;
	struct reset_control_bulk_data *rstcs;
};
1058
/* devres release callback: put all reset controls recorded in @res. */
static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
	struct reset_control_bulk_devres *devres = res;

	reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}
1065
/*
 * Resource-managed __reset_control_bulk_get(): the obtained reset controls
 * are automatically put on driver detach. The caller's @rstcs array must
 * stay valid for the device's lifetime, as the devres only stores a pointer
 * to it.
 */
int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
				  struct reset_control_bulk_data *rstcs,
				  bool shared, bool optional, bool acquired)
{
	struct reset_control_bulk_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	ptr->num_rstcs = num_rstcs;
	ptr->rstcs = rstcs;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines. The control is obtained exclusive and acquired, used once, and put.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131static int of_reset_control_get_count(struct device_node *node)
1132{
1133 int count;
1134
1135 if (!node)
1136 return -EINVAL;
1137
1138 count = of_count_phandle_with_args(node, "resets", "#reset-cells");
1139 if (count == 0)
1140 count = -ENOENT;
1141
1142 return count;
1143}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
/**
 * of_reset_control_array_get - Get a list of reset controls using
 *				device node.
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 * (NULL when optional and the "resets" property is absent).
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->num_rstcs = num;
	/* Mark the base control so the API dispatches to the array paths. */
	resets->base.array = true;

	return &resets->base;

err_rst:
	/* Put the controls obtained before the failure, then free the array. */
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
/**
 * devm_reset_control_array_get - Resource managed reset control array get
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 * (NULL when optional and absent).
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	/* NULL (optional, absent) needs no devres either. */
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
1232
1233static int reset_control_get_count_from_lookup(struct device *dev)
1234{
1235 const struct reset_control_lookup *lookup;
1236 const char *dev_id;
1237 int count = 0;
1238
1239 if (!dev)
1240 return -EINVAL;
1241
1242 dev_id = dev_name(dev);
1243 mutex_lock(&reset_lookup_mutex);
1244
1245 list_for_each_entry(lookup, &reset_lookup_list, list) {
1246 if (!strcmp(lookup->dev_id, dev_id))
1247 count++;
1248 }
1249
1250 mutex_unlock(&reset_lookup_mutex);
1251
1252 if (count == 0)
1253 count = -ENOENT;
1254
1255 return count;
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266int reset_control_get_count(struct device *dev)
1267{
1268 if (dev->of_node)
1269 return of_reset_control_get_count(dev->of_node);
1270
1271 return reset_control_get_count_from_lookup(dev);
1272}
1273EXPORT_SYMBOL_GPL(reset_control_get_count);
1274