// SPDX-License-Identifier: GPL-2.0-only
/*
 * Reset Controller framework
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset line in the reset controller device
 * @refcnt: number of gets of this reset_control
 * @acquired: only one reset_control may be acquired for a given rcdev and id
 * @shared: is this a shared (1), or an exclusive (0) reset_control?
 * @array: is this an array of reset controls (1)?
 * @deassert_count: number of times this reset line has been deasserted
 * @triggered_count: number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
        struct reset_controller_dev *rcdev;
        struct list_head list;
        unsigned int id;
        struct kref refcnt;
        bool acquired;
        bool shared;
        bool array;
        atomic_t deassert_count;
        atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
        struct reset_control base;
        unsigned int num_rstcs;
        struct reset_control *rstc[];
};

static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
        if (rcdev->dev)
                return dev_name(rcdev->dev);

        if (rcdev->of_node)
                return rcdev->of_node->full_name;

        return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This simple translation is used by default if the of_xlate callback in
 * struct reset_controller_dev is not set. It is useful for reset controllers
 * with a 1:1 mapping, where reset lines can be indexed by number without
 * gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
                                 const struct of_phandle_args *reset_spec)
{
        if (reset_spec->args[0] >= rcdev->nr_resets)
                return -EINVAL;

        return reset_spec->args[0];
}

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
        if (!rcdev->of_xlate) {
                rcdev->of_reset_n_cells = 1;
                rcdev->of_xlate = of_reset_simple_xlate;
        }

        INIT_LIST_HEAD(&rcdev->reset_control_head);

        mutex_lock(&reset_list_mutex);
        list_add(&rcdev->list, &reset_controller_list);
        mutex_unlock(&reset_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
        mutex_lock(&reset_list_mutex);
        list_del(&rcdev->list);
        mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
        reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
                                   struct reset_controller_dev *rcdev)
{
        struct reset_controller_dev **rcdevp;
        int ret;

        rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
                              GFP_KERNEL);
        if (!rcdevp)
                return -ENOMEM;

        ret = reset_controller_register(rcdev);
        if (ret) {
                devres_free(rcdevp);
                return ret;
        }

        *rcdevp = rcdev;
        devres_add(dev, rcdevp);

        return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
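
/*
 * Example (illustrative sketch, not part of this file): a minimal reset
 * controller provider registering with this framework from its probe
 * function. The "foo" driver, its foo_reset_assert()/foo_reset_deassert()
 * callbacks and the reset line count are hypothetical; only the
 * reset_control_ops / devm_reset_controller_register() usage reflects the
 * API implemented above.
 *
 *      static const struct reset_control_ops foo_reset_ops = {
 *              .assert   = foo_reset_assert,
 *              .deassert = foo_reset_deassert,
 *      };
 *
 *      static int foo_reset_probe(struct platform_device *pdev)
 *      {
 *              struct foo_reset *priv;
 *
 *              priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return -ENOMEM;
 *
 *              priv->rcdev.owner = THIS_MODULE;
 *              priv->rcdev.ops = &foo_reset_ops;
 *              priv->rcdev.of_node = pdev->dev.of_node;
 *              priv->rcdev.nr_resets = 32;
 *
 *              return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
 *      }
 */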

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
                                 unsigned int num_entries)
{
        struct reset_control_lookup *entry;
        unsigned int i;

        mutex_lock(&reset_lookup_mutex);
        for (i = 0; i < num_entries; i++) {
                entry = &lookup[i];

                if (!entry->dev_id || !entry->provider) {
                        pr_warn("%s(): reset lookup entry badly specified, skipping\n",
                                __func__);
                        continue;
                }

                list_add_tail(&entry->list, &reset_lookup_list);
        }
        mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
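
/*
 * Example (illustrative sketch, not part of this file): a board file wiring
 * a non-DT consumer to a reset provider through the lookup list above. The
 * provider name "foo-reset.0", the consumer "bar-device" and the line index
 * are hypothetical; RESET_LOOKUP() is the entry initializer from
 * <linux/reset-controller.h>.
 *
 *      static struct reset_control_lookup bar_reset_lookup[] = {
 *              RESET_LOOKUP("foo-reset.0", 3, "bar-device", NULL),
 *      };
 *
 *      static void __init bar_board_init(void)
 *      {
 *              reset_controller_add_lookup(bar_reset_lookup,
 *                                          ARRAY_SIZE(bar_reset_lookup));
 *      }
 */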

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc)
{
        return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
        int ret, i;

        for (i = 0; i < resets->num_rstcs; i++) {
                ret = reset_control_reset(resets->rstc[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int reset_control_array_rearm(struct reset_control_array *resets)
{
        struct reset_control *rstc;
        int i;

        for (i = 0; i < resets->num_rstcs; i++) {
                rstc = resets->rstc[i];

                if (!rstc)
                        continue;

                if (WARN_ON(IS_ERR(rstc)))
                        return -EINVAL;

                if (rstc->shared) {
                        if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
                                return -EINVAL;
                } else {
                        if (!rstc->acquired)
                                return -EPERM;
                }
        }

        for (i = 0; i < resets->num_rstcs; i++) {
                rstc = resets->rstc[i];

                if (rstc && rstc->shared)
                        WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
        }

        return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
        int ret, i;

        for (i = 0; i < resets->num_rstcs; i++) {
                ret = reset_control_assert(resets->rstc[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        while (i--)
                reset_control_deassert(resets->rstc[i]);
        return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
        int ret, i;

        for (i = 0; i < resets->num_rstcs; i++) {
                ret = reset_control_deassert(resets->rstc[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        while (i--)
                reset_control_assert(resets->rstc[i]);
        return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
        unsigned int i;
        int err;

        for (i = 0; i < resets->num_rstcs; i++) {
                err = reset_control_acquire(resets->rstc[i]);
                if (err < 0)
                        goto release;
        }

        return 0;

release:
        while (i--)
                reset_control_release(resets->rstc[i]);

        return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
        unsigned int i;

        for (i = 0; i < resets->num_rstcs; i++)
                reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
        return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for
 * the lifetime of the reset_control instance: for all but the first caller
 * this is a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
        int ret;

        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)))
                return -EINVAL;

        if (reset_control_is_array(rstc))
                return reset_control_array_reset(rstc_to_array(rstc));

        if (!rstc->rcdev->ops->reset)
                return -ENOTSUPP;

        if (rstc->shared) {
                if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
                        return -EINVAL;

                if (atomic_inc_return(&rstc->triggered_count) != 1)
                        return 0;
        } else {
                if (!rstc->acquired)
                        return -EPERM;
        }

        ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
        if (rstc->shared && ret)
                atomic_dec(&rstc->triggered_count);

        return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
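
/*
 * Example (illustrative sketch, not part of this file): a consumer driver
 * pulsing its dedicated reset line from probe. The "foo" probe function is
 * hypothetical; devm_reset_control_get_exclusive() is the consumer-side
 * wrapper from <linux/reset.h> around __devm_reset_control_get() below.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct reset_control *rstc;
 *              int err;
 *
 *              rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *              if (IS_ERR(rstc))
 *                      return PTR_ERR(rstc);
 *
 *              err = reset_control_reset(rstc);
 *              if (err)
 *                      return err;
 *
 *              return 0;
 *      }
 */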

/**
 * reset_control_bulk_reset - reset the controlled devices in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Issue a reset on all provided reset controls, in order.
 *
 * See also: reset_control_reset()
 */
int reset_control_bulk_reset(int num_rstcs,
                             struct reset_control_bulk_data *rstcs)
{
        int ret, i;

        for (i = 0; i < num_rstcs; i++) {
                ret = reset_control_reset(rstcs[i].rstc);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_reset);

/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for
 * the lifetime of the reset_control instance, except if this call is used.
 *
 * Calls to this function must be balanced with calls to reset_control_reset,
 * a warning is thrown in case triggered_count ever dips below 0.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)))
                return -EINVAL;

        if (reset_control_is_array(rstc))
                return reset_control_array_rearm(rstc_to_array(rstc));

        if (rstc->shared) {
                if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
                        return -EINVAL;

                WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
        } else {
                if (!rstc->acquired)
                        return -EPERM;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)))
                return -EINVAL;

        if (reset_control_is_array(rstc))
                return reset_control_array_assert(rstc_to_array(rstc));

        if (rstc->shared) {
                if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
                        return -EINVAL;

                if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
                        return -EINVAL;

                if (atomic_dec_return(&rstc->deassert_count) != 0)
                        return 0;

                /*
                 * Shared reset controls allow the reset line to be in any
                 * state after this call, so doing nothing is a valid option.
                 */
                if (!rstc->rcdev->ops->assert)
                        return 0;
        } else {
                /*
                 * If the reset controller does not implement .assert(), there
                 * is no way to guarantee that the reset line is asserted after
                 * this call.
                 */
                if (!rstc->rcdev->ops->assert)
                        return -ENOTSUPP;

                if (!rstc->acquired) {
                        WARN(1, "reset %s (ID: %u) is not acquired\n",
                             rcdev_name(rstc->rcdev), rstc->id);
                        return -EPERM;
                }
        }

        return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_bulk_assert - asserts the reset lines in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Assert the reset lines for all provided reset controls, in order.
 * If an assertion fails, already asserted resets are deasserted again.
 *
 * See also: reset_control_assert()
 */
int reset_control_bulk_assert(int num_rstcs,
                              struct reset_control_bulk_data *rstcs)
{
        int ret, i;

        for (i = 0; i < num_rstcs; i++) {
                ret = reset_control_assert(rstcs[i].rstc);
                if (ret)
                        goto err;
        }

        return 0;

err:
        while (i--)
                reset_control_deassert(rstcs[i].rstc);
        return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)))
                return -EINVAL;

        if (reset_control_is_array(rstc))
                return reset_control_array_deassert(rstc_to_array(rstc));

        if (rstc->shared) {
                if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
                        return -EINVAL;

                if (atomic_inc_return(&rstc->deassert_count) != 1)
                        return 0;
        } else {
                if (!rstc->acquired) {
                        WARN(1, "reset %s (ID: %u) is not acquired\n",
                             rcdev_name(rstc->rcdev), rstc->id);
                        return -EPERM;
                }
        }

        /*
         * If the reset controller does not implement .deassert(), we assume
         * that it handles self-deasserting reset lines via .reset(). In that
         * case, the reset lines are deasserted by default. If that is not the
         * case, the reset controller driver should implement .deassert() and
         * return -ENOTSUPP.
         */
        if (!rstc->rcdev->ops->deassert)
                return 0;

        return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
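
/*
 * Example (illustrative sketch, not part of this file): a consumer holding
 * its device in reset while reprogramming it. The "foo" names and the delay
 * are hypothetical; the assert/deassert pairing reflects the exclusive reset
 * semantics documented above.
 *
 *      static int foo_reinit(struct foo_priv *priv)
 *      {
 *              int err;
 *
 *              err = reset_control_assert(priv->rstc);
 *              if (err)
 *                      return err;
 *
 *              usleep_range(10, 20);   (hypothetical settling time)
 *
 *              return reset_control_deassert(priv->rstc);
 *      }
 */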

/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deassert the reset lines for all provided reset controls, in reverse order.
 * If a deassertion fails, already deasserted resets are asserted again.
 *
 * See also: reset_control_deassert()
 */
int reset_control_bulk_deassert(int num_rstcs,
                                struct reset_control_bulk_data *rstcs)
{
        int ret, i;

        for (i = num_rstcs - 1; i >= 0; i--) {
                ret = reset_control_deassert(rstcs[i].rstc);
                if (ret)
                        goto err;
        }

        return 0;

err:
        while (i < num_rstcs)
                reset_control_assert(rstcs[i++].rstc);
        return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if the desc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
                return -EINVAL;

        if (rstc->rcdev->ops->status)
                return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

        return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). Consumers are not granted exclusive
 * access to the reset as long as another consumer hasn't released it.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
        struct reset_control *rc;

        if (!rstc)
                return 0;

        if (WARN_ON(IS_ERR(rstc)))
                return -EINVAL;

        if (reset_control_is_array(rstc))
                return reset_control_array_acquire(rstc_to_array(rstc));

        mutex_lock(&reset_list_mutex);

        if (rstc->acquired) {
                mutex_unlock(&reset_list_mutex);
                return 0;
        }

        list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
                if (rstc != rc && rstc->id == rc->id) {
                        if (rc->acquired) {
                                mutex_unlock(&reset_list_mutex);
                                return -EBUSY;
                        }
                }
        }

        rstc->acquired = true;

        mutex_unlock(&reset_list_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);

/**
 * reset_control_bulk_acquire - acquires reset controls for exclusive use
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * This is used to explicitly acquire reset controls requested with
 * reset_control_bulk_get_exclusive_released() for temporary exclusive use.
 *
 * See also: reset_control_acquire(), reset_control_bulk_release()
 */
int reset_control_bulk_acquire(int num_rstcs,
                               struct reset_control_bulk_data *rstcs)
{
        int ret, i;

        for (i = 0; i < num_rstcs; i++) {
                ret = reset_control_acquire(rstcs[i].rstc);
                if (ret)
                        goto err;
        }

        return 0;

err:
        while (i--)
                reset_control_release(rstcs[i].rstc);
        return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
        if (!rstc || WARN_ON(IS_ERR(rstc)))
                return;

        if (reset_control_is_array(rstc))
                reset_control_array_release(rstc_to_array(rstc));
        else
                rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
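
/*
 * Example (illustrative sketch, not part of this file): two cooperating
 * consumers sharing temporary exclusive control over one reset line. The
 * "foo" names are hypothetical; reset_control_get_exclusive_released() is
 * the <linux/reset.h> wrapper that returns an initially released control,
 * which must be acquired before use and released afterwards.
 *
 *      static int foo_do_reset(struct foo_priv *priv)
 *      {
 *              int err;
 *
 *              err = reset_control_acquire(priv->rstc);
 *              if (err)
 *                      return err;     (another consumer still holds it)
 *
 *              err = reset_control_assert(priv->rstc);
 *              if (!err)
 *                      err = reset_control_deassert(priv->rstc);
 *
 *              reset_control_release(priv->rstc);
 *
 *              return err;
 *      }
 */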

/**
 * reset_control_bulk_release() - releases exclusive access to reset controls
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Releases exclusive access right to reset controls previously obtained by a
 * call to reset_control_bulk_acquire().
 *
 * See also: reset_control_release(), reset_control_bulk_acquire()
 */
void reset_control_bulk_release(int num_rstcs,
                                struct reset_control_bulk_data *rstcs)
{
        int i;

        for (i = 0; i < num_rstcs; i++)
                reset_control_release(rstcs[i].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);

static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
                             unsigned int index, bool shared, bool acquired)
{
        struct reset_control *rstc;

        lockdep_assert_held(&reset_list_mutex);

        list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
                if (rstc->id == index) {
                        /*
                         * Allow creating a secondary exclusive reset_control
                         * that is initially not acquired for an already
                         * controlled reset line.
                         */
                        if (!rstc->shared && !shared && !acquired)
                                break;

                        if (WARN_ON(!rstc->shared || !shared))
                                return ERR_PTR(-EBUSY);

                        kref_get(&rstc->refcnt);
                        return rstc;
                }
        }

        rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
        if (!rstc)
                return ERR_PTR(-ENOMEM);

        if (!try_module_get(rcdev->owner)) {
                kfree(rstc);
                return ERR_PTR(-ENODEV);
        }

        rstc->rcdev = rcdev;
        list_add(&rstc->list, &rcdev->reset_control_head);
        rstc->id = index;
        kref_init(&rstc->refcnt);
        rstc->acquired = acquired;
        rstc->shared = shared;

        return rstc;
}

static void __reset_control_release(struct kref *kref)
{
        struct reset_control *rstc = container_of(kref, struct reset_control,
                                                  refcnt);

        lockdep_assert_held(&reset_list_mutex);

        module_put(rstc->rcdev->owner);

        list_del(&rstc->list);
        kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
        lockdep_assert_held(&reset_list_mutex);

        kref_put(&rstc->refcnt, __reset_control_release);
}

struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
                       bool shared, bool optional, bool acquired)
{
        struct reset_control *rstc;
        struct reset_controller_dev *r, *rcdev;
        struct of_phandle_args args;
        int rstc_id;
        int ret;

        if (!node)
                return ERR_PTR(-EINVAL);

        if (id) {
                index = of_property_match_string(node,
                                                 "reset-names", id);
                if (index == -EILSEQ)
                        return ERR_PTR(index);
                if (index < 0)
                        return optional ? NULL : ERR_PTR(-ENOENT);
        }

        ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
                                         index, &args);
        if (ret == -EINVAL)
                return ERR_PTR(ret);
        if (ret)
                return optional ? NULL : ERR_PTR(ret);

        mutex_lock(&reset_list_mutex);
        rcdev = NULL;
        list_for_each_entry(r, &reset_controller_list, list) {
                if (args.np == r->of_node) {
                        rcdev = r;
                        break;
                }
        }

        if (!rcdev) {
                rstc = ERR_PTR(-EPROBE_DEFER);
                goto out;
        }

        if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
                rstc = ERR_PTR(-EINVAL);
                goto out;
        }

        rstc_id = rcdev->of_xlate(rcdev, &args);
        if (rstc_id < 0) {
                rstc = ERR_PTR(rstc_id);
                goto out;
        }

        /* reset_list_mutex also protects the rcdev's reset_control list */
        rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
        mutex_unlock(&reset_list_mutex);
        of_node_put(args.np);

        return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
        struct reset_controller_dev *rcdev;

        lockdep_assert_held(&reset_list_mutex);

        list_for_each_entry(rcdev, &reset_controller_list, list) {
                if (!rcdev->dev)
                        continue;

                if (!strcmp(name, dev_name(rcdev->dev)))
                        return rcdev;
        }

        return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
                                bool shared, bool optional, bool acquired)
{
        const struct reset_control_lookup *lookup;
        struct reset_controller_dev *rcdev;
        const char *dev_id = dev_name(dev);
        struct reset_control *rstc = NULL;

        mutex_lock(&reset_lookup_mutex);

        list_for_each_entry(lookup, &reset_lookup_list, list) {
                if (strcmp(lookup->dev_id, dev_id))
                        continue;

                if ((!con_id && !lookup->con_id) ||
                    ((con_id && lookup->con_id) &&
                     !strcmp(con_id, lookup->con_id))) {
                        mutex_lock(&reset_list_mutex);
                        rcdev = __reset_controller_by_name(lookup->provider);
                        if (!rcdev) {
                                mutex_unlock(&reset_list_mutex);
                                mutex_unlock(&reset_lookup_mutex);

                                return ERR_PTR(-EPROBE_DEFER);
                        }

                        rstc = __reset_control_get_internal(rcdev,
                                                            lookup->index,
                                                            shared, acquired);
                        mutex_unlock(&reset_list_mutex);
                        break;
                }
        }

        mutex_unlock(&reset_lookup_mutex);

        if (!rstc)
                return optional ? NULL : ERR_PTR(-ENOENT);

        return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
                                          int index, bool shared, bool optional,
                                          bool acquired)
{
        if (WARN_ON(shared && acquired))
                return ERR_PTR(-EINVAL);

        if (dev->of_node)
                return __of_reset_control_get(dev->of_node, id, index, shared,
                                              optional, acquired);

        return __reset_control_get_from_lookup(dev, id, shared, optional,
                                               acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);

int __reset_control_bulk_get(struct device *dev, int num_rstcs,
                             struct reset_control_bulk_data *rstcs,
                             bool shared, bool optional, bool acquired)
{
        int ret, i;

        for (i = 0; i < num_rstcs; i++) {
                rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
                                                    shared, optional, acquired);
                if (IS_ERR(rstcs[i].rstc)) {
                        ret = PTR_ERR(rstcs[i].rstc);
                        goto err;
                }
        }

        return 0;

err:
        mutex_lock(&reset_list_mutex);
        while (i--)
                __reset_control_put_internal(rstcs[i].rstc);
        mutex_unlock(&reset_list_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
        int i;

        mutex_lock(&reset_list_mutex);
        for (i = 0; i < resets->num_rstcs; i++)
                __reset_control_put_internal(resets->rstc[i]);
        mutex_unlock(&reset_list_mutex);
        kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
        if (IS_ERR_OR_NULL(rstc))
                return;

        if (reset_control_is_array(rstc)) {
                reset_control_array_put(rstc_to_array(rstc));
                return;
        }

        mutex_lock(&reset_list_mutex);
        __reset_control_put_internal(rstc);
        mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

/**
 * reset_control_bulk_put - free the reset controllers
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 */
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
        mutex_lock(&reset_list_mutex);
        while (num_rstcs--) {
                if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
                        continue;
                __reset_control_put_internal(rstcs[num_rstcs].rstc);
        }
        mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
        reset_control_put(*(struct reset_control **)res);
}

struct reset_control *
__devm_reset_control_get(struct device *dev, const char *id, int index,
                         bool shared, bool optional, bool acquired)
{
        struct reset_control **ptr, *rstc;

        ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
        if (IS_ERR_OR_NULL(rstc)) {
                devres_free(ptr);
                return rstc;
        }

        *ptr = rstc;
        devres_add(dev, ptr);

        return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);

struct reset_control_bulk_devres {
        int num_rstcs;
        struct reset_control_bulk_data *rstcs;
};

static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
        struct reset_control_bulk_devres *devres = res;

        reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}

int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
                                  struct reset_control_bulk_data *rstcs,
                                  bool shared, bool optional, bool acquired)
{
        struct reset_control_bulk_devres *ptr;
        int ret;

        ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        ptr->num_rstcs = num_rstcs;
        ptr->rstcs = rstcs;
        devres_add(dev, ptr);

        return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);

/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines.
 */
int __device_reset(struct device *dev, bool optional)
{
        struct reset_control *rstc;
        int ret;

        rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
        if (IS_ERR(rstc))
                return PTR_ERR(rstc);

        ret = reset_control_reset(rstc);

        reset_control_put(rstc);

        return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
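
/*
 * Example (illustrative sketch, not part of this file): the common
 * single-reset case via the device_reset() wrapper from <linux/reset.h>,
 * which calls __device_reset() above. The "foo" probe function is
 * hypothetical.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              int err;
 *
 *              err = device_reset(&pdev->dev);
 *              if (err)
 *                      return dev_err_probe(&pdev->dev, err,
 *                                           "failed to reset\n");
 *
 *              return 0;
 *      }
 */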

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 * @node: device node that contains 'resets'
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
        int count;

        if (!node)
                return -EINVAL;

        count = of_count_phandle_with_args(node, "resets", "#reset-cells");
        if (count == 0)
                count = -ENOENT;

        return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *                              device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
                           bool acquired)
{
        struct reset_control_array *resets;
        struct reset_control *rstc;
        int num, i;

        num = of_reset_control_get_count(np);
        if (num < 0)
                return optional ? NULL : ERR_PTR(num);

        resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
        if (!resets)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < num; i++) {
                rstc = __of_reset_control_get(np, NULL, i, shared, optional,
                                              acquired);
                if (IS_ERR(rstc))
                        goto err_rst;
                resets->rstc[i] = rstc;
        }
        resets->num_rstcs = num;
        resets->base.array = true;

        return &resets->base;

err_rst:
        mutex_lock(&reset_list_mutex);
        while (--i >= 0)
                __reset_control_put_internal(resets->rstc[i]);
        mutex_unlock(&reset_list_mutex);

        kfree(resets);

        return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
        struct reset_control **ptr, *rstc;

        ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
        if (IS_ERR_OR_NULL(rstc)) {
                devres_free(ptr);
                return rstc;
        }

        *ptr = rstc;
        devres_add(dev, ptr);

        return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
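
/*
 * Example (illustrative sketch, not part of this file): a consumer that only
 * needs to deassert all of its resets, in no particular order, using the
 * managed array API. The "foo" probe function is hypothetical;
 * devm_reset_control_array_get_exclusive() is the <linux/reset.h> wrapper
 * around devm_reset_control_array_get() above.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct reset_control *rstcs;
 *
 *              rstcs = devm_reset_control_array_get_exclusive(&pdev->dev);
 *              if (IS_ERR(rstcs))
 *                      return PTR_ERR(rstcs);
 *
 *              return reset_control_deassert(rstcs);
 *      }
 */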

static int reset_control_get_count_from_lookup(struct device *dev)
{
        const struct reset_control_lookup *lookup;
        const char *dev_id;
        int count = 0;

        if (!dev)
                return -EINVAL;

        dev_id = dev_name(dev);
        mutex_lock(&reset_lookup_mutex);

        list_for_each_entry(lookup, &reset_lookup_list, list) {
                if (!strcmp(lookup->dev_id, dev_id))
                        count++;
        }

        mutex_unlock(&reset_lookup_mutex);

        if (count == 0)
                count = -ENOENT;

        return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns positive reset count on success, or error number on failure and
 * on count being zero.
 */
int reset_control_get_count(struct device *dev)
{
        if (dev->of_node)
                return of_reset_control_get_count(dev->of_node);

        return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);