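// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
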
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head entry;
	dr_release_t release;
#ifdef CONFIG_DEBUG_DEVRES
	const char *name;
	size_t size;
#endif
};

struct devres {
	struct devres_node node;
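	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */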
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif
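
/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */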
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres *alloc_dr(dr_release_t release,
					       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
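/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */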
void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
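
/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */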
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
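
/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */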
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);
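
/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */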
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
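
/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */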
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);
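
/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */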
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
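
/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */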
void *devres_remove(struct device *dev, dr_release_t release,
		    dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);
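
/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */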
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);
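
/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */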
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end).  That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
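
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */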
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
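
/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */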
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
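
/* Find devres group with ID @id.  If @id is NULL, look for the latest. */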
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
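
/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */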
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
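
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */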
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
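
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.
 *
 * RETURNS:
 * The number of released non-group resources.
 */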
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* release_nodes() drops the lock */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
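
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */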
struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
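
/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the devres allocation fails.
 */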
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
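
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */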
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);
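
/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */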
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);
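
/*
 * Managed kmalloc/kfree
 */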
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}
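
/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */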
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
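
/**
 * devm_kstrdup - Allocate resource managed space and
 *		  copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */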
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
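
/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section, otherwise it falls back to
 * devm_kstrdup.
 */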
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
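
/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */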
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);
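
/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *	 allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */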
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
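
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */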
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special case: pointer to a string in .rodata returned by
	 * devm_kstrdup_const().
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
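
/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate region of a memory using resource managed kmalloc.
 */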
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
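
/*
 * Managed __get_free_pages/free_pages
 */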
struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}
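
/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */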
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
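
/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages().  Unlike free_pages,
 * there is no need to supply the @order.
 */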
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);

static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}
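
/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu.  Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */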
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
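
/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */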
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);