/*
 * devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */
10#include <linux/device.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13
14#include "base.h"
15
/*
 * Bookkeeping header shared by every managed resource.  Each node is
 * linked into the owning device's devres_head list and carries the
 * release callback invoked when the resource is torn down.
 */
struct devres_node {
	struct list_head entry;		/* link on dev->devres_head */
	dr_release_t release;		/* destructor for the payload */
#ifdef CONFIG_DEBUG_DEVRES
	const char *name;		/* debug label, see set_node_dbginfo() */
	size_t size;			/* payload size, debug logging only */
#endif
};
24
/*
 * A managed resource: bookkeeping node followed by the payload that is
 * handed back to users (dr->data).  The unsigned long long element type
 * guarantees the payload is suitably aligned for any object.
 */
struct devres {
	struct devres_node node;

	unsigned long long data[];	/* flexible array member, max alignment */
};
30
/*
 * A devres group is represented by two marker nodes on the resource
 * list: node[0] opens the group, node[1] closes it.  The markers are
 * identified by their release callbacks (see node_to_group()).
 */
struct devres_group {
	struct devres_node node[2];	/* [0] open marker, [1] close marker */
	void *id;			/* user-supplied id, or the group itself */
	int color;			/* scratch used by remove_nodes() */

};
37
#ifdef CONFIG_DEBUG_DEVRES
/* 0 disables logging; writable at runtime via the "log" module param */
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Record the debug label and payload size on @node for later logging. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

/* Emit one debug line describing operation @op on @node, if enabled. */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s) do {} while (0)
#define devres_log(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
60
61
62
63
64
/*
 * Release functions for group markers.  They never do any real work;
 * their addresses serve as identity tags that let node_to_group()
 * recognize a node as a group's opening marker.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}
69
/* Identity tag for a group's closing marker; intentionally a no-op. */
static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}
74
/*
 * Map a devres_node back to its enclosing devres_group.  Group marker
 * nodes are recognized by their unique release callbacks.  Returns
 * NULL if @node is an ordinary resource rather than a group marker.
 */
static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}
83
/*
 * Allocate a devres with @size bytes of payload.  Only the header is
 * zeroed here; callers that need a zeroed payload must pass __GFP_ZERO
 * in @gfp (as devres_alloc() does).  Returns NULL on allocation
 * failure.  kmalloc_track_caller() attributes the allocation to our
 * caller in slab debugging output.
 */
static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_track_caller(tot_size, gfp);
	if (unlikely(!dr))
		return NULL;

	/* zero the header only; data[] starts at offsetof(..., data) */
	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}
100
/*
 * Queue @node at the tail of @dev's resource list.  Caller must hold
 * dev->devres_lock; @node must not already be on a list.
 */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
107
#ifdef CONFIG_DEBUG_DEVRES
/*
 * __devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @name: Name of the resource (debug label)
 *
 * Debug variant behind the devres_alloc() wrapper macro: identical to
 * devres_alloc() below but additionally records @name for
 * CONFIG_DEBUG_DEVRES logging.
 *
 * RETURNS:
 * Pointer to allocated (and zeroed) devres payload on success,
 * NULL on failure.
 */
void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
		      const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc);
#else
/*
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed
 * (__GFP_ZERO is forced) and associated with @release.  The returned
 * pointer is the payload; it can be passed to devres APIs such as
 * devres_add()/devres_free().
 *
 * RETURNS:
 * Pointer to allocated devres payload on success, NULL on failure.
 */
void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc);
#endif
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/*
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resources for
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource (required)
 * @data: Data for @fn
 *
 * Call @fn for each devres of @dev whose release function is @release
 * and for which @match (if given) returns non-zero.  Iteration runs
 * newest-first and with dev->devres_lock held and interrupts disabled,
 * so @fn must not sleep or re-enter devres APIs on @dev.
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
188
189
190
191
192
193
194
195void devres_free(void *res)
196{
197 if (res) {
198 struct devres *dr = container_of(res, struct devres, data);
199
200 BUG_ON(!list_empty(&dr->node.entry));
201 kfree(dr);
202 }
203}
204EXPORT_SYMBOL_GPL(devres_free);
205
206
207
208
209
210
211
212
213
214
/*
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register (payload returned by devres_alloc())
 *
 * Queue @res on @dev's resource list.  On driver detach the associated
 * release function is invoked and the devres freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
225
/*
 * Find the most recently added devres of @dev whose release function
 * is @release and for which @match (if given) returns non-zero.
 * Caller must hold dev->devres_lock.  Returns NULL when nothing
 * matches.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	/* reverse walk => newest resource wins */
	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258void * devres_find(struct device *dev, dr_release_t release,
259 dr_match_t match, void *match_data)
260{
261 struct devres *dr;
262 unsigned long flags;
263
264 spin_lock_irqsave(&dev->devres_lock, flags);
265 dr = find_dr(dev, release, match, match_data);
266 spin_unlock_irqrestore(&dev->devres_lock, flags);
267
268 if (dr)
269 return dr->data;
270 return NULL;
271}
272EXPORT_SYMBOL_GPL(devres_find);
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
/*
 * devres_get - Look up a matching devres, or add @new_res atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if lookup fails
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise @new_res is added atomically under the same lock
 * that performed the lookup.
 *
 * RETURNS:
 * Pointer to the found or added devres payload.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_dr = NULL;	/* ownership transferred to the list */
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_dr);	/* no-op when @new_res was added above */

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324void * devres_remove(struct device *dev, dr_release_t release,
325 dr_match_t match, void *match_data)
326{
327 struct devres *dr;
328 unsigned long flags;
329
330 spin_lock_irqsave(&dev->devres_lock, flags);
331 dr = find_dr(dev, release, match, match_data);
332 if (dr) {
333 list_del_init(&dr->node.entry);
334 devres_log(dev, &dr->node, "REM");
335 }
336 spin_unlock_irqrestore(&dev->devres_lock, flags);
337
338 if (dr)
339 return dr->data;
340 return NULL;
341}
342EXPORT_SYMBOL_GPL(devres_remove);
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362int devres_destroy(struct device *dev, dr_release_t release,
363 dr_match_t match, void *match_data)
364{
365 void *res;
366
367 res = devres_remove(dev, release, match, match_data);
368 if (unlikely(!res))
369 return -ENOENT;
370
371 devres_free(res);
372 return 0;
373}
374EXPORT_SYMBOL_GPL(devres_destroy);
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392int devres_release(struct device *dev, dr_release_t release,
393 dr_match_t match, void *match_data)
394{
395 void *res;
396
397 res = devres_remove(dev, release, match, match_data);
398 if (unlikely(!res))
399 return -ENOENT;
400
401 (*release)(dev, res);
402 devres_free(res);
403 return 0;
404}
405EXPORT_SYMBOL_GPL(devres_release);
406
/*
 * Cull the resource range [@first, @end) from @dev's list in two
 * passes, collecting everything to be released on @todo.  Caller must
 * hold dev->devres_lock.
 *
 * RETURNS:
 * The number of plain (non-group) devres entries moved to @todo.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * the color of group markers encountered in the range.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* NOTE: if 'first' gets moved to @todo, track the
			 * new head of the remaining range.
			 */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - scan the remaining group markers and color
	 * their groups.  A group reaches color 2 iff it is wholly
	 * contained in [first, end): a closed group needs both its
	 * opening and closing markers in the range, while an open
	 * group (empty node[1]) only needs the opening marker.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end: the removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
475
/*
 * Remove the range [@first, @end) from @dev's resource list and
 * release every collected entry.  Called with dev->devres_lock held
 * (with interrupts saved in @flags); the lock is dropped before the
 * release callbacks run, as annotated by __releases().
 *
 * RETURNS:
 * The number of plain devres entries released (remove_nodes()'s count).
 */
static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release in reverse (LIFO) order.  Note that both devres and
	 * devres_group are handled as devres in the following loop:
	 * group markers have no-op release functions, so this is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
499
500
501
502
503
504
505
506
/*
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release every resource associated with @dev; called on driver
 * detach.  Returns the number of released resources, or -ENODEV when
 * @dev's devres list was never initialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	/* release_nodes() drops the lock */
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
518
519
520
521
522
523
524
525
526
527
528
529
530
531
/*
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID (may be NULL)
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev identified by @id.  For @id, using
 * a pointer to an object which won't be reused for another group is
 * recommended.  If @id is NULL, the group structure's own address is
 * used as an address-wise unique ID.
 *
 * RETURNS:
 * ID of the new group, NULL on allocation failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	/* the marker release callbacks double as type tags */
	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	/* only the opening marker is queued; the group stays open */
	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
557
558
/*
 * Find the devres group with ID @id.  If @id is NULL, the latest open
 * (not yet closed) group is matched instead.  Caller must hold
 * dev->devres_lock.  Returns NULL when no group matches.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		/* only opening markers identify groups */
		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
580
581
582
583
584
585
586
587
588
/*
 * devres_close_group - Close a devres group
 * @dev: Device the group belongs to
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id by queueing its closing marker.
 * If @id is NULL, the latest open group is closed.  WARNs if no
 * matching group exists.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
605
606
607
608
609
610
611
612
613
614
/*
 * devres_remove_group - Remove a devres group
 * @dev: Device the group belongs to
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id (or the latest open group when
 * @id is NULL) without releasing any of the resources inside it -
 * those stay on the device's list as if the group never existed.
 * WARNs if no matching group exists.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* kfree(NULL) is a no-op on the WARN path above */
	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
635
636
637
638
639
640
641
642
643
644
645
646
647
/*
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id (or the latest
 * open group when @id is NULL).  WARNs if no matching group exists.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* closed group: stop just past the closing marker */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* release_nodes() drops the lock for us */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
673
674
675
676
677
678
/*
 * Payload for devm_add_action(): a user callback plus the single
 * argument it is invoked with on release.
 */
struct action_devres {
	void *data;		/* argument passed to action() */
	void (*action)(void *);	/* callback run on driver detach */
};
683
684static int devm_action_match(struct device *dev, void *res, void *p)
685{
686 struct action_devres *devres = res;
687 struct action_devres *target = p;
688
689 return devres->action == target->action &&
690 devres->data == target->data;
691}
692
/* Release callback: invoke the stored user action with its argument. */
static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
699
700
701
702
703
704
705
706
707
708
/*
 * devm_add_action - add a custom action to the list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called on driver detach
 * @data: Pointer passed to the @action implementation
 *
 * Registers @action as a managed resource so that it gets executed as
 * part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM on allocation failure.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
725
726
727
728
729
730
731
732
733
734
/*
 * devm_remove_action - remove a previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed at registration time
 *
 * Remove an instance of @action previously added by devm_add_action()
 * WITHOUT executing it.  Both @action and @data must match an existing
 * entry; WARNs if none is found.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));

}
EXPORT_SYMBOL_GPL(devm_remove_action);
747
748
749
750
/*
 * Release callback for devm_kmalloc() memory.  Intentionally empty:
 * the payload lives inside the devres itself, so release_nodes()'s
 * kfree(dr) frees it; the callback also serves as the identity tag
 * for devres_destroy() lookups in devm_kfree().
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}
755
/* Match a devm_kmalloc() allocation by its payload address. */
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
775{
776 struct devres *dr;
777
778
779 dr = alloc_dr(devm_kmalloc_release, size, gfp);
780 if (unlikely(!dr))
781 return NULL;
782
783
784
785
786
787 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
788 devres_add(dev, dr->data);
789 return dr->data;
790}
791EXPORT_SYMBOL_GPL(devm_kmalloc);
792
793
794
795
796
797
798
799
800
801
802
803char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
804{
805 size_t size;
806 char *buf;
807
808 if (!s)
809 return NULL;
810
811 size = strlen(s) + 1;
812 buf = devm_kmalloc(dev, size, gfp);
813 if (buf)
814 memcpy(buf, s, size);
815 return buf;
816}
817EXPORT_SYMBOL_GPL(devm_kstrdup);
818
819
820
821
822
823
824
825
826
827
828
829
830char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
831 va_list ap)
832{
833 unsigned int len;
834 char *p;
835 va_list aq;
836
837 va_copy(aq, ap);
838 len = vsnprintf(NULL, 0, fmt, aq);
839 va_end(aq);
840
841 p = devm_kmalloc(dev, len+1, gfp);
842 if (!p)
843 return NULL;
844
845 vsnprintf(p, len+1, fmt, ap);
846
847 return p;
848}
849EXPORT_SYMBOL(devm_kvasprintf);
850
851
852
853
854
855
856
857
858
859
860
861
862char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
863{
864 va_list ap;
865 char *p;
866
867 va_start(ap, fmt);
868 p = devm_kvasprintf(dev, gfp, fmt, ap);
869 va_end(ap);
870
871 return p;
872}
873EXPORT_SYMBOL_GPL(devm_kasprintf);
874
875
876
877
878
879
880
881
882void devm_kfree(struct device *dev, void *p)
883{
884 int rc;
885
886 rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
887 WARN_ON(rc);
888}
889EXPORT_SYMBOL_GPL(devm_kfree);
890
891
892
893
894
895
896
897
898
899
900void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
901{
902 void *p;
903
904 p = devm_kmalloc(dev, len, gfp);
905 if (p)
906 memcpy(p, src, len);
907
908 return p;
909}
910EXPORT_SYMBOL_GPL(devm_kmemdup);
911
/* Payload recording a __get_free_pages() allocation for managed release. */
struct pages_devres {
	unsigned long addr;	/* base address returned by __get_free_pages() */
	unsigned int order;	/* allocation order */
};
916
/* Match a managed pages allocation by base address (order is implied). */
static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}
924
/* Release callback: return the pages recorded in the devres. */
static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}
931
932
933
934
935
936
937
938
939
940
941
942
943
944
/*
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed __get_free_pages.  Pages allocated with this function are
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		/* no devres to track the pages - give them back */
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);
970
971
972
973
974
975
976
977
978
/*
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device these pages belong to
 * @addr: Memory to free
 *
 * Free pages allocated with devm_get_free_pages().  Unlike free_pages,
 * there is no need to supply the @order: the lookup matches on @addr
 * and the release callback uses the stored order.  WARNs if @addr was
 * not allocated via devm_get_free_pages() for @dev.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
987