/*
 * Procedures for creating, accessing and interpreting the device tree.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/bitmap.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

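/*
 * Used to protect the of_aliases and to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree.
 */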
DEFINE_MUTEX(of_mutex);

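/*
 * Use devtree_lock when traversing the tree through the child, sibling or
 * parent members of struct device_node, or when walking property lists.
 */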
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}

int of_n_addr_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #address-cells property for the root node */
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_n_size_cells(struct device_node *np)
{
	u32 cells;

	do {
		if (np->parent)
			np = np->parent;
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;
	} while (np->parent);
	/* No #size-cells property for the root node */
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
EXPORT_SYMBOL(of_n_size_cells);

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

static struct device_node **phandle_cache;
static u32 phandle_cache_mask;

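/*
 * Cache of nodes indexed by (masked) phandle, used to speed up
 * of_find_node_by_phandle().  The cache is (re)built by
 * of_populate_phandle_cache(); colliding phandles simply overwrite
 * each other's slot, so a miss falls back to a full tree walk.
 */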
void of_populate_phandle_cache(void)
{
	unsigned long flags;
	u32 cache_entries;
	struct device_node *np;
	u32 phandles = 0;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandles++;

	if (!phandles)
		goto out;

	cache_entries = roundup_pow_of_two(phandles);
	phandle_cache_mask = cache_entries - 1;

	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
				GFP_ATOMIC);
	if (!phandle_cache)
		goto out;

	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandle_cache[np->phandle & phandle_cache_mask] = np;

out:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
}

int of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	kfree(phandle_cache);
	phandle_cache = NULL;

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif

void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

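/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration;
 *		of_node_put() will be called on it
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */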
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.  Caller must hold devtree_lock.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

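/*
 * Find a property with a given name for a given node
 * and return the value.
 */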
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);

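/*
 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
 *
 * @cpu: logical cpu index of a core/thread
 * @phys_id: physical identifier of a core/thread
 *
 * CPU logical to physical index mapping is architecture specific.
 * However this __weak function provides a default match of physical
 * id to logical cpu index. phys_id provided here is usually values read
 * from the device tree which must match the hardware internal registers.
 *
 * Returns true if the physical identifier and the logical cpu index
 * correspond to the same core/thread, false otherwise.
 */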
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}

/*
 * Checks if the given "prop_name" property holds the physical id of the
 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
 * NULL, the local thread number within the core is returned in it.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}

/*
 * arch_find_n_match_cpu_physical_id - See if the given device node is
 * for the cpu corresponding to logical cpu 'cpu'.  Return true if so,
 * else false.  If 'thread' is non-NULL, the local thread number within the
 * core is returned in it.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/* Check for non-standard "ibm,ppc-interrupt-server#s" property
	 * for thread ids on PowerPC. If it doesn't exist fall back to
	 * the standard "reg" property.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}

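/**
 * of_get_cpu_node - Get device node associated with the given logical CPU
 *
 * @cpu: CPU number (logical index) for which device node is required
 * @thread: if not NULL, local thread number within the physical core is
 *          returned
 *
 * The main purpose of this function is to retrieve the device node for the
 * given logical CPU index. It should be used to initialize the of_node in
 * the cpu device. Once of_node in the cpu device is populated, all further
 * references can use that instead.
 *
 * Returns a node pointer for the logical cpu with refcount incremented, use
 * of_node_put() on it when done. Returns NULL if not found.
 */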
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_node_by_type(cpun, "cpu") {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);

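/**
 * of_cpu_node_to_id: Get the logical CPU number for a given device_node
 *
 * @cpu_node: Pointer to the device_node for CPU.
 *
 * Returns the logical CPU number of the given CPU device_node.
 * Returns -ENODEV if the CPU is not found.
 */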
int of_cpu_node_to_id(struct device_node *cpu_node)
{
	int cpu;
	bool found = false;
	struct device_node *np;

	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		found = (cpu_node == np);
		of_node_put(np);
		if (found)
			return cpu;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);

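/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted so that the most specific compatible
 * value gets the highest score. Matching type is next, followed by
 * matching name. Practically speaking, this results in the following
 * priority order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */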
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!device->type || of_node_cmp(type, device->type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!device->name || of_node_cmp(name, device->name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}

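/**
 * of_machine_is_compatible - Test root of device tree for a given compatible value
 * @compat: compatible string to look for in root node's compatible property.
 *
 * Returns a positive integer if the root node has the given value in its
 * compatible property.
 */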
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(of_machine_is_compatible);

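/**
 *  __of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability, with locks already held
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */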
static bool __of_device_is_available(const struct device_node *device)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return true;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return true;
	}

	return false;
}

/**
 *  of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability
 *
 *  Returns true if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_available);

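/**
 *  of_device_is_big_endian - check if a device has BE registers
 *
 *  @device: Node to check for endianness
 *
 *  Returns true if the device has a "big-endian" property, or if the kernel
 *  was compiled for BE *and* the device has a "native-endian" property.
 *  Returns false otherwise.
 *
 *  Callers would nominally use ioread32be/iowrite32be if
 *  of_device_is_big_endian() == true, or readl/writel otherwise.
 */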
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);

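/**
 *	of_get_parent - Get a node's parent if any
 *	@node:	Node to get parent
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */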
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

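/**
 *	of_get_next_parent - Iterate to a node's parent
 *	@node:	Node to get parent of
 *
 *	This is like of_get_parent() except that it drops the
 *	refcount on the passed node, making it suitable for iterating
 *	through a node's parents.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */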
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling)
		if (of_node_get(next))
			break;
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

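/**
 *	of_get_next_child - Iterate a node's children
 *	@node:	parent node
 *	@prev:	previous child of the parent node, or NULL to get first
 *
 *	Returns a node pointer with refcount incremented, use of_node_put() on
 *	it when done. Returns NULL when prev is the last child. Decrements the
 *	refcount of prev.
 */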
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);

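/**
 *	of_get_next_available_child - Find the next available child node
 *	@node:	parent node
 *	@prev:	previous child of the parent node, or NULL to get first
 *
 *	This function is like of_get_next_child(), except that it
 *	automatically skips any disabled nodes (i.e. status = "disabled").
 */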
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);

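/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Returns a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */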
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

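/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for a child node with the given matching name.
 *
 * Returns a node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */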
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (child->name && (of_node_cmp(child->name, name) == 0))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);

struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

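/**
 *	of_find_node_opts_by_path - Find a node matching a full OF path
 *	@path: Either the full path to match, or if the path does not
 *	       start with '/', the name of a property of the /aliases
 *	       node (an alias).  In the case of an alias, the node
 *	       matching the alias' value will be returned.
 *	@opts: Address of a pointer into which to store the start of
 *	       an options string appended to the end of the path with
 *	       a ':' separator.
 *
 *	Valid paths:
 *		/foo/bar	Full path
 *		foo		Valid alias
 *		foo/bar		Valid alias + relative path
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */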
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);

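/**
 *	of_find_node_by_name - Find a node by its "name" property
 *	@from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 *	@name:	The name string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */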
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->name && (of_node_cmp(np->name, name) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

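/**
 *	of_find_node_by_type - Find a node by its "device_type" property
 *	@from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on @from for you.
 *	@type:	The type string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */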
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (np->type && (of_node_cmp(np->type, type) == 0)
		    && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

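/**
 *	of_find_compatible_node - Find a node based on type and one of the
 *                                tokens in its "compatible" property
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@type:		The type string to match "device_type" or NULL to ignore
 *	@compatible:	The string to match to one of the tokens in the device
 *			"compatible" list.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */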
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

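/**
 *	of_find_node_with_property - Find a node which has a property with
 *                                   the given name.
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@prop_name:	The name of the property to look for.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */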
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

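/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 *	@matches:	array of of device match structures to search in
 *	@node:		the of device structure to match against
 *
 *	Low level utility function used by device matching.
 */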
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);

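/**
 *	of_find_matching_node_and_match - Find a node based on an of_device_id
 *					  match table.
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@matches:	array of of device match structures to search in
 *	@match:		Updated to point at the matches entry which matched
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */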
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

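/**
 * of_modalias_node - Lookup appropriate modalias for a device node
 * @node:	pointer to a device tree node
 * @modalias:	Pointer to buffer that modalias value will be copied into
 * @len:	Length of modalias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate modalias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * This routine returns 0 on success, <0 on failure.
 */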
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strlcpy(modalias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);

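/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */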
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
	}

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle) {
				if (phandle_cache)
					phandle_cache[masked_handle] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

err:
	of_node_put(it.node);
	return rc;
}

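/**
 * of_parse_phandle - Resolve a phandle property to a device_node pointer
 * @np: Pointer to device node holding phandle property
 * @phandle_name: Name of property holding a phandle value
 * @index: For properties holding a table of phandles, this is the index into
 *         the table
 *
 * Returns the device_node pointer with refcount incremented.  Use
 * of_node_put() on it when done.
 */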
struct device_node *of_parse_phandle(const struct device_node *np,
				     const char *phandle_name, int index)
{
	struct of_phandle_args args;

	if (index < 0)
		return NULL;

	if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
					 index, &args))
		return NULL;

	return args.np;
}
EXPORT_SYMBOL(of_parse_phandle);

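/**
 * of_parse_phandle_with_args() - Find a node pointed to by a phandle in a list
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies the phandles' argument count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns an appropriate
 * errno value.
 *
 * Caller is responsible for calling of_node_put() on the returned
 * out_args->np pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 *	#list-cells = <2>;
 * }
 *
 * phandle2: node2 {
 *	#list-cells = <1>;
 * }
 *
 * node3 {
 *	list = <&phandle1 1 2 &phandle2 3>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
 */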
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name, int index,
				struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);

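/**
 * of_parse_phandle_with_args_map() - Find a node pointed to by a phandle in a
 * list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify the phandles' argument count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns an appropriate
 * errno value. The difference between this function and
 * of_parse_phandle_with_args() is that this API remaps a phandle if the node
 * the phandle points to has a <@stem_name>-map property.
 *
 * Caller is responsible for calling of_node_put() on the returned
 * out_args->np pointer.
 *
 * Example:
 *
 * phandle1: node1 {
 *	#list-cells = <2>;
 * }
 *
 * phandle2: node2 {
 *	#list-cells = <1>;
 * }
 *
 * phandle3: node3 {
 *	#list-cells = <1>;
 *	list-map = <0 &phandle2 3>,
 *		   <1 &phandle2 2>,
 *		   <2 &phandle1 5 1>;
 *	list-map-mask = <0x3>;
 * };
 *
 * node4 {
 *	list = <&phandle1 1 2 &phandle3 0>;
 * }
 *
 * To get a device_node of the `node2' node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */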
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies the match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy the new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with the new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

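/**
 * of_parse_phandle_with_fixed_args() - Find a node pointed to by a phandle in
 * a list
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cell_count: number of argument cells following the phandle
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments
 * when the argument count is fixed rather than given by a #*-cells property.
 * Returns 0 on success and fills out_args, on error returns an appropriate
 * errno value.
 *
 * Caller is responsible for calling of_node_put() on the returned
 * out_args->np pointer.
 */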
int of_parse_phandle_with_fixed_args(const struct device_node *np,
				     const char *list_name, int cell_count,
				     int index, struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);

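/**
 * of_count_phandle_with_args() - Find the number of phandle references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies the phandles' argument count
 *
 * Returns the number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For instance, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments is
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */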
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);

/*
 * __of_add_property - [REALLY] add a property to a node without lock operations
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0)
			/* duplicate! don't insert it */
			return -EEXIST;

		next = &(*next)->next;
	}
	*next = prop;

	return 0;
}

/**
 * of_add_property - Add a property to a node
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}

int __of_remove_property(struct device_node *np, struct property *prop)
{
	struct property **next;

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (*next == prop)
			break;
	}
	if (*next == NULL)
		/* Property not found */
		return -ENODEV;

	/* found the property; unlink it */
	*next = prop->next;
	prop->next = np->deadprops;
	np->deadprops = prop;

	return 0;
}

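/**
 * of_remove_property - Remove a property from a node.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties"
 * list, so it won't be found any more.
 */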
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}

int __of_update_property(struct device_node *np, struct property *newprop,
			 struct property **oldpropp)
{
	struct property **next, *oldprop;

	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the existing property */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new property */
		newprop->next = NULL;
		*next = newprop;
	}

	return 0;
}

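/*
 * of_update_property - Update a property in a node, if the property does
 * not exist, add it.
 *
 * Note that we don't actually remove it, since we have given out
 * who-knows-how-many pointers to the data using get-property.
 * Instead we just move the property to the "dead properties" list,
 * and add the new property to the property list.
 */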
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}

static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strncpy(ap->stem, stem, stem_len);
	ap->stem[stem_len] = 0;
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}

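/**
 * of_alias_scan - Scan all properties of the 'aliases' node
 *
 * The function scans all the properties of the 'aliases' node and populates
 * the global lookup table with the properties.  It also records the
 * /chosen stdout node and options for console handling.
 *
 * @dt_alloc:	An allocator that provides a virtual address to memory
 *		for storing the resulting alias table
 */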
void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
{
	struct property *pp;

	of_aliases = of_find_node_by_path("/aliases");
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	if (of_chosen) {
		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
		const char *name = NULL;

		if (of_property_read_string(of_chosen, "stdout-path", &name))
			of_property_read_string(of_chosen, "linux,stdout-path",
						&name);
		if (IS_ENABLED(CONFIG_PPC) && !name)
			of_property_read_string(of_aliases, "stdout", &name);
		if (name)
			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
	}

	if (!of_aliases)
		return;

	for_each_property_of_node(of_aliases, pp) {
		const char *start = pp->name;
		const char *end = start + strlen(start);
		struct device_node *np;
		struct alias_prop *ap;
		int id, len;

		/* Skip those we do not want to process */
		if (!strcmp(pp->name, "name") ||
		    !strcmp(pp->name, "phandle") ||
		    !strcmp(pp->name, "linux,phandle"))
			continue;

		np = of_find_node_by_path(pp->value);
		if (!np)
			continue;

		/* walk the alias backwards to extract the id and work out
		 * the 'stem' string */
		while (isdigit(*(end-1)) && end > start)
			end--;
		len = end - start;

		if (kstrtoint(end, 10, &id) < 0)
			continue;

		/* Allocate an alias_prop with enough space for the stem */
		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
		if (!ap)
			continue;
		memset(ap, 0, sizeof(*ap) + len + 1);
		ap->alias = start;
		of_alias_add(ap, np, id, start, len);
	}
}

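/**
 * of_alias_get_id - Get alias id for the given device_node
 * @np:		Pointer to the given device_node
 * @stem:	Alias stem of the given device_node
 *
 * The function travels the lookup table to get the alias id for the given
 * device_node and alias stem.  It returns the alias id if found.
 */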
int of_alias_get_id(struct device_node *np, const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (np == app->np) {
			id = app->id;
			break;
		}
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_id);

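/**
 * of_alias_get_alias_list - Get alias list for the given device driver
 * @matches:	Array of OF device match structures to search in
 * @stem:	Alias stem of the given device_node
 * @bitmap:	Bitmap field pointer
 * @nbits:	Maximum number of alias IDs which can be recorded in bitmap
 *
 * The function travels the lookup table to record alias ids for the given
 * device match structures and alias stem.
 *
 * Return:	0 on success, or -EOVERFLOW if an alias ID is greater than
 *		the given bitmap field size
 */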
int of_alias_get_alias_list(const struct of_device_id *matches,
			     const char *stem, unsigned long *bitmap,
			     unsigned int nbits)
{
	struct alias_prop *app;
	int ret = 0;

	/* Zero the bitmap field to make sure it is always clean */
	bitmap_zero(bitmap, nbits);

	mutex_lock(&of_mutex);
	pr_debug("%s: Looking for stem: %s\n", __func__, stem);
	list_for_each_entry(app, &aliases_lookup, link) {
		pr_debug("%s: stem: %s, id: %d\n",
			 __func__, app->stem, app->id);

		if (strcmp(app->stem, stem) != 0) {
			pr_debug("%s: stem comparison didn't pass %s\n",
				 __func__, app->stem);
			continue;
		}

		if (of_match_node(matches, app->np)) {
			pr_debug("%s: Allocated ID %d\n", __func__, app->id);

			if (app->id >= nbits) {
				pr_warn("%s: ID %d >= bitmap field %d\n",
					__func__, app->id, nbits);
				ret = -EOVERFLOW;
			} else {
				set_bit(app->id, bitmap);
			}
		}
	}
	mutex_unlock(&of_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(of_alias_get_alias_list);

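/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
 * The function travels the lookup table to get the highest alias id for the
 * given alias stem.  It returns the alias id if found.
 */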
int of_alias_get_highest_id(const char *stem)
{
	struct alias_prop *app;
	int id = -ENODEV;

	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (strcmp(app->stem, stem) != 0)
			continue;

		if (app->id > id)
			id = app->id;
	}
	mutex_unlock(&of_mutex);

	return id;
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);

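/**
 * of_console_check() - Test and setup console for DT setup
 * @dn: Pointer to device node
 * @name: Name to use for preferred console without index, e.g. "ttyS"
 * @index: Index to use for preferred console.
 *
 * Check if the given device node matches the stdout-path property in the
 * /chosen node. If it does, register it as the preferred console and return
 * true. Otherwise return false.
 */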
bool of_console_check(struct device_node *dn, char *name, int index)
{
	if (!dn || dn != of_stdout || console_set_on_cmdline)
		return false;

	/*
	 * XXX: cast `options' to char pointer to suppress compilation
	 * warnings: printk, UART and console drivers expect char pointer.
	 */
	return !add_preferred_console(name, index, (char *)of_stdout_options);
}
EXPORT_SYMBOL_GPL(of_console_check);

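/**
 *	of_find_next_cache_node - Find a node's subsidiary cache
 *	@np:	node of type "cpu" or "cache"
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.  Caller should hold a reference
 *	to np.
 */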
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache"
	 * beneath CPU nodes.
	 */
	if (!strcmp(np->type, "cpu"))
		for_each_child_of_node(np, child)
			if (!strcmp(child->type, "cache"))
				return child;

	return NULL;
}

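/**
 * of_find_last_cache_level - Find the level at which the last cache is
 *		present for the given logical cpu
 *
 * @cpu: cpu number (logical index) for which the last cache level is needed
 *
 * Returns the level at which the last cache is present. This is exactly the
 * total number of cache levels for the given logical cpu.
 */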
int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		prev = np;
		of_node_put(np);
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);

	return cache_level;
}