1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/bitmap.h>
20#include <linux/console.h>
21#include <linux/ctype.h>
22#include <linux/cpu.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_graph.h>
27#include <linux/spinlock.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/proc_fs.h>
31
32#include "of_private.h"
33
/* List of alias entries parsed from the /aliases node (see of_private.h). */
LIST_HEAD(aliases_lookup);

struct device_node *of_root;	/* root of the live device tree */
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;	/* cached /chosen node, if present */
struct device_node *of_aliases;	/* cached /aliases node, if present */
struct device_node *of_stdout;	/* console node — presumably from stdout-path; set elsewhere */
static const char *of_stdout_options;	/* NOTE(review): set outside this chunk; likely options after ':' */

/* kset backing the /sys/firmware/devicetree hierarchy (see of_core_init()). */
struct kset *of_kset;

/*
 * of_mutex serializes tree modifications together with their sysfs
 * updates (see of_add_property() below).
 */
DEFINE_MUTEX(of_mutex);

/*
 * devtree_lock protects all traversals and mutations of the node and
 * property lists; it is a raw spinlock so it can be taken in any context.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);
57
58bool of_node_name_eq(const struct device_node *np, const char *name)
59{
60 const char *node_name;
61 size_t len;
62
63 if (!np)
64 return false;
65
66 node_name = kbasename(np->full_name);
67 len = strchrnul(node_name, '@') - node_name;
68
69 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
70}
71EXPORT_SYMBOL(of_node_name_eq);
72
73bool of_node_name_prefix(const struct device_node *np, const char *prefix)
74{
75 if (!np)
76 return false;
77
78 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
79}
80EXPORT_SYMBOL(of_node_name_prefix);
81
82static bool __of_node_is_type(const struct device_node *np, const char *type)
83{
84 const char *match = __of_get_property(np, "device_type", NULL);
85
86 return np && match && type && !strcmp(match, type);
87}
88
89int of_n_addr_cells(struct device_node *np)
90{
91 u32 cells;
92
93 do {
94 if (np->parent)
95 np = np->parent;
96 if (!of_property_read_u32(np, "#address-cells", &cells))
97 return cells;
98 } while (np->parent);
99
100 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
101}
102EXPORT_SYMBOL(of_n_addr_cells);
103
104int of_n_size_cells(struct device_node *np)
105{
106 u32 cells;
107
108 do {
109 if (np->parent)
110 np = np->parent;
111 if (!of_property_read_u32(np, "#size-cells", &cells))
112 return cells;
113 } while (np->parent);
114
115 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
116}
117EXPORT_SYMBOL(of_n_size_cells);
118
#ifdef CONFIG_NUMA
/*
 * Default device-tree-node-to-NUMA-node mapping: no affinity known.
 * Architectures with real topology information override this weak stub.
 */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
125
126
127
128
129
130
131
132
133
134
/*
 * Power-of-two-sized cache of phandle -> node lookups.  Each non-NULL
 * slot holds a node reference (taken in of_find_node_by_phandle() /
 * of_populate_phandle_cache(), dropped in __of_free_phandle_cache()).
 * Protected by devtree_lock.
 */
static struct device_node **phandle_cache;
static u32 phandle_cache_mask;	/* number of slots minus one */
137
138
139
140
141static void __of_free_phandle_cache(void)
142{
143 u32 cache_entries = phandle_cache_mask + 1;
144 u32 k;
145
146 if (!phandle_cache)
147 return;
148
149 for (k = 0; k < cache_entries; k++)
150 of_node_put(phandle_cache[k]);
151
152 kfree(phandle_cache);
153 phandle_cache = NULL;
154}
155
/*
 * of_free_phandle_cache - locked teardown of the phandle cache.
 *
 * Returns 0 so it can double as an initcall: without module support the
 * cache is discarded at late_initcall_sync time (lookups then fall back
 * to a full tree walk in of_find_node_by_phandle()).
 */
int of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_free_phandle_cache();

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif
171
172
173
174
175void __of_free_phandle_cache_entry(phandle handle)
176{
177 phandle masked_handle;
178 struct device_node *np;
179
180 if (!handle)
181 return;
182
183 masked_handle = handle & phandle_cache_mask;
184
185 if (phandle_cache) {
186 np = phandle_cache[masked_handle];
187 if (np && handle == np->phandle) {
188 of_node_put(np);
189 phandle_cache[masked_handle] = NULL;
190 }
191 }
192}
193
/*
 * (Re)build the phandle cache from the current tree.  Any previous
 * cache is freed first.  The whole rebuild runs under devtree_lock,
 * hence the GFP_ATOMIC allocation; on allocation failure lookups simply
 * fall back to a full tree walk in of_find_node_by_phandle().
 */
void of_populate_phandle_cache(void)
{
	unsigned long flags;
	u32 cache_entries;
	struct device_node *np;
	u32 phandles = 0;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_free_phandle_cache();

	/* Count valid phandles to size the cache. */
	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandles++;

	if (!phandles)
		goto out;

	/* Power-of-two size lets a phandle be reduced to a slot by masking. */
	cache_entries = roundup_pow_of_two(phandles);
	phandle_cache_mask = cache_entries - 1;

	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
				GFP_ATOMIC);
	if (!phandle_cache)
		goto out;

	/* Each cached node holds a reference, dropped when the cache is freed. */
	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
			of_node_get(np);
			phandle_cache[np->phandle & phandle_cache_mask] = np;
		}

out:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
}
229
/*
 * of_core_init - boot-time initialization of the device tree core.
 *
 * Builds the phandle cache, creates the "devicetree" kset under
 * /sys/firmware and attaches every existing node to it, then adds the
 * legacy /proc/device-tree symlink pointing at the sysfs tree.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes under it. */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as 'device-tree' for legacy consumers. */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
252
253static struct property *__of_find_property(const struct device_node *np,
254 const char *name, int *lenp)
255{
256 struct property *pp;
257
258 if (!np)
259 return NULL;
260
261 for (pp = np->properties; pp; pp = pp->next) {
262 if (of_prop_cmp(pp->name, name) == 0) {
263 if (lenp)
264 *lenp = pp->length;
265 break;
266 }
267 }
268
269 return pp;
270}
271
/*
 * of_find_property - locked lookup of a named property on @np.
 *
 * Takes devtree_lock around __of_find_property() so the property list
 * cannot change during the walk.  The returned property itself is not
 * reference counted.
 */
struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);
286
/*
 * __of_find_all_nodes - pre-order successor of @prev over the whole tree.
 *
 * Descends to the first child when one exists, otherwise climbs until an
 * ancestor with a sibling is found.  Returns NULL at the end of the walk.
 * Caller must hold devtree_lock; no reference is taken on the result.
 */
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be NULL at the end of the tree */
	}
	return np;
}
303
304
305
306
307
308
309
310
311
/**
 * of_find_all_nodes - Get next node in the global list
 * @prev:	Previous node or NULL to start iteration;
 *		of_node_put() will be called on it
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);	/* of_node_get(NULL) is safe */
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
325
326
327
328
329
330const void *__of_get_property(const struct device_node *np,
331 const char *name, int *lenp)
332{
333 struct property *pp = __of_find_property(np, name, lenp);
334
335 return pp ? pp->value : NULL;
336}
337
338
339
340
341
342const void *of_get_property(const struct device_node *np, const char *name,
343 int *lenp)
344{
345 struct property *pp = of_find_property(np, name, lenp);
346
347 return pp ? pp->value : NULL;
348}
349EXPORT_SYMBOL(of_get_property);
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
366{
367 return (u32)phys_id == cpu;
368}
369
370
371
372
373
374
/*
 * __of_find_n_match_cpu_property - match @cpu against ids in @prop_name
 * of cpu node @cpun.
 *
 * Each id in the property is of_n_addr_cells(cpun) cells wide; ids are
 * compared via arch_match_cpu_phys_id().  On a match, the index of the
 * matching id is stored via @thread (if non-NULL) and true is returned.
 * A missing property with zero address cells matches cpu/hwid 0.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	/* Special case: no property and no cells — only hwid 0 can match. */
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;	/* number of ids in the property */
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}
400
401
402
403
404
405
406
/*
 * arch_find_n_match_cpu_physical_id - weak default for matching a cpu
 * node against a logical cpu, checking the architecture-specific
 * property before the generic "reg".
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * On PPC, "ibm,ppc-interrupt-server#s" takes precedence over
	 * the standard "reg" property for identifying cpu threads.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
/**
 * of_get_cpu_node - Get the device node associated with a logical CPU
 * @cpu:	logical cpu index of the cpu node to find
 * @thread:	if non-NULL, receives the thread index within the node
 *		on a match
 *
 * Returns a node pointer for the matching cpu node — the refcount is
 * held by the for_each_of_cpu_node() iteration, so use of_node_put()
 * when done — or NULL if no cpu node matches.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_of_cpu_node(cpun) {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
453
454
455
456
457
458
459
460
461
462int of_cpu_node_to_id(struct device_node *cpu_node)
463{
464 int cpu;
465 bool found = false;
466 struct device_node *np;
467
468 for_each_possible_cpu(cpu) {
469 np = of_cpu_device_node_get(cpu);
470 found = (cpu_node == np);
471 of_node_put(np);
472 if (found)
473 return cpu;
474 }
475
476 return -ENODEV;
477}
478EXPORT_SYMBOL(of_cpu_node_to_id);
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
/*
 * __of_device_is_compatible - check @device against constraints.
 * @compat: required compatible string, NULL or "" to skip this check
 * @type:   required device_type value, NULL or "" to skip this check
 * @name:   required node name, NULL or "" to skip this check
 *
 * Returns 0 for no match, or a positive score where larger means a
 * better match: an earlier position in the "compatible" list scores
 * higher (INT_MAX/2 - 4*index), a device_type match adds 2, a name
 * match adds 1.  Caller must hold devtree_lock.
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has the highest priority. */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* A match on device_type is worth 2 points. */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* A match on node name is worth 1 point. */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
547
548
549
550
/*
 * of_device_is_compatible - locked check of @device's "compatible" list
 * against @compat.  Returns 0 on no match, positive score on match
 * (see __of_device_is_compatible()).
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);
563
564
565
566
567
/*
 * of_device_compatible_match - check @device against a NULL-terminated
 * array of compatible strings, returning the best (largest) score, or
 * 0 when nothing matches or @compat is NULL.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
585
586
587
588
589
590
591
592
/**
 * of_machine_is_compatible - Test the root node's compatible list
 * @compat: compatible string to look for in the root node
 *
 * Returns a positive score on match (see __of_device_is_compatible()),
 * 0 otherwise or when there is no root node.
 */
int of_machine_is_compatible(const char *compat)
{
	int score = 0;
	struct device_node *root = of_find_node_by_path("/");

	if (root) {
		score = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
606
607
608
609
610
611
612
613
614
615static bool __of_device_is_available(const struct device_node *device)
616{
617 const char *status;
618 int statlen;
619
620 if (!device)
621 return false;
622
623 status = __of_get_property(device, "status", &statlen);
624 if (status == NULL)
625 return true;
626
627 if (statlen > 0) {
628 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
629 return true;
630 }
631
632 return false;
633}
634
635
636
637
638
639
640
641
642
/*
 * of_device_is_available - locked wrapper around
 * __of_device_is_available(); true when the node's "status" property
 * is absent, "okay" or "ok".
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);
655
656
657
658
659
660
661
662
663
664
665
666
667
668bool of_device_is_big_endian(const struct device_node *device)
669{
670 if (of_property_read_bool(device, "big-endian"))
671 return true;
672 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
673 of_property_read_bool(device, "native-endian"))
674 return true;
675 return false;
676}
677EXPORT_SYMBOL(of_device_is_big_endian);
678
679
680
681
682
683
684
685
/**
 * of_get_parent - Get a node's parent, if any
 * @node:	node whose parent to fetch
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.  Returns NULL for a NULL node or the
 * root.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);
700
701
702
703
704
705
706
707
708
709
710
711
/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	node whose parent to fetch; its refcount is dropped
 *
 * Like of_get_parent(), but also releases the caller's reference on
 * @node, which is convenient when walking up the tree in a loop.
 * Returns a parent pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);
727
/*
 * __of_get_next_child - next child of @node after @prev (NULL for the
 * first child).  Takes a reference on the returned child and drops the
 * one on @prev.  Caller must hold devtree_lock.
 */
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	/* Skip siblings whose refcount could not be taken. */
	for (; next; next = next->sibling)
		if (of_node_get(next))
			break;
	of_node_put(prev);
	return next;
}
/* Lock-held child iteration; see __of_get_next_child() for refcounting. */
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))
746
747
748
749
750
751
752
753
754
755
/**
 * of_get_next_child - Iterate a node's children
 * @node:	parent node
 * @prev:	previous child, or NULL to get the first
 *
 * Returns a node pointer with refcount incremented; use of_node_put()
 * on it when done.  Decrements the refcount of @prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);
768
769
770
771
772
773
774
775
776
/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child, or NULL to get the first
 *
 * Like of_get_next_child(), but skips children whose "status" property
 * marks them unavailable.  Returns a node pointer with refcount
 * incremented; decrements the refcount of @prev.
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
799
800
801
802
803
804
805
806
807
/**
 * of_get_next_cpu_node - Iterate over the cpu nodes under /cpus
 * @prev:	previous cpu node, or NULL to get the first
 *
 * A node qualifies when it is named "cpu" or has device_type "cpu".
 * Returns a node pointer with refcount incremented; use of_node_put()
 * on it when done.  Decrements the refcount of @prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Only look up /cpus when starting a fresh iteration. */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
836
837
838
839
840
841
842
843
844
845
846
847
848struct device_node *of_get_compatible_child(const struct device_node *parent,
849 const char *compatible)
850{
851 struct device_node *child;
852
853 for_each_child_of_node(parent, child) {
854 if (of_device_is_compatible(child, compatible))
855 break;
856 }
857
858 return child;
859}
860EXPORT_SYMBOL(of_get_compatible_child);
861
862
863
864
865
866
867
868
869
870
871
872
873struct device_node *of_get_child_by_name(const struct device_node *node,
874 const char *name)
875{
876 struct device_node *child;
877
878 for_each_child_of_node(node, child)
879 if (of_node_name_eq(child, name))
880 break;
881 return child;
882}
883EXPORT_SYMBOL(of_get_child_by_name);
884
885struct device_node *__of_find_node_by_path(struct device_node *parent,
886 const char *path)
887{
888 struct device_node *child;
889 int len;
890
891 len = strcspn(path, "/:");
892 if (!len)
893 return NULL;
894
895 __for_each_child_of_node(parent, child) {
896 const char *name = kbasename(child->full_name);
897 if (strncmp(path, name, len) == 0 && (strlen(name) == len))
898 return child;
899 }
900 return NULL;
901}
902
/*
 * __of_find_node_by_full_path - walk @path ('/'-separated components)
 * down from @node, releasing each intermediate node's reference along
 * the way.  The walk stops at an optional ':' options separator.
 * Returns the final node (reference held) or NULL.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Stop descending once the options section begins. */
		if (separator && separator < path)
			break;
	}
	return node;
}
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *	  start with '/', the name of a property of the /aliases
 *	  node (an alias).  In the case of an alias, the node
 *	  matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *	  an options string appended to the end of the path with
 *	  a ':' separator, or NULL if not interested.
 *
 * Valid paths:
 *	/foo/bar	Full path
 *	foo		Valid alias
 *	foo/bar		Valid alias + relative path
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
985
986
987
988
989
990
991
992
993
994
995
996
/**
 * of_find_node_by_name - Find a node by its "name" (unit address ignored)
 * @from:	node to start searching from, or NULL for the whole tree;
 *		its refcount is dropped
 * @name:	node name to match
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	node to start searching from, or NULL for the whole tree;
 *		its refcount is dropped
 * @type:	device_type value to match
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
/**
 * of_find_compatible_node - Find a node by type and/or compatible string
 * @from:	node to start searching from, or NULL for the whole tree;
 *		its refcount is dropped
 * @type:	required device_type value, or NULL to ignore
 * @compatible:	compatible string to match
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
/**
 * of_find_node_with_property - Find a node which has a named property
 * @from:	node to start searching from, or NULL for the whole tree;
 *		its refcount is dropped
 * @prop_name:	property name to look for
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1106
1107static
1108const struct of_device_id *__of_match_node(const struct of_device_id *matches,
1109 const struct device_node *node)
1110{
1111 const struct of_device_id *best_match = NULL;
1112 int score, best_score = 0;
1113
1114 if (!matches)
1115 return NULL;
1116
1117 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
1118 score = __of_device_is_compatible(node, matches->compatible,
1119 matches->type, matches->name);
1120 if (score > best_score) {
1121 best_match = matches;
1122 best_score = score;
1123 }
1124 }
1125
1126 return best_match;
1127}
1128
1129
1130
1131
1132
1133
1134
1135
/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of_device_id match structures to search in
 * @node:	the of_device_id structure to match against
 *
 * Low level utility function used by device matching.  Returns the best
 * matching entry, or NULL.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table
 * @from:	node to start searching from, or NULL for the whole tree;
 *		its refcount is dropped
 * @matches:	array of of_device_id structures to search in
 * @match:	if non-NULL, receives the matched of_device_id entry
 *		(or NULL when nothing matched)
 *
 * Returns a node pointer with refcount incremented; use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201int of_modalias_node(struct device_node *node, char *modalias, int len)
1202{
1203 const char *compatible, *p;
1204 int cplen;
1205
1206 compatible = of_get_property(node, "compatible", &cplen);
1207 if (!compatible || strlen(compatible) > cplen)
1208 return -ENODEV;
1209 p = strchr(compatible, ',');
1210 strlcpy(modalias, p ? p + 1 : compatible, len);
1211 return 0;
1212}
1213EXPORT_SYMBOL_GPL(of_modalias_node);
1214
1215
1216
1217
1218
1219
1220
1221
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Consults the phandle cache first; on a miss (or if the cached node
 * turned out to be detached) falls back to a full tree walk and
 * refreshes the cache.  Returns a node pointer with refcount
 * incremented; use of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
		/* A detached node in the cache is stale: drop it. */
		if (np && of_node_check_flag(np, OF_DETACHED)) {
			WARN_ON(1); /* did not uncache np on node detach */
			of_node_put(np);
			phandle_cache[masked_handle] = NULL;
			np = NULL;
		}
	}

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				if (phandle_cache) {
					/* the cache slot keeps its own reference */
					of_node_get(np);
					phandle_cache[masked_handle] = np;
				}
				break;
			}
	}

	of_node_get(np);	/* reference for the caller; NULL-safe */
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1265
1266void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1267{
1268 int i;
1269 printk("%s %pOF", msg, args->np);
1270 for (i = 0; i < args->args_count; i++) {
1271 const char delim = i ? ',' : ':';
1272
1273 pr_cont("%c%08x", delim, args->args[i]);
1274 }
1275 pr_cont("\n");
1276}
1277
1278int of_phandle_iterator_init(struct of_phandle_iterator *it,
1279 const struct device_node *np,
1280 const char *list_name,
1281 const char *cells_name,
1282 int cell_count)
1283{
1284 const __be32 *list;
1285 int size;
1286
1287 memset(it, 0, sizeof(*it));
1288
1289 list = of_get_property(np, list_name, &size);
1290 if (!list)
1291 return -ENOENT;
1292
1293 it->cells_name = cells_name;
1294 it->cell_count = cell_count;
1295 it->parent = np;
1296 it->list_end = list + size / sizeof(*list);
1297 it->phandle_end = list;
1298 it->cur = list;
1299
1300 return 0;
1301}
1302EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1303
1304int of_phandle_iterator_next(struct of_phandle_iterator *it)
1305{
1306 uint32_t count = 0;
1307
1308 if (it->node) {
1309 of_node_put(it->node);
1310 it->node = NULL;
1311 }
1312
1313 if (!it->cur || it->phandle_end >= it->list_end)
1314 return -ENOENT;
1315
1316 it->cur = it->phandle_end;
1317
1318
1319 it->phandle = be32_to_cpup(it->cur++);
1320
1321 if (it->phandle) {
1322
1323
1324
1325
1326
1327 it->node = of_find_node_by_phandle(it->phandle);
1328
1329 if (it->cells_name) {
1330 if (!it->node) {
1331 pr_err("%pOF: could not find phandle\n",
1332 it->parent);
1333 goto err;
1334 }
1335
1336 if (of_property_read_u32(it->node, it->cells_name,
1337 &count)) {
1338 pr_err("%pOF: could not get %s for %pOF\n",
1339 it->parent,
1340 it->cells_name,
1341 it->node);
1342 goto err;
1343 }
1344 } else {
1345 count = it->cell_count;
1346 }
1347
1348
1349
1350
1351
1352 if (it->cur + count > it->list_end) {
1353 pr_err("%pOF: %s = %d found %d\n",
1354 it->parent, it->cells_name,
1355 count, it->cell_count);
1356 goto err;
1357 }
1358 }
1359
1360 it->phandle_end = it->cur + count;
1361 it->cur_count = count;
1362
1363 return 0;
1364
1365err:
1366 if (it->node) {
1367 of_node_put(it->node);
1368 it->node = NULL;
1369 }
1370
1371 return -EINVAL;
1372}
1373EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1374
1375int of_phandle_iterator_args(struct of_phandle_iterator *it,
1376 uint32_t *args,
1377 int size)
1378{
1379 int i, count;
1380
1381 count = it->cur_count;
1382
1383 if (WARN_ON(size < count))
1384 count = size;
1385
1386 for (i = 0; i < count; i++)
1387 args[i] = be32_to_cpup(it->cur++);
1388
1389 return count;
1390}
1391
/*
 * __of_parse_phandle_with_args - common worker for the of_parse_phandle*
 * family.  Iterates @list_name entries until @index is reached; on
 * success fills @out_args (transferring the node reference to the
 * caller) or, if @out_args is NULL, drops the reference immediately.
 * Returns 0, -ENOENT (empty phandle or index out of range) or -EINVAL
 * (malformed list).
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Drop the iterator's node reference before returning; rc is one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454struct device_node *of_parse_phandle(const struct device_node *np,
1455 const char *phandle_name, int index)
1456{
1457 struct of_phandle_args args;
1458
1459 if (index < 0)
1460 return NULL;
1461
1462 if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
1463 index, &args))
1464 return NULL;
1465
1466 return args.np;
1467}
1468EXPORT_SYMBOL(of_parse_phandle);
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1503 const char *cells_name, int index,
1504 struct of_phandle_args *out_args)
1505{
1506 if (index < 0)
1507 return -EINVAL;
1508 return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
1509 index, out_args);
1510}
1511EXPORT_SYMBOL(of_parse_phandle_with_args);
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/**
 * of_parse_phandle_with_args_map - Resolve a list entry through
 *				    <stem>-map translation nodes
 * @np:		node containing the list
 * @list_name:	property name holding the phandle+args list
 * @stem_name:	stem used to build the "#<stem>-cells", "<stem>-map",
 *		"<stem>-map-mask" and "<stem>-map-pass-thru" property names
 * @index:	zero-based index into the list
 * @out_args:	filled with the final provider node (refcounted) and the
 *		translated arguments
 *
 * Parses the entry like of_parse_phandle_with_args(), then repeatedly
 * translates it through each provider's <stem>-map until a provider
 * without a map is reached.  Returns 0 on success, -EINVAL / -ENOENT /
 * -ENOMEM on failure.
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* Default mask compares all cells; default pass-thru keeps none. */
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* Keep translating until a provider without a <list>-map is found. */
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No map: @cur is the final provider — success. */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers under the mask */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			/* Skip disabled providers */
			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy the new
		 * specifier into out_args, keeping the bits selected by
		 * <list>-map-pass-thru from the original arguments.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with the new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727int of_parse_phandle_with_fixed_args(const struct device_node *np,
1728 const char *list_name, int cell_count,
1729 int index, struct of_phandle_args *out_args)
1730{
1731 if (index < 0)
1732 return -EINVAL;
1733 return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
1734 index, out_args);
1735}
1736EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1754 const char *cells_name)
1755{
1756 struct of_phandle_iterator it;
1757 int rc, cur_index = 0;
1758
1759 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
1760 if (rc)
1761 return rc;
1762
1763 while ((rc = of_phandle_iterator_next(&it)) == 0)
1764 cur_index += 1;
1765
1766 if (rc != -ENOENT)
1767 return rc;
1768
1769 return cur_index;
1770}
1771EXPORT_SYMBOL(of_count_phandle_with_args);
1772
1773
1774
1775
1776int __of_add_property(struct device_node *np, struct property *prop)
1777{
1778 struct property **next;
1779
1780 prop->next = NULL;
1781 next = &np->properties;
1782 while (*next) {
1783 if (strcmp(prop->name, (*next)->name) == 0)
1784
1785 return -EEXIST;
1786
1787 next = &(*next)->next;
1788 }
1789 *next = prop;
1790
1791 return 0;
1792}
1793
1794
1795
1796
/**
 * of_add_property - Add a property to a node
 * @np: device node to receive the property
 * @prop: property to add (the tree stores the pointer itself)
 *
 * Return: 0 on success, -EEXIST if a property with the same name already
 * exists on @np.
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	/* of_mutex serialises tree modifications and sysfs updates. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects concurrent readers of the property list. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* Only mirror to sysfs when the tree update actually happened. */
	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notify reconfig listeners outside of_mutex, on success only. */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
1818
1819int __of_remove_property(struct device_node *np, struct property *prop)
1820{
1821 struct property **next;
1822
1823 for (next = &np->properties; *next; next = &(*next)->next) {
1824 if (*next == prop)
1825 break;
1826 }
1827 if (*next == NULL)
1828 return -ENODEV;
1829
1830
1831 *next = prop->next;
1832 prop->next = np->deadprops;
1833 np->deadprops = prop;
1834
1835 return 0;
1836}
1837
1838
1839
1840
1841
1842
1843
1844
1845
/**
 * of_remove_property - Remove a property from a node
 * @np: device node the property belongs to
 * @prop: property to remove
 *
 * Note that we don't actually remove the property's storage: pointers to
 * its data may have been handed out, so __of_remove_property() moves it to
 * the node's deadprops list where it can no longer be found.
 *
 * Return: 0 on success, -ENODEV if @prop is NULL or not found on @np.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	/* of_mutex serialises tree modifications and sysfs updates. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects concurrent readers of the property list. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notify reconfig listeners outside of_mutex, on success only. */
	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
1870
1871int __of_update_property(struct device_node *np, struct property *newprop,
1872 struct property **oldpropp)
1873{
1874 struct property **next, *oldprop;
1875
1876 for (next = &np->properties; *next; next = &(*next)->next) {
1877 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1878 break;
1879 }
1880 *oldpropp = oldprop = *next;
1881
1882 if (oldprop) {
1883
1884 newprop->next = oldprop->next;
1885 *next = newprop;
1886 oldprop->next = np->deadprops;
1887 np->deadprops = oldprop;
1888 } else {
1889
1890 newprop->next = NULL;
1891 *next = newprop;
1892 }
1893
1894 return 0;
1895}
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
/**
 * of_update_property - Update a property in a node, or add it if missing
 * @np: device node to update
 * @newprop: new property value (the tree stores the pointer itself)
 *
 * An existing property of the same name is moved to the deadprops list
 * rather than freed, since pointers to its data may still be in use.
 *
 * Return: 0 on success, -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	/* of_mutex serialises tree modifications and sysfs updates. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects concurrent readers of the property list. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* Notify reconfig listeners outside of_mutex, on success only. */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
1931
1932static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1933 int id, const char *stem, int stem_len)
1934{
1935 ap->np = np;
1936 ap->id = id;
1937 strncpy(ap->stem, stem, stem_len);
1938 ap->stem[stem_len] = 0;
1939 list_add_tail(&ap->link, &aliases_lookup);
1940 pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1941 ap->alias, ap->stem, ap->id, np);
1942}
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1955{
1956 struct property *pp;
1957
1958 of_aliases = of_find_node_by_path("/aliases");
1959 of_chosen = of_find_node_by_path("/chosen");
1960 if (of_chosen == NULL)
1961 of_chosen = of_find_node_by_path("/chosen@0");
1962
1963 if (of_chosen) {
1964
1965 const char *name = NULL;
1966
1967 if (of_property_read_string(of_chosen, "stdout-path", &name))
1968 of_property_read_string(of_chosen, "linux,stdout-path",
1969 &name);
1970 if (IS_ENABLED(CONFIG_PPC) && !name)
1971 of_property_read_string(of_aliases, "stdout", &name);
1972 if (name)
1973 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1974 }
1975
1976 if (!of_aliases)
1977 return;
1978
1979 for_each_property_of_node(of_aliases, pp) {
1980 const char *start = pp->name;
1981 const char *end = start + strlen(start);
1982 struct device_node *np;
1983 struct alias_prop *ap;
1984 int id, len;
1985
1986
1987 if (!strcmp(pp->name, "name") ||
1988 !strcmp(pp->name, "phandle") ||
1989 !strcmp(pp->name, "linux,phandle"))
1990 continue;
1991
1992 np = of_find_node_by_path(pp->value);
1993 if (!np)
1994 continue;
1995
1996
1997
1998 while (isdigit(*(end-1)) && end > start)
1999 end--;
2000 len = end - start;
2001
2002 if (kstrtoint(end, 10, &id) < 0)
2003 continue;
2004
2005
2006 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2007 if (!ap)
2008 continue;
2009 memset(ap, 0, sizeof(*ap) + len + 1);
2010 ap->alias = start;
2011 of_alias_add(ap, np, id, start, len);
2012 }
2013}
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023int of_alias_get_id(struct device_node *np, const char *stem)
2024{
2025 struct alias_prop *app;
2026 int id = -ENODEV;
2027
2028 mutex_lock(&of_mutex);
2029 list_for_each_entry(app, &aliases_lookup, link) {
2030 if (strcmp(app->stem, stem) != 0)
2031 continue;
2032
2033 if (np == app->np) {
2034 id = app->id;
2035 break;
2036 }
2037 }
2038 mutex_unlock(&of_mutex);
2039
2040 return id;
2041}
2042EXPORT_SYMBOL_GPL(of_alias_get_id);
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057int of_alias_get_alias_list(const struct of_device_id *matches,
2058 const char *stem, unsigned long *bitmap,
2059 unsigned int nbits)
2060{
2061 struct alias_prop *app;
2062 int ret = 0;
2063
2064
2065 bitmap_zero(bitmap, nbits);
2066
2067 mutex_lock(&of_mutex);
2068 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2069 list_for_each_entry(app, &aliases_lookup, link) {
2070 pr_debug("%s: stem: %s, id: %d\n",
2071 __func__, app->stem, app->id);
2072
2073 if (strcmp(app->stem, stem) != 0) {
2074 pr_debug("%s: stem comparison didn't pass %s\n",
2075 __func__, app->stem);
2076 continue;
2077 }
2078
2079 if (of_match_node(matches, app->np)) {
2080 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2081
2082 if (app->id >= nbits) {
2083 pr_warn("%s: ID %d >= than bitmap field %d\n",
2084 __func__, app->id, nbits);
2085 ret = -EOVERFLOW;
2086 } else {
2087 set_bit(app->id, bitmap);
2088 }
2089 }
2090 }
2091 mutex_unlock(&of_mutex);
2092
2093 return ret;
2094}
2095EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2096
2097
2098
2099
2100
2101
2102
2103
2104int of_alias_get_highest_id(const char *stem)
2105{
2106 struct alias_prop *app;
2107 int id = -ENODEV;
2108
2109 mutex_lock(&of_mutex);
2110 list_for_each_entry(app, &aliases_lookup, link) {
2111 if (strcmp(app->stem, stem) != 0)
2112 continue;
2113
2114 if (app->id > id)
2115 id = app->id;
2116 }
2117 mutex_unlock(&of_mutex);
2118
2119 return id;
2120}
2121EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133bool of_console_check(struct device_node *dn, char *name, int index)
2134{
2135 if (!dn || dn != of_stdout || console_set_on_cmdline)
2136 return false;
2137
2138
2139
2140
2141
2142 return !add_preferred_console(name, index, (char *)of_stdout_options);
2143}
2144EXPORT_SYMBOL_GPL(of_console_check);
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154struct device_node *of_find_next_cache_node(const struct device_node *np)
2155{
2156 struct device_node *child, *cache_node;
2157
2158 cache_node = of_parse_phandle(np, "l2-cache", 0);
2159 if (!cache_node)
2160 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2161
2162 if (cache_node)
2163 return cache_node;
2164
2165
2166
2167
2168 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2169 for_each_child_of_node(np, child)
2170 if (of_node_is_type(child, "cache"))
2171 return child;
2172
2173 return NULL;
2174}
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185int of_find_last_cache_level(unsigned int cpu)
2186{
2187 u32 cache_level = 0;
2188 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2189
2190 while (np) {
2191 prev = np;
2192 of_node_put(np);
2193 np = of_find_next_cache_node(np);
2194 }
2195
2196 of_property_read_u32(prev, "cache-level", &cache_level);
2197
2198 return cache_level;
2199}
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
/**
 * of_map_rid - Translate a requester ID through a downstream mapping
 * @np: root complex device node.
 * @rid: device requester ID to map.
 * @map_name: property name of the map to use (e.g. "msi-map").
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device requester ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL, but not both. If @target is non-NULL, the lookup is
 * restricted to the device node returned in *@target (i.e. upon first match,
 * *@target will be updated if initially NULL, or the found device node must
 * match it subsequently).
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_rid(struct device_node *np, u32 rid,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* No map at all: identity mapping is fine for an id lookup,
		 * but a target lookup has nothing to resolve against. */
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = rid;
		return 0;
	}

	/* Each map entry is a (rid-base, phandle, out-base, length) quad. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		/* A rid-base with bits outside the mask can never match. */
		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* First match claims the target; later entries must
			 * point at the same node to count as a match. */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name,
		rid, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = rid;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_rid);
2306