1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/bitmap.h>
20#include <linux/console.h>
21#include <linux/ctype.h>
22#include <linux/cpu.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_graph.h>
27#include <linux/spinlock.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/proc_fs.h>
31
32#include "of_private.h"
33
34LIST_HEAD(aliases_lookup);
35
36struct device_node *of_root;
37EXPORT_SYMBOL(of_root);
38struct device_node *of_chosen;
39struct device_node *of_aliases;
40struct device_node *of_stdout;
41static const char *of_stdout_options;
42
43struct kset *of_kset;
44
45
46
47
48
49
50
51DEFINE_MUTEX(of_mutex);
52
53
54
55
56DEFINE_RAW_SPINLOCK(devtree_lock);
57
58bool of_node_name_eq(const struct device_node *np, const char *name)
59{
60 const char *node_name;
61 size_t len;
62
63 if (!np)
64 return false;
65
66 node_name = kbasename(np->full_name);
67 len = strchrnul(node_name, '@') - node_name;
68
69 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
70}
71EXPORT_SYMBOL(of_node_name_eq);
72
73bool of_node_name_prefix(const struct device_node *np, const char *prefix)
74{
75 if (!np)
76 return false;
77
78 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
79}
80EXPORT_SYMBOL(of_node_name_prefix);
81
82static bool __of_node_is_type(const struct device_node *np, const char *type)
83{
84 const char *match = __of_get_property(np, "device_type", NULL);
85
86 return np && match && type && !strcmp(match, type);
87}
88
89int of_n_addr_cells(struct device_node *np)
90{
91 u32 cells;
92
93 do {
94 if (np->parent)
95 np = np->parent;
96 if (!of_property_read_u32(np, "#address-cells", &cells))
97 return cells;
98 } while (np->parent);
99
100 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
101}
102EXPORT_SYMBOL(of_n_addr_cells);
103
104int of_n_size_cells(struct device_node *np)
105{
106 u32 cells;
107
108 do {
109 if (np->parent)
110 np = np->parent;
111 if (!of_property_read_u32(np, "#size-cells", &cells))
112 return cells;
113 } while (np->parent);
114
115 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
116}
117EXPORT_SYMBOL(of_n_size_cells);
118
#ifdef CONFIG_NUMA
/* Weak default: architectures override this to map a DT node to a NUMA id. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
125
126
127
128
129
130
131
132
133
134
135static struct device_node **phandle_cache;
136static u32 phandle_cache_mask;
137
138
139
140
141static void __of_free_phandle_cache(void)
142{
143 u32 cache_entries = phandle_cache_mask + 1;
144 u32 k;
145
146 if (!phandle_cache)
147 return;
148
149 for (k = 0; k < cache_entries; k++)
150 of_node_put(phandle_cache[k]);
151
152 kfree(phandle_cache);
153 phandle_cache = NULL;
154}
155
/*
 * Locked wrapper around __of_free_phandle_cache(). Without loadable
 * modules the cache is only useful during boot, so it is released from a
 * late initcall.
 */
int of_free_phandle_cache(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_free_phandle_cache();

	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return 0;
}
#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif
171
172
173
174
175void __of_free_phandle_cache_entry(phandle handle)
176{
177 phandle masked_handle;
178 struct device_node *np;
179
180 if (!handle)
181 return;
182
183 masked_handle = handle & phandle_cache_mask;
184
185 if (phandle_cache) {
186 np = phandle_cache[masked_handle];
187 if (np && handle == np->phandle) {
188 of_node_put(np);
189 phandle_cache[masked_handle] = NULL;
190 }
191 }
192}
193
194void of_populate_phandle_cache(void)
195{
196 unsigned long flags;
197 u32 cache_entries;
198 struct device_node *np;
199 u32 phandles = 0;
200
201 raw_spin_lock_irqsave(&devtree_lock, flags);
202
203 __of_free_phandle_cache();
204
205 for_each_of_allnodes(np)
206 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
207 phandles++;
208
209 if (!phandles)
210 goto out;
211
212 cache_entries = roundup_pow_of_two(phandles);
213 phandle_cache_mask = cache_entries - 1;
214
215 phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
216 GFP_ATOMIC);
217 if (!phandle_cache)
218 goto out;
219
220 for_each_of_allnodes(np)
221 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
222 of_node_get(np);
223 phandle_cache[np->phandle & phandle_cache_mask] = np;
224 }
225
226out:
227 raw_spin_unlock_irqrestore(&devtree_lock, flags);
228}
229
/*
 * Boot-time setup: build the phandle cache, create the
 * /sys/firmware/devicetree kset, attach sysfs entries for all existing
 * nodes, and add the legacy /proc/device-tree symlink.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
252
253static struct property *__of_find_property(const struct device_node *np,
254 const char *name, int *lenp)
255{
256 struct property *pp;
257
258 if (!np)
259 return NULL;
260
261 for (pp = np->properties; pp; pp = pp->next) {
262 if (of_prop_cmp(pp->name, name) == 0) {
263 if (lenp)
264 *lenp = pp->length;
265 break;
266 }
267 }
268
269 return pp;
270}
271
272struct property *of_find_property(const struct device_node *np,
273 const char *name,
274 int *lenp)
275{
276 struct property *pp;
277 unsigned long flags;
278
279 raw_spin_lock_irqsave(&devtree_lock, flags);
280 pp = __of_find_property(np, name, lenp);
281 raw_spin_unlock_irqrestore(&devtree_lock, flags);
282
283 return pp;
284}
285EXPORT_SYMBOL(of_find_property);
286
287struct device_node *__of_find_all_nodes(struct device_node *prev)
288{
289 struct device_node *np;
290 if (!prev) {
291 np = of_root;
292 } else if (prev->child) {
293 np = prev->child;
294 } else {
295
296 np = prev;
297 while (np->parent && !np->sibling)
298 np = np->parent;
299 np = np->sibling;
300 }
301 return np;
302}
303
304
305
306
307
308
309
310
311
312struct device_node *of_find_all_nodes(struct device_node *prev)
313{
314 struct device_node *np;
315 unsigned long flags;
316
317 raw_spin_lock_irqsave(&devtree_lock, flags);
318 np = __of_find_all_nodes(prev);
319 of_node_get(np);
320 of_node_put(prev);
321 raw_spin_unlock_irqrestore(&devtree_lock, flags);
322 return np;
323}
324EXPORT_SYMBOL(of_find_all_nodes);
325
326
327
328
329
330const void *__of_get_property(const struct device_node *np,
331 const char *name, int *lenp)
332{
333 struct property *pp = __of_find_property(np, name, lenp);
334
335 return pp ? pp->value : NULL;
336}
337
338
339
340
341
342const void *of_get_property(const struct device_node *np, const char *name,
343 int *lenp)
344{
345 struct property *pp = of_find_property(np, name, lenp);
346
347 return pp ? pp->value : NULL;
348}
349EXPORT_SYMBOL(of_get_property);
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
/*
 * Weak default: a CPU matches when its logical index equals the low 32
 * bits of the physical hw id. Architectures with a real logical->physical
 * mapping override this.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}
369
370
371
372
373
374
/*
 * Test whether cpu node @cpun's cell-list property @prop_name contains a
 * hw id matching logical cpu @cpu. Each entry in the property is
 * #address-cells cells wide. On a match, *thread (if non-NULL) receives
 * the entry index, i.e. the thread index within the node.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	/* Legacy case: no property and zero address cells means hwid 0 */
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	/* Convert byte length into the number of ac-cell-wide entries */
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;	/* advance to the next hwid entry */
	}
	return false;
}
400
401
402
403
404
405
406
/*
 * Weak default for matching a cpu node against a logical cpu.
 * On PPC the per-thread ids live in "ibm,ppc-interrupt-server#s", so that
 * property is tried first; everything else matches on "reg".
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * Check for the PPC interrupt-server property before falling back
	 * to the generic "reg" match.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
443{
444 struct device_node *cpun;
445
446 for_each_of_cpu_node(cpun) {
447 if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
448 return cpun;
449 }
450 return NULL;
451}
452EXPORT_SYMBOL(of_get_cpu_node);
453
454
455
456
457
458
459
460
461
462int of_cpu_node_to_id(struct device_node *cpu_node)
463{
464 int cpu;
465 bool found = false;
466 struct device_node *np;
467
468 for_each_possible_cpu(cpu) {
469 np = of_cpu_device_node_get(cpu);
470 found = (cpu_node == np);
471 of_node_put(np);
472 if (found)
473 return cpu;
474 }
475
476 return -ENODEV;
477}
478EXPORT_SYMBOL(of_cpu_node_to_id);
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
/*
 * Score how well @device matches the given @compat / @type / @name
 * criteria. Returns 0 on no match; larger scores mean better matches.
 * A "compatible" hit dominates (earlier entries in the node's compatible
 * list score higher), then device_type adds 2 and the node name adds 1.
 * Caller must hold devtree_lock.
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				/* earlier list position -> higher score */
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
547
548
549
550
551int of_device_is_compatible(const struct device_node *device,
552 const char *compat)
553{
554 unsigned long flags;
555 int res;
556
557 raw_spin_lock_irqsave(&devtree_lock, flags);
558 res = __of_device_is_compatible(device, compat, NULL, NULL);
559 raw_spin_unlock_irqrestore(&devtree_lock, flags);
560 return res;
561}
562EXPORT_SYMBOL(of_device_is_compatible);
563
564
565
566
567
/*
 * Score @device against a NULL-terminated array of compatible strings and
 * return the best (largest) score, or 0 when nothing matches.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0, score;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		score = of_device_is_compatible(device, *compat);
		if (score > best)
			best = score;
	}

	return best;
}
585
586
587
588
589
590
591
592
/**
 * of_machine_is_compatible - Test root of device tree for a given string
 * @compat: compatible string to look for in the root node
 *
 * Return: a positive score if the root node has the given value in its
 * compatible property, 0 otherwise (or when there is no root node).
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int score = 0;

	if (root) {
		score = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
606
607
608
609
610
611
612
613
614
615static bool __of_device_is_available(const struct device_node *device)
616{
617 const char *status;
618 int statlen;
619
620 if (!device)
621 return false;
622
623 status = __of_get_property(device, "status", &statlen);
624 if (status == NULL)
625 return true;
626
627 if (statlen > 0) {
628 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
629 return true;
630 }
631
632 return false;
633}
634
635
636
637
638
639
640
641
642
643bool of_device_is_available(const struct device_node *device)
644{
645 unsigned long flags;
646 bool res;
647
648 raw_spin_lock_irqsave(&devtree_lock, flags);
649 res = __of_device_is_available(device);
650 raw_spin_unlock_irqrestore(&devtree_lock, flags);
651 return res;
652
653}
654EXPORT_SYMBOL(of_device_is_available);
655
656
657
658
659
660
661
662
663
664
665
666
667
668bool of_device_is_big_endian(const struct device_node *device)
669{
670 if (of_property_read_bool(device, "big-endian"))
671 return true;
672 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
673 of_property_read_bool(device, "native-endian"))
674 return true;
675 return false;
676}
677EXPORT_SYMBOL(of_device_is_big_endian);
678
679
680
681
682
683
684
685
686struct device_node *of_get_parent(const struct device_node *node)
687{
688 struct device_node *np;
689 unsigned long flags;
690
691 if (!node)
692 return NULL;
693
694 raw_spin_lock_irqsave(&devtree_lock, flags);
695 np = of_node_get(node->parent);
696 raw_spin_unlock_irqrestore(&devtree_lock, flags);
697 return np;
698}
699EXPORT_SYMBOL(of_get_parent);
700
701
702
703
704
705
706
707
708
709
710
711
712struct device_node *of_get_next_parent(struct device_node *node)
713{
714 struct device_node *parent;
715 unsigned long flags;
716
717 if (!node)
718 return NULL;
719
720 raw_spin_lock_irqsave(&devtree_lock, flags);
721 parent = of_node_get(node->parent);
722 of_node_put(node);
723 raw_spin_unlock_irqrestore(&devtree_lock, flags);
724 return parent;
725}
726EXPORT_SYMBOL(of_get_next_parent);
727
728static struct device_node *__of_get_next_child(const struct device_node *node,
729 struct device_node *prev)
730{
731 struct device_node *next;
732
733 if (!node)
734 return NULL;
735
736 next = prev ? prev->sibling : node->child;
737 for (; next; next = next->sibling)
738 if (of_node_get(next))
739 break;
740 of_node_put(prev);
741 return next;
742}
743#define __for_each_child_of_node(parent, child) \
744 for (child = __of_get_next_child(parent, NULL); child != NULL; \
745 child = __of_get_next_child(parent, child))
746
747
748
749
750
751
752
753
754
755
756struct device_node *of_get_next_child(const struct device_node *node,
757 struct device_node *prev)
758{
759 struct device_node *next;
760 unsigned long flags;
761
762 raw_spin_lock_irqsave(&devtree_lock, flags);
763 next = __of_get_next_child(node, prev);
764 raw_spin_unlock_irqrestore(&devtree_lock, flags);
765 return next;
766}
767EXPORT_SYMBOL(of_get_next_child);
768
769
770
771
772
773
774
775
776
777struct device_node *of_get_next_available_child(const struct device_node *node,
778 struct device_node *prev)
779{
780 struct device_node *next;
781 unsigned long flags;
782
783 if (!node)
784 return NULL;
785
786 raw_spin_lock_irqsave(&devtree_lock, flags);
787 next = prev ? prev->sibling : node->child;
788 for (; next; next = next->sibling) {
789 if (!__of_device_is_available(next))
790 continue;
791 if (of_node_get(next))
792 break;
793 }
794 of_node_put(prev);
795 raw_spin_unlock_irqrestore(&devtree_lock, flags);
796 return next;
797}
798EXPORT_SYMBOL(of_get_next_available_child);
799
800
801
802
803
804
805
806
807
/*
 * Iterate over cpu nodes: children of /cpus that are either named "cpu"
 * or have device_type "cpu". @prev's reference is dropped; the returned
 * node's refcount is incremented. Pass NULL to start the iteration.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Only look up /cpus when starting a fresh iteration */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);	/* only its children are needed */
	}
	for (; next; next = next->sibling) {
		/* Skip children of /cpus that are not cpu nodes */
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
836
837
838
839
840
841
842
843
844
845
846
847
848struct device_node *of_get_compatible_child(const struct device_node *parent,
849 const char *compatible)
850{
851 struct device_node *child;
852
853 for_each_child_of_node(parent, child) {
854 if (of_device_is_compatible(child, compatible))
855 break;
856 }
857
858 return child;
859}
860EXPORT_SYMBOL(of_get_compatible_child);
861
862
863
864
865
866
867
868
869
870
871
872
873struct device_node *of_get_child_by_name(const struct device_node *node,
874 const char *name)
875{
876 struct device_node *child;
877
878 for_each_child_of_node(node, child)
879 if (of_node_name_eq(child, name))
880 break;
881 return child;
882}
883EXPORT_SYMBOL(of_get_child_by_name);
884
885struct device_node *__of_find_node_by_path(struct device_node *parent,
886 const char *path)
887{
888 struct device_node *child;
889 int len;
890
891 len = strcspn(path, "/:");
892 if (!len)
893 return NULL;
894
895 __for_each_child_of_node(parent, child) {
896 const char *name = kbasename(child->full_name);
897 if (strncmp(path, name, len) == 0 && (strlen(name) == len))
898 return child;
899 }
900 return NULL;
901}
902
/*
 * Descend from @node component by component along @path. @node is
 * consumed (one reference is dropped per step); the returned node, if
 * any, holds the reference taken during the child search. Walking stops
 * when an options separator ':' occurs before the next '/'.
 * Caller must hold devtree_lock.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* step past the '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;	/* rest of the string is options */
	}
	return node;
}
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: either a full path starting with '/', or an alias (optionally
 *	  followed by a relative path). Options may be appended after a
 *	  ':' separator.
 * @opts: if non-NULL, receives a pointer to the options string (the text
 *	  after ':'), or NULL when there are none.
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		/* alias name ends at the first '/', or at ':' if sooner */
		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		/* Each /aliases property names a node by full path */
		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;	/* continue with any trailing components */
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
985
986
987
988
989
990
991
992
993
994
995
996
/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from: node to start searching from, or NULL for the whole tree; its
 *	  reference is dropped
 * @name: name to match (unit address ignored)
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
					 const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);	/* the iteration consumes the start node */
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from: node to start searching from, or NULL for the whole tree; its
 *	  reference is dropped
 * @type: device_type value to match
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
					 const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
/**
 * of_find_compatible_node - Find a node by type and compatible string
 * @from: node to start searching from, or NULL for the whole tree; its
 *	  reference is dropped
 * @type: device_type to match, or NULL to ignore
 * @compatible: compatible string to match
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
					    const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
/**
 * of_find_node_with_property - Find a node that has a named property
 * @from: node to start searching from, or NULL for the whole tree; its
 *	  reference is dropped
 * @prop_name: property name to look for (value is not inspected)
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
					       const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;	/* np still points at the match */
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1106
1107static
1108const struct of_device_id *__of_match_node(const struct of_device_id *matches,
1109 const struct device_node *node)
1110{
1111 const struct of_device_id *best_match = NULL;
1112 int score, best_score = 0;
1113
1114 if (!matches)
1115 return NULL;
1116
1117 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
1118 score = __of_device_is_compatible(node, matches->compatible,
1119 matches->type, matches->name);
1120 if (score > best_score) {
1121 best_match = matches;
1122 best_score = score;
1123 }
1124 }
1125
1126 return best_match;
1127}
1128
1129
1130
1131
1132
1133
1134
1135
1136const struct of_device_id *of_match_node(const struct of_device_id *matches,
1137 const struct device_node *node)
1138{
1139 const struct of_device_id *match;
1140 unsigned long flags;
1141
1142 raw_spin_lock_irqsave(&devtree_lock, flags);
1143 match = __of_match_node(matches, node);
1144 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1145 return match;
1146}
1147EXPORT_SYMBOL(of_match_node);
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
/**
 * of_find_matching_node_and_match - Find a node matching an of_device_id table
 * @from: node to start searching from, or NULL for the whole tree; its
 *	  reference is dropped
 * @matches: match table, terminated by an all-empty entry
 * @match: if non-NULL, receives the matching table entry (or NULL)
 *
 * Return: a node pointer with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
						    const struct of_device_id *matches,
						    const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201int of_modalias_node(struct device_node *node, char *modalias, int len)
1202{
1203 const char *compatible, *p;
1204 int cplen;
1205
1206 compatible = of_get_property(node, "compatible", &cplen);
1207 if (!compatible || strlen(compatible) > cplen)
1208 return -ENODEV;
1209 p = strchr(compatible, ',');
1210 strlcpy(modalias, p ? p + 1 : compatible, len);
1211 return 0;
1212}
1213EXPORT_SYMBOL_GPL(of_modalias_node);
1214
1215
1216
1217
1218
1219
1220
1221
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Consults the phandle cache first; on a miss, scans every node and
 * populates the cache slot. Return: a node pointer with refcount
 * incremented, or NULL. Use of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	phandle masked_handle;

	if (!handle)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	masked_handle = handle & phandle_cache_mask;

	if (phandle_cache) {
		if (phandle_cache[masked_handle] &&
		    handle == phandle_cache[masked_handle]->phandle)
			np = phandle_cache[masked_handle];
		/* A detached node in the cache is a bug; drop it */
		if (np && of_node_check_flag(np, OF_DETACHED)) {
			WARN_ON(1); /* did not uncache np on node removal */
			of_node_put(np);
			phandle_cache[masked_handle] = NULL;
			np = NULL;
		}
	}

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				if (phandle_cache) {
					/* slot invalidated if detached */
					of_node_get(np);
					phandle_cache[masked_handle] = np;
				}
				break;
			}
	}

	/* reference for the caller (on top of the cache's own reference) */
	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1265
1266void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1267{
1268 int i;
1269 printk("%s %pOF", msg, args->np);
1270 for (i = 0; i < args->args_count; i++) {
1271 const char delim = i ? ',' : ':';
1272
1273 pr_cont("%c%08x", delim, args->args[i]);
1274 }
1275 pr_cont("\n");
1276}
1277
/*
 * Prepare @it to walk the phandle list property @list_name of @np.
 * @cells_name names the "#*-cells" property read from each referenced
 * node; when it is NULL, @cell_count args are assumed per entry.
 * Returns -ENOENT when the list property does not exist.
 */
int of_phandle_iterator_init(struct of_phandle_iterator *it,
			     const struct device_node *np,
			     const char *list_name,
			     const char *cells_name,
			     int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;	/* next entry starts here */
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1303
/*
 * Advance @it to the next phandle+args entry. On success it->node holds a
 * reference to the referenced node (dropped on the next call or on error),
 * and it->cur/cur_count describe the argument cells.
 * Returns 0 on success, -ENOENT at the end of the list, -EINVAL on a
 * malformed entry.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* drop the reference taken for the previous entry */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1373
1374int of_phandle_iterator_args(struct of_phandle_iterator *it,
1375 uint32_t *args,
1376 int size)
1377{
1378 int i, count;
1379
1380 count = it->cur_count;
1381
1382 if (WARN_ON(size < count))
1383 count = size;
1384
1385 for (i = 0; i < count; i++)
1386 args[i] = be32_to_cpup(it->cur++);
1387
1388 return count;
1389}
1390
/*
 * Common worker for the of_parse_phandle*() family: walk @np's
 * @list_name property and return the @index'th phandle+args entry in
 * *out_args. When @out_args is NULL, only the node reference is dropped
 * (useful for existence checks). Returns 0 on success, -ENOENT when
 * @index is out of range, or a parse error from the iterator.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* ownership of it.node moves to out_args */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unmatched index; rc is already -ENOENT from the final iterator
	 * step, or the iterator's own error code when parsing failed.
	 */
 err:
	of_node_put(it.node);
	return rc;
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
/**
 * of_parse_phandle - Resolve a phandle property to a device_node pointer
 * @np: node to look at
 * @phandle_name: name of the property holding the phandle(s)
 * @index: which phandle in the list to resolve
 *
 * Return: the referenced node with refcount incremented, or NULL. Use
 * of_node_put() on it when done.
 */
struct device_node *of_parse_phandle(const struct device_node *np,
				     const char *phandle_name, int index)
{
	struct of_phandle_args args;

	if (index < 0)
		return NULL;

	/* entries are bare phandles: no cells property, zero fixed args */
	if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
					 index, &args))
		return NULL;

	return args.np;
}
EXPORT_SYMBOL(of_parse_phandle);
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
/**
 * of_parse_phandle_with_args - Resolve a phandle+args entry from a list
 * @np: node containing the list
 * @list_name: name of the list property
 * @cells_name: name of the "#*-cells" property on each referenced node
 * @index: which entry to resolve
 * @out_args: filled with the referenced node (refcounted) and its args
 *
 * Return: 0 on success, -EINVAL for a negative index, or the parse error.
 */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
			       const char *cells_name, int index,
			       struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
/**
 * of_parse_phandle_with_args_map - Resolve a phandle+args entry, applying
 * any "<stem>-map" translations found on the referenced providers
 * @np: node containing the list
 * @list_name: name of the list property
 * @stem_name: stem used to build the "#<stem>-cells", "<stem>-map",
 *	       "<stem>-map-mask" and "<stem>-map-pass-thru" property names
 * @index: which entry to resolve
 * @out_args: filled with the final provider node (refcounted) and args
 *
 * Return: 0 on success, -EINVAL on a negative index or malformed map,
 * -ENOMEM on allocation failure, or the underlying parse error.
 *
 * NOTE(review): list_size comes straight from the provider's
 * "#<stem>-cells" property and is not bounded against MAX_PHANDLE_ARGS
 * before the initial_match_array fill — a hostile DT could overrun it;
 * verify against current upstream.
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* defaults: compare all bits, pass nothing through */
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	/* Resolve the raw (untranslated) entry first */
	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<stem>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* Follow <stem>-map chains until a provider without a map */
	while (cur) {
		/* Get the <stem>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* no map: cur is the final provider */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <stem>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <stem>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers under the mask */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<stem>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <stem>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <stem>-map translation; copy the new
		 * specifier into out_args, keeping the bits selected by
		 * <stem>-map-pass-thru from the original specifier.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with the new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
/**
 * of_parse_phandle_with_fixed_args - Resolve a phandle+args entry where
 * every entry carries a fixed number of argument cells
 * @np: node containing the list
 * @list_name: name of the list property
 * @cell_count: number of argument cells per entry
 * @index: which entry to resolve
 * @out_args: filled with the referenced node (refcounted) and its args
 *
 * Return: 0 on success, -EINVAL for a negative index, or the parse error.
 */
int of_parse_phandle_with_fixed_args(const struct device_node *np,
				     const char *list_name, int cell_count,
				     int index, struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1753 const char *cells_name)
1754{
1755 struct of_phandle_iterator it;
1756 int rc, cur_index = 0;
1757
1758 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
1759 if (rc)
1760 return rc;
1761
1762 while ((rc = of_phandle_iterator_next(&it)) == 0)
1763 cur_index += 1;
1764
1765 if (rc != -ENOENT)
1766 return rc;
1767
1768 return cur_index;
1769}
1770EXPORT_SYMBOL(of_count_phandle_with_args);
1771
1772
1773
1774
1775int __of_add_property(struct device_node *np, struct property *prop)
1776{
1777 struct property **next;
1778
1779 prop->next = NULL;
1780 next = &np->properties;
1781 while (*next) {
1782 if (strcmp(prop->name, (*next)->name) == 0)
1783
1784 return -EEXIST;
1785
1786 next = &(*next)->next;
1787 }
1788 *next = prop;
1789
1790 return 0;
1791}
1792
1793
1794
1795
/**
 * of_add_property - Add a property to a node
 * @np:		node to receive the property
 * @prop:	property to add
 *
 * Links @prop into @np's property list under of_mutex and devtree_lock,
 * mirrors the addition into sysfs, and issues an
 * OF_RECONFIG_ADD_PROPERTY notification after all locks are dropped.
 * Returns 0 on success or the error from __of_add_property()
 * (e.g. -EEXIST for a duplicate name).
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	/* devtree_lock guards the live property list itself. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update is done under of_mutex only, not the raw spinlock. */
	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notification is deliberately issued with no locks held. */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
1817
1818int __of_remove_property(struct device_node *np, struct property *prop)
1819{
1820 struct property **next;
1821
1822 for (next = &np->properties; *next; next = &(*next)->next) {
1823 if (*next == prop)
1824 break;
1825 }
1826 if (*next == NULL)
1827 return -ENODEV;
1828
1829
1830 *next = prop->next;
1831 prop->next = np->deadprops;
1832 np->deadprops = prop;
1833
1834 return 0;
1835}
1836
1837
1838
1839
1840
1841
1842
1843
1844
/**
 * of_remove_property - Remove a property from a node
 * @np:		node to remove the property from
 * @prop:	property to remove (NULL returns -ENODEV)
 *
 * Unlinks @prop under of_mutex and devtree_lock, removes the matching
 * sysfs entry, and fires an OF_RECONFIG_REMOVE_PROPERTY notification
 * after the locks are released.  The property itself is retired to the
 * node's deadprops list by __of_remove_property(), not freed.
 * Returns 0 on success or -ENODEV if @prop is NULL or not found.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	/* devtree_lock guards the live property list itself. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update is done under of_mutex only, not the raw spinlock. */
	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notification is deliberately issued with no locks held. */
	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
1869
1870int __of_update_property(struct device_node *np, struct property *newprop,
1871 struct property **oldpropp)
1872{
1873 struct property **next, *oldprop;
1874
1875 for (next = &np->properties; *next; next = &(*next)->next) {
1876 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1877 break;
1878 }
1879 *oldpropp = oldprop = *next;
1880
1881 if (oldprop) {
1882
1883 newprop->next = oldprop->next;
1884 *next = newprop;
1885 oldprop->next = np->deadprops;
1886 np->deadprops = oldprop;
1887 } else {
1888
1889 newprop->next = NULL;
1890 *next = newprop;
1891 }
1892
1893 return 0;
1894}
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
/**
 * of_update_property - Update (or add) a property on a node
 * @np:		node to update
 * @newprop:	replacement property (must have a non-NULL name)
 *
 * Replaces any same-named property (or appends @newprop) under of_mutex
 * and devtree_lock, updates sysfs accordingly, and issues an
 * OF_RECONFIG_UPDATE_PROPERTY notification after the locks are dropped,
 * passing the displaced property (may be NULL) as old data.
 * Returns 0 on success or -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	/* devtree_lock guards the live property list itself. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update is done under of_mutex only, not the raw spinlock. */
	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* Notification is deliberately issued with no locks held. */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
1930
1931static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1932 int id, const char *stem, int stem_len)
1933{
1934 ap->np = np;
1935 ap->id = id;
1936 strncpy(ap->stem, stem, stem_len);
1937 ap->stem[stem_len] = 0;
1938 list_add_tail(&ap->link, &aliases_lookup);
1939 pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1940 ap->alias, ap->stem, ap->id, np);
1941}
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1954{
1955 struct property *pp;
1956
1957 of_aliases = of_find_node_by_path("/aliases");
1958 of_chosen = of_find_node_by_path("/chosen");
1959 if (of_chosen == NULL)
1960 of_chosen = of_find_node_by_path("/chosen@0");
1961
1962 if (of_chosen) {
1963
1964 const char *name = NULL;
1965
1966 if (of_property_read_string(of_chosen, "stdout-path", &name))
1967 of_property_read_string(of_chosen, "linux,stdout-path",
1968 &name);
1969 if (IS_ENABLED(CONFIG_PPC) && !name)
1970 of_property_read_string(of_aliases, "stdout", &name);
1971 if (name)
1972 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1973 }
1974
1975 if (!of_aliases)
1976 return;
1977
1978 for_each_property_of_node(of_aliases, pp) {
1979 const char *start = pp->name;
1980 const char *end = start + strlen(start);
1981 struct device_node *np;
1982 struct alias_prop *ap;
1983 int id, len;
1984
1985
1986 if (!strcmp(pp->name, "name") ||
1987 !strcmp(pp->name, "phandle") ||
1988 !strcmp(pp->name, "linux,phandle"))
1989 continue;
1990
1991 np = of_find_node_by_path(pp->value);
1992 if (!np)
1993 continue;
1994
1995
1996
1997 while (isdigit(*(end-1)) && end > start)
1998 end--;
1999 len = end - start;
2000
2001 if (kstrtoint(end, 10, &id) < 0)
2002 continue;
2003
2004
2005 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2006 if (!ap)
2007 continue;
2008 memset(ap, 0, sizeof(*ap) + len + 1);
2009 ap->alias = start;
2010 of_alias_add(ap, np, id, start, len);
2011 }
2012}
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022int of_alias_get_id(struct device_node *np, const char *stem)
2023{
2024 struct alias_prop *app;
2025 int id = -ENODEV;
2026
2027 mutex_lock(&of_mutex);
2028 list_for_each_entry(app, &aliases_lookup, link) {
2029 if (strcmp(app->stem, stem) != 0)
2030 continue;
2031
2032 if (np == app->np) {
2033 id = app->id;
2034 break;
2035 }
2036 }
2037 mutex_unlock(&of_mutex);
2038
2039 return id;
2040}
2041EXPORT_SYMBOL_GPL(of_alias_get_id);
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056int of_alias_get_alias_list(const struct of_device_id *matches,
2057 const char *stem, unsigned long *bitmap,
2058 unsigned int nbits)
2059{
2060 struct alias_prop *app;
2061 int ret = 0;
2062
2063
2064 bitmap_zero(bitmap, nbits);
2065
2066 mutex_lock(&of_mutex);
2067 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2068 list_for_each_entry(app, &aliases_lookup, link) {
2069 pr_debug("%s: stem: %s, id: %d\n",
2070 __func__, app->stem, app->id);
2071
2072 if (strcmp(app->stem, stem) != 0) {
2073 pr_debug("%s: stem comparison didn't pass %s\n",
2074 __func__, app->stem);
2075 continue;
2076 }
2077
2078 if (of_match_node(matches, app->np)) {
2079 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2080
2081 if (app->id >= nbits) {
2082 pr_warn("%s: ID %d >= than bitmap field %d\n",
2083 __func__, app->id, nbits);
2084 ret = -EOVERFLOW;
2085 } else {
2086 set_bit(app->id, bitmap);
2087 }
2088 }
2089 }
2090 mutex_unlock(&of_mutex);
2091
2092 return ret;
2093}
2094EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2095
2096
2097
2098
2099
2100
2101
2102
2103int of_alias_get_highest_id(const char *stem)
2104{
2105 struct alias_prop *app;
2106 int id = -ENODEV;
2107
2108 mutex_lock(&of_mutex);
2109 list_for_each_entry(app, &aliases_lookup, link) {
2110 if (strcmp(app->stem, stem) != 0)
2111 continue;
2112
2113 if (app->id > id)
2114 id = app->id;
2115 }
2116 mutex_unlock(&of_mutex);
2117
2118 return id;
2119}
2120EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132bool of_console_check(struct device_node *dn, char *name, int index)
2133{
2134 if (!dn || dn != of_stdout || console_set_on_cmdline)
2135 return false;
2136
2137
2138
2139
2140
2141 return !add_preferred_console(name, index, (char *)of_stdout_options);
2142}
2143EXPORT_SYMBOL_GPL(of_console_check);
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153struct device_node *of_find_next_cache_node(const struct device_node *np)
2154{
2155 struct device_node *child, *cache_node;
2156
2157 cache_node = of_parse_phandle(np, "l2-cache", 0);
2158 if (!cache_node)
2159 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2160
2161 if (cache_node)
2162 return cache_node;
2163
2164
2165
2166
2167 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2168 for_each_child_of_node(np, child)
2169 if (of_node_is_type(child, "cache"))
2170 return child;
2171
2172 return NULL;
2173}
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184int of_find_last_cache_level(unsigned int cpu)
2185{
2186 u32 cache_level = 0;
2187 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2188
2189 while (np) {
2190 prev = np;
2191 of_node_put(np);
2192 np = of_find_next_cache_node(np);
2193 }
2194
2195 of_property_read_u32(prev, "cache-level", &cache_level);
2196
2197 return cache_level;
2198}
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
/**
 * of_map_rid - Translate a requester ID through a downstream mapping node
 * @np:		node containing the mapping property (e.g. a root complex)
 * @rid:	requester ID to map
 * @map_name:	property name of the map to use (e.g. "iommu-map")
 * @map_mask_name:	optional property name of the mask (e.g. "iommu-map-mask")
 * @target:	optional; if *target is set, only entries whose phandle
 *		resolves to that node are considered, otherwise *target is
 *		filled with the first matching entry's node
 * @id_out:	optional output for the translated ID
 *
 * The map property is a list of (rid-base, phandle, out-base, length)
 * quadruplets.  If the property is absent and no target filtering was
 * requested, the rid is passed through unchanged.  Returns 0 on a
 * successful translation, -EINVAL on bad arguments or a malformed map,
 * -ENODEV if the map is required but missing or a phandle does not
 * resolve, and -EFAULT if no entry matches.
 */
int of_map_rid(struct device_node *np, u32 rid,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_rid;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* No map and a target was requested: nothing to resolve. */
		if (target)
			return -ENODEV;

		/* Otherwise, no map implies no translation: pass rid through. */
		*id_out = rid;
		return 0;
	}

	/* The map must be a non-empty whole number of 4-cell entries. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* Default: match on all rid bits. */
	map_mask = 0xffffffff;

	/*
	 * An optional <map_mask_name> property restricts which rid bits
	 * participate in the lookup; absence leaves the default mask.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_rid = map_mask & rid;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 rid_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 rid_len = be32_to_cpup(map + 3);

		/* rid-base bits outside the mask can never match: reject. */
		if (rid_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, rid_base);
			return -EFAULT;
		}

		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/*
			 * If the caller pre-set *target, keep it and skip
			 * entries resolving elsewhere; otherwise adopt the
			 * first match's node (reference retained).
			 */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_rid - rid_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
			np, map_name, map_mask, rid_base, out_base,
			rid_len, rid, masked_rid - rid_base + out_base);
		return 0;
	}

	pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
		np, map_name, rid, target && *target ? *target : NULL);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(of_map_rid);
2301