// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * NOTE(review): the original file header comment was stripped during
 * extraction (only residual line numbers remained); this header is
 * reconstructed from the upstream drivers/of/base.c — confirm against the
 * kernel tree before relying on the attribution/license text.
 */
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/bitmap.h>
20#include <linux/console.h>
21#include <linux/ctype.h>
22#include <linux/cpu.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_graph.h>
27#include <linux/spinlock.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/proc_fs.h>
31
32#include "of_private.h"
33
34LIST_HEAD(aliases_lookup);
35
36struct device_node *of_root;
37EXPORT_SYMBOL(of_root);
38struct device_node *of_chosen;
39struct device_node *of_aliases;
40struct device_node *of_stdout;
41static const char *of_stdout_options;
42
43struct kset *of_kset;
44
45
46
47
48
49
50
51DEFINE_MUTEX(of_mutex);
52
53
54
55
56DEFINE_RAW_SPINLOCK(devtree_lock);
57
58bool of_node_name_eq(const struct device_node *np, const char *name)
59{
60 const char *node_name;
61 size_t len;
62
63 if (!np)
64 return false;
65
66 node_name = kbasename(np->full_name);
67 len = strchrnul(node_name, '@') - node_name;
68
69 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
70}
71EXPORT_SYMBOL(of_node_name_eq);
72
73bool of_node_name_prefix(const struct device_node *np, const char *prefix)
74{
75 if (!np)
76 return false;
77
78 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
79}
80EXPORT_SYMBOL(of_node_name_prefix);
81
/*
 * Test a node's "device_type" property against @type.  NULL-safe on all
 * arguments: a missing node, property or type yields false.  (The property
 * lookup itself tolerates a NULL @np, so the np check in the return
 * expression is safe even though it comes after the lookup.)
 */
static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}
88
/*
 * Return the #address-cells value in effect for bus node @np, walking up
 * the tree until a "#address-cells" property is found.
 */
int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent)
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;

	/* No #address-cells property anywhere: use the root-node default. */
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
100
/*
 * Return the #address-cells value that applies to @np's addresses.  The
 * property lives on the parent (bus) node, so start the walk there.
 */
int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);
109
/*
 * Return the #size-cells value in effect for bus node @np, walking up the
 * tree until a "#size-cells" property is found.
 */
int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent)
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;

	/* No #size-cells property anywhere: use the root-node default. */
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
121
/*
 * Return the #size-cells value that applies to @np's addresses.  The
 * property lives on the parent (bus) node, so start the walk there.
 */
int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);
130
#ifdef CONFIG_NUMA
/* Default node-to-NUMA mapping: no affinity.  Architectures may override. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
137
#define OF_PHANDLE_CACHE_BITS 7
#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)

/* One-entry-per-bucket cache of recently looked-up phandles. */
static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

/* Map a phandle value to its cache bucket index. */
static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
147
148
149
150
/*
 * Invalidate the cache entry for @handle, if it is the cached node.
 * NOTE(review): no locking here — the caller is presumably expected to hold
 * devtree_lock; confirm against the call sites.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}
165
/*
 * Register the devicetree kobject hierarchy under /sys/firmware/devicetree,
 * attach sysfs entries for all existing nodes, seed the phandle cache, and
 * create the legacy /proc/device-tree symlink.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	/* Create the kset and register existing device_nodes. */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* Populate empty phandle-cache buckets as we go. */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI. */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
190
/*
 * Linear search of @np's property list for @name (lockless variant; the
 * caller must hold devtree_lock or otherwise guarantee stability).
 * On success, optionally store the property length through @lenp.
 *
 * Return: the matching property, or NULL if @np is NULL or no match.
 */
static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}
209
/*
 * Find a property with a given name for a given node, taking devtree_lock
 * around the search.  Optionally returns the property length via @lenp.
 */
struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);
224
/*
 * Depth-first successor of @prev over the whole tree (lockless; the caller
 * must hold devtree_lock).  A NULL @prev starts the walk at the root.
 */
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}
241
242
243
244
245
246
247
248
249
/**
 * of_find_all_nodes - Get next node in global list
 * @prev: Previous node or NULL to start iteration; of_node_put() is called
 *        on it.
 *
 * Return: a node pointer with refcount incremented; use of_node_put() on
 * it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
263
264
265
266
267
268const void *__of_get_property(const struct device_node *np,
269 const char *name, int *lenp)
270{
271 struct property *pp = __of_find_property(np, name, lenp);
272
273 return pp ? pp->value : NULL;
274}
275
276
277
278
279
280const void *of_get_property(const struct device_node *np, const char *name,
281 int *lenp)
282{
283 struct property *pp = of_find_property(np, name, lenp);
284
285 return pp ? pp->value : NULL;
286}
287EXPORT_SYMBOL(of_get_property);
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/*
 * Default match: logical CPU index equals the (truncated) physical id.
 * Architectures may override this weak implementation.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}
307
308
309
310
311
312
/*
 * Check whether cpu node @cpun carries a hardware id in @prop_name that
 * matches logical @cpu.  On a match, the matching thread index within the
 * property is stored through @thread (if non-NULL).
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	/* No property and zero address cells: fall back to matching id 0. */
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;
	/* Each entry of @ac cells is one hardware thread id. */
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}
338
339
340
341
342
343
344
/*
 * arch_find_n_match_cpu_physical_id - match a cpu node against a logical cpu.
 * Architectures may override this weak implementation.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * On PPC, "ibm,ppc-interrupt-server#s" encodes the hardware thread
	 * ids and takes precedence over "reg".
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
/**
 * of_get_cpu_node - Get the device node associated with a logical CPU
 * @cpu: logical cpu index of the CPU whose node is sought
 * @thread: if non-NULL, receives the hardware thread index within the node
 *
 * Return: a node pointer (with refcount held via the iterator's
 * of_node_get()) for the matching CPU, or NULL.  Use of_node_put() on it
 * when done.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_of_cpu_node(cpun) {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
391
392
393
394
395
396
397
398
399
/**
 * of_cpu_node_to_id - Get the logical CPU number for a given device_node
 * @cpu_node: device node of the CPU
 *
 * Return: the logical CPU number whose node matches @cpu_node, or -ENODEV
 * when no possible CPU maps to it.
 */
int of_cpu_node_to_id(struct device_node *cpu_node)
{
	int cpu;
	bool found = false;
	struct device_node *np;

	for_each_possible_cpu(cpu) {
		np = of_cpu_device_node_get(cpu);
		found = (cpu_node == np);
		of_node_put(np);
		if (found)
			return cpu;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(of_cpu_node_to_id);
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
/**
 * of_get_cpu_state_node - Get CPU's idle state node at the given index
 * @cpu_node: the cpu node
 * @index: index of the desired idle state
 *
 * Prefer the "domain-idle-states" of the CPU's PM domain (if the CPU has a
 * "power-domains" phandle); otherwise fall back to the CPU's own
 * "cpu-idle-states" property.
 *
 * Return: a refcounted idle-state node, or NULL.  Caller must of_node_put().
 */
struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
					  int index)
{
	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_args(cpu_node, "power-domains",
					"#power-domain-cells", 0, &args);
	if (!err) {
		struct device_node *state_node =
			of_parse_phandle(args.np, "domain-idle-states", index);

		of_node_put(args.np);
		if (state_node)
			return state_node;
	}

	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
}
EXPORT_SYMBOL(of_get_cpu_state_node);
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
/*
 * Score how well @device matches the given @compat / @type / @name triple
 * (lockless; caller holds devtree_lock).  Any non-empty criterion that does
 * not match yields 0.  A "compatible" match dominates the score and earlier
 * entries in the compatible list score higher; a matching device_type adds
 * a little, a matching name slightly less.
 *
 * Return: 0 for no match, otherwise a positive score (higher is better).
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
521
522
523
524
/*
 * Check that the device node's "compatible" list contains @compat.
 *
 * Return: 0 for no match, positive score otherwise (see
 * __of_device_is_compatible()).
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);
537
538
539
540
541
/*
 * Check that the device is compatible with any of the NULL-terminated
 * array of compatible strings.
 *
 * Return: the best (highest) match score, or 0 for no match.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
559
560
561
562
563
564
565
566
/**
 * of_machine_is_compatible - Test root of device tree for a given compatible value
 * @compat: compatible string to look for in root node's compatible property.
 *
 * Return: a positive value if the root node has the given value in its
 * compatible property, 0 otherwise (including when there is no root node).
 */
int of_machine_is_compatible(const char *compat)
{
	int score = 0;
	struct device_node *root = of_find_node_by_path("/");

	if (root) {
		score = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
580
581
582
583
584
585
586
587
588
/*
 * Check a node's "status" property (lockless; caller holds devtree_lock).
 * A missing status property counts as available, per the DT spec.
 *
 * Return: true if the status is "okay"/"ok" or absent, false otherwise.
 */
static bool __of_device_is_available(const struct device_node *device)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return true;

	if (statlen > 0) {
		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
			return true;
	}

	return false;
}
608
609
610
611
612
613
614
615
616
/**
 * of_device_is_available - check if a device is available for use
 * @device: Node to check for availability
 *
 * Return: true if the status property is absent or set to "okay"/"ok",
 * false otherwise.
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);
629
630
631
632
633
634
635
636
637
638
639
640
641
/**
 * of_device_is_big_endian - check if a device has registers in big-endian
 * layout
 * @device: Node to check
 *
 * Return: true if the device has a "big-endian" property, or if the kernel
 * was compiled for BE *and* the device has a "native-endian" property.
 * Returns false otherwise.  Callers would nominally use ioread32be/
 * iowrite32be if this returns true.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);
652
653
654
655
656
657
658
659
/**
 * of_get_parent - Get a node's parent if any
 * @node: Node to get parent of
 *
 * Return: a refcounted parent node, or NULL.  Caller must of_node_put() it
 * when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);
674
675
676
677
678
679
680
681
682
683
684
685
/**
 * of_get_next_parent - Iterate to a node's parent
 * @node: Node to get parent of
 *
 * This is like of_get_parent() except that it drops the refcount on the
 * passed node, making it suitable for iterating up a node hierarchy.
 *
 * Return: a refcounted parent node, or NULL.  Caller must of_node_put() it
 * when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);
701
/*
 * Lockless child iterator (caller holds devtree_lock): return the next
 * child of @node after @prev (or the first child when @prev is NULL),
 * taking a reference on it and dropping the reference on @prev.
 */
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	/* Skip nodes whose refcount can no longer be taken. */
	for (; next; next = next->sibling)
		if (of_node_get(next))
			break;
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))
720
721
722
723
724
725
726
727
728
729
/**
 * of_get_next_child - Iterate a node childs
 * @node: parent node
 * @prev: previous child of the parent node, or NULL to get first
 *
 * Return: a refcounted next child, or NULL when none remain.  The @prev
 * refcount is decremented.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);
742
743
744
745
746
747
748
749
750
/**
 * of_get_next_available_child - Find the next available child node
 * @node: parent node
 * @prev: previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it automatically
 * skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
773
774
775
776
777
778
779
780
781
/**
 * of_get_next_cpu_node - Iterate over the CPU nodes under /cpus
 * @prev: previous cpu node, or NULL to start from the first child of /cpus
 *
 * A node counts as a CPU node when it is named "cpu" or its device_type is
 * "cpu".  The refcount of @prev is dropped.
 *
 * Return: a refcounted cpu node, or NULL when the iteration is done.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Look up /cpus outside the lock (it takes devtree_lock itself). */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
810
811
812
813
814
815
816
817
818
819
820
821
/**
 * of_get_compatible_child - Find compatible child node
 * @parent: parent node
 * @compatible: compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a refcounted child node, or NULL when none matches.  Use
 * of_node_put() on it when done.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);
835
836
837
838
839
840
841
842
843
844
845
846
/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node: parent node
 * @name: child name to look for (unit address ignored)
 *
 * Return: a refcounted child node, or NULL when none matches.  Use
 * of_node_put() on it when done.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);
858
/*
 * Match one path component: find the child of @parent whose basename equals
 * the leading component of @path (up to the next '/' or ':').
 * Lockless; caller holds devtree_lock.  The returned child carries the
 * reference taken by __for_each_child_of_node().
 */
struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}
876
/*
 * Walk a full '/'-separated path starting at @node, consuming one component
 * per iteration.  Stops early at a ':' options separator.  The reference on
 * each intermediate node is dropped; the returned node keeps its reference.
 * Lockless; caller holds devtree_lock.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not start with
 *        '/', the name of a property of the /aliases node (an alias).  In
 *        the case of an alias, the node matching the alias' value will be
 *        returned; any remaining path (relative to that node) and options
 *        are handled as for a full path.
 * @opts: Address of a pointer into which to store the start of an options
 *        string appended to the end of the path with a ':' separator, or
 *        NULL when there are no options.
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
959
960
961
962
963
964
965
966
967
968
969
970
/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from: The node to start searching from or NULL; the node you pass will
 *        not be searched, only the next one will.  Typically, you pass what
 *        the previous call returned; of_node_put() will be called on @from.
 * @name: The name string to match against
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
986
987
988
989
990
991
992
993
994
995
996
997
998
/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from: The node to start searching from, or NULL to start from the root;
 *        @from itself is not searched and its refcount is dropped.
 * @type: The type string to match against
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                           tokens in its "compatible" property
 * @from: The node to start searching from, or NULL to start from the root;
 *        @from itself is not searched and its refcount is dropped.
 * @type: The type string to match "device_type" against, or NULL to ignore
 * @compatible: The string to match against one of the "compatible" tokens
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
/**
 * of_find_node_with_property - Find a node which has a property with the
 *                              given name
 * @from: The node to start searching from, or NULL to start from the root;
 *        @from itself is not searched and its refcount is dropped.
 * @prop_name: The name of the property to look for
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1080
/*
 * Pick the best-scoring entry of a match table for @node (lockless; caller
 * holds devtree_lock).  The table is terminated by an entry with empty
 * name, type and compatible.
 *
 * Return: the best matching entry, or NULL when nothing matches.
 */
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches: array of of_device_id match structures to search in
 * @node: the of_device_node structure to match against
 *
 * Low level utility function used by device matching.
 *
 * Return: the best matching entry, or NULL when nothing matches.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *                                   match table.
 * @from: The node to start searching from, or NULL to start from the root;
 *        @from itself is not searched and its refcount is dropped.
 * @matches: array of of_device_id structures to search in
 * @match: if non-NULL, receives the matched of_device_id entry
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175int of_modalias_node(struct device_node *node, char *modalias, int len)
1176{
1177 const char *compatible, *p;
1178 int cplen;
1179
1180 compatible = of_get_property(node, "compatible", &cplen);
1181 if (!compatible || strlen(compatible) > cplen)
1182 return -ENODEV;
1183 p = strchr(compatible, ',');
1184 strlcpy(modalias, p ? p + 1 : compatible, len);
1185 return 0;
1186}
1187EXPORT_SYMBOL_GPL(of_modalias_node);
1188
1189
1190
1191
1192
1193
1194
1195
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Consults the phandle cache first; on a miss, walks the whole tree and
 * caches the result (detached nodes are skipped and never cached).
 *
 * Return: a refcounted node, or NULL.  Use of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1227
/*
 * Print @msg followed by the node and argument cells of @args, e.g.
 * "msg /path/to/node:00000001,00000002".
 */
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		/* ':' before the first cell, ',' between subsequent ones. */
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}
1239
/*
 * Initialise @it to walk the phandle list property @list_name of @np.
 * @cells_name names the provider property giving the argument count;
 * @cell_count is a fixed/fallback argument count (may be negative only
 * when @cells_name is supplied).
 *
 * Return: 0 on success, -EINVAL for an invalid combination of arguments,
 * -ENOENT when the list property does not exist.
 */
int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1272
/*
 * Advance @it to the next (phandle, args) entry in the list.  On success,
 * it->phandle, it->node (refcounted, may be NULL for an empty phandle) and
 * it->cur_count describe the current entry.
 *
 * Return: 0 on success, -ENOENT at the end of the list, -EINVAL on a
 * malformed entry.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Drop the reference taken for the previous entry's node. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name were
				 * provided, fall back to cell_count when
				 * the provider lacks the cells property.
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length.
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: %s = %d found %d\n",
			       it->parent, it->cells_name,
			       count, it->cell_count);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1352
/*
 * Copy the argument cells of the iterator's current entry into @args,
 * clamping to @size (with a WARN when the caller's buffer is too small).
 *
 * Return: the number of cells copied.
 */
int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}
1369
/*
 * Common worker for the of_parse_phandle*() family: iterate the phandle
 * list @list_name of @np and extract entry @index into @out_args (or just
 * validate it when @out_args is NULL).
 *
 * Return: 0 on success; -ENOENT when @index is past the end or names an
 * empty (phandle == 0) entry; other negative errno on malformed lists.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! Return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unmatched index means the loop ended early: rc carries the
	 * iterator's error (-ENOENT at a clean end of list).
	 */
 err:
	of_node_put(it.node);
	return rc;
}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
/**
 * of_parse_phandle - Resolve a phandle property to a device_node pointer
 * @np: Pointer to device node holding phandle property
 * @phandle_name: Name of property holding a phandle value
 * @index: For properties holding a table of phandles, this is the index
 *         into the table
 *
 * Return: the device_node pointer with refcount incremented.  Use
 * of_node_put() on it when done.
 */
struct device_node *of_parse_phandle(const struct device_node *np,
				     const char *phandle_name, int index)
{
	struct of_phandle_args args;

	if (index < 0)
		return NULL;

	if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
					 index, &args))
		return NULL;

	return args.np;
}
EXPORT_SYMBOL(of_parse_phandle);
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
/**
 * of_parse_phandle_with_args - Find a node pointed by phandle in a list
 * @np: pointer to a device tree node containing a list
 * @list_name: property name that contains a list
 * @cells_name: property name that specifies the phandles' arguments count,
 *              or NULL when every entry has zero arguments
 * @index: index of a phandle to parse out
 * @out_args: optional pointer to output arguments structure (will be filled)
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Return: 0 on success, negative errno on failure.
 */
int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name, int index,
				struct of_phandle_args *out_args)
{
	int cell_count = -1;

	if (index < 0)
		return -EINVAL;

	/* If cells_name is NULL we assume a cell count of 0 */
	if (!cells_name)
		cell_count = 0;

	return __of_parse_phandle_with_args(np, list_name, cells_name,
					    cell_count, index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_args);
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
/**
 * of_parse_phandle_with_args_map - Find a node pointed by phandle in a
 *                                  list and remap it, if needed
 * @np: pointer to a device tree node containing a list
 * @list_name: property name that contains a list
 * @stem_name: stem of property names that specify phandles' arguments count
 *             (i.e. "#<stem>-cells", "<stem>-map", "<stem>-map-mask",
 *             "<stem>-map-pass-thru")
 * @index: index of a phandle to parse out
 * @out_args: pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments,
 * following "<stem>-map" translation chains through provider nodes until a
 * node without a map property is reached.  Caller is responsible to call
 * of_node_put() on the returned out_args->np pointer.
 *
 * Return: 0 on success, negative errno on failure (including -EINVAL for a
 * map entry that never matches).
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	/* Defaults: match everything, pass nothing through. */
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	/* Follow the map chain until a node without a map is reached. */
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No more translation: done. */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
/**
 * of_parse_phandle_with_fixed_args - Find a node pointed by phandle in a list
 * @np: pointer to a device tree node containing a list
 * @list_name: property name that contains a list
 * @cell_count: number of argument cells following each phandle
 * @index: index of a phandle to parse out
 * @out_args: optional pointer to output arguments structure (will be filled)
 *
 * Identical to of_parse_phandle_with_args() except the argument count is
 * fixed rather than read from the provider's "#*-cells" property.  Caller
 * is responsible to call of_node_put() on the returned out_args->np.
 *
 * Return: 0 on success, negative errno on failure.
 */
int of_parse_phandle_with_fixed_args(const struct device_node *np,
				const char *list_name, int cell_count,
				int index, struct of_phandle_args *out_args)
{
	if (index < 0)
		return -EINVAL;
	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
					    index, out_args);
}
EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
/**
 * of_count_phandle_with_args() - Find the number of phandles references in a
 *                                property
 * @np: pointer to a device tree node containing a list
 * @list_name: property name that contains a list
 * @cells_name: property name that specifies phandles' arguments count, or
 *              NULL to treat each cell of the list as one phandle
 *
 * Return: the number of phandle + argument tuples within a property.  A
 * negative errno is returned on error (-ENOENT when the list property does
 * not exist).
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and no arguments are to be consumed by the iterator.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	/* The iterator reports a clean end of list as -ENOENT. */
	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
1774
1775
1776
1777
1778
1779
1780int __of_add_property(struct device_node *np, struct property *prop)
1781{
1782 struct property **next;
1783
1784 prop->next = NULL;
1785 next = &np->properties;
1786 while (*next) {
1787 if (strcmp(prop->name, (*next)->name) == 0)
1788
1789 return -EEXIST;
1790
1791 next = &(*next)->next;
1792 }
1793 *next = prop;
1794
1795 return 0;
1796}
1797
1798
1799
1800
1801
1802
/**
 * of_add_property - Add a property to a node
 * @np:		node to receive the property
 * @prop:	property to attach
 *
 * Takes of_mutex, links @prop onto @np under devtree_lock, then mirrors
 * the change into sysfs and fires an OF_RECONFIG_ADD_PROPERTY notification
 * (the latter outside the mutex).
 *
 * Return: 0 on success, -EEXIST if @np already has a property of the same
 * name (from __of_add_property()).
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update only when the list insert succeeded */
	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* notify after dropping of_mutex */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
1824
1825int __of_remove_property(struct device_node *np, struct property *prop)
1826{
1827 struct property **next;
1828
1829 for (next = &np->properties; *next; next = &(*next)->next) {
1830 if (*next == prop)
1831 break;
1832 }
1833 if (*next == NULL)
1834 return -ENODEV;
1835
1836
1837 *next = prop->next;
1838 prop->next = np->deadprops;
1839 np->deadprops = prop;
1840
1841 return 0;
1842}
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
/**
 * of_remove_property - Remove a property from a node.
 * @np:		node owning the property
 * @prop:	property to remove; NULL yields -ENODEV
 *
 * Unlinks @prop under devtree_lock via __of_remove_property() (which parks
 * it on np->deadprops instead of freeing it), removes the sysfs entry, and
 * emits an OF_RECONFIG_REMOVE_PROPERTY notification outside of_mutex.
 *
 * Return: 0 on success, -ENODEV if @prop is NULL or not attached to @np.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs teardown only when the unlink succeeded */
	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* notify after dropping of_mutex */
	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
1879
1880int __of_update_property(struct device_node *np, struct property *newprop,
1881 struct property **oldpropp)
1882{
1883 struct property **next, *oldprop;
1884
1885 for (next = &np->properties; *next; next = &(*next)->next) {
1886 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1887 break;
1888 }
1889 *oldpropp = oldprop = *next;
1890
1891 if (oldprop) {
1892
1893 newprop->next = oldprop->next;
1894 *next = newprop;
1895 oldprop->next = np->deadprops;
1896 np->deadprops = oldprop;
1897 } else {
1898
1899 newprop->next = NULL;
1900 *next = newprop;
1901 }
1902
1903 return 0;
1904}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
/**
 * of_update_property - Update a property on a node, creating it if absent
 * @np:		node to update
 * @newprop:	replacement property; must have a non-NULL name
 *
 * Replaces (or appends) @newprop under devtree_lock, refreshes sysfs, and
 * emits an OF_RECONFIG_UPDATE_PROPERTY notification outside of_mutex.
 * Any displaced property is kept on np->deadprops by __of_update_property().
 *
 * Return: 0 on success, -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* oldprop may be NULL when the property was newly added */
	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* notify after dropping of_mutex */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
1940
1941static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1942 int id, const char *stem, int stem_len)
1943{
1944 ap->np = np;
1945 ap->id = id;
1946 strncpy(ap->stem, stem, stem_len);
1947 ap->stem[stem_len] = 0;
1948 list_add_tail(&ap->link, &aliases_lookup);
1949 pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1950 ap->alias, ap->stem, ap->id, np);
1951}
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1963{
1964 struct property *pp;
1965
1966 of_aliases = of_find_node_by_path("/aliases");
1967 of_chosen = of_find_node_by_path("/chosen");
1968 if (of_chosen == NULL)
1969 of_chosen = of_find_node_by_path("/chosen@0");
1970
1971 if (of_chosen) {
1972
1973 const char *name = NULL;
1974
1975 if (of_property_read_string(of_chosen, "stdout-path", &name))
1976 of_property_read_string(of_chosen, "linux,stdout-path",
1977 &name);
1978 if (IS_ENABLED(CONFIG_PPC) && !name)
1979 of_property_read_string(of_aliases, "stdout", &name);
1980 if (name)
1981 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1982 }
1983
1984 if (!of_aliases)
1985 return;
1986
1987 for_each_property_of_node(of_aliases, pp) {
1988 const char *start = pp->name;
1989 const char *end = start + strlen(start);
1990 struct device_node *np;
1991 struct alias_prop *ap;
1992 int id, len;
1993
1994
1995 if (!strcmp(pp->name, "name") ||
1996 !strcmp(pp->name, "phandle") ||
1997 !strcmp(pp->name, "linux,phandle"))
1998 continue;
1999
2000 np = of_find_node_by_path(pp->value);
2001 if (!np)
2002 continue;
2003
2004
2005
2006 while (isdigit(*(end-1)) && end > start)
2007 end--;
2008 len = end - start;
2009
2010 if (kstrtoint(end, 10, &id) < 0)
2011 continue;
2012
2013
2014 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2015 if (!ap)
2016 continue;
2017 memset(ap, 0, sizeof(*ap) + len + 1);
2018 ap->alias = start;
2019 of_alias_add(ap, np, id, start, len);
2020 }
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033int of_alias_get_id(struct device_node *np, const char *stem)
2034{
2035 struct alias_prop *app;
2036 int id = -ENODEV;
2037
2038 mutex_lock(&of_mutex);
2039 list_for_each_entry(app, &aliases_lookup, link) {
2040 if (strcmp(app->stem, stem) != 0)
2041 continue;
2042
2043 if (np == app->np) {
2044 id = app->id;
2045 break;
2046 }
2047 }
2048 mutex_unlock(&of_mutex);
2049
2050 return id;
2051}
2052EXPORT_SYMBOL_GPL(of_alias_get_id);
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067int of_alias_get_alias_list(const struct of_device_id *matches,
2068 const char *stem, unsigned long *bitmap,
2069 unsigned int nbits)
2070{
2071 struct alias_prop *app;
2072 int ret = 0;
2073
2074
2075 bitmap_zero(bitmap, nbits);
2076
2077 mutex_lock(&of_mutex);
2078 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2079 list_for_each_entry(app, &aliases_lookup, link) {
2080 pr_debug("%s: stem: %s, id: %d\n",
2081 __func__, app->stem, app->id);
2082
2083 if (strcmp(app->stem, stem) != 0) {
2084 pr_debug("%s: stem comparison didn't pass %s\n",
2085 __func__, app->stem);
2086 continue;
2087 }
2088
2089 if (of_match_node(matches, app->np)) {
2090 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2091
2092 if (app->id >= nbits) {
2093 pr_warn("%s: ID %d >= than bitmap field %d\n",
2094 __func__, app->id, nbits);
2095 ret = -EOVERFLOW;
2096 } else {
2097 set_bit(app->id, bitmap);
2098 }
2099 }
2100 }
2101 mutex_unlock(&of_mutex);
2102
2103 return ret;
2104}
2105EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2106
2107
2108
2109
2110
2111
2112
2113
2114int of_alias_get_highest_id(const char *stem)
2115{
2116 struct alias_prop *app;
2117 int id = -ENODEV;
2118
2119 mutex_lock(&of_mutex);
2120 list_for_each_entry(app, &aliases_lookup, link) {
2121 if (strcmp(app->stem, stem) != 0)
2122 continue;
2123
2124 if (app->id > id)
2125 id = app->id;
2126 }
2127 mutex_unlock(&of_mutex);
2128
2129 return id;
2130}
2131EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144bool of_console_check(struct device_node *dn, char *name, int index)
2145{
2146 if (!dn || dn != of_stdout || console_set_on_cmdline)
2147 return false;
2148
2149
2150
2151
2152
2153 return !add_preferred_console(name, index, (char *)of_stdout_options);
2154}
2155EXPORT_SYMBOL_GPL(of_console_check);
2156
2157
2158
2159
2160
2161
2162
2163
2164
/**
 * of_find_next_cache_node - Find the next-level cache node for @np
 * @np:	CPU or cache node to start from
 *
 * Follows the "l2-cache" phandle first, then "next-level-cache".  As a
 * legacy fallback (PowerMac), a child node of device_type "cache" under a
 * "cpu" node is accepted.
 *
 * Return: the cache node with an incremented refcount (caller must
 * of_node_put() it), or NULL if there is no next cache level.
 */
struct device_node *of_find_next_cache_node(const struct device_node *np)
{
	struct device_node *child, *cache_node;

	cache_node = of_parse_phandle(np, "l2-cache", 0);
	if (!cache_node)
		cache_node = of_parse_phandle(np, "next-level-cache", 0);

	if (cache_node)
		return cache_node;

	/* OF on pmac has nodes instead of properties named "l2-cache" */
	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
		for_each_child_of_node(np, child)
			if (of_node_is_type(child, "cache"))
				/* child's ref from the iterator is kept */
				return child;

	return NULL;
}
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196int of_find_last_cache_level(unsigned int cpu)
2197{
2198 u32 cache_level = 0;
2199 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2200
2201 while (np) {
2202 prev = np;
2203 of_node_put(np);
2204 np = of_find_next_cache_node(np);
2205 }
2206
2207 of_property_read_u32(prev, "cache-level", &cache_level);
2208
2209 return cache_level;
2210}
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
/**
 * of_map_id - Translate an ID through a downstream mapping
 * @np:		device tree node containing the map property
 * @id:		device ID to map
 * @map_name:	name of the *-map property (e.g. "iommu-map", "msi-map")
 * @map_mask_name: name of the optional *-map-mask property, or NULL
 * @target:	optional; on entry either NULL or a pointer to a preset
 *		target node, on exit the matched phandle target
 * @id_out:	optional; receives the translated ID
 *
 * Walks @map_name entries of the form (id-base phandle out-base length)
 * and translates @id (after masking) into the target's ID space.  If no
 * map property exists, or no entry matches, @id is passed through
 * unchanged (identity mapping) when @id_out is set.
 *
 * Return: 0 on success (including identity fallback), -EINVAL on bad
 * arguments or a malformed map, -EFAULT on an id-base/mask conflict,
 * -ENODEV when a required node cannot be resolved.
 */
int of_map_id(struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	/* at least one of target/id_out must be wanted */
	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* a specific target was requested but there is no map */
		if (target)
			return -ENODEV;
		/* no map implies identity translation */
		*id_out = id;
		return 0;
	}

	/* each map entry is 4 cells: id-base, phandle, out-base, length */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * An optional *-map-mask property narrows which bits of @id
	 * participate in the lookup.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* id-base bits outside the mask can never match */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/*
			 * A preset *target filters matches to that node;
			 * otherwise the first match becomes the target.
			 */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			/* skip entries that map to a different node */
			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
2317