1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/bitmap.h>
20#include <linux/console.h>
21#include <linux/ctype.h>
22#include <linux/cpu.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_graph.h>
27#include <linux/spinlock.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/proc_fs.h>
31
32#include "of_private.h"
33
34LIST_HEAD(aliases_lookup);
35
36struct device_node *of_root;
37EXPORT_SYMBOL(of_root);
38struct device_node *of_chosen;
39EXPORT_SYMBOL(of_chosen);
40struct device_node *of_aliases;
41struct device_node *of_stdout;
42static const char *of_stdout_options;
43
44struct kset *of_kset;
45
46
47
48
49
50
51
52DEFINE_MUTEX(of_mutex);
53
54
55
56
57DEFINE_RAW_SPINLOCK(devtree_lock);
58
59bool of_node_name_eq(const struct device_node *np, const char *name)
60{
61 const char *node_name;
62 size_t len;
63
64 if (!np)
65 return false;
66
67 node_name = kbasename(np->full_name);
68 len = strchrnul(node_name, '@') - node_name;
69
70 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
71}
72EXPORT_SYMBOL(of_node_name_eq);
73
74bool of_node_name_prefix(const struct device_node *np, const char *prefix)
75{
76 if (!np)
77 return false;
78
79 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
80}
81EXPORT_SYMBOL(of_node_name_prefix);
82
83static bool __of_node_is_type(const struct device_node *np, const char *type)
84{
85 const char *match = __of_get_property(np, "device_type", NULL);
86
87 return np && match && type && !strcmp(match, type);
88}
89
90int of_bus_n_addr_cells(struct device_node *np)
91{
92 u32 cells;
93
94 for (; np; np = np->parent)
95 if (!of_property_read_u32(np, "#address-cells", &cells))
96 return cells;
97
98
99 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
100}
101
102int of_n_addr_cells(struct device_node *np)
103{
104 if (np->parent)
105 np = np->parent;
106
107 return of_bus_n_addr_cells(np);
108}
109EXPORT_SYMBOL(of_n_addr_cells);
110
111int of_bus_n_size_cells(struct device_node *np)
112{
113 u32 cells;
114
115 for (; np; np = np->parent)
116 if (!of_property_read_u32(np, "#size-cells", &cells))
117 return cells;
118
119
120 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
121}
122
123int of_n_size_cells(struct device_node *np)
124{
125 if (np->parent)
126 np = np->parent;
127
128 return of_bus_n_size_cells(np);
129}
130EXPORT_SYMBOL(of_n_size_cells);
131
#ifdef CONFIG_NUMA
/*
 * Default (weak) device-tree-node to NUMA node id mapping. Architectures
 * that describe NUMA topology in the device tree override this; the
 * fallback reports no node affinity.
 */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
138
139#define OF_PHANDLE_CACHE_BITS 7
140#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)
141
142static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
143
/* Map a phandle value to a slot index in phandle_cache[]. */
static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
148
149
150
151
152void __of_phandle_cache_inv_entry(phandle handle)
153{
154 u32 handle_hash;
155 struct device_node *np;
156
157 if (!handle)
158 return;
159
160 handle_hash = of_phandle_cache_hash(handle);
161
162 np = phandle_cache[handle_hash];
163 if (np && handle == np->phandle)
164 phandle_cache[handle_hash] = NULL;
165}
166
/*
 * One-time boot initialization of the device tree core: create the
 * /sys/firmware/devicetree kset, register every existing node in sysfs,
 * warm the phandle cache, and provide the legacy /proc/device-tree link.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* Pre-populate the cache; first node with a hash wins. */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
191
192static struct property *__of_find_property(const struct device_node *np,
193 const char *name, int *lenp)
194{
195 struct property *pp;
196
197 if (!np)
198 return NULL;
199
200 for (pp = np->properties; pp; pp = pp->next) {
201 if (of_prop_cmp(pp->name, name) == 0) {
202 if (lenp)
203 *lenp = pp->length;
204 break;
205 }
206 }
207
208 return pp;
209}
210
211struct property *of_find_property(const struct device_node *np,
212 const char *name,
213 int *lenp)
214{
215 struct property *pp;
216 unsigned long flags;
217
218 raw_spin_lock_irqsave(&devtree_lock, flags);
219 pp = __of_find_property(np, name, lenp);
220 raw_spin_unlock_irqrestore(&devtree_lock, flags);
221
222 return pp;
223}
224EXPORT_SYMBOL(of_find_property);
225
/*
 * Advance a depth-first traversal of the whole tree: first child, else
 * next sibling, else climb until an ancestor has a sibling. No locking
 * or refcounting; caller must hold devtree_lock.
 */
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}
242
243
244
245
246
247
248
249
250
/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration;
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	/* Take the new reference before dropping the old one. */
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
264
265
266
267
268
269const void *__of_get_property(const struct device_node *np,
270 const char *name, int *lenp)
271{
272 struct property *pp = __of_find_property(np, name, lenp);
273
274 return pp ? pp->value : NULL;
275}
276
277
278
279
280
281const void *of_get_property(const struct device_node *np, const char *name,
282 int *lenp)
283{
284 struct property *pp = of_find_property(np, name, lenp);
285
286 return pp ? pp->value : NULL;
287}
288EXPORT_SYMBOL(of_get_property);
289
290
291
292
293
294
295
296
297
/**
 * of_get_cpu_hwid - Get the hardware ID from a CPU device node
 *
 * @cpun: CPU number(logical index) for which device node is required
 * @thread: The local thread number to get the hardware ID for.
 *
 * Return: The hardware ID for the CPU node or ~0ULL if not found.
 */
u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
{
	const __be32 *cell;
	int ac, len;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, "reg", &len);
	/* Bail out unless "reg" holds at least (thread + 1) ids of ac cells. */
	if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
		return ~0ULL;

	cell += ac * thread;
	return of_read_number(cell, ac);
}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
327{
328 return (u32)phys_id == cpu;
329}
330
331
332
333
334
335
/*
 * __of_find_n_match_cpu_property()
 *
 * Returns true if the given @cpun node carries, in property @prop_name,
 * a hardware id matching logical cpu @cpu. The property may hold several
 * ids per node (one per SMT thread); the matching entry's index is
 * returned via @thread when non-NULL.
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	/* Special case: no property and zero address cells matches hwid 0. */
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}
361
362
363
364
365
366
367
/*
 * arch_find_n_match_cpu_physical_id - See if the given device node is
 * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
 * else false. If 'thread' is non-NULL, the local thread number within
 * the core is returned in it.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * On PowerPC, try the non-standard "ibm,ppc-interrupt-server#s"
	 * property first; fall back to the standard "reg" property.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
404{
405 struct device_node *cpun;
406
407 for_each_of_cpu_node(cpun) {
408 if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
409 return cpun;
410 }
411 return NULL;
412}
413EXPORT_SYMBOL(of_get_cpu_node);
414
415
416
417
418
419
420
421
422
423int of_cpu_node_to_id(struct device_node *cpu_node)
424{
425 int cpu;
426 bool found = false;
427 struct device_node *np;
428
429 for_each_possible_cpu(cpu) {
430 np = of_cpu_device_node_get(cpu);
431 found = (cpu_node == np);
432 of_node_put(np);
433 if (found)
434 return cpu;
435 }
436
437 return -ENODEV;
438}
439EXPORT_SYMBOL(of_cpu_node_to_id);
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
/**
 * of_get_cpu_state_node - Get CPU's idle state node at the given index
 * @cpu_node: The device node for the CPU
 * @index: The index in the list of the idle states
 *
 * Two generic methods can be used to describe a CPU's idle states:
 * the hierarchical layout via "power-domains" + "domain-idle-states",
 * or the flattened "cpu-idle-states" list. The hierarchical form is
 * tried first, then the flat form as fallback.
 *
 * Return: An idle state node if found at @index, with its refcount
 * incremented (use of_node_put() when done); NULL otherwise.
 */
struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
					  int index)
{
	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_args(cpu_node, "power-domains",
					"#power-domain-cells", 0, &args);
	if (!err) {
		struct device_node *state_node =
			of_parse_phandle(args.np, "domain-idle-states", index);

		of_node_put(args.np);
		if (state_node)
			return state_node;
	}

	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
}
EXPORT_SYMBOL(of_get_cpu_state_node);
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
/*
 * Check if the given "compat", "type" and "name" strings match the
 * properties of @device. A constraint is skipped when passed as NULL or
 * "". Returns 0 for no match, or a positive score where larger values
 * indicate better matches: a "compatible" hit dominates (weighted by
 * its index in the property), a "device_type" hit adds 2, a name hit
 * adds 1.
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				/* Earlier entries in "compatible" score higher. */
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
544
545
546
547
548int of_device_is_compatible(const struct device_node *device,
549 const char *compat)
550{
551 unsigned long flags;
552 int res;
553
554 raw_spin_lock_irqsave(&devtree_lock, flags);
555 res = __of_device_is_compatible(device, compat, NULL, NULL);
556 raw_spin_unlock_irqrestore(&devtree_lock, flags);
557 return res;
558}
559EXPORT_SYMBOL(of_device_is_compatible);
560
561
562
563
564
/*
 * Check @device against a NULL-terminated array of compatible strings
 * and return the best (highest) match score, or 0 if none match.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
582
583
584
585
586
587
588
589
/**
 * of_machine_is_compatible - Test the machine's root node compatibility
 * @compat: compatible string to look for in the root node
 *
 * Return: a positive match score if the root node carries @compat in
 * its "compatible" list, 0 otherwise (including when there is no root).
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int score = 0;

	if (root) {
		score = of_device_is_compatible(root, compat);
		of_node_put(root);
	}

	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
603
604
605
606
607
608
609
610
611
612static bool __of_device_is_available(const struct device_node *device)
613{
614 const char *status;
615 int statlen;
616
617 if (!device)
618 return false;
619
620 status = __of_get_property(device, "status", &statlen);
621 if (status == NULL)
622 return true;
623
624 if (statlen > 0) {
625 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
626 return true;
627 }
628
629 return false;
630}
631
632
633
634
635
636
637
638
639
640bool of_device_is_available(const struct device_node *device)
641{
642 unsigned long flags;
643 bool res;
644
645 raw_spin_lock_irqsave(&devtree_lock, flags);
646 res = __of_device_is_available(device);
647 raw_spin_unlock_irqrestore(&devtree_lock, flags);
648 return res;
649
650}
651EXPORT_SYMBOL(of_device_is_available);
652
653
654
655
656
657
658
659
660
661
662
663
664
665bool of_device_is_big_endian(const struct device_node *device)
666{
667 if (of_property_read_bool(device, "big-endian"))
668 return true;
669 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
670 of_property_read_bool(device, "native-endian"))
671 return true;
672 return false;
673}
674EXPORT_SYMBOL(of_device_is_big_endian);
675
676
677
678
679
680
681
682
683struct device_node *of_get_parent(const struct device_node *node)
684{
685 struct device_node *np;
686 unsigned long flags;
687
688 if (!node)
689 return NULL;
690
691 raw_spin_lock_irqsave(&devtree_lock, flags);
692 np = of_node_get(node->parent);
693 raw_spin_unlock_irqrestore(&devtree_lock, flags);
694 return np;
695}
696EXPORT_SYMBOL(of_get_parent);
697
698
699
700
701
702
703
704
705
706
707
708
709struct device_node *of_get_next_parent(struct device_node *node)
710{
711 struct device_node *parent;
712 unsigned long flags;
713
714 if (!node)
715 return NULL;
716
717 raw_spin_lock_irqsave(&devtree_lock, flags);
718 parent = of_node_get(node->parent);
719 of_node_put(node);
720 raw_spin_unlock_irqrestore(&devtree_lock, flags);
721 return parent;
722}
723EXPORT_SYMBOL(of_get_next_parent);
724
/*
 * Lockless sibling/child step: return @prev's next sibling (or @node's
 * first child when @prev is NULL) with its refcount incremented, and
 * drop the reference held on @prev. Caller must hold devtree_lock.
 */
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
/* Iterate over all children of @parent; caller must hold devtree_lock. */
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))
741
742
743
744
745
746
747
748
749
750
751struct device_node *of_get_next_child(const struct device_node *node,
752 struct device_node *prev)
753{
754 struct device_node *next;
755 unsigned long flags;
756
757 raw_spin_lock_irqsave(&devtree_lock, flags);
758 next = __of_get_next_child(node, prev);
759 raw_spin_unlock_irqrestore(&devtree_lock, flags);
760 return next;
761}
762EXPORT_SYMBOL(of_get_next_child);
763
764
765
766
767
768
769
770
771
/**
 * of_get_next_available_child - Find the next available child node
 * @node: parent node
 * @prev: previous child of the parent node, or NULL to get first;
 *	  of_node_put() is called on it
 *
 * Like of_get_next_child(), but skips children whose "status" property
 * marks them unavailable.
 *
 * Return: the next available child with its refcount incremented, or NULL.
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		/* of_node_get() can fail for a node being removed. */
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
794
795
796
797
798
799
800
801
802
/**
 * of_get_next_cpu_node - Iterate over cpu nodes under /cpus
 * @prev: previous cpu node, or NULL to get the first; of_node_put()
 *	  is called on it
 *
 * A child of /cpus counts as a cpu node when it is named "cpu" or has
 * device_type "cpu".
 *
 * Return: the next cpu node with its refcount incremented, or NULL
 * when the iteration is done.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Look up /cpus outside the lock; of_find_node_by_path locks itself. */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		/* of_node_get() can fail for a node being removed. */
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
831
832
833
834
835
836
837
838
839
840
841
842
843struct device_node *of_get_compatible_child(const struct device_node *parent,
844 const char *compatible)
845{
846 struct device_node *child;
847
848 for_each_child_of_node(parent, child) {
849 if (of_device_is_compatible(child, compatible))
850 break;
851 }
852
853 return child;
854}
855EXPORT_SYMBOL(of_get_compatible_child);
856
857
858
859
860
861
862
863
864
865
866
867
868struct device_node *of_get_child_by_name(const struct device_node *node,
869 const char *name)
870{
871 struct device_node *child;
872
873 for_each_child_of_node(node, child)
874 if (of_node_name_eq(child, name))
875 break;
876 return child;
877}
878EXPORT_SYMBOL(of_get_child_by_name);
879
/*
 * Match the first path component of @path (text up to the next '/' or
 * ':') against the basenames of @parent's children. Returns the
 * matching child with its refcount incremented, or NULL.
 * Caller must hold devtree_lock.
 */
struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}
897
/*
 * Walk a '/'-separated @path (optionally followed by ":options") down
 * from @node one component at a time, releasing the reference on each
 * intermediate node. Stops early once the path crosses the ':'
 * separator. Returns the final node (refcount held), or NULL.
 * Caller must hold devtree_lock.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Skip the '/' delimiter. */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *	  start with '/', the name of a property of the /aliases
 *	  node (an alias). In the case of an alias, the node
 *	  matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *	  an options string appended to the end of the path with
 *	  a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
980
981
982
983
984
985
986
987
988
989
990
991
992struct device_node *of_find_node_by_name(struct device_node *from,
993 const char *name)
994{
995 struct device_node *np;
996 unsigned long flags;
997
998 raw_spin_lock_irqsave(&devtree_lock, flags);
999 for_each_of_allnodes_from(from, np)
1000 if (of_node_name_eq(np, name) && of_node_get(np))
1001 break;
1002 of_node_put(from);
1003 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1004 return np;
1005}
1006EXPORT_SYMBOL(of_find_node_by_name);
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020struct device_node *of_find_node_by_type(struct device_node *from,
1021 const char *type)
1022{
1023 struct device_node *np;
1024 unsigned long flags;
1025
1026 raw_spin_lock_irqsave(&devtree_lock, flags);
1027 for_each_of_allnodes_from(from, np)
1028 if (__of_node_is_type(np, type) && of_node_get(np))
1029 break;
1030 of_node_put(from);
1031 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1032 return np;
1033}
1034EXPORT_SYMBOL(of_find_node_by_type);
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050struct device_node *of_find_compatible_node(struct device_node *from,
1051 const char *type, const char *compatible)
1052{
1053 struct device_node *np;
1054 unsigned long flags;
1055
1056 raw_spin_lock_irqsave(&devtree_lock, flags);
1057 for_each_of_allnodes_from(from, np)
1058 if (__of_device_is_compatible(np, compatible, type, NULL) &&
1059 of_node_get(np))
1060 break;
1061 of_node_put(from);
1062 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1063 return np;
1064}
1065EXPORT_SYMBOL(of_find_compatible_node);
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name
 * @from: node to start searching from, or NULL for the whole tree; the
 *	  node itself is not searched and of_node_put() is called on it
 * @prop_name: name of the property to look for
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1101
/*
 * Return the entry of @matches with the highest
 * __of_device_is_compatible() score against @node, or NULL when no
 * entry matches. The table is terminated by an entry with empty name,
 * type and compatible. Caller must hold devtree_lock.
 */
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}
1123
1124
1125
1126
1127
1128
1129
1130
1131const struct of_device_id *of_match_node(const struct of_device_id *matches,
1132 const struct device_node *node)
1133{
1134 const struct of_device_id *match;
1135 unsigned long flags;
1136
1137 raw_spin_lock_irqsave(&devtree_lock, flags);
1138 match = __of_match_node(matches, node);
1139 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1140 return match;
1141}
1142EXPORT_SYMBOL(of_match_node);
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table
 * @from: node to start searching from, or NULL for the whole tree; the
 *	  node itself is not searched and of_node_put() is called on it
 * @matches: array of of_device_id entries to search in
 * @match: if non-NULL, receives the entry which matched the node
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196int of_modalias_node(struct device_node *node, char *modalias, int len)
1197{
1198 const char *compatible, *p;
1199 int cplen;
1200
1201 compatible = of_get_property(node, "compatible", &cplen);
1202 if (!compatible || strlen(compatible) > cplen)
1203 return -ENODEV;
1204 p = strchr(compatible, ',');
1205 strlcpy(modalias, p ? p + 1 : compatible, len);
1206 return 0;
1207}
1208EXPORT_SYMBOL_GPL(of_modalias_node);
1209
1210
1211
1212
1213
1214
1215
1216
/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle: phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Fast path: one-entry-per-slot cache, validated by exact phandle. */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	/* Slow path: scan the whole tree and repopulate the cache slot. */
	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1248
1249void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1250{
1251 int i;
1252 printk("%s %pOF", msg, args->np);
1253 for (i = 0; i < args->args_count; i++) {
1254 const char delim = i ? ',' : ':';
1255
1256 pr_cont("%c%08x", delim, args->args[i]);
1257 }
1258 pr_cont("\n");
1259}
1260
/*
 * Initialize @it to iterate the phandle+args list in property
 * @list_name of @np. The argument count per entry comes from the
 * referenced node's @cells_name property, with @cell_count as the
 * fallback (or fixed count when @cells_name is NULL).
 * Returns 0, -EINVAL for an unusable cells configuration, or -ENOENT
 * when the property does not exist.
 */
int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1293
/*
 * Advance @it to the next phandle+args entry. On success, it->phandle,
 * it->node (refcounted, may be NULL for cells_name == NULL lists),
 * it->cur and it->cur_count describe the entry. Returns 0, -ENOENT at
 * the end of the list, or -EINVAL on malformed data.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Drop the reference taken for the previous entry's target node. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name is given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: %s = %d found %d\n",
			       it->parent, it->cells_name,
			       count, it->cell_count);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1373
1374int of_phandle_iterator_args(struct of_phandle_iterator *it,
1375 uint32_t *args,
1376 int size)
1377{
1378 int i, count;
1379
1380 count = it->cur_count;
1381
1382 if (WARN_ON(size < count))
1383 count = size;
1384
1385 for (i = 0; i < count; i++)
1386 args[i] = be32_to_cpup(it->cur++);
1387
1388 return count;
1389}
1390
/*
 * Common worker for the of_parse_phandle*() family: iterate the
 * phandle list in @list_name until entry @index and, when @out_args is
 * non-NULL, fill it with the target node (reference held) and argument
 * cells. Returns 0 on success, -ENOENT for an out-of-range index or an
 * empty (phandle == 0) entry, or the iterator's error code.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* Ownership of it.node moves to the caller. */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * The list was exhausted before reaching @index, or the iterator
	 * failed; rc holds -ENOENT or the iterator's error. Any node
	 * reference still held by the iterator is released here.
	 */
 err:
	of_node_put(it.node);
	return rc;
}
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453struct device_node *of_parse_phandle(const struct device_node *np,
1454 const char *phandle_name, int index)
1455{
1456 struct of_phandle_args args;
1457
1458 if (index < 0)
1459 return NULL;
1460
1461 if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
1462 index, &args))
1463 return NULL;
1464
1465 return args.np;
1466}
1467EXPORT_SYMBOL(of_parse_phandle);
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1502 const char *cells_name, int index,
1503 struct of_phandle_args *out_args)
1504{
1505 int cell_count = -1;
1506
1507 if (index < 0)
1508 return -EINVAL;
1509
1510
1511 if (!cells_name)
1512 cell_count = 0;
1513
1514 return __of_parse_phandle_with_args(np, list_name, cells_name,
1515 cell_count, index, out_args);
1516}
1517EXPORT_SYMBOL(of_parse_phandle_with_args);
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561int of_parse_phandle_with_args_map(const struct device_node *np,
1562 const char *list_name,
1563 const char *stem_name,
1564 int index, struct of_phandle_args *out_args)
1565{
1566 char *cells_name, *map_name = NULL, *mask_name = NULL;
1567 char *pass_name = NULL;
1568 struct device_node *cur, *new = NULL;
1569 const __be32 *map, *mask, *pass;
1570 static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
1571 static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
1572 __be32 initial_match_array[MAX_PHANDLE_ARGS];
1573 const __be32 *match_array = initial_match_array;
1574 int i, ret, map_len, match;
1575 u32 list_size, new_size;
1576
1577 if (index < 0)
1578 return -EINVAL;
1579
1580 cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
1581 if (!cells_name)
1582 return -ENOMEM;
1583
1584 ret = -ENOMEM;
1585 map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
1586 if (!map_name)
1587 goto free;
1588
1589 mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
1590 if (!mask_name)
1591 goto free;
1592
1593 pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
1594 if (!pass_name)
1595 goto free;
1596
1597 ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
1598 out_args);
1599 if (ret)
1600 goto free;
1601
1602
1603 cur = out_args->np;
1604 ret = of_property_read_u32(cur, cells_name, &list_size);
1605 if (ret < 0)
1606 goto put;
1607
1608
1609 for (i = 0; i < list_size; i++)
1610 initial_match_array[i] = cpu_to_be32(out_args->args[i]);
1611
1612 ret = -EINVAL;
1613 while (cur) {
1614
1615 map = of_get_property(cur, map_name, &map_len);
1616 if (!map) {
1617 ret = 0;
1618 goto free;
1619 }
1620 map_len /= sizeof(u32);
1621
1622
1623 mask = of_get_property(cur, mask_name, NULL);
1624 if (!mask)
1625 mask = dummy_mask;
1626
1627 match = 0;
1628 while (map_len > (list_size + 1) && !match) {
1629
1630 match = 1;
1631 for (i = 0; i < list_size; i++, map_len--)
1632 match &= !((match_array[i] ^ *map++) & mask[i]);
1633
1634 of_node_put(new);
1635 new = of_find_node_by_phandle(be32_to_cpup(map));
1636 map++;
1637 map_len--;
1638
1639
1640 if (!new)
1641 goto put;
1642
1643 if (!of_device_is_available(new))
1644 match = 0;
1645
1646 ret = of_property_read_u32(new, cells_name, &new_size);
1647 if (ret)
1648 goto put;
1649
1650
1651 if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
1652 goto put;
1653 if (map_len < new_size)
1654 goto put;
1655
1656
1657 map += new_size;
1658 map_len -= new_size;
1659 }
1660 if (!match)
1661 goto put;
1662
1663
1664 pass = of_get_property(cur, pass_name, NULL);
1665 if (!pass)
1666 pass = dummy_pass;
1667
1668
1669
1670
1671
1672
1673 match_array = map - new_size;
1674 for (i = 0; i < new_size; i++) {
1675 __be32 val = *(map - new_size + i);
1676
1677 if (i < list_size) {
1678 val &= ~pass[i];
1679 val |= cpu_to_be32(out_args->args[i]) & pass[i];
1680 }
1681
1682 out_args->args[i] = be32_to_cpu(val);
1683 }
1684 out_args->args_count = list_size = new_size;
1685
1686 out_args->np = new;
1687 of_node_put(cur);
1688 cur = new;
1689 }
1690put:
1691 of_node_put(cur);
1692 of_node_put(new);
1693free:
1694 kfree(mask_name);
1695 kfree(map_name);
1696 kfree(cells_name);
1697 kfree(pass_name);
1698
1699 return ret;
1700}
1701EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733int of_parse_phandle_with_fixed_args(const struct device_node *np,
1734 const char *list_name, int cell_count,
1735 int index, struct of_phandle_args *out_args)
1736{
1737 if (index < 0)
1738 return -EINVAL;
1739 return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
1740 index, out_args);
1741}
1742EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1760 const char *cells_name)
1761{
1762 struct of_phandle_iterator it;
1763 int rc, cur_index = 0;
1764
1765
1766
1767
1768
1769
1770
1771 if (!cells_name) {
1772 const __be32 *list;
1773 int size;
1774
1775 list = of_get_property(np, list_name, &size);
1776 if (!list)
1777 return -ENOENT;
1778
1779 return size / sizeof(*list);
1780 }
1781
1782 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
1783 if (rc)
1784 return rc;
1785
1786 while ((rc = of_phandle_iterator_next(&it)) == 0)
1787 cur_index += 1;
1788
1789 if (rc != -ENOENT)
1790 return rc;
1791
1792 return cur_index;
1793}
1794EXPORT_SYMBOL(of_count_phandle_with_args);
1795
1796
1797
1798
1799
1800
1801int __of_add_property(struct device_node *np, struct property *prop)
1802{
1803 struct property **next;
1804
1805 prop->next = NULL;
1806 next = &np->properties;
1807 while (*next) {
1808 if (strcmp(prop->name, (*next)->name) == 0)
1809
1810 return -EEXIST;
1811
1812 next = &(*next)->next;
1813 }
1814 *next = prop;
1815
1816 return 0;
1817}
1818
1819
1820
1821
1822
1823
/**
 * of_add_property - Add a property to a node
 * @np:   node to receive the property
 * @prop: property to add (linked into the node's list; not copied)
 *
 * Return: 0 on success, or -EEXIST (from __of_add_property()) if a
 * property with the same name already exists on @np.
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	mutex_lock(&of_mutex);

	/* devtree_lock serializes against concurrent property-list walkers */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update is done under of_mutex, outside the raw spinlock */
	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* notify reconfig listeners only after all locks have been dropped */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);
1846
1847int __of_remove_property(struct device_node *np, struct property *prop)
1848{
1849 struct property **next;
1850
1851 for (next = &np->properties; *next; next = &(*next)->next) {
1852 if (*next == prop)
1853 break;
1854 }
1855 if (*next == NULL)
1856 return -ENODEV;
1857
1858
1859 *next = prop->next;
1860 prop->next = np->deadprops;
1861 np->deadprops = prop;
1862
1863 return 0;
1864}
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
/**
 * of_remove_property - Remove a property from a node.
 * @np:   node to remove the property from
 * @prop: property to remove (must currently be on @np's list)
 *
 * The property is not actually freed: its data may still be referenced by
 * earlier of_get_property() callers, so __of_remove_property() parks it on
 * np->deadprops where lookups no longer find it.
 *
 * Return: 0 on success, -ENODEV if @prop is NULL or not found on @np.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	mutex_lock(&of_mutex);

	/* devtree_lock serializes against concurrent property-list walkers */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs teardown is done under of_mutex, outside the raw spinlock */
	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* notify reconfig listeners only after all locks have been dropped */
	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
1901
1902int __of_update_property(struct device_node *np, struct property *newprop,
1903 struct property **oldpropp)
1904{
1905 struct property **next, *oldprop;
1906
1907 for (next = &np->properties; *next; next = &(*next)->next) {
1908 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1909 break;
1910 }
1911 *oldpropp = oldprop = *next;
1912
1913 if (oldprop) {
1914
1915 newprop->next = oldprop->next;
1916 *next = newprop;
1917 oldprop->next = np->deadprops;
1918 np->deadprops = oldprop;
1919 } else {
1920
1921 newprop->next = NULL;
1922 *next = newprop;
1923 }
1924
1925 return 0;
1926}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
/**
 * of_update_property - Update a property on a node, creating it if missing.
 * @np:      node to update
 * @newprop: replacement property (linked into the node's list; not copied)
 *
 * If a property with the same name exists it is replaced and parked on
 * np->deadprops (its data may still be referenced by earlier callers);
 * otherwise @newprop is appended to the property list.
 *
 * Return: 0 on success, -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	if (!newprop->name)
		return -EINVAL;

	mutex_lock(&of_mutex);

	/* devtree_lock serializes against concurrent property-list walkers */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* sysfs update is done under of_mutex, outside the raw spinlock */
	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* notify reconfig listeners only after all locks have been dropped */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
1962
/*
 * of_alias_add - Record one alias on the global aliases_lookup list.
 * @ap:       alias_prop to fill in; its stem[] storage must hold at least
 *            @stem_len + 1 bytes (allocated by the caller; ap->alias is
 *            also set by the caller before this is invoked)
 * @np:       device node the alias resolves to
 * @id:       numeric suffix parsed from the alias name
 * @stem:     start of the alias name
 * @stem_len: number of leading characters of @stem that form the stem
 */
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	strncpy(ap->stem, stem, stem_len);
	/* strncpy() does not guarantee termination; terminate explicitly */
	ap->stem[stem_len] = 0;
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1985{
1986 struct property *pp;
1987
1988 of_aliases = of_find_node_by_path("/aliases");
1989 of_chosen = of_find_node_by_path("/chosen");
1990 if (of_chosen == NULL)
1991 of_chosen = of_find_node_by_path("/chosen@0");
1992
1993 if (of_chosen) {
1994
1995 const char *name = NULL;
1996
1997 if (of_property_read_string(of_chosen, "stdout-path", &name))
1998 of_property_read_string(of_chosen, "linux,stdout-path",
1999 &name);
2000 if (IS_ENABLED(CONFIG_PPC) && !name)
2001 of_property_read_string(of_aliases, "stdout", &name);
2002 if (name)
2003 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
2004 }
2005
2006 if (!of_aliases)
2007 return;
2008
2009 for_each_property_of_node(of_aliases, pp) {
2010 const char *start = pp->name;
2011 const char *end = start + strlen(start);
2012 struct device_node *np;
2013 struct alias_prop *ap;
2014 int id, len;
2015
2016
2017 if (!strcmp(pp->name, "name") ||
2018 !strcmp(pp->name, "phandle") ||
2019 !strcmp(pp->name, "linux,phandle"))
2020 continue;
2021
2022 np = of_find_node_by_path(pp->value);
2023 if (!np)
2024 continue;
2025
2026
2027
2028 while (isdigit(*(end-1)) && end > start)
2029 end--;
2030 len = end - start;
2031
2032 if (kstrtoint(end, 10, &id) < 0)
2033 continue;
2034
2035
2036 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2037 if (!ap)
2038 continue;
2039 memset(ap, 0, sizeof(*ap) + len + 1);
2040 ap->alias = start;
2041 of_alias_add(ap, np, id, start, len);
2042 }
2043}
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055int of_alias_get_id(struct device_node *np, const char *stem)
2056{
2057 struct alias_prop *app;
2058 int id = -ENODEV;
2059
2060 mutex_lock(&of_mutex);
2061 list_for_each_entry(app, &aliases_lookup, link) {
2062 if (strcmp(app->stem, stem) != 0)
2063 continue;
2064
2065 if (np == app->np) {
2066 id = app->id;
2067 break;
2068 }
2069 }
2070 mutex_unlock(&of_mutex);
2071
2072 return id;
2073}
2074EXPORT_SYMBOL_GPL(of_alias_get_id);
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089int of_alias_get_alias_list(const struct of_device_id *matches,
2090 const char *stem, unsigned long *bitmap,
2091 unsigned int nbits)
2092{
2093 struct alias_prop *app;
2094 int ret = 0;
2095
2096
2097 bitmap_zero(bitmap, nbits);
2098
2099 mutex_lock(&of_mutex);
2100 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2101 list_for_each_entry(app, &aliases_lookup, link) {
2102 pr_debug("%s: stem: %s, id: %d\n",
2103 __func__, app->stem, app->id);
2104
2105 if (strcmp(app->stem, stem) != 0) {
2106 pr_debug("%s: stem comparison didn't pass %s\n",
2107 __func__, app->stem);
2108 continue;
2109 }
2110
2111 if (of_match_node(matches, app->np)) {
2112 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2113
2114 if (app->id >= nbits) {
2115 pr_warn("%s: ID %d >= than bitmap field %d\n",
2116 __func__, app->id, nbits);
2117 ret = -EOVERFLOW;
2118 } else {
2119 set_bit(app->id, bitmap);
2120 }
2121 }
2122 }
2123 mutex_unlock(&of_mutex);
2124
2125 return ret;
2126}
2127EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2128
2129
2130
2131
2132
2133
2134
2135
2136int of_alias_get_highest_id(const char *stem)
2137{
2138 struct alias_prop *app;
2139 int id = -ENODEV;
2140
2141 mutex_lock(&of_mutex);
2142 list_for_each_entry(app, &aliases_lookup, link) {
2143 if (strcmp(app->stem, stem) != 0)
2144 continue;
2145
2146 if (app->id > id)
2147 id = app->id;
2148 }
2149 mutex_unlock(&of_mutex);
2150
2151 return id;
2152}
2153EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166bool of_console_check(struct device_node *dn, char *name, int index)
2167{
2168 if (!dn || dn != of_stdout || console_set_on_cmdline)
2169 return false;
2170
2171
2172
2173
2174
2175 return !add_preferred_console(name, index, (char *)of_stdout_options);
2176}
2177EXPORT_SYMBOL_GPL(of_console_check);
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187struct device_node *of_find_next_cache_node(const struct device_node *np)
2188{
2189 struct device_node *child, *cache_node;
2190
2191 cache_node = of_parse_phandle(np, "l2-cache", 0);
2192 if (!cache_node)
2193 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2194
2195 if (cache_node)
2196 return cache_node;
2197
2198
2199
2200
2201 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2202 for_each_child_of_node(np, child)
2203 if (of_node_is_type(child, "cache"))
2204 return child;
2205
2206 return NULL;
2207}
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218int of_find_last_cache_level(unsigned int cpu)
2219{
2220 u32 cache_level = 0;
2221 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2222
2223 while (np) {
2224 prev = np;
2225 of_node_put(np);
2226 np = of_find_next_cache_node(np);
2227 }
2228
2229 of_property_read_u32(prev, "cache-level", &cache_level);
2230
2231 return cache_level;
2232}
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
/**
 * of_map_id - Translate an ID through a downstream mapping table
 * @np:            device node carrying the map property (e.g. a root complex)
 * @id:            input ID to translate
 * @map_name:      name of the map property, e.g. "msi-map" or "iommu-map"
 * @map_mask_name: optional name of the mask property, e.g. "msi-map-mask"
 * @target:        optional in/out target node filter; if *@target is set on
 *                 entry, only map entries pointing at that node are used,
 *                 otherwise the first matching entry's node is returned here
 * @id_out:        optional output for the translated ID
 *
 * The map is a table of (id-base, phandle, out-base, length) quadruplets.
 * At least one of @target and @id_out must be non-NULL.  When the map
 * property is absent the ID passes through unchanged (unless @target was
 * requested, which then fails with -ENODEV).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int of_map_id(struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* no map: ID passes through, but a target lookup cannot */
		if (target)
			return -ENODEV;

		*id_out = id;
		return 0;
	}

	/* each map entry is four cells: id-base, phandle, out-base, length */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* default: all ID bits participate in the translation */
	map_mask = 0xffffffff;

	/* an explicit mask property narrows which ID bits are compared */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* an id-base with bits outside the mask can never match */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/*
			 * If a target was preset, keep its reference and
			 * skip entries that point elsewhere; otherwise
			 * report this entry's node (reference held).
			 */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* no entry matched: hand the ID through untranslated */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
2339