1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/bitmap.h>
20#include <linux/console.h>
21#include <linux/ctype.h>
22#include <linux/cpu.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_graph.h>
27#include <linux/spinlock.h>
28#include <linux/slab.h>
29#include <linux/string.h>
30#include <linux/proc_fs.h>
31
32#include "of_private.h"
33
LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases and to hold off addition of nodes to sysfs.
 * Held across tree-modification sequences in this file (see of_add_property()
 * and of_core_init()).
 */
DEFINE_MUTEX(of_mutex);

/*
 * Protects tree topology (parent/child/sibling links) and property lists;
 * taken with raw_spin_lock_irqsave() throughout this file.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);
58
59bool of_node_name_eq(const struct device_node *np, const char *name)
60{
61 const char *node_name;
62 size_t len;
63
64 if (!np)
65 return false;
66
67 node_name = kbasename(np->full_name);
68 len = strchrnul(node_name, '@') - node_name;
69
70 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
71}
72EXPORT_SYMBOL(of_node_name_eq);
73
74bool of_node_name_prefix(const struct device_node *np, const char *prefix)
75{
76 if (!np)
77 return false;
78
79 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
80}
81EXPORT_SYMBOL(of_node_name_prefix);
82
83static bool __of_node_is_type(const struct device_node *np, const char *type)
84{
85 const char *match = __of_get_property(np, "device_type", NULL);
86
87 return np && match && type && !strcmp(match, type);
88}
89
90int of_bus_n_addr_cells(struct device_node *np)
91{
92 u32 cells;
93
94 for (; np; np = np->parent)
95 if (!of_property_read_u32(np, "#address-cells", &cells))
96 return cells;
97
98
99 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
100}
101
102int of_n_addr_cells(struct device_node *np)
103{
104 if (np->parent)
105 np = np->parent;
106
107 return of_bus_n_addr_cells(np);
108}
109EXPORT_SYMBOL(of_n_addr_cells);
110
111int of_bus_n_size_cells(struct device_node *np)
112{
113 u32 cells;
114
115 for (; np; np = np->parent)
116 if (!of_property_read_u32(np, "#size-cells", &cells))
117 return cells;
118
119
120 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
121}
122
123int of_n_size_cells(struct device_node *np)
124{
125 if (np->parent)
126 np = np->parent;
127
128 return of_bus_n_size_cells(np);
129}
130EXPORT_SYMBOL(of_n_size_cells);
131
#ifdef CONFIG_NUMA
/* Default (weak) hook: no NUMA affinity known for device nodes. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
138
/*
 * Direct-mapped, hash-indexed cache of the most recently looked-up node per
 * phandle bucket; accelerates of_find_node_by_phandle().  Updated under
 * devtree_lock in this file.
 */
#define OF_PHANDLE_CACHE_BITS 7
#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

/* Map a phandle value to its cache slot. */
static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}
148
149
150
151
152void __of_phandle_cache_inv_entry(phandle handle)
153{
154 u32 handle_hash;
155 struct device_node *np;
156
157 if (!handle)
158 return;
159
160 handle_hash = of_phandle_cache_hash(handle);
161
162 np = phandle_cache[handle_hash];
163 if (np && handle == np->phandle)
164 phandle_cache[handle_hash] = NULL;
165}
166
/* Register the devicetree in sysfs and seed the phandle cache at boot. */
void __init of_core_init(void)
{
	struct device_node *np;

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		/* Seed the cache; first node wins on a hash collision */
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
191
192static struct property *__of_find_property(const struct device_node *np,
193 const char *name, int *lenp)
194{
195 struct property *pp;
196
197 if (!np)
198 return NULL;
199
200 for (pp = np->properties; pp; pp = pp->next) {
201 if (of_prop_cmp(pp->name, name) == 0) {
202 if (lenp)
203 *lenp = pp->length;
204 break;
205 }
206 }
207
208 return pp;
209}
210
211struct property *of_find_property(const struct device_node *np,
212 const char *name,
213 int *lenp)
214{
215 struct property *pp;
216 unsigned long flags;
217
218 raw_spin_lock_irqsave(&devtree_lock, flags);
219 pp = __of_find_property(np, name, lenp);
220 raw_spin_unlock_irqrestore(&devtree_lock, flags);
221
222 return pp;
223}
224EXPORT_SYMBOL(of_find_property);
225
/*
 * Step a depth-first pre-order walk of the whole tree: root first, then a
 * node's children before its siblings.  No reference counting is done;
 * caller must hold devtree_lock.  Returns NULL at the end of the walk.
 */
struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a node with an unvisited sibling */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* NULL once the root is reached again */
	}
	return np;
}
242
243
244
245
246
247
248
249
250
/**
 * of_find_all_nodes - Get the next node in the global list
 * @prev: previous node, or NULL to start; its reference is dropped here
 *
 * Return: the next node with its refcount incremented; use of_node_put()
 * when done, or pass it back in to continue iterating.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
264
265
266
267
268
269const void *__of_get_property(const struct device_node *np,
270 const char *name, int *lenp)
271{
272 struct property *pp = __of_find_property(np, name, lenp);
273
274 return pp ? pp->value : NULL;
275}
276
277
278
279
280
281const void *of_get_property(const struct device_node *np, const char *name,
282 int *lenp)
283{
284 struct property *pp = of_find_property(np, name, lenp);
285
286 return pp ? pp->value : NULL;
287}
288EXPORT_SYMBOL(of_get_property);
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
/*
 * Default (weak) architecture hook: a CPU matches when the low 32 bits of
 * its physical id equal the logical cpu number.  Architectures with other
 * numbering schemes override this.
 */
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return (u32)phys_id == cpu;
}
308
309
310
311
312
313
/*
 * Scan @prop_name of cpu node @cpun for an entry whose hardware id matches
 * logical cpu @cpu.  Each entry is #address-cells wide; on a match the
 * entry index (hardware thread id) is stored in *@thread when non-NULL.
 *
 * A missing property together with zero address cells falls back to
 * matching physical id 0 via arch_match_cpu_phys_id().
 */
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
			const char *prop_name, int cpu, unsigned int *thread)
{
	const __be32 *cell;
	int ac, prop_len, tid;
	u64 hwid;

	ac = of_n_addr_cells(cpun);
	cell = of_get_property(cpun, prop_name, &prop_len);
	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
		return true;
	if (!cell || !ac)
		return false;
	prop_len /= sizeof(*cell) * ac;	/* byte length -> entry count */
	for (tid = 0; tid < prop_len; tid++) {
		hwid = of_read_number(cell, ac);
		if (arch_match_cpu_phys_id(cpu, hwid)) {
			if (thread)
				*thread = tid;
			return true;
		}
		cell += ac;
	}
	return false;
}
339
340
341
342
343
344
345
/*
 * Default (weak) hook used by of_get_cpu_node(): decide whether cpu node
 * @cpun corresponds to logical cpu @cpu, reporting the thread index in
 * *@thread when non-NULL.
 */
bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread)
{
	/*
	 * On PPC, "ibm,ppc-interrupt-server#s" takes precedence over the
	 * standard "reg" property for identifying hardware threads.
	 */
	if (IS_ENABLED(CONFIG_PPC) &&
	    __of_find_n_match_cpu_property(cpun,
					   "ibm,ppc-interrupt-server#s",
					   cpu, thread))
		return true;

	return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
}
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
/**
 * of_get_cpu_node - Get the device node for a logical CPU
 * @cpu: logical cpu number
 * @thread: optional out-parameter for the hardware thread index
 *
 * Return: the matching cpu node (reference held by the iterator and kept
 * on return), or NULL when no node matches.
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
	struct device_node *cpun;

	for_each_of_cpu_node(cpun) {
		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
			return cpun;
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
392
393
394
395
396
397
398
399
400
401int of_cpu_node_to_id(struct device_node *cpu_node)
402{
403 int cpu;
404 bool found = false;
405 struct device_node *np;
406
407 for_each_possible_cpu(cpu) {
408 np = of_cpu_device_node_get(cpu);
409 found = (cpu_node == np);
410 of_node_put(np);
411 if (found)
412 return cpu;
413 }
414
415 return -ENODEV;
416}
417EXPORT_SYMBOL(of_cpu_node_to_id);
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
/**
 * of_get_cpu_state_node - Get the idle-state node at @index for a cpu
 * @cpu_node: cpu device node
 * @index: index into the idle-state list
 *
 * Prefers the "domain-idle-states" of the cpu's PM domain (first entry of
 * "power-domains"); falls back to the cpu's own "cpu-idle-states".
 *
 * Return: the state node with a reference held, or NULL.
 */
struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
					  int index)
{
	struct of_phandle_args args;
	int err;

	err = of_parse_phandle_with_args(cpu_node, "power-domains",
					 "#power-domain-cells", 0, &args);
	if (!err) {
		struct device_node *state_node =
			of_parse_phandle(args.np, "domain-idle-states", index);

		of_node_put(args.np);
		if (state_node)
			return state_node;
	}

	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
}
EXPORT_SYMBOL(of_get_cpu_state_node);
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
/*
 * Score how well @device matches the given @compat/@type/@name triple.
 * Higher is better; 0 means no match.  A compatible-string match dominates
 * (earlier entries in the node's "compatible" list score higher), a
 * device_type match adds 2, a name match adds 1.  Any requested field that
 * fails to match returns 0.  Caller must hold devtree_lock.
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}
522
523
524
525
526int of_device_is_compatible(const struct device_node *device,
527 const char *compat)
528{
529 unsigned long flags;
530 int res;
531
532 raw_spin_lock_irqsave(&devtree_lock, flags);
533 res = __of_device_is_compatible(device, compat, NULL, NULL);
534 raw_spin_unlock_irqrestore(&devtree_lock, flags);
535 return res;
536}
537EXPORT_SYMBOL(of_device_is_compatible);
538
539
540
541
542
/*
 * Best match score of @device against a NULL-terminated array of
 * compatible strings; 0 when nothing matches or @compat is NULL.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
560
561
562
563
564
565
566
567
/**
 * of_machine_is_compatible - Test the root node's compatible list
 * @compat: compatible string to match against the machine (root) node
 *
 * Return: positive match score when the machine is compatible, 0 otherwise
 * (including when no root node exists).
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int score;

	if (!root)
		return 0;

	score = of_device_is_compatible(root, compat);
	of_node_put(root);
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
581
582
583
584
585
586
587
588
589
590static bool __of_device_is_available(const struct device_node *device)
591{
592 const char *status;
593 int statlen;
594
595 if (!device)
596 return false;
597
598 status = __of_get_property(device, "status", &statlen);
599 if (status == NULL)
600 return true;
601
602 if (statlen > 0) {
603 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
604 return true;
605 }
606
607 return false;
608}
609
610
611
612
613
614
615
616
617
618bool of_device_is_available(const struct device_node *device)
619{
620 unsigned long flags;
621 bool res;
622
623 raw_spin_lock_irqsave(&devtree_lock, flags);
624 res = __of_device_is_available(device);
625 raw_spin_unlock_irqrestore(&devtree_lock, flags);
626 return res;
627
628}
629EXPORT_SYMBOL(of_device_is_available);
630
631
632
633
634
635
636
637
638
639
640
641
642
643bool of_device_is_big_endian(const struct device_node *device)
644{
645 if (of_property_read_bool(device, "big-endian"))
646 return true;
647 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
648 of_property_read_bool(device, "native-endian"))
649 return true;
650 return false;
651}
652EXPORT_SYMBOL(of_device_is_big_endian);
653
654
655
656
657
658
659
660
661struct device_node *of_get_parent(const struct device_node *node)
662{
663 struct device_node *np;
664 unsigned long flags;
665
666 if (!node)
667 return NULL;
668
669 raw_spin_lock_irqsave(&devtree_lock, flags);
670 np = of_node_get(node->parent);
671 raw_spin_unlock_irqrestore(&devtree_lock, flags);
672 return np;
673}
674EXPORT_SYMBOL(of_get_parent);
675
676
677
678
679
680
681
682
683
684
685
686
687struct device_node *of_get_next_parent(struct device_node *node)
688{
689 struct device_node *parent;
690 unsigned long flags;
691
692 if (!node)
693 return NULL;
694
695 raw_spin_lock_irqsave(&devtree_lock, flags);
696 parent = of_node_get(node->parent);
697 of_node_put(node);
698 raw_spin_unlock_irqrestore(&devtree_lock, flags);
699 return parent;
700}
701EXPORT_SYMBOL(of_get_next_parent);
702
/*
 * Step to the next child of @node, dropping the caller's reference on
 * @prev and taking one on the returned child.  Caller must hold
 * devtree_lock.  (next may be NULL here; the get/put helpers accept NULL,
 * as used throughout this file.)
 */
static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))
719
720
721
722
723
724
725
726
727
728
729struct device_node *of_get_next_child(const struct device_node *node,
730 struct device_node *prev)
731{
732 struct device_node *next;
733 unsigned long flags;
734
735 raw_spin_lock_irqsave(&devtree_lock, flags);
736 next = __of_get_next_child(node, prev);
737 raw_spin_unlock_irqrestore(&devtree_lock, flags);
738 return next;
739}
740EXPORT_SYMBOL(of_get_next_child);
741
742
743
744
745
746
747
748
749
/**
 * of_get_next_available_child - Iterate over available children only
 * @node: parent node
 * @prev: previous child, or NULL to start; its reference is dropped
 *
 * Skips children whose "status" marks them disabled.
 *
 * Return: the next available child with refcount incremented, or NULL.
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!__of_device_is_available(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
772
773
774
775
776
777
778
779
780
/**
 * of_get_next_cpu_node - Iterate over the children of /cpus that are cpus
 * @prev: previous cpu node, or NULL to start; its reference is dropped
 *
 * A child counts as a cpu when its name is "cpu" or its device_type is
 * "cpu".
 *
 * Return: the next cpu node with refcount incremented, or NULL.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	/* Only look up /cpus when starting a fresh iteration */
	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);	/* drop the lookup reference */
	}
	for (; next; next = next->sibling) {
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);
809
810
811
812
813
814
815
816
817
818
819
820
/**
 * of_get_compatible_child - Find the first child matching a compatible
 * @parent: parent node
 * @compatible: compatible string to match
 *
 * Return: the first matching child with refcount incremented (the
 * for_each_child_of_node() iterator keeps the reference on break), or
 * NULL when none matches.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);
834
835
836
837
838
839
840
841
842
843
844
845
/**
 * of_get_child_by_name - Find a child by name (unit address excluded)
 * @node: parent node
 * @name: child name to match
 *
 * Return: the matching child with refcount incremented, or NULL.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);
857
/*
 * Match the first path component of @path (up to '/' or ':') against the
 * full names (including any unit address) of @parent's children.
 * Returns the matching child (reference moved by the iterator), or NULL.
 */
struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}
875
/*
 * Resolve @path component by component starting from @node, transferring
 * the node reference along the way.  Resolution stops at an optional ':'
 * (options separator).  Returns the final node, or NULL on a dead end.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
/**
 * of_find_node_opts_by_path - Find a node by full path or alias
 * @path: full path ("/soc/...") or alias ("serial0"), optionally followed
 *	by ':' and device options
 * @opts: optional out-parameter; set to the text after ':', or NULL
 *
 * A leading non-'/' component is looked up in the /aliases node and
 * replaced by the aliased path before resolution.
 *
 * Return: the node with refcount incremented, or NULL.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
958
959
960
961
962
963
964
965
966
967
968
969
/**
 * of_find_node_by_name - Find a node by name anywhere in the tree
 * @from: node to start after, or NULL for the whole tree; its reference
 *	is dropped
 * @name: node name to match
 *
 * Return: the matching node with refcount incremented, or NULL.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
985
986
987
988
989
990
991
992
993
994
995
996
997
/**
 * of_find_node_by_type - Find a node by device_type anywhere in the tree
 * @from: node to start after, or NULL for the whole tree; its reference
 *	is dropped
 * @type: device_type value to match
 *
 * Return: the matching node with refcount incremented, or NULL.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
/**
 * of_find_compatible_node - Find a node by compatible (and optional type)
 * @from: node to start after, or NULL for the whole tree; its reference
 *	is dropped
 * @type: device_type to additionally require, or NULL
 * @compatible: compatible string to match
 *
 * Return: the matching node with refcount incremented, or NULL.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
/**
 * of_find_node_with_property - Find a node that has a named property
 * @from: node to start after, or NULL for the whole tree; its reference
 *	is dropped
 * @prop_name: property name that must exist on the node
 *
 * Return: the matching node with refcount incremented, or NULL.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);
1079
/*
 * Score @node against every entry of the @matches table (terminated by an
 * all-empty entry) and return the highest-scoring entry, or NULL when
 * nothing matches.  Caller must hold devtree_lock.
 */
static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}
1101
1102
1103
1104
1105
1106
1107
1108
1109const struct of_device_id *of_match_node(const struct of_device_id *matches,
1110 const struct device_node *node)
1111{
1112 const struct of_device_id *match;
1113 unsigned long flags;
1114
1115 raw_spin_lock_irqsave(&devtree_lock, flags);
1116 match = __of_match_node(matches, node);
1117 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1118 return match;
1119}
1120EXPORT_SYMBOL(of_match_node);
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
/**
 * of_find_matching_node_and_match - Find a node matching an id table
 * @from: node to start after, or NULL for the whole tree; its reference
 *	is dropped
 * @matches: match table to score nodes against
 * @match: optional out-parameter receiving the matching table entry
 *
 * Return: the matching node with refcount incremented, or NULL.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
/**
 * of_modalias_node - Lookup an appropriate modalias for a device node
 * @node: device tree node
 * @modalias: buffer the modalias value is copied into
 * @len: size of @modalias
 *
 * Takes the first entry of the node's "compatible" property and strips any
 * manufacturer prefix (text up to and including the first ',').
 *
 * Return: 0 on success, -ENODEV when "compatible" is missing or not
 * NUL-terminated within its reported length.
 *
 * NOTE(review): strlcpy() is deprecated in the kernel in favour of
 * strscpy() — consider converting when next touching this function.
 */
int of_modalias_node(struct device_node *node, char *modalias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strlcpy(modalias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);
1187
1188
1189
1190
1191
1192
1193
1194
/**
 * of_find_node_by_phandle - Find a node by its phandle value
 * @handle: phandle to look up (0 never matches)
 *
 * Tries the phandle cache first, then falls back to a full tree scan,
 * skipping detached nodes and refilling the cache on a hit.
 *
 * Return: the node with refcount incremented, or NULL.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Fast path: cache hit */
	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
1226
1227void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1228{
1229 int i;
1230 printk("%s %pOF", msg, args->np);
1231 for (i = 0; i < args->args_count; i++) {
1232 const char delim = i ? ',' : ':';
1233
1234 pr_cont("%c%08x", delim, args->args[i]);
1235 }
1236 pr_cont("\n");
1237}
1238
/**
 * of_phandle_iterator_init - Prepare an iterator over a phandle list
 * @it: iterator to initialise
 * @np: node holding the list property
 * @list_name: name of the phandle-list property
 * @cells_name: name of the "#*-cells" property on targets, or NULL
 * @cell_count: fixed argument cell count; negative means "must come from
 *	@cells_name"
 *
 * Return: 0 on success, -EINVAL on an invalid cells specification,
 * -ENOENT when the list property is absent.
 */
int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1271
1272int of_phandle_iterator_next(struct of_phandle_iterator *it)
1273{
1274 uint32_t count = 0;
1275
1276 if (it->node) {
1277 of_node_put(it->node);
1278 it->node = NULL;
1279 }
1280
1281 if (!it->cur || it->phandle_end >= it->list_end)
1282 return -ENOENT;
1283
1284 it->cur = it->phandle_end;
1285
1286
1287 it->phandle = be32_to_cpup(it->cur++);
1288
1289 if (it->phandle) {
1290
1291
1292
1293
1294
1295 it->node = of_find_node_by_phandle(it->phandle);
1296
1297 if (it->cells_name) {
1298 if (!it->node) {
1299 pr_err("%pOF: could not find phandle %d\n",
1300 it->parent, it->phandle);
1301 goto err;
1302 }
1303
1304 if (of_property_read_u32(it->node, it->cells_name,
1305 &count)) {
1306
1307
1308
1309
1310
1311 if (it->cell_count >= 0) {
1312 count = it->cell_count;
1313 } else {
1314 pr_err("%pOF: could not get %s for %pOF\n",
1315 it->parent,
1316 it->cells_name,
1317 it->node);
1318 goto err;
1319 }
1320 }
1321 } else {
1322 count = it->cell_count;
1323 }
1324
1325
1326
1327
1328
1329 if (it->cur + count > it->list_end) {
1330 pr_err("%pOF: %s = %d found %d\n",
1331 it->parent, it->cells_name,
1332 count, it->cell_count);
1333 goto err;
1334 }
1335 }
1336
1337 it->phandle_end = it->cur + count;
1338 it->cur_count = count;
1339
1340 return 0;
1341
1342err:
1343 if (it->node) {
1344 of_node_put(it->node);
1345 it->node = NULL;
1346 }
1347
1348 return -EINVAL;
1349}
1350EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1351
1352int of_phandle_iterator_args(struct of_phandle_iterator *it,
1353 uint32_t *args,
1354 int size)
1355{
1356 int i, count;
1357
1358 count = it->cur_count;
1359
1360 if (WARN_ON(size < count))
1361 count = size;
1362
1363 for (i = 0; i < count; i++)
1364 args[i] = be32_to_cpup(it->cur++);
1365
1366 return count;
1367}
1368
/*
 * Common worker for the of_parse_phandle*() family: walk the phandle list
 * @list_name on @np until entry @index, filling @out_args (node reference
 * transferred to out_args->np) or just dropping the reference when
 * @out_args is NULL.  Returns 0 on success, -ENOENT for an out-of-range
 * index or an empty (phandle == 0) entry, or the iterator's error code.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! Return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431struct device_node *of_parse_phandle(const struct device_node *np,
1432 const char *phandle_name, int index)
1433{
1434 struct of_phandle_args args;
1435
1436 if (index < 0)
1437 return NULL;
1438
1439 if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
1440 index, &args))
1441 return NULL;
1442
1443 return args.np;
1444}
1445EXPORT_SYMBOL(of_parse_phandle);
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1480 const char *cells_name, int index,
1481 struct of_phandle_args *out_args)
1482{
1483 int cell_count = -1;
1484
1485 if (index < 0)
1486 return -EINVAL;
1487
1488
1489 if (!cells_name)
1490 cell_count = 0;
1491
1492 return __of_parse_phandle_with_args(np, list_name, cells_name,
1493 cell_count, index, out_args);
1494}
1495EXPORT_SYMBOL(of_parse_phandle_with_args);
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
/**
 * of_parse_phandle_with_args_map - Resolve a phandle entry through "-map"s
 * @np: node holding the list property
 * @list_name: property name of the phandle list
 * @stem_name: stem used to derive "#<stem>-cells", "<stem>-map",
 *	"<stem>-map-mask" and "<stem>-map-pass-thru" property names
 * @index: 0-based entry index
 * @out_args: filled with the final target node (reference held) and the
 *	translated arguments
 *
 * Parses the entry like of_parse_phandle_with_args(), then repeatedly
 * translates it through any "<stem>-map" tables found on the target
 * chain, applying the map mask and pass-thru bits, until a node without a
 * map is reached.
 *
 * Return: 0 on success, -EINVAL/-ENOENT/-ENOMEM on failure.
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name, *map_name = NULL, *mask_name = NULL;
	char *pass_name = NULL;
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	if (!cells_name)
		return -ENOMEM;

	ret = -ENOMEM;
	map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	if (!map_name)
		goto free;

	mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	if (!mask_name)
		goto free;

	pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	if (!pass_name)
		goto free;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		goto free;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			/* No more maps: translation chain complete */
			ret = 0;
			goto free;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
	}
put:
	of_node_put(cur);
	of_node_put(new);
free:
	kfree(mask_name);
	kfree(map_name);
	kfree(cells_name);
	kfree(pass_name);

	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711int of_parse_phandle_with_fixed_args(const struct device_node *np,
1712 const char *list_name, int cell_count,
1713 int index, struct of_phandle_args *out_args)
1714{
1715 if (index < 0)
1716 return -EINVAL;
1717 return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
1718 index, out_args);
1719}
1720EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
/**
 * of_count_phandle_with_args - Count the entries of a phandle list
 * @np: node holding the list property
 * @list_name: property name of the phandle list
 * @cells_name: "#*-cells" property on targets, or NULL for bare phandles
 *
 * Return: number of entries, or negative errno (-ENOENT when the property
 * is absent, the iterator's error for malformed data).
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and no arguments are to consider. So we don't iterate through
	 * the list but just use the length to determine the phandle count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
1773
1774
1775
1776
1777
1778
1779int __of_add_property(struct device_node *np, struct property *prop)
1780{
1781 struct property **next;
1782
1783 prop->next = NULL;
1784 next = &np->properties;
1785 while (*next) {
1786 if (strcmp(prop->name, (*next)->name) == 0)
1787
1788 return -EEXIST;
1789
1790 next = &(*next)->next;
1791 }
1792 *next = prop;
1793
1794 return 0;
1795}
1796
1797
1798
1799
1800
1801
/**
 * of_add_property - Add a property to a device tree node
 * @np:   node to receive the property
 * @prop: property to add (its list linkage is overwritten)
 *
 * Return: 0 on success, or the error from __of_add_property()
 * (-EEXIST if a property with the same name already exists on @np).
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	/* of_mutex serialises tree updates with sysfs and notifiers. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects the raw property-list manipulation. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_add_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* Mirror the new property into sysfs only if the add succeeded. */
	if (!rc)
		__of_add_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notify after dropping of_mutex — presumably so listeners may take
	 * it themselves; confirm before reordering. */
	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);
1824
1825int __of_remove_property(struct device_node *np, struct property *prop)
1826{
1827 struct property **next;
1828
1829 for (next = &np->properties; *next; next = &(*next)->next) {
1830 if (*next == prop)
1831 break;
1832 }
1833 if (*next == NULL)
1834 return -ENODEV;
1835
1836
1837 *next = prop->next;
1838 prop->next = np->deadprops;
1839 np->deadprops = prop;
1840
1841 return 0;
1842}
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
/**
 * of_remove_property - Remove a property from a node.
 * @np:   node to remove the property from
 * @prop: property to remove
 *
 * The property is not actually freed: __of_remove_property() moves it to
 * the node's deadprops list, since pointers to its value may still be
 * held by earlier get-property callers.
 *
 * Return: 0 on success, -ENODEV if @prop is NULL or not attached to @np.
 */
int of_remove_property(struct device_node *np, struct property *prop)
{
	unsigned long flags;
	int rc;

	if (!prop)
		return -ENODEV;

	/* of_mutex serialises tree updates with sysfs and notifiers. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects the raw list manipulation. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_remove_property(np, prop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* Tear down the sysfs mirror only if the unlink succeeded. */
	if (!rc)
		__of_remove_property_sysfs(np, prop);

	mutex_unlock(&of_mutex);

	/* Notify after dropping of_mutex — presumably so listeners may take
	 * it themselves; confirm before reordering. */
	if (!rc)
		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_remove_property);
1879
/*
 * __of_update_property - replace (or add) a property on @np.
 *
 * Looks up an existing property by name and splices @newprop into its
 * place; the displaced property is reported through @oldpropp and parked
 * on np->deadprops rather than freed, since pointers to its value may
 * still be live.  If no property of that name exists, @newprop is
 * appended and *@oldpropp is NULL.
 *
 * NOTE(review): the splice order (set newprop->next before updating
 * *next) looks deliberate — possibly for readers traversing the list
 * without the lock; confirm before reordering statements.
 */
int __of_update_property(struct device_node *np, struct property *newprop,
		struct property **oldpropp)
{
	struct property **next, *oldprop;

	/* Find an existing property with the same name. */
	for (next = &np->properties; *next; next = &(*next)->next) {
		if (of_prop_cmp((*next)->name, newprop->name) == 0)
			break;
	}
	/* Report the replaced property (NULL when this was a plain add). */
	*oldpropp = oldprop = *next;

	if (oldprop) {
		/* replace the node */
		newprop->next = oldprop->next;
		*next = newprop;
		oldprop->next = np->deadprops;
		np->deadprops = oldprop;
	} else {
		/* new node: append at the tail */
		newprop->next = NULL;
		*next = newprop;
	}

	return 0;
}
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
/*
 * of_update_property - Update a property in a node; if it does not exist,
 * add it.
 *
 * The old property is not freed: __of_update_property() moves it to the
 * node's deadprops list, since pointers to its value may still be held
 * by earlier get-property callers.
 *
 * Returns 0 on success, -EINVAL if @newprop has no name.
 */
int of_update_property(struct device_node *np, struct property *newprop)
{
	struct property *oldprop;
	unsigned long flags;
	int rc;

	/* A property must at least have a name. */
	if (!newprop->name)
		return -EINVAL;

	/* of_mutex serialises tree updates with sysfs and notifiers. */
	mutex_lock(&of_mutex);

	/* devtree_lock protects the raw property-list splice. */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	rc = __of_update_property(np, newprop, &oldprop);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	/* Keep the sysfs mirror in sync (oldprop is NULL on a plain add). */
	if (!rc)
		__of_update_property_sysfs(np, newprop, oldprop);

	mutex_unlock(&of_mutex);

	/* Notify after dropping of_mutex; listeners see both properties. */
	if (!rc)
		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);

	return rc;
}
1941static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1942 int id, const char *stem, int stem_len)
1943{
1944 ap->np = np;
1945 ap->id = id;
1946 strncpy(ap->stem, stem, stem_len);
1947 ap->stem[stem_len] = 0;
1948 list_add_tail(&ap->link, &aliases_lookup);
1949 pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1950 ap->alias, ap->stem, ap->id, np);
1951}
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1963{
1964 struct property *pp;
1965
1966 of_aliases = of_find_node_by_path("/aliases");
1967 of_chosen = of_find_node_by_path("/chosen");
1968 if (of_chosen == NULL)
1969 of_chosen = of_find_node_by_path("/chosen@0");
1970
1971 if (of_chosen) {
1972
1973 const char *name = NULL;
1974
1975 if (of_property_read_string(of_chosen, "stdout-path", &name))
1976 of_property_read_string(of_chosen, "linux,stdout-path",
1977 &name);
1978 if (IS_ENABLED(CONFIG_PPC) && !name)
1979 of_property_read_string(of_aliases, "stdout", &name);
1980 if (name)
1981 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1982 }
1983
1984 if (!of_aliases)
1985 return;
1986
1987 for_each_property_of_node(of_aliases, pp) {
1988 const char *start = pp->name;
1989 const char *end = start + strlen(start);
1990 struct device_node *np;
1991 struct alias_prop *ap;
1992 int id, len;
1993
1994
1995 if (!strcmp(pp->name, "name") ||
1996 !strcmp(pp->name, "phandle") ||
1997 !strcmp(pp->name, "linux,phandle"))
1998 continue;
1999
2000 np = of_find_node_by_path(pp->value);
2001 if (!np)
2002 continue;
2003
2004
2005
2006 while (isdigit(*(end-1)) && end > start)
2007 end--;
2008 len = end - start;
2009
2010 if (kstrtoint(end, 10, &id) < 0)
2011 continue;
2012
2013
2014 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
2015 if (!ap)
2016 continue;
2017 memset(ap, 0, sizeof(*ap) + len + 1);
2018 ap->alias = start;
2019 of_alias_add(ap, np, id, start, len);
2020 }
2021}
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033int of_alias_get_id(struct device_node *np, const char *stem)
2034{
2035 struct alias_prop *app;
2036 int id = -ENODEV;
2037
2038 mutex_lock(&of_mutex);
2039 list_for_each_entry(app, &aliases_lookup, link) {
2040 if (strcmp(app->stem, stem) != 0)
2041 continue;
2042
2043 if (np == app->np) {
2044 id = app->id;
2045 break;
2046 }
2047 }
2048 mutex_unlock(&of_mutex);
2049
2050 return id;
2051}
2052EXPORT_SYMBOL_GPL(of_alias_get_id);
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067int of_alias_get_alias_list(const struct of_device_id *matches,
2068 const char *stem, unsigned long *bitmap,
2069 unsigned int nbits)
2070{
2071 struct alias_prop *app;
2072 int ret = 0;
2073
2074
2075 bitmap_zero(bitmap, nbits);
2076
2077 mutex_lock(&of_mutex);
2078 pr_debug("%s: Looking for stem: %s\n", __func__, stem);
2079 list_for_each_entry(app, &aliases_lookup, link) {
2080 pr_debug("%s: stem: %s, id: %d\n",
2081 __func__, app->stem, app->id);
2082
2083 if (strcmp(app->stem, stem) != 0) {
2084 pr_debug("%s: stem comparison didn't pass %s\n",
2085 __func__, app->stem);
2086 continue;
2087 }
2088
2089 if (of_match_node(matches, app->np)) {
2090 pr_debug("%s: Allocated ID %d\n", __func__, app->id);
2091
2092 if (app->id >= nbits) {
2093 pr_warn("%s: ID %d >= than bitmap field %d\n",
2094 __func__, app->id, nbits);
2095 ret = -EOVERFLOW;
2096 } else {
2097 set_bit(app->id, bitmap);
2098 }
2099 }
2100 }
2101 mutex_unlock(&of_mutex);
2102
2103 return ret;
2104}
2105EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
2106
2107
2108
2109
2110
2111
2112
2113
2114int of_alias_get_highest_id(const char *stem)
2115{
2116 struct alias_prop *app;
2117 int id = -ENODEV;
2118
2119 mutex_lock(&of_mutex);
2120 list_for_each_entry(app, &aliases_lookup, link) {
2121 if (strcmp(app->stem, stem) != 0)
2122 continue;
2123
2124 if (app->id > id)
2125 id = app->id;
2126 }
2127 mutex_unlock(&of_mutex);
2128
2129 return id;
2130}
2131EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144bool of_console_check(struct device_node *dn, char *name, int index)
2145{
2146 if (!dn || dn != of_stdout || console_set_on_cmdline)
2147 return false;
2148
2149
2150
2151
2152
2153 return !add_preferred_console(name, index, (char *)of_stdout_options);
2154}
2155EXPORT_SYMBOL_GPL(of_console_check);
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165struct device_node *of_find_next_cache_node(const struct device_node *np)
2166{
2167 struct device_node *child, *cache_node;
2168
2169 cache_node = of_parse_phandle(np, "l2-cache", 0);
2170 if (!cache_node)
2171 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2172
2173 if (cache_node)
2174 return cache_node;
2175
2176
2177
2178
2179 if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
2180 for_each_child_of_node(np, child)
2181 if (of_node_is_type(child, "cache"))
2182 return child;
2183
2184 return NULL;
2185}
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196int of_find_last_cache_level(unsigned int cpu)
2197{
2198 u32 cache_level = 0;
2199 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2200
2201 while (np) {
2202 prev = np;
2203 of_node_put(np);
2204 np = of_find_next_cache_node(np);
2205 }
2206
2207 of_property_read_u32(prev, "cache-level", &cache_level);
2208
2209 return cache_level;
2210}
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
/**
 * of_map_id - Translate an ID through a downstream mapping property
 * @np:		node holding the map property
 * @id:		device ID to map
 * @map_name:	property name of the map to use
 * @map_mask_name: optional property name of the mask to use
 * @target:	optional pointer to a target device node pointer
 * @id_out:	optional pointer to receive the translated ID
 *
 * Parses a map whose entries are 4 cells each: (id-base, target phandle,
 * out-base, length) — the layout used by e.g. the "msi-map"/"iommu-map"
 * bindings (NOTE(review): confirm against callers).  Either @target or
 * @id_out may be NULL, but not both.  If @target points at a non-NULL
 * node, only entries resolving to that node match; if it points at NULL,
 * the first matching entry's node is stored there (reference retained).
 *
 * Return: 0 on success (including pass-through when no entry matches),
 * -EINVAL on bad arguments or a malformed map, -ENODEV when the map is
 * absent but a target was requested or a phandle cannot be resolved,
 * -EFAULT when an entry's id-base has bits outside the mask.
 */
int of_map_id(struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* No map: can't supply a target node ... */
		if (target)
			return -ENODEV;
		/* ... otherwise, no map implies no translation. */
		*id_out = id;
		return 0;
	}

	/* Each entry is exactly 4 cells. */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* Default mask selects all ID bits. */
	map_mask = 0xffffffff;

	/*
	 * The mask may be overridden by the @map_mask_name property;
	 * when the read fails the default is kept.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* An id-base with bits outside the mask can never match. */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/* Keep a caller-supplied target; otherwise adopt this
			 * entry's node (its reference is retained). */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			/* Entries resolving to a different node don't match. */
			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* No matching entry: the ID bypasses translation. */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
2317