/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id only if we have started creating fake
	 * NUMA nodes; we want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * If there are no more arguments to parse, the node id should
	 * be the same as the last fake node id (handled above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
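
/*
 * Illustrative example, as read from the parsing logic above: with
 * "numa=fake=512M,1G" on the command line, regions that end at or
 * below 512M keep the caller's node id; the first region extending
 * past 512M starts fake node 1, and the first one extending past 1G
 * starts fake node 2.
 */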

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if it exists (the
 * property exists only in kexec/kdump kernels, added by kexec-tools).
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
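
/*
 * Worked example (assuming form 1 affinity and a
 * distance_ref_points_depth of 2): starting from LOCAL_DISTANCE (10),
 * the distance doubles at each reference point where the two nodes'
 * lookup-table entries differ.  Nodes that match at the first
 * reference point are distance 10 apart; nodes that differ only at
 * the first are 20 apart; nodes that differ at both are 40 apart.
 */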

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth) {
		/*
		 * Skip the length field and send start of associativity array
		 */
		initialize_distance_lookup_table(nid, associativity + 1);
	}

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
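
/*
 * Illustrative example: under form 1 affinity, an
 * ibm,associativity-reference-points value of <4 2> makes the first
 * entry (4) the most significant NUMA boundary, so this function
 * returns a depth of 4; under form 0 affinity the second entry (2)
 * would be used instead.
 */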

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
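
/*
 * Worked example: with n == 2 and *buf pointing at the two cells
 * { 0x00000001, 0x00000000 }, read_n_cells() returns 0x100000000 and
 * leaves *buf advanced past both cells.
 */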

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of M associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = drmem->aa_index * aa->array_sz;
			initialize_distance_lookup_table(nid,
							&aa->arrays[index]);
		}
	}

	return nid;
}
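
/*
 * Illustrative index computation: with aa->array_sz == 4,
 * min_common_depth == 2 and drmem->aa_index == 3, the node id is read
 * from aa->arrays[3 * 4 + 2 - 1], i.e. entry 13, the second
 * associativity level of the fourth lookup array.
 */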

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
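
/*
 * Worked example: with memblock_end_of_DRAM() == 0x80000000, a region
 * starting at 0x70000000 with size 0x20000000 is truncated to
 * 0x10000000, while a region starting at or above 0x80000000 is
 * discarded entirely (returned size 0).
 */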

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, the corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
					   &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		pr_info("Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					pr_cont(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					pr_cont("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
	}
}
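
/*
 * Example of the resulting output (illustrative), for two nodes with
 * eight threads each and no holes in the cpumasks:
 *
 *   Node 0 CPUs: 0-7
 *   Node 1 CPUs: 8-15
 *
 * A hole in a node's cpumask shows up as additional ranges on the line.
 */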

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	if (spanned_pages)
		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
			nid, start_pfn << PAGE_SHIFT,
			(end_pfn << PAGE_SHIFT) - 1);
	else
		pr_info("Initmem setup node %d\n", nid);

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
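
/*
 * Example command lines parsed by early_numa() above (illustrative):
 *
 *   numa=off           disable NUMA
 *   numa=debug         enable the dbg() messages in this file
 *   numa=fake=512M,1G  create fake node boundaries at 512M and 1G
 *
 * Since the parser matches substrings, options can be combined,
 * e.g. "numa=debug,fake=1G".
 */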

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;
	const __be64 *lrdr = NULL;
	struct of_drconf_cell drmem;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);

		/* Advance to the last cell, each cell has 6 32 bit integers */
		dm += (drconf_cell_cnt - 1) * 6;
		read_drconf_cell(&drmem, &dm);
		of_node_put(memory);
		return drmem.base_addr + lmb_size;
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, the updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is same as the old one),
	 * skip invoking update_cpu_topology() via stop-machine(). This is
	 * necessary (and not just a fast-path optimization) since stop-machine
	 * can end up electing a random CPU to run update_cpu_topology(), and
	 * thus trick us into setting up incorrect cpu-node mappings (since
	 * 'updates' is kzalloc()'ed).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif /* CONFIG_SMP */

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};

static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */