/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes.
	 * We want to continue from where we left off last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
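
/*
 * Illustrative note (not from the original source): with the boot
 * parameter "numa=fake=1G,3G", memory up to the first boundary lands
 * in fake node 0, and each listed boundary that a region crosses
 * starts a new fake node (1, 2, ...). memparse() accepts the usual
 * K/M/G suffixes; the boundary list must be comma-separated and
 * strictly increasing, as the curr_boundary check above enforces.
 */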

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
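
/*
 * Worked example (illustrative, not from the original source): with
 * LOCAL_DISTANCE == 10 and a reference-point depth of 4, two nodes
 * that first agree at the second reference point differ at one level,
 * so __node_distance() returns 10 * 2 = 20; nodes that agree at no
 * reference point return 10 * 2^4 = 160.
 */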

static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * information is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We currently only support form 1 affinity but
	 * include the complete property for future use.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
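
/*
 * Illustrative note (not from the original source): with
 * n_mem_addr_cells == 2, read_n_cells() folds two consecutive 32-bit
 * device-tree cells into one 64-bit value. For the hypothetical cells
 * { 0x00000001, 0x80000000 } it returns 0x180000000, i.e.
 * (0x1 << 32) | 0x80000000, and advances the caller's buffer pointer
 * past both cells.
 */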

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the
 * device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list
 * entry contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
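
/*
 * Sketch of the property layout consumed above (illustrative; shown
 * assuming n_mem_addr_cells == 2, the address cell count varies):
 *
 *   ibm,dynamic-memory = < N
 *                          base_addr_hi base_addr_lo drc_index
 *                          reserved aa_index flags
 *                          ... N-1 further entries ... >;
 *
 * read_drconf_cell() consumes one such 6-cell entry per call.
 */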

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32 n_arrays;
	u32 array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the associativity lookup array property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
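
/*
 * Worked example (illustrative, not from the original source): with
 * aa->array_sz == 4, min_common_depth == 2 and drmem->aa_index == 3,
 * the lookup above reads aa->arrays[3 * 4 + 2 - 1] == aa->arrays[13].
 * The "- 1" accounts for the lookup arrays not storing the leading
 * length cell that an ibm,associativity property carries.
 */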

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit
	 * because we've already adjusted it for the limit and it takes
	 * care of having memory holes below the limit.  Also, in the case
	 * of iommu_is_off, memory_limit is not set but is implicitly
	 * enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
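
/*
 * Worked example (illustrative, not from the original source): if DRAM
 * ends at 0x40000000 (1GB), a region starting at 0x30000000 with size
 * 0x20000000 straddles the limit, so the function returns the truncated
 * size 0x10000000; a region starting at or above 0x40000000 returns 0
 * and is dropped by the callers.
 */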

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags
		 * or if the block is not assigned to this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT), &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align,
						  memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active
			 * region then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region is partially contained in the
			 * active region.  So increase the start_pfn to the
			 * end of the active region and find the next active
			 * region covering the remainder.
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}


void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas covered.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
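
/*
 * Boot-parameter examples handled above (boundary values illustrative):
 *   numa=off            disable NUMA parsing entirely
 *   numa=debug          enable the dbg() messages in this file
 *   numa=fake=1G,3G     split memory into fake nodes at the listed
 *                       boundaries (see fake_numa_create_new_node())
 */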

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed
 * that sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed, and return the number of cpus
 * with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 * Returns the number of associativity domains read.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}
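
/*
 * Decoding example (illustrative, not from the original source): a
 * 16-bit field of 0x8002 has VPHN_FIELD_MSB set, so it unpacks to the
 * single 15-bit domain number 0x0002; a field below 0x8000 instead
 * supplies the upper half of a full 32-bit domain number whose lower
 * half is the following 16-bit field; 0xffff marks unused fields.
 */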

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
		vdso_getcpu_init();
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/* Terminate the list before the first unused (zeroed) entry. */
	if (i)
		updates[i - 1].next = NULL;

	/*
	 * Don't run stop_machine() or walk the update list if every
	 * flagged cpu turned out to need no change.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   get_lppaca()->shared_proc) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus '\0' */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};
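
/*
 * Usage sketch (illustrative): once the proc file below is registered,
 * topology update polling can be toggled from user space, e.g.
 *   echo on  > /proc/powerpc/topology_updates
 *   echo off > /proc/powerpc/topology_updates
 * and the current state read back with
 *   cat /proc/powerpc/topology_updates
 */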

static int topology_update_init(void)
{
	start_topology_update();
	proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */