/*
 * PowerPC pSeries NUMA support.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

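/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * cpumask_of_node() is only valid after this is done.
 */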
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

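/*
 * Create a new fake NUMA node each time the next "numa=fake=" boundary
 * from the command line is crossed.  Returns 1 when a new node was
 * created, 0 otherwise.
 */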
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left of the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/* skip the delimiter before the next boundary */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

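/*
 * Callback for work_with_active_regions(): record the active region
 * containing data->start_pfn and return 1 to stop the iteration.
 */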
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;
	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

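/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */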
static void __init get_node_active_region(unsigned long start_pfn,
					  struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif

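/* must hold reference to node during call */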
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

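/*
 * Return the "linux,drconf-usable-memory" property of the given memory
 * node, or NULL if it is missing or too short.
 */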
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return distance;

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}

static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}

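/*
 * Map an associativity property to a NUMA node id.  Returns a nid in
 * the range [0..MAX_NUMNODES-1], or -1 if no useful NUMA information
 * is available.
 */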
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/*
 * Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

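/* Walk the device tree upwards, looking for an associativity id */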
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *rtas_root;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration.  We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries.  There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(rtas_root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			form1_affinity = 1;
		}
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(rtas_root);
	return depth;

err:
	of_node_put(rtas_root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

/* Read an n-cell value from the buffer, advancing the buffer past it. */
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

/* Structure of an entry in the ibm,dynamic-memory property. */
struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

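/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */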
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

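/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list
 * entry contains information as laid out in the of_drconf_cell struct
 * above.  Returns the number of entries, or 0 on failure.
 */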
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/*
	 * Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything.
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

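/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */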
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

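/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */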
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/*
	 * Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

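/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */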
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

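/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */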
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

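/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */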
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

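/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */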
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duple.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

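/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */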
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duple */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
					   &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

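/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required.  nid is the preferred node and end_pfn bounds the physical
 * address of the allocation.
 *
 * Returns the virtual address of the memory.
 */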
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If the memory we got back came from a
	 * lower-numbered (already initialized) node, re-do the
	 * allocation through that node's bootmem so the reservation
	 * is recorded there.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG

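/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */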
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

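/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed
 * that sections are fully contained within a single MEMBLOCK.
 */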
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = 0;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);

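/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */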
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

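/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property.  When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity has changed.  Returns the number of cpus with
 * unhandled associativity changes.
 */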
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

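/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */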
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16*) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/*
			 * All significant fields processed, and the remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32*)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/*
			 * Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field.
			 */
			unpacked[i] = *((u32*)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

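/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 */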
int arch_update_cpu_topology(void)
{
	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	struct sys_device *sysdev;

	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);

		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		old_nid = numa_cpu_lookup_table[cpu];

		/*
		 * Disable hotplug while reading the cpu maps and
		 * updating the node maps.
		 */
		get_online_cpus();
		unregister_cpu_under_node(cpu, old_nid);
		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, nid);
		register_cpu_under_node(cpu, nid);
		put_online_cpus();

		sysdev = get_cpu_sysdev(cpu);
		if (sysdev)
			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}

	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (!vphn_enabled)
		return;
	if (update_cpu_associativity_changes_mask() > 0)
		topology_schedule_update();
	set_topology_timer();
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

/*
 * Start polling for VPHN associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	/* Disabled until races with load balancing are fixed */
	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}

	return rc;
}
__initcall(start_topology_update);

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	vphn_enabled = 0;
	return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */