/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

/* Parse the "numa=" early boot parameter */
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

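/*
 * apicid, cpu, node mappings
 */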
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

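/*
 * Map cpu index to node index
 */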
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

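/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */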
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

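/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */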
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

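/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */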
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

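/* Allocate NODE_DATA for a node on the local memory */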
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node\n", nd_size);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

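/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */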
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty or non-existent block */
		if (bi->start >= bi->end ||
		    !memblock_overlaps_region(&memblock.memory,
			bi->start, bi->end - bi->start))
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused node memblk entries */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

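/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */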
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

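/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */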
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

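/**
 * numa_set_distance - Set the NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This allows simplification of specific NUMA config
 * implementations.
 */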
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

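/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */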
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

static void __init numa_clear_kernel_node_hotplug(void)
{
	int i, nid;
	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_region *r;

	/*
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel.  Setting the nid in memblock.reserved
	 * will mark out all the nodes the kernel resides in.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = &numa_meminfo.blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.reserved, mb->nid);
	}

	/*
	 * Mark all kernel nodes.
	 *
	 * Note that regions in memblock.reserved that were not covered by
	 * numa_meminfo above keep nid == MAX_NUMNODES and are skipped here.
	 */
	for_each_memblock(reserved, r)
		if (r->nid != MAX_NUMNODES)
			node_set(r->nid, numa_kernel_nodes);

	/* Clear hotplug flag for memory in kernel nodes. */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		nid = numa_meminfo.blk[i].nid;
		if (!node_isset(nid, numa_kernel_nodes))
			continue;

		start = numa_meminfo.blk[i].start;
		end = numa_meminfo.blk[i].end;

		memblock_clear_hotplug(start, end - start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * At very early time, the kernel has to use some memory such as
	 * loading the kernel image.  We cannot prevent this anyway, so any
	 * node the kernel resides in should be un-hotpluggable.
	 *
	 * And when we come here, alloc node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

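/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */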
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is ok to have the reset here even if we didn't configure
	 * ACPI_NUMA, or if the ACPI NUMA init fails and falls back to
	 * dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

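/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */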
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

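/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */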
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
		if (!numa_init(numaq_numa_init))
			return;
#endif
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};

	/* Allocate and initialize node data.  Memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_node(nid, zones_size, 0, zholes_size);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}

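/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[] and
 * __apicid_to_node[] contain valid entries for a CPU.  Nodes that
 * have CPUs but no memory are brought online here via
 * init_memory_less_node() before the mapping is recorded.
 */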
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

756# ifndef CONFIG_NUMA_EMU
757void numa_add_cpu(int cpu)
758{
759 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
760}
761
762void numa_remove_cpu(int cpu)
763{
764 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
765}
766# endif
767
768#else
769
770int __cpu_to_node(int cpu)
771{
772 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
773 printk(KERN_WARNING
774 "cpu_to_node(%d): usage too early!\n", cpu);
775 dump_stack();
776 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
777 }
778 return per_cpu(x86_cpu_to_node_map, cpu);
779}
780EXPORT_SYMBOL(__cpu_to_node);
781
782
783
784
785
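/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */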
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
}

826# ifndef CONFIG_NUMA_EMU
827static void numa_set_cpumask(int cpu, bool enable)
828{
829 debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
830}
831
832void numa_add_cpu(int cpu)
833{
834 numa_set_cpumask(cpu, true);
835}
836
837void numa_remove_cpu(int cpu)
838{
839 numa_set_cpumask(cpu, false);
840}
841# endif
842
843
844
845
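/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */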
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif