#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;

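/*
 * Early parsing of the "numa=" boot option: "off" disables NUMA entirely,
 * "fake=..." hands the remainder of the string to the emulation code, and
 * "noacpi"/"nohmat" suppress SRAT and HMAT parsing respectively.
 */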
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);

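/*
 * apicid, cpu, node mappings.  Every local APIC ID starts out mapped to
 * NUMA_NO_NODE; the platform NUMA detection code fills in real nodes via
 * set_apicid_to_node().
 */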
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

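/* Per-node cpumasks, allocated in setup_node_to_cpumask_map() below. */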
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index.
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no per-CPU area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

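/*
 * Allocate node_to_cpumask_map based on the number of available nodes.
 * Requires node_possible_map to be valid; note that cpumask_of_node() is
 * not valid until after this runs.
 */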
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

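/*
 * numa_remove_memblk_from - Remove the @idx'th numa_memblk from @mi by
 * shifting @mi->blk[] down one slot and decrementing @mi->nr_blks.
 */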
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

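/*
 * numa_move_tail_memblk - Append the @idx'th memblk of @src to @dst and
 * remove it from @src.
 */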
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

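/*
 * numa_add_memblk - Add one numa_memblk to the default numa_meminfo.
 * Returns 0 on success, -errno on failure.
 */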
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

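/* Allocate NODE_DATA for a node, on the node's own memory if possible. */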
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

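/*
 * numa_cleanup_meminfo - Sanitize @mi by merging and removing unnecessary
 * memblks, checking for overlaps between nodes and clearing unused entries.
 * Returns 0 on success, -EINVAL if memblks of different nodes overlap.
 */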
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

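/* Set the nodes which have memory in @mi in *@nodemask. */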
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

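/*
 * numa_reset_distance - Free the current distance table.  The next
 * numa_set_distance() call will create a new one.
 */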
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free_ptr(numa_distance, size);
	numa_distance_cnt = 0;
	numa_distance = NULL;

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
					 PFN_PHYS(max_pfn_mapped));
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

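/*
 * numa_set_distance - Set the distance from node @from to @to.  If no
 * distance table exists yet, one large enough for all currently known
 * nodes is allocated; if that allocation fails, a warning is printed once
 * and further calls become no-ops until numa_reset_distance().  Out-of-range
 * node ids and nonsensical distances are ignored with a one-time warning,
 * which keeps platform-specific NUMA implementations simple.
 */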
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
	    from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bounds, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

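/*
 * Sanity check: make sure the parsed NUMA configuration covers (almost)
 * all e820 RAM before committing to it.
 */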
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* a few pages may go missing in accounting; allow 1MB of slack */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

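/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own ranges) as hot-unpluggable, so nodes carrying kernel data
 * are never offlined.
 */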
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all parsed NUMA memblks and use those ranges to
	 * set the nid in memblock.reserved.  This will split up the
	 * memblock regions along node boundaries and set the node IDs
	 * as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel-reserved memory areas.
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.  Note that this includes
	 * memory regions that merely share a node with kernel memory -
	 * entire nodes become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * At this very early time the kernel has to use some memory, e.g.
	 * for loading the kernel image.  We cannot prevent this, so any
	 * node the kernel resides in must be made un-hotpluggable.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid mapping,
	 * check whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

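/*
 * Some mainboards connect all memory to a single node.  To keep the 1:1
 * cpu->node mapping usable, give every CPU that is still unassigned a node,
 * round-robining over the online nodes.
 */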
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * Reset memblock back to the top-down direction here: with
	 * ACPI_NUMA configured, SRAT has been parsed in init_func().  The
	 * reset is harmless even without ACPI_NUMA, or when SRAT parsing
	 * failed and we fell back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

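/*
 * dummy_numa_init - Fallback used when there is no NUMA information: fakes
 * a single node covering all of memory.  Must online at least one node and
 * add memblks covering all allowed memory; it cannot fail.
 */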
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

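/*
 * x86_numa_init - Try each configured NUMA initialization method until one
 * succeeds.  The last fallback is the dummy single-node config covering the
 * whole of memory, which never fails.
 */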
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	/* Allocate and initialize node data; the node is now online. */
	alloc_node_data(nid);
	free_area_init_memoryless_node(nid);

	/*
	 * All zonelists will be built later in start_kernel() after per-CPU
	 * areas are initialized.
	 */
}

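/*
 * A node may exist which has one or more Generic Initiators but no CPUs
 * and no memory.  Bring such nodes online here so that node_data[nid] is
 * valid by the time zonelists are built; nodes with memory and/or CPUs are
 * already online and need nothing extra.
 */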
void __init init_gi_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			init_memory_less_node(nid);
}

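/*
 * Set up the early cpu -> node mapping from the firmware-provided APIC ID
 * tables.  A CPU whose node is still offline gets that node brought up as
 * a memory-less node first, so numa_set_node() always targets an online
 * node.  The mappings are fixed up later, when SMP is initialized and the
 * per-CPU areas are set up.
 */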
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
		       "cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

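/*
 * Same as cpu_to_node() but usable before the per-CPU areas are set up.
 */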
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
		       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif

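/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */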
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
		       node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
		       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
		       node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}

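/*
 * Look up the target node for a physical address, falling back to the
 * ranges that numa_cleanup_meminfo() parked in numa_reserved_meminfo, so
 * not-yet-onlined (e.g. hot-added) ranges still resolve to a node.
 */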
int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added, continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif	/* CONFIG_NUMA_KEEP_MEMINFO */