/* linux/arch/x86/mm/numa.c */
/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

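/*
 * For illustration (not part of the original file): the parser above
 * accepts the following kernel command-line forms.  "numa=off" disables
 * NUMA entirely, "numa=fake=<N>" (with CONFIG_NUMA_EMU) splits memory
 * into N emulated nodes, and "numa=noacpi" (with CONFIG_ACPI_NUMA)
 * ignores the ACPI SRAT:
 *
 *      numa=off
 *      numa=fake=4
 *      numa=noacpi
 */
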
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

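/*
 * Worked example (illustrative, not in the original source): the loop
 * above leaves @num holding the highest set node id, so with
 * node_possible_map = { 0, 2 } it runs num = 0, then num = 2, and
 * nr_node_ids becomes 3.  Sparse node numbering therefore still gets a
 * cpumask slot allocated for the unused node 1.
 */
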
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
                           nid, start, end);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

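/*
 * Illustrative sketch (not in the original file): removing index 1 from
 * a three-entry array { A, B, C } first drops nr_blks to 2, then
 * memmove()s one entry (2 - 1 == 1) so the array reads { A, C }.
 * Callers that iterate while removing compensate with i-- / j--, as in
 * numa_cleanup_meminfo() below.
 */
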
/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

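/*
 * Usage sketch (illustrative, with made-up values): platform NUMA
 * detection code, e.g. the ACPI SRAT parser, records each discovered
 * range for node 0 covering 0-2G and node 1 covering 2-4G roughly as:
 *
 *      numa_add_memblk(0, 0x00000000, 0x80000000);
 *      numa_add_memblk(1, 0x80000000, 0x100000000);
 *
 * Invalid or zero-length ranges are dropped with at most a warning, so
 * a single bad firmware entry does not abort NUMA setup.
 */
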
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
        const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
        const u64 nd_high = PFN_PHYS(max_pfn_mapped);
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        bool remapped = false;
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        /* initialize remap allocator before aligning to ZONE_ALIGN */
        init_alloc_remap(nid, start, end);

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
               nid, start, end);

        /*
         * Allocate node data.  Try remap allocator first, node-local
         * memory and then any node.  Never allocate in DMA zone.
         */
        nd = alloc_remap(nid, nd_size);
        if (nd) {
                nd_pa = __pa(nd);
                remapped = true;
        } else {
                nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
                                                nd_size, SMP_CACHE_BYTES);
                if (nd_pa == MEMBLOCK_ERROR)
                        nd_pa = memblock_find_in_range(nd_low, nd_high,
                                                nd_size, SMP_CACHE_BYTES);
                if (nd_pa == MEMBLOCK_ERROR) {
                        pr_err("Cannot find %zu bytes in node %d\n",
                               nd_size, nid);
                        return;
                }
                memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
                nd = __va(nd_pa);
        }

        /* report and initialize */
        printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
               nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (!remapped && tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
        NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

        node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty block */
                if (bi->start >= bi->end)
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
                                               bi->nid, bi->start, bi->end,
                                               bj->nid, bj->start, bj->end);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
                                           bi->nid, bi->start, bi->end,
                                           bj->start, bj->end);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
                               bi->nid, bi->start, bi->end, bj->start, bj->end,
                               start, end);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}

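/*
 * Worked example (illustrative, with made-up ranges): given node 0
 * blocks [0x0,0x1000) and [0x1000,0x2000) plus a node 1 block at
 * [0x3000,0x4000), the two node 0 blocks merge to [0x0,0x2000) because
 * the span between them touches no other node's memory.  If node 1
 * instead owned [0x800,0x1800), the cross-node overlap with node 0
 * would make the function return -EINVAL.
 */
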
/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_x86_free_range(__pa(numa_distance),
                                        __pa(numa_distance) + size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
}

static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (phys == MEMBLOCK_ERROR) {
                pr_warning("NUMA: Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}

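/*
 * Illustrative layout (not part of the original file): for cnt == 2 the
 * flat cnt*cnt byte array above encodes the matrix
 *
 *              to 0    to 1
 *      from 0  10      20
 *      from 1  20      10
 *
 * using LOCAL_DISTANCE (10) on the diagonal and REMOTE_DISTANCE (20)
 * elsewhere; entry (from, to) lives at numa_distance[from * cnt + to].
 */
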
/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node at the time of
 * table creation or @distance doesn't make sense, the call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
                printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
                            from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}

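/*
 * Usage sketch (illustrative): firmware parsers feed this one cell at a
 * time, e.g. the ACPI SLIT code walks its matrix and calls
 *
 *      numa_set_distance(from, to, slit_entry(from, to));
 *
 * where slit_entry() is a hypothetical stand-in for however the caller
 * reads the firmware value.  A distance that doesn't fit in u8, or a
 * non-local value on the diagonal, is rejected above with a one-time
 * warning.
 */
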
int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - (memblock_x86_hole_size(0,
                                        PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}

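/*
 * Arithmetic note (not in the original file): with 4K pages PAGE_SHIFT
 * is 12, so the slack threshold 1 << (20 - PAGE_SHIFT) is 1 << 8 = 256
 * pages, i.e. exactly 1MB.  The NUMA layout is rejected only when the
 * e820 map reports at least 1MB more usable RAM than the node ranges
 * account for.
 */
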
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        unsigned long uninitialized_var(pfn_align);
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++)
                memblock_x86_register_active_regions(mi->blk[i].nid,
                                        mi->blk[i].start >> PAGE_SHIFT,
                                        mi->blk[i].end >> PAGE_SHIFT);

        /* for out of order entries */
        sort_node_map();

        /*
         * If the sections array is going to be used for pfn -> nid
         * mapping, check whether its granularity is fine enough.
         */
#ifdef NODE_NOT_IN_PAGE_FLAGS
        pfn_align = node_map_pfn_alignment();
        if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                       PFN_PHYS(pfn_align) >> 20,
                       PFN_PHYS(PAGES_PER_SECTION) >> 20);
                return -EINVAL;
        }
#endif
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start < end)
                        setup_node_data(nid, start, end);
        }

        return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

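/*
 * Worked example (illustrative): with online nodes { 0, 1 } and CPUs
 * 4-7 still unmapped, the loop above assigns cpu 4 to node 0, cpu 5 to
 * node 1, cpu 6 to node 0 and cpu 7 to node 1, wrapping back to
 * first_node() whenever next_node() runs past MAX_NUMNODES.
 */
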
static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        remove_all_active_ranges();
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;
        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();
        return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
               0LLU, PFN_PHYS(max_pfn));

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing all of memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
                if (!numa_init(numaq_numa_init))
                        return;
#endif
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the faked-node
 * case (a kernel compiled for NUMA running on a non-NUMA box), which is
 * OK because cpu_to_node[] was already initialized in a round-robin
 * manner by numa_init_array() prior to this call, and that initialization
 * is good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

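/*
 * Illustrative note (not in the original file): if firmware maps a CPU
 * to node 2 but only nodes 0 and 1 came online, find_near_online_node(2)
 * scans node_distance(2, n) for each online n and picks the smallest,
 * so the CPU lands on the closest node that actually has state.
 */
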
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else   /* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;
        char buf[64];

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
        return;
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif  /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
        struct numa_meminfo *mi = &numa_meminfo;
        int nid = mi->blk[0].nid;
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        nid = mi->blk[i].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

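/*
 * Usage sketch (illustrative, with a made-up address): when memory is
 * hot-added at, say, 0x100000000, the loop above returns the nid of the
 * numa_meminfo block containing that address, falling back to the nid
 * of blk[0] when no block matches.
 */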