linux/arch/x86/mm/numa.c
/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
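
/*
 * Example uses of the "numa=" boot parameter parsed above (illustrative
 * note, not part of the original source):
 *
 *	numa=off	disable NUMA; a single node is faked over all memory
 *	numa=fake=4	split memory into 4 emulated nodes (CONFIG_NUMA_EMU)
 *	numa=noacpi	ignore the ACPI SRAT table (CONFIG_ACPI_NUMA)
 */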

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			   nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
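
/*
 * Illustrative sketch (not in the original file): how a NUMA init method
 * might describe a two-node machine with numa_nodes_parsed and
 * numa_add_memblk().  The node IDs and address ranges are made up.
 *
 *	static int __init example_numa_init(void)
 *	{
 *		node_set(0, numa_nodes_parsed);
 *		node_set(1, numa_nodes_parsed);
 *		numa_add_memblk(0, 0, 0x80000000ULL);	   - node 0: [0, 2G)
 *		numa_add_memblk(1, 0x80000000ULL,
 *				0x100000000ULL);	   - node 1: [2G, 4G)
 *		return 0;
 *	}
 */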

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
					      MEMBLOCK_ALLOC_ACCESSIBLE);
		if (!nd_pa) {
			pr_err("Cannot find %zu bytes in node %d\n",
			       nd_size, nid);
			return;
		}
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					   bi->nid, bi->start, bi->end - 1,
					   bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
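
/*
 * Worked example (illustrative): with blocks [0, 2G) and [2G, 4G) both on
 * node 0, the merge pass above joins them into a single [0, 4G) block,
 * provided no other node owns memory inside that span.  If node 1 owned,
 * say, [3G, 4G), the inner k-loop would detect the conflict and the join
 * would be skipped.
 */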

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set the NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This allows simplification of specific NUMA config
 * implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
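
/*
 * Example (illustrative): how an SRAT/SLIT parser might fill a two-node
 * distance table with the function above, using the ACPI convention
 * (10 = local, larger = farther):
 *
 *	numa_set_distance(0, 1, 21);
 *	numa_set_distance(1, 0, 21);
 *
 * Afterwards __node_distance(0, 1) returns 21, while __node_distance(0, 0)
 * stays at LOCAL_DISTANCE from the default fill in numa_alloc_distance().
 */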

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/*
	 * We seem to lose 3 pages somewhere.  Allow 1 MiB of slack:
	 * 1 << (20 - PAGE_SHIFT) is one megabyte expressed in pages.
	 */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

static void __init numa_clear_kernel_node_hotplug(void)
{
	int i, nid;
	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_region *r;

	/*
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel.  Setting the nid in memblock.reserved
	 * marks out all the nodes the kernel resides in.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = &numa_meminfo.blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.reserved, mb->nid);
	}

	/* Mark all kernel nodes. */
	for_each_memblock(reserved, r)
		node_set(r->nid, numa_kernel_nodes);

	/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		nid = numa_meminfo.blk[i].nid;
		if (!node_isset(nid, numa_kernel_nodes))
			continue;

		start = numa_meminfo.blk[i].start;
		end = numa_meminfo.blk[i].end;

		memblock_clear_hotplug(start, end - start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	unsigned long uninitialized_var(pfn_align);
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * At a very early time, the kernel has to use some memory, e.g.
	 * for loading the kernel image.  We cannot prevent this anyway,
	 * so any node the kernel resides in should be un-hotpluggable.
	 *
	 * By the time we get here, allocating the node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 */
#ifdef NODE_NOT_IN_PAGE_FLAGS
	pfn_align = node_map_pfn_alignment();
	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
		       PFN_PHYS(pfn_align) >> 20,
		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
		return -EINVAL;
	}
#endif
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		/*
		 * Don't confuse VM with a node that doesn't have the
		 * minimum amount of memory:
		 */
		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
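
/*
 * Example (illustrative): with nodes {0, 1} online and four possible CPUs
 * that have no APIC-derived node, the loop above assigns CPUs 0..3 to
 * nodes 0, 1, 0, 1 in round-robin order.
 */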

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * Reset memblock back to the top-down allocation direction here:
	 * if ACPI_NUMA is configured, we have parsed SRAT in init_func().
	 * The reset is fine even if ACPI_NUMA wasn't configured, or if
	 * ACPI NUMA init failed and we fell back to dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing all memory; it
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
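
/*
 * Usage sketch (illustrative): walking the CPUs of a node via the
 * accessor above.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		pr_info("cpu %d is on node %d\n", cpu, nid);
 */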

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
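
/*
 * Example (illustrative): when hot-adding memory at a physical address
 * that falls inside one of the numa_meminfo blocks, the loop above
 * returns that block's nid; an address covered by no block falls back
 * to blk[0]'s nid.
 */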
#endif