linux/arch/x86/mm/numa.c
/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;
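/*
 * Parse the "numa=" early boot parameter.  As handled below, "numa=off"
 * disables NUMA entirely, "numa=fake=..." hands the rest of the string
 * to the NUMA emulation code, and "numa=noacpi" disables ACPI (SRAT)
 * based configuration by setting acpi_numa to -1.
 */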
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

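/*
 * Look up the node of a CPU via its APIC ID.  Returns NUMA_NO_NODE if
 * the CPU's APIC ID is not (yet) known.
 */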
int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

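/*
 * Record the cpu -> node mapping.  Before the per-cpu areas exist, the
 * mapping lives in the early_per_cpu array; afterwards it is written to
 * the real per-cpu variable and mirrored via set_cpu_numa_node().
 */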
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
        unsigned int node, num = 0;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES) {
                for_each_node_mask(node, node_possible_map)
                        num = node;
                nr_node_ids = num + 1;
        }

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}

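/*
 * Append one memblk to @mi.  Zero length blocks are silently ignored,
 * clearly invalid ones are whined about and dropped, and the call fails
 * only when @mi is already full (NR_NODE_MEMBLKS entries).
 */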
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
                                     struct numa_meminfo *mi)
{
        /* ignore zero length blks */
        if (start == end)
                return 0;

        /* whine about and ignore invalid blks */
        if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
                pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
                           nid, start, end);
                return 0;
        }

        if (mi->nr_blks >= NR_NODE_MEMBLKS) {
                pr_err("NUMA: too many memblk ranges\n");
                return -EINVAL;
        }

        mi->blk[mi->nr_blks].start = start;
        mi->blk[mi->nr_blks].end = end;
        mi->blk[mi->nr_blks].nid = nid;
        mi->nr_blks++;
        return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
        mi->nr_blks--;
        memmove(&mi->blk[idx], &mi->blk[idx + 1],
                (mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
        return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Initialize NODE_DATA for a node, preferably from node-local memory */
static void __init setup_node_data(int nid, u64 start, u64 end)
{
        const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        bool remapped = false;
        u64 nd_pa;
        void *nd;
        int tnid;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        /* initialize remap allocator before aligning to ZONE_ALIGN */
        init_alloc_remap(nid, start, end);

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
               nid, start, end);

        /*
         * Allocate node data.  Try remap allocator first, node-local
         * memory and then any node.  Never allocate in DMA zone.
         */
        nd = alloc_remap(nid, nd_size);
        if (nd) {
                nd_pa = __pa(nd);
                remapped = true;
        } else {
                nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
                if (!nd_pa) {
                        pr_err("Cannot find %zu bytes in node %d\n",
                               nd_size, nid);
                        return;
                }
                nd = __va(nd_pa);
        }

        /* report and initialize */
        printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
               nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (!remapped && tnid != nid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
        NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

        node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
        const u64 low = 0;
        const u64 high = PFN_PHYS(max_pfn);
        int i, j, k;

        /* first, trim all entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                /* make sure all blocks are inside the limits */
                bi->start = max(bi->start, low);
                bi->end = min(bi->end, high);

                /* and there's no empty block */
                if (bi->start >= bi->end)
                        numa_remove_memblk_from(i--, mi);
        }

        /* merge neighboring / overlapping entries */
        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *bi = &mi->blk[i];

                for (j = i + 1; j < mi->nr_blks; j++) {
                        struct numa_memblk *bj = &mi->blk[j];
                        u64 start, end;

                        /*
                         * See whether there are overlapping blocks.  Whine
                         * about but allow overlaps of the same nid.  They
                         * will be merged below.
                         */
                        if (bi->end > bj->start && bi->start < bj->end) {
                                if (bi->nid != bj->nid) {
                                        pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
                                               bi->nid, bi->start, bi->end,
                                               bj->nid, bj->start, bj->end);
                                        return -EINVAL;
                                }
                                pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
                                           bi->nid, bi->start, bi->end,
                                           bj->start, bj->end);
                        }

                        /*
                         * Join together blocks on the same node, holes
                         * between which don't overlap with memory on other
                         * nodes.
                         */
                        if (bi->nid != bj->nid)
                                continue;
                        start = min(bi->start, bj->start);
                        end = max(bi->end, bj->end);
                        for (k = 0; k < mi->nr_blks; k++) {
                                struct numa_memblk *bk = &mi->blk[k];

                                if (bi->nid == bk->nid)
                                        continue;
                                if (start < bk->end && end > bk->start)
                                        break;
                        }
                        if (k < mi->nr_blks)
                                continue;
                        printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
                               bi->nid, bi->start, bi->end, bj->start, bj->end,
                               start, end);
                        bi->start = start;
                        bi->end = end;
                        numa_remove_memblk_from(j--, mi);
                }
        }

        /* clear unused ones */
        for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
                mi->blk[i].start = mi->blk[i].end = 0;
                mi->blk[i].nid = NUMA_NO_NODE;
        }

        return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
                                              const struct numa_meminfo *mi)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
                if (mi->blk[i].start != mi->blk[i].end &&
                    mi->blk[i].nid != NUMA_NO_NODE)
                        node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
        size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

        /* numa_distance could be 1LU marking allocation failure, test cnt */
        if (numa_distance_cnt)
                memblock_free(__pa(numa_distance), size);
        numa_distance_cnt = 0;
        numa_distance = NULL;   /* enable table creation */
}

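/*
 * Allocate the distance table sized for the highest node id seen so far
 * (nodes parsed from firmware plus nodes with memory in numa_meminfo)
 * and fill it with the default LOCAL_DISTANCE / REMOTE_DISTANCE values.
 */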
static int __init numa_alloc_distance(void)
{
        nodemask_t nodes_parsed;
        size_t size;
        int i, j, cnt = 0;
        u64 phys;

        /* size the new table and allocate it */
        nodes_parsed = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

        for_each_node_mask(i, nodes_parsed)
                cnt = i;
        cnt++;
        size = cnt * cnt * sizeof(numa_distance[0]);

        phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                      size, PAGE_SIZE);
        if (!phys) {
                pr_warning("NUMA: Warning: can't allocate distance table!\n");
                /* don't retry until explicitly reset */
                numa_distance = (void *)1LU;
                return -ENOMEM;
        }
        memblock_reserve(phys, size);

        numa_distance = __va(phys);
        numa_distance_cnt = cnt;

        /* fill with the default distances */
        for (i = 0; i < cnt; i++)
                for (j = 0; j < cnt; j++)
                        numa_distance[i * cnt + j] = i == j ?
                                LOCAL_DISTANCE : REMOTE_DISTANCE;
        printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

        return 0;
}

/**
 * numa_set_distance - Set NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or @distance doesn't make sense, the call
 * is ignored.  This is to allow simplification of specific NUMA config
 * implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
        if (!numa_distance && numa_alloc_distance() < 0)
                return;

        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
                        from < 0 || to < 0) {
                pr_warn_once("NUMA: Warning: node ids are out of bounds, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        if ((u8)distance != distance ||
            (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                             from, to, distance);
                return;
        }

        numa_distance[from * numa_distance_cnt + to] = distance;
}

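/*
 * Return the distance between two nodes, falling back to the default
 * LOCAL/REMOTE distances for ids outside the allocated table.
 */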
int __node_distance(int from, int to)
{
        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
        return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
        u64 numaram, e820ram;
        int i;

        numaram = 0;
        for (i = 0; i < mi->nr_blks; i++) {
                u64 s = mi->blk[i].start >> PAGE_SHIFT;
                u64 e = mi->blk[i].end >> PAGE_SHIFT;
                numaram += e - s;
                numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
                if ((s64)numaram < 0)
                        numaram = 0;
        }

        e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

        /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
        if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
                printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
                       (numaram << PAGE_SHIFT) >> 20,
                       (e820ram << PAGE_SHIFT) >> 20);
                return false;
        }
        return true;
}

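/*
 * Register the parsed memory layout with the core VM: compute
 * node_possible_map, tag each memblock range with its node id, sanity
 * check memory coverage and section alignment, then set up NODE_DATA
 * for every node that has memory.
 */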
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
        unsigned long uninitialized_var(pfn_align);
        int i, nid;

        /* Account for nodes with cpus and no memory */
        node_possible_map = numa_nodes_parsed;
        numa_nodemask_from_meminfo(&node_possible_map, mi);
        if (WARN_ON(nodes_empty(node_possible_map)))
                return -EINVAL;

        for (i = 0; i < mi->nr_blks; i++) {
                struct numa_memblk *mb = &mi->blk[i];
                memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
        }

        /*
         * If the sections array is going to be used for pfn -> nid
         * mapping, check whether its granularity is fine enough.
         */
#ifdef NODE_NOT_IN_PAGE_FLAGS
        pfn_align = node_map_pfn_alignment();
        if (pfn_align && pfn_align < PAGES_PER_SECTION) {
                printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
                       PFN_PHYS(pfn_align) >> 20,
                       PFN_PHYS(PAGES_PER_SECTION) >> 20);
                return -EINVAL;
        }
#endif
        if (!numa_meminfo_cover_memory(mi))
                return -EINVAL;

        /* Finally register nodes. */
        for_each_node_mask(nid, node_possible_map) {
                u64 start = PFN_PHYS(max_pfn);
                u64 end = 0;

                for (i = 0; i < mi->nr_blks; i++) {
                        if (nid != mi->blk[i].nid)
                                continue;
                        start = min(mi->blk[i].start, start);
                        end = max(mi->blk[i].end, end);
                }

                if (start < end)
                        setup_node_data(nid, start, end);
        }

        /* Dump memblock with node info and return. */
        memblock_dump_all();
        return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round-robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

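/*
 * Run one NUMA init method: reset all apicid/node/memblock state, let
 * @init_func fill numa_nodes_parsed and numa_meminfo, sanitize the
 * result, apply NUMA emulation if requested, register the memblks, and
 * finally clear cpu -> node mappings that point at offline nodes.
 */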
static int __init numa_init(int (*init_func)(void))
{
        int i;
        int ret;

        for (i = 0; i < MAX_LOCAL_APIC; i++)
                set_apicid_to_node(i, NUMA_NO_NODE);

        nodes_clear(numa_nodes_parsed);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
        memset(&numa_meminfo, 0, sizeof(numa_meminfo));
        WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
        numa_reset_distance();

        ret = init_func();
        if (ret < 0)
                return ret;
        ret = numa_cleanup_meminfo(&numa_meminfo);
        if (ret < 0)
                return ret;

        numa_emulation(&numa_meminfo, numa_distance_cnt);

        ret = numa_register_memblks(&numa_meminfo);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_cpu_ids; i++) {
                int nid = early_cpu_to_node(i);

                if (nid == NUMA_NO_NODE)
                        continue;
                if (!node_online(nid))
                        numa_clear_node(i);
        }
        numa_init_array();
        return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
               0LLU, PFN_PHYS(max_pfn));

        node_set(0, numa_nodes_parsed);
        numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

        return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing all memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
        if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
                if (!numa_init(numaq_numa_init))
                        return;
#endif
#ifdef CONFIG_ACPI_NUMA
                if (!numa_init(x86_acpi_numa_init))
                        return;
#endif
#ifdef CONFIG_AMD_NUMA
                if (!numa_init(amd_numa_init))
                        return;
#endif
        }

        numa_init(dummy_numa_init);
}

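/*
 * Find the online node nearest to @node according to the distance
 * table.  Used by init_cpu_to_node() to re-home CPUs whose own node is
 * offline (e.g. memoryless).
 */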
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialization for NUMA
 * emulation and the faked-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node = numa_cpu_node(cpu);

                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif /* !CONFIG_NUMA_EMU */

#else   /* !CONFIG_DEBUG_PER_CPU_MAPS */

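/*
 * Debug version of cpu_to_node(): warns with a backtrace when it is
 * used before the per_cpu areas are set up and answers from the early
 * map instead.
 */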
int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

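/*
 * Set or clear @cpu in @node's cpumask, with sanity checks and a
 * KERN_DEBUG line showing the resulting mask.
 */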
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
        struct cpumask *mask;
        char buf[64];

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
}

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
        debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
        if (node >= nr_node_ids) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
                        node, nr_node_ids);
                dump_stack();
                return cpu_none_mask;
        }
        if (node_to_cpumask_map[node] == NULL) {
                printk(KERN_WARNING
                        "cpumask_of_node(%d): no node_to_cpumask_map!\n",
                        node);
                dump_stack();
                return cpu_online_mask;
        }
        return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif  /* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
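/*
 * Find the node containing physical address @start for memory hotplug;
 * falls back to the first memblk's node if no range covers @start.
 */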
int memory_add_physaddr_to_nid(u64 start)
{
        struct numa_meminfo *mi = &numa_meminfo;
        int nid = mi->blk[0].nid;
        int i;

        for (i = 0; i < mi->nr_blks; i++)
                if (mi->blk[i].start <= start && mi->blk[i].end > start)
                        nid = mi->blk[i].nid;
        return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
