linux/arch/x86/mm/numa_64.c
/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

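/*
 * Memory-to-node hash: phys_to_nid() resolves a physical address by
 * indexing memnodemap[] with (addr >> memnode_shift).  The shift, map
 * size and table all live in this structure (the memnodemap,
 * memnode_shift and memnodemapsize accessors are expected to come
 * from asm/mmzone_64.h).
 */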
struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

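/*
 * Illustrative example: with shift == 24 each memnodemap[] entry
 * covers 16MB of physical address space, so a node spanning
 * 0x0-0x8000000 (128MB) fills entries 0 through 7 with its node id.
 */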
/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

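/*
 * Use the map embedded in struct memnode when it is large enough;
 * otherwise carve a cache-aligned table out of a free e820 range
 * (searched upwards from 0x8000) and reserve it early.
 */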
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
                                      nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start addresses in the node map is the value of the
 * maximum possible shift.
 */
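/*
 * Illustrative example: for nodes starting at 0x0 and 0x40000000,
 * the OR of the start addresses has bit 30 as its lowest set bit,
 * so the largest usable shift is 30 (1GB per memnodemap[] entry).
 */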
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i)+1;
        return i;
}

int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
                shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned; you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

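/*
 * Allocate early memory for a node: prefer a free e820 range inside
 * [start, end); fall back to the generic bootmem allocator above
 * MAX_DMA_ADDRESS, which may hand back memory on a different node.
 */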
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem = find_e820_area(start, end, size, align);
        void *ptr;

        if (mem != -1L)
                return __va(mem);

        ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
        if (ptr == NULL) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
                nodedata_phys + pgdat_size - 1);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        /*
         * Find a place for the bootmem map.
         * nodedata_phys could have been put on another node by
         * alloc_bootmem, so make sure bootmap_start is not too low;
         * otherwise early_node_mem would grab it via find_e820_area
         * instead of alloc_bootmem, which could clash with the
         * reserved range.
         */
        bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
        nid = phys_to_nid(nodedata_phys);
        if (nid == nodeid)
                bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
        else
                bootmap_start = roundup(start, PAGE_SIZE);
        /*
         * SMP_CACHE_BYTES alignment could be enough, but
         * init_bootmem_node() likes the bootmem map to be
         * PAGE_SIZE aligned.
         */
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem(nodedata_phys, pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, last_pfn);

        printk(KERN_INFO "  bootmap [%016lx - %016lx] pages %lx\n",
                 bootmap_start, bootmap_start + bootmap_size - 1,
                 bootmap_pages);

        free_bootmem_with_active_regions(nodeid, end);

        /*
         * Convert early reservations to bootmem reservations now;
         * otherwise early_node_mem could hand out memory that is
         * still early-reserved on a previous node.
         */
        early_res_to_bootmem(start, end);

        /*
         * In some cases early_node_mem uses alloc_bootmem and gets a
         * range on another node; don't reserve that again.
         */
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
                                        pgdat_size, BOOTMEM_DEFAULT);
        nid = phys_to_nid(bootmap_start);
        if (nid != nodeid)
                printk(KERN_INFO "    bootmap(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
                                 bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);

        node_set_online(nodeid);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round-robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static char *cmdline __initdata;

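/*
 * Sketch of the numa=fake syntax accepted by the parser below:
 * either a plain node count, e.g. "numa=fake=4", or a comma-separated
 * list of node sizes in megabytes with an optional multiplier, e.g.
 * "numa=fake=2*512,1024" for two 512MB nodes plus one 1024MB node.
 */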
/*
 * Sets up nid to cover the range from addr to addr + size.  If the
 * end boundary is greater than max_addr, then max_addr is used
 * instead.  The return value is 0 if there is additional memory left
 * for allocation past addr and -1 otherwise.  addr is adjusted to be
 * at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

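/*
 * Illustrative example (assuming a 64MB FAKE_NODE_MIN_SIZE and no
 * e820 holes): splitting 4GB into 3 nodes rounds each node down to
 * 1344MB; here no "big" node is created, so the final node is simply
 * extended to max_addr and ends up with the remaining 1408MB.
 */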
/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start,
                                      int num_nodes)
{
        unsigned int big;
        u64 size;
        int i;

        if (num_nodes <= 0)
                return -1;
        if (num_nodes > MAX_NUMNODES)
                num_nodes = MAX_NUMNODES;
        size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
               num_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the leftovers.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
              FAKE_NODE_MIN_SIZE;

        /* Round down to nearest FAKE_NODE_MIN_SIZE. */
        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                printk(KERN_ERR "Not enough memory for each node.  "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;

                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * The final node can have the remaining system RAM.  Other
                 * nodes receive roughly the same amount of available pages.
                 */
                if (i == num_nodes + node_start - 1)
                        end = max_addr;
                else
                        while (end - *addr - e820_hole_size(*addr, end) <
                               size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > max_addr) {
                                        end = max_addr;
                                        break;
                                }
                        }
                if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
                        break;
        }
        return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of the given size.  The
 * remaining memory is always assigned to a final node and can be
 * asymmetric.  Returns the number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start, u64 size)
{
        int i = node_start;
        size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
        while (!setup_node_range(i++, nodes, addr, size, max_addr))
                ;
        return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static struct bootnode nodes[MAX_NUMNODES] __initdata;

static int __init numa_emulation(unsigned long start_pfn, unsigned long last_pfn)
{
        u64 size, addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

        memset(&nodes, 0, sizeof(nodes));
        /*
         * If the numa=fake command-line is just a single number N, split the
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
                long n = simple_strtol(cmdline, NULL, 0);

                num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
        }

        /* Parse the command line. */
        for (coeff_flag = 0; ; cmdline++) {
                if (*cmdline && isdigit(*cmdline)) {
                        num = num * 10 + *cmdline - '0';
                        continue;
                }
                if (*cmdline == '*') {
                        if (num > 0)
                                coeff = num;
                        coeff_flag = 1;
                }
                if (!*cmdline || *cmdline == ',') {
                        if (!coeff_flag)
                                coeff = 1;
                        /*
                         * Round down to the nearest FAKE_NODE_MIN_SIZE.
                         * Command-line coefficients are in megabytes.
                         */
                        size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
                        if (size)
                                for (i = 0; i < coeff; i++, num_nodes++)
                                        if (setup_node_range(num_nodes, nodes,
                                                &addr, size, max_addr) < 0)
                                                goto done;
                        if (!*cmdline)
                                break;
                        coeff_flag = 0;
                        coeff = -1;
                }
                num = 0;
        }
done:
        if (!num_nodes)
                return -1;
        /* Fill remainder of system RAM, if appropriate. */
        if (addr < max_addr) {
                if (coeff_flag && coeff < 0) {
                        /* Split remaining nodes into num-sized chunks */
                        num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
                                                         num_nodes, num);
                        goto out;
                }
                switch (*(cmdline - 1)) {
                case '*':
                        /* Split remaining nodes into coeff chunks */
                        if (coeff <= 0)
                                break;
                        num_nodes += split_nodes_equally(nodes, &addr, max_addr,
                                                         num_nodes, coeff);
                        break;
                case ',':
                        /* Do not allocate remaining system RAM */
                        break;
                default:
                        /* Give one final node */
                        setup_node_range(num_nodes, nodes, &addr,
                                         max_addr - addr, max_addr);
                        num_nodes++;
                }
        }
out:
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered by
         * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
         * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
         */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        acpi_numa = -1;
#endif
        for_each_node_mask(i, node_possible_map) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

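/*
 * Pick a node layout in decreasing order of preference: NUMA
 * emulation, the ACPI SRAT scan, then the AMD K8 northbridge
 * registers.  If all of them fail (or numa_off is set), fake a
 * single node covering all of memory.
 */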
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, last_pfn))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
                                        last_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
        e820_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        return pages;
}

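/*
 * Parse the "numa=" boot option: "off" disables NUMA, "fake=..."
 * enables NUMA emulation (CONFIG_NUMA_EMU) and "noacpi" disables
 * the ACPI SRAT scan (CONFIG_ACPI_NUMA).
 */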
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 0;
}
early_param("numa", numa_setup);

#ifdef CONFIG_NUMA
/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[] and
 * apicid_to_node[] have valid entries for a CPU.  This means we skip
 * cpu_to_node[] initialisation for NUMA emulation and the fake-node
 * case (a kernel compiled for NUMA running on a non-NUMA box), which
 * is fine: cpu_to_node[] was already initialized round-robin by
 * numa_init_array() before this call, and that is good enough for
 * the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        continue;
                numa_set_node(cpu, node);
        }
}
#endif

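/*
 * Record the node of a CPU: in the early boot map while the per_cpu
 * areas are not set up yet, in the per_cpu variables afterwards.
 */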
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                per_cpu(node_number, cpu) = node;
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
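/*
 * Set or clear a CPU in its node's cpumask, with sanity checking and
 * a debug printout of the resulting mask.
 */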
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];

        mask = node_to_cpumask_map[node];
        if (mask == NULL) {
                printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same as cpu_to_node(), but used if called before the per_cpu
 * areas are set up.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
 755