linux/arch/x86/mm/numa_64.c
/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

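/*
 * memnode backs the physaddr->nodeid lookup used by phys_to_nid():
 * memnode_shift, memnodemap and memnodemapsize are accessors into this
 * structure (see asm/mmzone_64.h), so phys_to_nid(addr) is essentially
 * memnodemap[addr >> memnode_shift].
 */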
struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

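/*
 * memnodemap[] lives in memnode.embedded_map when it is small enough;
 * otherwise a cache-line aligned copy is carved out of memblock between
 * 0x8000 and the top of memory (max_pfn << PAGE_SHIFT) and reserved as
 * "MEMNODEMAP".
 */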
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
                                      nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The least significant bit set across the node start addresses is the
 * maximum possible shift; the highest node end address determines how
 * large memnodemap[] has to be.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i)+1;
        return i;
}

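/*
 * Pick the hash shift, allocate memnodemap[] and fill it in.  As a
 * hypothetical example: nodes starting at 0 and 0x40000000 share no
 * set bits below bit 30, so the shift becomes 30 and each 1 GB chunk
 * of the address space maps to exactly one memnodemap[] entry.
 */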
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
                shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned, you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

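/*
 * Grab `size' bytes for node `nodeid', preferably from the node's own
 * [start, end) range above the DMA zones; fall back to any mapped
 * memory if that fails.
 */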
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem;

        /*
         * Keep the allocation above the DMA and DMA32 zones where
         * possible; NODE_DATA and similar early data will go here.
         */
        if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
                start = MAX_DMA_PFN<<PAGE_SHIFT;
        if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
            end > (MAX_DMA32_PFN<<PAGE_SHIFT))
                start = MAX_DMA32_PFN<<PAGE_SHIFT;
        mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        /* extend the search scope */
        end = max_pfn_mapped << PAGE_SHIFT;
        start = MAX_DMA_PFN << PAGE_SHIFT;
        mem = memblock_find_in_range(start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);

        return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, nodedata_phys;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
                nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->node_id = nodeid;
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        node_set_online(nodeid);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
        cmdline = str;
}

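/*
 * Derive physnodes[] from the real topology (SRAT or the AMD
 * northbridge code) and clip every entry to [start, end); entries that
 * fall entirely outside that window are emptied.  Returns the number
 * of usable physical nodes, faking a single one if none were found.
 */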
static int __init setup_physnodes(unsigned long start, unsigned long end,
                                        int acpi, int amd)
{
        int ret = 0;
        int i;

        memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                acpi_get_nodes(physnodes, start, end);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                amd_get_nodes(physnodes);
#endif
        /*
         * Basic sanity checking on the physical node map: there may be errors
         * if the SRAT or AMD code incorrectly reported the topology or the mem=
         * kernel parameter is used.
         */
        for (i = 0; i < MAX_NUMNODES; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                if (physnodes[i].start > end) {
                        physnodes[i].end = physnodes[i].start;
                        continue;
                }
                if (physnodes[i].end < start) {
                        physnodes[i].start = physnodes[i].end;
                        continue;
                }
                if (physnodes[i].start < start)
                        physnodes[i].start = start;
                if (physnodes[i].end > end)
                        physnodes[i].end = end;
                ret++;
        }

        /*
         * If no physical topology was detected, a single node is faked to cover
         * the entire address space.
         */
        if (!ret) {
                physnodes[ret].start = start;
                physnodes[ret].end = end;
                ret = 1;
        }
        return ret;
}

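/*
 * Let the ACPI or AMD code adjust apicid<->node mappings for the fake
 * layout; with neither topology source, bind every possible CPU to
 * node 0.
 */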
static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
        int i;

        BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                amd_fake_nodes(nodes, nr_nodes);
#endif
        if (!acpi && !amd)
                for (i = 0; i < nr_cpu_ids; i++)
                        numa_set_node(i, 0);
}

/*
 * Sets up nid to cover the range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
        int ret = 0;
        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int ret = 0;
        int i;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                pr_err("Not enough memory for each node.  "
                        "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 end = physnodes[i].start + size;
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

                        if (ret < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
                                memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Avoid allocating more nodes than requested, which can
                         * happen as a result of rounding down each node's size
                         * to FAKE_NODE_MIN_SIZE.
                         */
                        if (nodes_weight(physnode_mask) + ret >= nr_nodes)
                                end = physnodes[i].end;

                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int ret = 0;
        int i;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
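        /*
         * Hypothetical example: with 4 GB of usable (non-hole) RAM and
         * MAX_NUMNODES == 64, min_size starts out at 64 MB and is then
         * rounded up to a FAKE_NODE_MIN_SIZE multiple if needed before
         * being enforced as the floor on the requested node size.
         */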
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                                                FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 end;

                        end = find_end_of_node(physnodes[i].start,
                                                physnodes[i].end, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Setup the fake node that will be allocated as bootmem
                         * later.  If setup_node_range() returns non-zero, there
                         * is no more memory available on this physical node.
                         */
                        if (setup_node_range(ret++, &physnodes[i].start,
                                                end - physnodes[i].start,
                                                physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
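/*
 * For example (hypothetical command lines): "numa=fake=8" splits the
 * RAM into 8 interleaved fake nodes, while "numa=fake=512M" carves it
 * into fake nodes of 512 MB each, as parsed below.
 */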
static int __init numa_emulation(unsigned long start_pfn,
                        unsigned long last_pfn, int acpi, int amd)
{
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes;
        int i;

        /*
         * If the numa=fake command-line parameter contains an 'M' or 'G',
         * it represents the fixed node size.  Otherwise, if it is just a
         * single number N, split the system RAM into N fake nodes.
         */
        if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
                u64 size;

                size = memparse(cmdline, &cmdline);
                num_nodes = split_nodes_size_interleave(addr, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(cmdline, NULL, 0);
                num_nodes = split_nodes_interleave(addr, max_addr, n);
        }

        if (num_nodes < 0)
                return num_nodes;
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered for
         * the e820 memory map.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map) {
                memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        setup_physnodes(addr, max_addr, acpi, amd);
        fake_physnodes(acpi, amd, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

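/*
 * Top-level boot-time NUMA setup: try NUMA emulation (when "numa=fake"
 * was given), then an ACPI/SRAT scan, then the AMD northbridge scan,
 * and finally fall back to a single dummy node covering all memory.
 */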
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
                                int acpi, int amd)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
                        acpi, amd);
        if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
                return;
        setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
                        acpi, amd);
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                                  last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
        if (!numa_off && amd && !amd_scan_nodes())
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
        memblock_x86_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

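/*
 * Hand all boot-time memory over to the page allocator; the return
 * value is the total number of pages released.
 */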
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        pages += free_all_memory_core_early(MAX_NUMNODES);

        return pages;
}

#ifdef CONFIG_NUMA

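/* Return the online node with the smallest node_distance() to @node. */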
static __init int find_near_online_node(int node)
{
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;

        for_each_online_node(n) {
                val = node_distance(node, n);

                if (val < min_val) {
                        min_val = val;
                        best_node = n;
                }
        }

        return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box).  That is fine: cpu_to_node[] has
 * already been initialized in a round-robin manner by
 * numa_init_array() before this call, and that initialization is
 * good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        node = find_near_online_node(node);
                numa_set_node(cpu, node);
        }
}
#endif

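/*
 * Record the cpu->node mapping: before the per-cpu areas exist this
 * updates the early x86_cpu_to_node_map[]; afterwards it writes the
 * per-cpu variable and, for a real node, the generic numa_node field.
 */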
void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

#ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
#else
void __cpuinit numa_add_cpu(int cpu)
{
        unsigned long addr;
        u16 apicid;
        int physnid;
        int nid = NUMA_NO_NODE;

        nid = early_cpu_to_node(cpu);
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

        /*
         * Use the starting address of the emulated node to find which physical
         * node it is allocated on.
         */
        addr = node_start_pfn(nid) << PAGE_SHIFT;
        for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        break;

        /*
         * Map the cpu to each emulated node that is allocated on the physical
         * node of the cpu's apic id.
         */
        for_each_online_node(nid) {
                addr = node_start_pfn(nid) << PAGE_SHIFT;
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
        }
}

void __cpuinit numa_remove_cpu(int cpu)
{
        int i;

        for_each_online_node(i)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#endif /* !CONFIG_NUMA_EMU */

#else /* CONFIG_DEBUG_PER_CPU_MAPS */
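/*
 * Debug variant: look up the node's cpumask and log every add/remove
 * so that cpu<->node transitions show up in the kernel log.
 */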
static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];

        mask = node_to_cpumask_map[node];
        if (!mask) {
                pr_err("node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return NULL;
        }

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
                enable ? "numa_add_cpu" : "numa_remove_cpu",
                cpu, node, buf);
        return mask;
}

/*
 * --------- debug versions of the numa functions ---------
 */
#ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        struct cpumask *mask;

        mask = debug_cpumask_set_cpu(cpu, enable);
        if (!mask)
                return;

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);
}
#else
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        int i;

        for_each_online_node(i) {
                unsigned long addr;

                addr = node_start_pfn(i) << PAGE_SHIFT;
                if (addr < physnodes[node].start ||
                                        addr >= physnodes[node].end)
                        continue;
                mask = debug_cpumask_set_cpu(cpu, enable);
                if (!mask)
                        return;

                if (enable)
                        cpumask_set_cpu(cpu, mask);
                else
                        cpumask_clear_cpu(cpu, mask);
        }
}
#endif /* CONFIG_NUMA_EMU */

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int __cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                        "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */