linux/arch/powerpc/mm/numa.c
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

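/*
 * Both flags are controlled by the "numa=" early command line parameter
 * (see early_numa() at the bottom of this file): "numa=off" clears
 * numa_enabled and "numa=debug" enables the dbg() messages.
 */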
static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

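/*
 * numa_cpu_lookup_table maps a logical cpu to its node, and
 * numa_cpumask_lookup_table maps a node to the mask of cpus on it.
 * The two are kept in sync by map_cpu_to_node()/unmap_cpu_from_node().
 */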
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

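/* Record the cpu -> node mapping and add the cpu to the node's cpumask. */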
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
                cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                cpu_clear(cpu, numa_cpumask_lookup_table[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */

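/*
 * Find the device tree "cpu" node for a logical cpu: match the hardware
 * cpu id against "ibm,ppc-interrupt-server#s" when present, otherwise
 * against the "reg" property.  Returns the node with a reference held,
 * or NULL if no match is found.
 */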
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
        unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
        struct device_node *cpu_node = NULL;
        const unsigned int *interrupt_server, *reg;
        int len;

        while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
                /* Try interrupt server first */
                interrupt_server = of_get_property(cpu_node,
                                        "ibm,ppc-interrupt-server#s", &len);

                len = len / sizeof(u32);

                if (interrupt_server && (len > 0)) {
                        while (len--) {
                                if (interrupt_server[len] == hw_cpuid)
                                        return cpu_node;
                        }
                } else {
                        reg = of_get_property(cpu_node, "reg", &len);
                        if (reg && (len > 0) && (reg[0] == hw_cpuid))
                                return cpu_node;
                }
        }

        return NULL;
}

/* The caller must hold a reference to the node for the duration of the call. */
static const int *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const unsigned int *tmp;

        if (min_common_depth == -1)
                goto out;

        tmp = of_get_associativity(device);
        if (!tmp)
                goto out;

        if (tmp[0] >= min_common_depth)
                nid = tmp[min_common_depth];

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;
out:
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth;
        const unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;

        rtas_root = of_find_node_by_path("/rtas");

        if (!rtas_root)
                return -1;

        /*
         * this property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes.  The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        ref_points = of_get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);

        /* len is in bytes; we dereference ref_points[1], so require two cells */
        if (ref_points && (len >= 2 * sizeof(unsigned int))) {
                depth = ref_points[1];
        } else {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                depth = -1;
        }
        of_node_put(rtas_root);

        return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

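/*
 * Assemble n consecutive 32-bit cells from *buf into a single value and
 * advance the buffer pointer past them; with n == 2 this reconstructs a
 * 64-bit address or size from the device tree.
 */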
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = find_cpu_node(lcpu);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = any_online_node(NODE_MASK_ALL);
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

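/*
 * CPU hotplug notifier: bind a cpu to its node when it is brought up and,
 * with CONFIG_HOTPLUG_CPU, drop the mapping when it is cancelled or dies.
 */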
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.
         */

        if (!memory_limit)
                return size;

        if (start + size <= lmb_end_of_DRAM())
                return size;

        if (start >= lmb_end_of_DRAM())
                return 0;

        return lmb_end_of_DRAM() - start;
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const unsigned int *lm, *dm, *aa;
        unsigned int ls, ld, la;
        unsigned int n, aam, aalen;
        unsigned long lmb_size, size, start;
        int nid, default_nid = 0;
        unsigned int ai, flags;

        lm = of_get_property(memory, "ibm,lmb-size", &ls);
        dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
        aa = of_get_property(memory, "ibm,associativity-lookup-arrays", &la);
        if (!lm || !dm || !aa ||
            ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
            la < 2 * sizeof(unsigned int))
                return;

        lmb_size = read_n_cells(n_mem_size_cells, &lm);
        n = *dm++;              /* number of LMBs */
        aam = *aa++;            /* number of associativity lists */
        aalen = *aa++;          /* length of each associativity list */
        if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
            la < (aam * aalen + 2) * sizeof(unsigned int))
                return;

        for (; n != 0; --n) {
                start = read_n_cells(n_mem_addr_cells, &dm);
                ai = dm[2];
                flags = dm[3];
                dm += 4;
                /* 0x80 == reserved, 0x8 = assigned to us */
                if ((flags & 0x80) || !(flags & 0x8))
                        continue;
                nid = default_nid;
                /* flags & 0x40 means associativity index is invalid */
                if (min_common_depth > 0 && min_common_depth <= aalen &&
                    (flags & 0x40) == 0 && ai < aam) {
                        /* this is like of_node_to_nid_single */
                        nid = aa[ai * aalen + min_common_depth - 1];
                        if (nid == 0xffff || nid >= MAX_NUMNODES)
                                nid = default_nid;
                }
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, lmb_size);
                if (!size)
                        continue;

                add_active_range(nid, start >> PAGE_SHIFT,
                                 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
        }
}

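/*
 * Read the NUMA topology from the device tree: establish min_common_depth,
 * online a node for every present cpu that reports one, then walk the
 * memory nodes (and any ibm,dynamic-reconfiguration-memory node) and
 * register their ranges with the correct node.  Returns 0 on success or
 * a negative value if no usable NUMA information was found.
 */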
static int __init parse_numa_properties(void)
{
        struct device_node *cpu = NULL;
        struct device_node *memory = NULL;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                int nid;

                cpu = find_cpu_node(i);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
        memory = NULL;
        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                add_active_range(nid, start >> PAGE_SHIFT,
                                (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each LMB listed in the ibm,dynamic-memory
         * property in the ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                parse_drconf_memory(memory);
                of_node_put(memory);
        }

        return 0;
}

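/*
 * Fallback used when no usable NUMA information is found: report the
 * memory layout and place every lmb region on node 0.
 */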
static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int i;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for (i = 0; i < lmb.memory.cnt; ++i) {
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
        node_set_online(0);
}

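/* Print the range(s) of cpus that belong to each online node. */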
void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", NR_CPUS - 1);
                printk("\n");
        }
}

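/*
 * Print the physical address range(s) that belong to each online node,
 * scanning memory in SECTION_SIZE steps.
 */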
static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, using the lmb allocator and falling back to the
 * bootmem allocator where required.  nid is the preferred node and
 * end_pfn is the pfn of the highest address in that node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        int new_nid;
        unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret)
                ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret)
                panic("numa.c: cannot allocate %lu bytes on node %d",
                      size, nid);

        /*
         * If the memory came from a previously allocated node, we must
         * retry with the bootmem allocator.
         */
        new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                if (!ret)
                        panic("numa.c: cannot allocate %lu bytes on node %d",
                              size, new_nid);

                ret = __pa(ret);

                dbg("alloc_bootmem %lx %lx\n", ret, size);
        }

        return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

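/*
 * Set up the bootmem allocator: parse the NUMA topology (falling back to a
 * single node), then allocate and initialise a node structure and bootmem
 * bitmap for each online node, reserving any lmb regions that fall within it.
 */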
void __init do_init_bootmem(void)
{
        int nid;
        unsigned int i;

        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                unsigned long bootmem_paddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /* Allocate the node structure node local if possible */
                NODE_DATA(nid) = careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
                NODE_DATA(nid) = __va(NODE_DATA(nid));
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_paddr = (unsigned long)careful_allocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);
                memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

                dbg("bootmap_paddr = %lx\n", bootmem_paddr);

                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);

                /* Mark reserved regions on this node */
                for (i = 0; i < lmb.reserved.cnt; i++) {
                        unsigned long physbase = lmb.reserved.region[i].base;
                        unsigned long size = lmb.reserved.region[i].size;
                        unsigned long start_paddr = start_pfn << PAGE_SHIFT;
                        unsigned long end_paddr = end_pfn << PAGE_SHIFT;

                        if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
                            early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
                                continue;

                        if (physbase < end_paddr &&
                            (physbase+size) > start_paddr) {
                                /* overlaps */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("reserve_bootmem %lx %lx\n", physbase,
                                    size);
                                reserve_bootmem_node(NODE_DATA(nid), physbase,
                                                     size);
                        }
                }

                sparse_memory_present_with_active_regions(nid);
        }
}

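/*
 * All of memory is placed in ZONE_DMA here (only its limit is set);
 * free_area_init_nodes() then builds each node's zones from the active
 * ranges registered in do_init_bootmem().
 */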
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

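/*
 * Handle the "numa=" early command line parameter: "numa=off" disables
 * NUMA entirely and "numa=debug" enables the dbg() output above.
 */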
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        nodemask_t nodes;
        int default_nid = any_online_node(NODE_MASK_ALL);
        int nid;

        if (!numa_enabled || (min_common_depth < 0))
                return default_nid;

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);
                nid = of_node_to_nid_single(memory);

                /* Domains not present at boot fall back to default_nid */
                if (nid < 0 || !node_online(nid))
                        nid = default_nid;

                if ((scn_addr >= start) && (scn_addr < (start + size))) {
                        of_node_put(memory);
                        goto got_nid;
                }

                if (--ranges)           /* process all ranges in cell */
                        goto ha_new_range;
        }
        BUG();  /* section address should be found above */
        return 0;

        /* Temporary code to ensure that returned node is not empty */
got_nid:
        nodes_setall(nodes);
        while (NODE_DATA(nid)->node_spanned_pages == 0) {
                node_clear(nid, nodes);
                nid = any_online_node(nodes);
        }
        return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */