linux/arch/powerpc/mm/numa.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>
#include <asm/drmem.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;

#define FORM0_AFFINITY 0
#define FORM1_AFFINITY 1
#define FORM2_AFFINITY 2
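/*
 * Summary of the three PAPR affinity encodings handled below: FORM0 is
 * flat (a node is either local or remote), FORM1 derives distance from
 * how early two ibm,associativity arrays diverge at the reference
 * points, and FORM2 supplies an explicit node-distance matrix that is
 * parsed into numa_distance_table.
 */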
static int affinity_form;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
        [0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
};
static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for_each_node(node)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify the node id only if we have already started creating
         * NUMA nodes; we want to continue from where we left off last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                pr_debug("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
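/*
 * Illustrative example (values assumed, not from this file): booting
 * with "numa=fake=1G,3G,5G" makes the parser above carve fake nodes at
 * the 1G, 3G and 5G boundaries, so each call advances fake_nid once
 * end_pfn crosses the next boundary parsed from the cmdline.
 */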

static void reset_numa_cpu_lookup_table(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                numa_cpu_lookup_table[cpu] = -1;
}

void map_cpu_to_node(int cpu, int node)
{
        update_numa_cpu_lookup_table(cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
                pr_debug("adding cpu %d to node %d\n", cpu, node);
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
        }
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
                pr_debug("removing cpu %lu from node %d\n", cpu, node);
        } else {
                pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

static int __associativity_to_nid(const __be32 *associativity,
                                  int max_array_sz)
{
        int nid;
        /*
         * primary_domain_index is a 1-based array index.
         */
        int index = primary_domain_index - 1;

        if (!numa_enabled || index >= max_array_sz)
                return NUMA_NO_NODE;

        nid = of_read_number(&associativity[index], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= nr_node_ids)
                nid = NUMA_NO_NODE;
        return nid;
}
/*
 * Returns nid in the range [0..nr_node_ids - 1], or NUMA_NO_NODE (-1)
 * if no useful NUMA information is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int array_sz = of_read_number(associativity, 1);

        /* Skip the first element in the associativity array */
        return __associativity_to_nid((associativity + 1), array_sz);
}
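/*
 * Illustrative example (property value assumed): an ibm,associativity
 * value of <4 0 0 0 1> encodes array_sz = 4 followed by four domain
 * ids; with primary_domain_index = 4, the node id is the last entry (1).
 */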

static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
        int dist;
        int node1, node2;

        node1 = associativity_to_nid(cpu1_assoc);
        node2 = associativity_to_nid(cpu2_assoc);

        dist = numa_distance_table[node1][node2];
        if (dist <= LOCAL_DISTANCE)
                return 0;
        else if (dist <= REMOTE_DISTANCE)
                return 1;
        else
                return 2;
}

static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
        int dist = 0;
        int i, index;

        for (i = 0; i < distance_ref_points_depth; i++) {
                index = be32_to_cpu(distance_ref_points[i]);
                if (cpu1_assoc[index] == cpu2_assoc[index])
                        break;
                dist++;
        }

        return dist;
}

int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
        /* We should not get called with FORM0 */
        VM_WARN_ON(affinity_form == FORM0_AFFINITY);
        if (affinity_form == FORM1_AFFINITY)
                return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
        return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
}

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (affinity_form == FORM2_AFFINITY)
                return numa_distance_table[a][b];
        else if (affinity_form == FORM0_AFFINITY)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
EXPORT_SYMBOL(__node_distance);
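/*
 * Worked example for the FORM1 path above (values assumed): with
 * LOCAL_DISTANCE = 10 and two reference-point levels, nodes whose
 * distance_lookup_table entries differ at the first level but match at
 * the second yield 10 * 2 = 20; differing at both levels yields 40.
 */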

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = NUMA_NO_NODE;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        int nid = NUMA_NO_NODE;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                device = of_get_next_parent(device);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static void __initialize_form1_numa_distance(const __be32 *associativity,
                                             int max_array_sz)
{
        int i, nid;

        if (affinity_form != FORM1_AFFINITY)
                return;

        nid = __associativity_to_nid(associativity, max_array_sz);
        if (nid != NUMA_NO_NODE) {
                for (i = 0; i < distance_ref_points_depth; i++) {
                        const __be32 *entry;
                        int index = be32_to_cpu(distance_ref_points[i]) - 1;

                        /*
                         * broken hierarchy, return with broken distance table
                         */
                        if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
                                return;

                        entry = &associativity[index];
                        distance_lookup_table[nid][i] = of_read_number(entry, 1);
                }
        }
}

static void initialize_form1_numa_distance(const __be32 *associativity)
{
        int array_sz;

        array_sz = of_read_number(associativity, 1);
        /* Skip the first element in the associativity array */
        __initialize_form1_numa_distance(associativity + 1, array_sz);
}

/*
 * Used to update distance information w.r.t. a newly added node.
 */
void update_numa_distance(struct device_node *node)
{
        int nid;

        if (affinity_form == FORM0_AFFINITY)
                return;
        else if (affinity_form == FORM1_AFFINITY) {
                const __be32 *associativity;

                associativity = of_get_associativity(node);
                if (!associativity)
                        return;

                initialize_form1_numa_distance(associativity);
                return;
        }

        /* FORM2 affinity */
        nid = of_node_to_nid_single(node);
        if (nid == NUMA_NO_NODE)
                return;

        /*
         * With FORM2 we expect NUMA distance of all possible NUMA
         * nodes to be provided during boot.
         */
        WARN(numa_distance_table[nid][nid] == -1,
             "NUMA distance details for node %d not provided\n", nid);
}

/*
 * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
 * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
 */
static void initialize_form2_numa_distance_lookup_table(void)
{
        int i, j;
        struct device_node *root;
        const __u8 *numa_dist_table;
        const __be32 *numa_lookup_index;
        int numa_dist_table_length;
        int max_numa_index, distance_index;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
        max_numa_index = of_read_number(&numa_lookup_index[0], 1);

        /* first element of the array is the size and is encode-int */
        numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
        numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
        /* Skip the size which is encoded int */
        numa_dist_table += sizeof(__be32);

        pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
                 numa_dist_table_length, max_numa_index);

        for (i = 0; i < max_numa_index; i++)
                /* +1 skip the max_numa_index in the property */
                numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

        if (numa_dist_table_length != max_numa_index * max_numa_index) {
                WARN(1, "Wrong NUMA distance information\n");
                /* consider everybody else just remote. */
                for (i = 0;  i < max_numa_index; i++) {
                        for (j = 0; j < max_numa_index; j++) {
                                int nodeA = numa_id_index_table[i];
                                int nodeB = numa_id_index_table[j];

                                if (nodeA == nodeB)
                                        numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
                                else
                                        numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
                        }
                }
                /*
                 * Don't fall through and parse the malformed distance
                 * table; keep the local/remote fallback filled in above.
                 */
                of_node_put(root);
                return;
        }

        distance_index = 0;
        for (i = 0;  i < max_numa_index; i++) {
                for (j = 0; j < max_numa_index; j++) {
                        int nodeA = numa_id_index_table[i];
                        int nodeB = numa_id_index_table[j];

                        numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
                        pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
                }
        }
        of_node_put(root);
}
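/*
 * Illustrative example (property values assumed): with
 * ibm,numa-lookup-index-table = <2 0 4> (two nodes, ids 0 and 4) and a
 * distance table of {10, 40, 40, 10}, the loops above fill
 * dist[0][0] = dist[4][4] = 10 and dist[0][4] = dist[4][0] = 40.
 */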

static int __init find_primary_domain_index(void)
{
        int index;
        struct device_node *root;

        /*
         * Check for which form of affinity.
         */
        if (firmware_has_feature(FW_FEATURE_OPAL)) {
                affinity_form = FORM1_AFFINITY;
        } else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
                pr_debug("Using form 2 affinity\n");
                affinity_form = FORM2_AFFINITY;
        } else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
                pr_debug("Using form 1 affinity\n");
                affinity_form = FORM1_AFFINITY;
        } else
                affinity_form = FORM0_AFFINITY;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                pr_debug("ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);
        if (affinity_form == FORM0_AFFINITY) {
                if (distance_ref_points_depth < 2) {
                        pr_warn("short ibm,associativity-reference-points\n");
                        goto err;
                }

                index = of_read_number(&distance_ref_points[1], 1);
        } else {
                /*
                 * Both FORM1 and FORM2 affinity find the primary domain details
                 * at the same offset.
                 */
                index = of_read_number(distance_ref_points, 1);
        }
        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                pr_warn("distance array capped at %d entries\n",
                        MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return index;

err:
        of_node_put(root);
        return -1;
}
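/*
 * Illustrative example (value assumed): under FORM1,
 * ibm,associativity-reference-points = <0x4 0x2> makes domain index 4
 * the primary (node-defining) boundary, so this returns 4 and
 * primary_domain_index becomes 4.
 */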

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
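/*
 * Worked example: with n = 2 and cells <0x1 0x00000000>, read_n_cells()
 * returns (0x1 << 32) | 0x0 = 0x100000000 and advances *buf by two cells.
 */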

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
        struct device_node *memory;
        const __be32 *prop;
        u32 len;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (!memory)
                return -1;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int)) {
                of_node_put(memory);
                return -1;
        }

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        of_node_put(memory);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
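/*
 * Illustrative layout (values assumed): a property of
 * <2 4  0 0 0 0  0 0 0 1> describes N = 2 arrays of M = 4 cells each;
 * an LMB with aa_index = 1 uses the second array, &aa->arrays[1 * 4].
 */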

static int get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
        struct assoc_arrays aa = { .arrays = NULL };
        int default_nid = NUMA_NO_NODE;
        int nid = default_nid;
        int rc, index;

        if ((primary_domain_index < 0) || !numa_enabled)
                return default_nid;

        rc = of_get_assoc_arrays(&aa);
        if (rc)
                return default_nid;

        if (primary_domain_index <= aa.array_sz &&
            !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
                const __be32 *associativity;

                index = lmb->aa_index * aa.array_sz;
                associativity = &aa.arrays[index];
                nid = __associativity_to_nid(associativity, aa.array_sz);
                if (nid > 0 && affinity_form == FORM1_AFFINITY) {
                        /*
                         * Lookup-array associativity entries do not have
                         * the array length as their first element.
                         */
                        __initialize_form1_numa_distance(associativity, aa.array_sz);
                }
        }
        return nid;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
        struct assoc_arrays aa = { .arrays = NULL };
        int default_nid = NUMA_NO_NODE;
        int nid = default_nid;
        int rc, index;

        if ((primary_domain_index < 0) || !numa_enabled)
                return default_nid;

        rc = of_get_assoc_arrays(&aa);
        if (rc)
                return default_nid;

        if (primary_domain_index <= aa.array_sz &&
            !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
                const __be32 *associativity;

                index = lmb->aa_index * aa.array_sz;
                associativity = &aa.arrays[index];
                nid = __associativity_to_nid(associativity, aa.array_sz);
        }
        return nid;
}

#ifdef CONFIG_PPC_SPLPAR

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
        long rc, hwid;

        /*
         * On a shared LPAR, the device tree will not have node
         * associativity. At this point the lppaca, or its __old_status
         * field, may not be updated, so the kernel cannot detect whether
         * it is on a shared LPAR. Hence request an explicit associativity
         * irrespective of whether the LPAR is shared or dedicated, and
         * use the device tree property as a fallback. cpu_to_phys_id is
         * only valid between smp_setup_cpu_maps() and smp_setup_pacas().
         */
        if (firmware_has_feature(FW_FEATURE_VPHN)) {
                if (cpu_to_phys_id)
                        hwid = cpu_to_phys_id[lcpu];
                else
                        hwid = get_hard_smp_processor_id(lcpu);

                rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
                if (rc == H_SUCCESS)
                        return 0;
        }

        return -1;
}

static int vphn_get_nid(long lcpu)
{
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};

        if (!__vphn_get_associativity(lcpu, associativity))
                return associativity_to_nid(associativity);

        return NUMA_NO_NODE;
}
#else

static int __vphn_get_associativity(long lcpu, __be32 *associativity)
{
        return -1;
}

static int vphn_get_nid(long unused)
{
        return NUMA_NO_NODE;
}
#endif  /* CONFIG_PPC_SPLPAR */

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        struct device_node *cpu;
        int fcpu = cpu_first_thread_sibling(lcpu);
        int nid = NUMA_NO_NODE;

        if (!cpu_present(lcpu)) {
                set_cpu_numa_node(lcpu, first_online_node);
                return first_online_node;
        }

        /*
         * If a valid cpu-to-node mapping is already available, use it
         * directly instead of querying the firmware, since it represents
         * the most recent mapping notified to us by the platform (eg: VPHN).
         * The cpu-to-node binding is the same for all threads in a core,
         * so if the first thread in the core already has a valid mapping,
         * reuse it.
         */
        nid = numa_cpu_lookup_table[fcpu];
        if (nid >= 0) {
                map_cpu_to_node(lcpu, nid);
                return nid;
        }

        nid = vphn_get_nid(lcpu);
        if (nid != NUMA_NO_NODE)
                goto out_present;

        cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                if (cpu_present(lcpu))
                        goto out_present;
                else
                        goto out;
        }

        nid = of_node_to_nid_single(cpu);
        of_node_put(cpu);

out_present:
        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        /*
         * Update for the first thread of the core. All threads of a core
         * have to be part of the same node. This not only avoids querying
         * for every other thread in the core, but also avoids a case
         * where a virtual node associativity change causes subsequent
         * threads of a core to be associated with different nids. However,
         * if the first thread is already online, expect it to have a
         * valid mapping.
         */
        if (fcpu != lcpu) {
                WARN_ON(cpu_online(fcpu));
                map_cpu_to_node(fcpu, nid);
        }

        map_cpu_to_node(lcpu, nid);
out:
        return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
        int base, sibling, i;

        /* Verify that all the threads in the core belong to the same node */
        base = cpu_first_thread_sibling(cpu);

        for (i = 0; i < threads_per_core; i++) {
                sibling = base + i;

                if (sibling == cpu || cpu_is_offline(sibling))
                        continue;

                if (cpu_to_node(sibling) != node) {
                        WARN(1, "CPU thread siblings %d and %d don't belong to the same node!\n",
                             cpu, sibling);
                        break;
                }
        }
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
        int nid;

        nid = numa_setup_cpu(cpu);
        verify_cpu_node_mapping(cpu, nid);
        return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
        return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
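/*
 * Worked example (addresses assumed): if memblock_end_of_DRAM() is
 * 0x40000000, a region starting at 0x30000000 with size 0x20000000 is
 * truncated to 0x10000000, and one starting at 0x50000000 returns 0.
 */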

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
                                        const __be32 **usm,
                                        void *data)
{
        unsigned int ranges, is_kexec_kdump = 0;
        unsigned long base, size, sz;
        int nid;

        /*
         * Skip this block if the reserved bit is set in flags (0x80)
         * or if the block is not assigned to this partition (0x8)
         */
        if ((lmb->flags & DRCONF_MEM_RESERVED)
            || !(lmb->flags & DRCONF_MEM_ASSIGNED))
                return 0;

        if (*usm)
                is_kexec_kdump = 1;

        base = lmb->base_addr;
        size = drmem_lmb_size();
        ranges = 1;

        if (is_kexec_kdump) {
                ranges = read_usm_ranges(usm);
                if (!ranges) /* there are no (base, size) pairs */
                        return 0;
        }

        do {
                if (is_kexec_kdump) {
                        base = read_n_cells(n_mem_addr_cells, usm);
                        size = read_n_cells(n_mem_size_cells, usm);
                }

                nid = get_nid_and_numa_distance(lmb);
                fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
                                          &nid);
                node_set_online(nid);
                sz = numa_enforce_memory_limit(base, size);
                if (sz)
                        memblock_set_node(base, sz, &memblock.memory, nid);
        } while (--ranges);

        return 0;
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;
        const __be32 *associativity;

        if (numa_enabled == 0) {
                pr_warn("disabled by user\n");
                return -1;
        }

        primary_domain_index = find_primary_domain_index();

        if (primary_domain_index < 0) {
                /*
                 * If we fail to parse primary_domain_index from the
                 * device tree, mark NUMA as disabled and boot that way.
                 */
                numa_enabled = false;
                return primary_domain_index;
        }

        pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);

        /*
         * If it is FORM2 initialize the distance table here.
         */
        if (affinity_form == FORM2_AFFINITY)
                initialize_form2_numa_distance_lookup_table();

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                __be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
                struct device_node *cpu;
                int nid = NUMA_NO_NODE;

                memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));

                if (__vphn_get_associativity(i, vphn_assoc) == 0) {
                        nid = associativity_to_nid(vphn_assoc);
                        initialize_form1_numa_distance(vphn_assoc);
                } else {
                        /*
                         * Don't fall back to default_nid yet -- we will plug
                         * cpus into nodes once the memory scan has discovered
                         * the topology.
                         */
                        cpu = of_get_cpu_node(i, NULL);
                        BUG_ON(!cpu);

                        associativity = of_get_associativity(cpu);
                        if (associativity) {
                                nid = associativity_to_nid(associativity);
                                initialize_form1_numa_distance(associativity);
                        }
                        of_node_put(cpu);
                }

                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                associativity = of_get_associativity(memory);
                if (associativity) {
                        nid = associativity_to_nid(associativity);
                        initialize_form1_numa_distance(associativity);
                } else
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, size);
                if (size)
                        memblock_set_node(start, size, &memblock.memory, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
                of_node_put(memory);
        }

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        int i;

        pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
        pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (!numa_enabled)
                return;

        for_each_online_node(node) {
                pr_info("Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        pr_cont(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        pr_cont("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        pr_cont("-%u", nr_cpu_ids - 1);
                pr_cont("\n");
        }
}
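/*
 * Illustrative output (CPU layout assumed): the loop above collapses
 * runs of set bits, printing e.g. "Node 0 CPUs: 0-7" and
 * "Node 1 CPUs: 8-15" for a two-node, 16-CPU system.
 */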

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
        u64 spanned_pages = end_pfn - start_pfn;
        const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
        u64 nd_pa;
        void *nd;
        int tnid;

        nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
        if (!nd_pa)
                panic("Cannot allocate %zu bytes for node %d data\n",
                      nd_size, nid);

        nd = __va(nd_pa);

        /* report and initialize */
        pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
                nd_pa, nd_pa + nd_size - 1);
        tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
        if (tnid != nid)
                pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

        node_data[nid] = nd;
        memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
        NODE_DATA(nid)->node_id = nid;
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

static void __init find_possible_nodes(void)
{
        struct device_node *rtas;
        const __be32 *domains = NULL;
        int prop_length, max_nodes;
        u32 i;

        if (!numa_enabled)
                return;

        rtas = of_find_node_by_path("/rtas");
        if (!rtas)
                return;

        /*
         * ibm,current-associativity-domains is a fairly recent property.
         * If it doesn't exist, then fall back on
         * ibm,max-associativity-domains. "Current" denotes what the
         * platform can support compared to "max", which denotes what the
         * hypervisor can support.
         *
         * If the LPAR is migratable, new nodes might be activated after
         * an LPM (Live Partition Migration), so we should consider the
         * max number in that case.
         */
        if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
                domains = of_get_property(rtas,
                                          "ibm,current-associativity-domains",
                                          &prop_length);
        if (!domains) {
                domains = of_get_property(rtas, "ibm,max-associativity-domains",
                                        &prop_length);
                if (!domains)
                        goto out;
        }

        max_nodes = of_read_number(&domains[primary_domain_index], 1);
        pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);

        for (i = 0; i < max_nodes; i++) {
                if (!node_possible(i))
                        node_set(i, node_possible_map);
        }

        prop_length /= sizeof(int);
        if (prop_length > primary_domain_index + 2)
                coregroup_enabled = 1;

out:
        of_node_put(rtas);
}
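/*
 * Illustrative example (property value assumed): with
 * ibm,max-associativity-domains = <4 2 4 8 32> and
 * primary_domain_index = 4, max_nodes is read as 32, so nodes 0-31
 * are marked possible above.
 */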

void __init mem_topology_setup(void)
{
        int cpu;

        /*
         * Linux/mm assumes node 0 to be online at boot. However this is
         * not true on PowerPC, where node 0 is similar to any other node:
         * it could be a cpuless, memoryless node. So force node 0 to be
         * offline for now. This prevents a cpuless, memoryless node 0
         * from showing up unnecessarily as online. If a node has cpus or
         * memory that need to be online, the node will be marked online
         * anyway.
         */
        node_set_offline(0);

        if (parse_numa_properties())
                setup_nonnuma();

        /*
         * Modify the set of possible NUMA nodes to reflect information
         * available about the set of online nodes, and the set of nodes
         * that we expect to make use of for this platform's affinity
         * calculations.
         */
        nodes_and(node_possible_map, node_possible_map, node_online_map);

        find_possible_nodes();

        setup_node_to_cpumask_map();

        reset_numa_cpu_lookup_table();

        for_each_possible_cpu(cpu) {
                /*
                 * Powerpc with CONFIG_NUMA always used to have a node 0,
                 * even if it was memoryless or cpuless. For all cpus that
                 * are possible but not present, cpu_to_node() would point
                 * to node 0. To remove a cpuless, memoryless dummy node,
                 * powerpc needs to make sure all possible-but-not-present
                 * cpu_to_node entries are set to a proper node.
                 */
                numa_setup_cpu(cpu);
        }
}

void __init initmem_init(void)
{
        int nid;

        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        memblock_dump_all();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
                setup_node_data(nid, start_pfn, end_pfn);
        }

        sparse_init();

        /*
         * We need the numa_cpu_lookup_table to be accurate for all CPUs,
         * even before we online them, so that we can use cpu_to_{node,mem}
         * early in boot, cf. smp_prepare_cpus().
         * _nocalls() + manual invocation is used because cpuhp is not yet
         * initialized for the boot CPU.
         */
        cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
                                  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);
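/*
 * Illustrative usage (cmdline assumed): "numa=off" disables NUMA
 * entirely, while "numa=fake=2G,4G" hands the substring after "fake="
 * to fake_numa_create_new_node() via the cmdline pointer above.
 */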

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
{
        struct drmem_lmb *lmb;
        unsigned long lmb_size;
        int nid = NUMA_NO_NODE;

        lmb_size = drmem_lmb_size();

        for_each_drmem_lmb(lmb) {
                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((lmb->flags & DRCONF_MEM_RESERVED)
                    || !(lmb->flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < lmb->base_addr)
                    || (scn_addr >= (lmb->base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(lmb);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = NUMA_NO_NODE;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed
 * that sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid;

        if (!numa_enabled)
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_possible(nid))
                nid = first_online_node;

        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        struct device_node *dn = NULL;
        const __be64 *lrdr = NULL;

        dn = of_find_node_by_path("/rtas");
        if (dn) {
                lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
                of_node_put(dn);
                if (lrdr)
                        return be64_to_cpup(lrdr);
        }

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                of_node_put(memory);
                return drmem_lmb_memory_max();
        }
        return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static int topology_inited;

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long vphn_get_associativity(unsigned long cpu,
                                        __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(get_hard_smp_processor_id(cpu),
                                VPHN_FLAG_VCPU, associativity);

        switch (rc) {
        case H_SUCCESS:
                pr_debug("VPHN hcall succeeded. Reset polling...\n");
                goto out;

        case H_FUNCTION:
                pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
                break;
        case H_HARDWARE:
                pr_err_ratelimited("hcall_vphn() experienced a hardware fault preventing VPHN. Disabling polling...\n");
                break;
        case H_PARAMETER:
                pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. Disabling polling...\n");
                break;
        default:
                pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n", rc);
                break;
        }
out:
        return rc;
}

int find_and_online_cpu_nid(int cpu)
{
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        int new_nid;

        /* Use associativity from first thread for all siblings */
        if (vphn_get_associativity(cpu, associativity))
                return cpu_to_node(cpu);

        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;

        if (NODE_DATA(new_nid) == NULL) {
#ifdef CONFIG_MEMORY_HOTPLUG
                /*
                 * Need to ensure that NODE_DATA is initialized for a node from
                 * available memory (see memblock_alloc_try_nid). If unable to
                 * init the node, then default to nearest node that has memory
                 * installed. Skip onlining a node if the subsystems are not
                 * yet initialized.
                 */
                if (!topology_inited || try_online_node(new_nid))
                        new_nid = first_online_node;
#else
                /*
                 * Default to using the nearest node that has memory installed.
                 * Otherwise, it would be necessary to patch the kernel MM code
                 * to deal with more memoryless-node error conditions.
                 */
                new_nid = first_online_node;
#endif
        }

        pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__,
                cpu, new_nid);
        return new_nid;
}

int cpu_to_coregroup_id(int cpu)
{
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        int index;

        if (cpu < 0 || cpu >= nr_cpu_ids)
                return -1;

        if (!coregroup_enabled)
                goto out;

        if (!firmware_has_feature(FW_FEATURE_VPHN))
                goto out;

        if (vphn_get_associativity(cpu, associativity))
                goto out;

        index = of_read_number(associativity, 1);
        if (index > primary_domain_index + 1)
                return of_read_number(&associativity[index - 1], 1);

out:
        return cpu_to_core_id(cpu);
}
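/*
 * Illustrative example (array assumed): for an associativity array
 * <6 a b c d e f> with primary_domain_index = 4, index is read as 6
 * (the array length), 6 > 4 + 1 holds, and the coregroup id returned
 * is associativity[5], i.e. the next-to-last domain entry (e).
 */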

static int topology_update_init(void)
{
        topology_inited = 1;
        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */