linux/arch/ia64/kernel/numa.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ia64 kernel NUMA specific stuff
 *
 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *   Jesse Barnes <jbarnes@sgi.com>
 */
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/smp.h>

u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);

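/*
 * Editorial note (not in the original source): generic code does not
 * normally index these arrays directly.  On ia64 the topology helpers
 * cpu_to_node() and node_to_cpumask() are expected to be thin wrappers
 * around cpu_to_node_map[] and node_to_cpu_mask[], roughly:
 *
 *	#define cpu_to_node(cpu)	(int)(cpu_to_node_map[cpu])
 *	#define node_to_cpumask(node)	(node_to_cpu_mask[node])
 *
 * The exact definitions live in the asm-ia64 topology header of this
 * kernel tree, so treat the two lines above as a sketch, not a quote.
 */
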
void __cpuinit map_cpu_to_node(int cpu, int nid)
{
        int oldnid;
        if (nid < 0) { /* just initialize to node 0 */
                cpu_to_node_map[cpu] = 0;
                return;
        }
        /* sanity check first */
        oldnid = cpu_to_node_map[cpu];
        if (cpu_isset(cpu, node_to_cpu_mask[oldnid])) {
                return; /* nothing to do */
        }
        /* We don't have CPU-driven node hot add yet...
           In the usual case, nodes are created from the SRAT at boot time. */
        if (!node_online(nid))
                nid = first_online_node;
        cpu_to_node_map[cpu] = nid;
        cpu_set(cpu, node_to_cpu_mask[nid]);
        return;
}

void __cpuinit unmap_cpu_from_node(int cpu, int nid)
{
        WARN_ON(!cpu_isset(cpu, node_to_cpu_mask[nid]));
        WARN_ON(cpu_to_node_map[cpu] != nid);
        cpu_to_node_map[cpu] = 0;
        cpu_clear(cpu, node_to_cpu_mask[nid]);
}


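/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): how a CPU bring-up/teardown path would be expected to pair
 * the two helpers above.  The function names are hypothetical; the real
 * callers live in the ia64 ACPI/SMP bring-up code.
 */
#if 0	/* example only */
static void __cpuinit example_attach_cpu(int cpu, int nid)
{
        /* Record the SRAT node for this logical cpu (node 0 if unknown). */
        map_cpu_to_node(cpu, nid);
}

static void __cpuinit example_detach_cpu(int cpu)
{
        /* Undo the association using the node recorded at map time. */
        unmap_cpu_from_node(cpu, cpu_to_node_map[cpu]);
}
#endif
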
/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per-node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
        int cpu, i, node;

        for (node = 0; node < MAX_NUMNODES; node++)
                cpus_clear(node_to_cpu_mask[node]);

        for_each_possible_early_cpu(cpu) {
                node = -1;
                for (i = 0; i < NR_CPUS; ++i)
                        if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
                                node = node_cpuid[i].nid;
                                break;
                        }
                map_cpu_to_node(cpu, node);
        }
}

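/*
 * Illustrative sketch (not in the original file, kept out of the build):
 * once build_cpu_to_node_map() has run during early boot, the resulting
 * mapping can be inspected as below.  The function name is hypothetical
 * and shown only to make the data flow concrete.
 */
#if 0	/* example only */
static void __init example_dump_cpu_to_node_map(void)
{
        int cpu;

        /* node_cpuid[] has been consumed; only the arrays above matter now. */
        for_each_possible_early_cpu(cpu)
                printk(KERN_DEBUG "cpu %d -> node %d\n",
                       cpu, cpu_to_node_map[cpu]);
}
#endif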