/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>

/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balance. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during load balance, except for idle cores. One idle core is selected to
 * run rebalance_domains for all idle cores, and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
        return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

#ifdef CONFIG_OF
struct cpu_efficiency {
        const char *compatible;
        unsigned long efficiency;
};

/*
 * Table of relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 *   0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2
 * in order to return at most 1 when DIV_ROUND_CLOSEST
 * is used to compute the capacity of a CPU.
 * Processors that are not defined in the table
 * use the default SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
        {"arm,cortex-a15", 3891},
        {"arm,cortex-a7",  2048},
        {NULL, },
};
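
/*
 * Worked example (illustrative only, not taken from this file): assume a
 * hypothetical big.LITTLE DT in which both a Cortex-A15 and a Cortex-A7
 * declare clock-frequency = <1000000000> (1 GHz). parse_dt_topology()
 * below then computes the raw capacities as
 *   (1000000000 >> 20) * 3891 = 953 * 3891 = 3708123   (A15)
 *   (1000000000 >> 20) * 2048 = 953 * 2048 = 1951744   (A7)
 * These raw values are later divided by middle_capacity so that an
 * 'average' CPU lands near SCHED_CAPACITY_SCALE (1024).
 */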

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)       __cpu_capacity[cpu]

static unsigned long middle_capacity = 1;

/*
 * Iterate over all CPUs' descriptors in DT and compute the efficiency
 * (as per table_efficiency). Also calculate a middle efficiency,
 * as close as possible to (max{eff_i} + min{eff_i}) / 2.
 * This is later used to scale the cpu_capacity field such that an
 * 'average' CPU is of middle capacity. Also see the comments near
 * table_efficiency[] and update_cpu_capacity().
 */
static void __init parse_dt_topology(void)
{
        const struct cpu_efficiency *cpu_eff;
        struct device_node *cn = NULL;
        unsigned long min_capacity = ULONG_MAX;
        unsigned long max_capacity = 0;
        unsigned long capacity = 0;
        int cpu = 0;

        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
                                 GFP_NOWAIT);

        for_each_possible_cpu(cpu) {
                const u32 *rate;
                int len;

                /* too early to use cpu->of_node */
                cn = of_get_cpu_node(cpu, NULL);
                if (!cn) {
                        pr_err("missing device node for CPU %d\n", cpu);
                        continue;
                }

                for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
                        if (of_device_is_compatible(cn, cpu_eff->compatible))
                                break;

                if (cpu_eff->compatible == NULL)
                        continue;

                rate = of_get_property(cn, "clock-frequency", &len);
                if (!rate || len != 4) {
                        pr_err("%s missing clock-frequency property\n",
                                cn->full_name);
                        continue;
                }

                capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;

                /* Save min capacity of the system */
                if (capacity < min_capacity)
                        min_capacity = capacity;

                /* Save max capacity of the system */
                if (capacity > max_capacity)
                        max_capacity = capacity;

                cpu_capacity(cpu) = capacity;
        }

        /*
         * If min and max capacities are equal, we bypass the update of the
         * cpu_scale because all CPUs have the same capacity. Otherwise, we
         * compute a middle_capacity factor that will ensure that the capacity
         * of an 'average' CPU of the system will be as close as possible to
         * SCHED_CAPACITY_SCALE, which is the default value, but with the
         * constraint explained near table_efficiency[].
         */
        if (4*max_capacity < (3*(max_capacity + min_capacity)))
                middle_capacity = (min_capacity + max_capacity)
                                >> (SCHED_CAPACITY_SHIFT+1);
        else
                middle_capacity = ((max_capacity / 3)
                                >> (SCHED_CAPACITY_SHIFT-1)) + 1;
}
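
/*
 * Continuing the illustrative 1 GHz A15 + A7 example from above:
 * max_capacity = 3708123 and min_capacity = 1951744, so
 *   4 * max = 14832492 < 3 * (max + min) = 16979601
 * and the first branch is taken:
 *   middle_capacity = (1951744 + 3708123) >> 11 = 2763
 * (SCHED_CAPACITY_SHIFT is 10; the extra +1 in the shift folds in
 * the division by two that averages min and max.)
 */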

/*
 * Look for a custom capacity for a CPU in the cpu_capacity table during
 * boot. The update of all CPUs is in O(n^2) for a heterogeneous system,
 * but the function returns directly for an SMP (homogeneous) system.
 */
static void update_cpu_capacity(unsigned int cpu)
{
        if (!cpu_capacity(cpu))
                return;

        set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);

        pr_info("CPU%u: update cpu_capacity %lu\n",
                cpu, arch_scale_cpu_capacity(NULL, cpu));
}
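
/*
 * Finishing the illustrative example: with middle_capacity = 2763,
 *   A15: 3708123 / 2763 = 1342
 *   A7:  1951744 / 2763 = 706
 * The two cpu_scale values straddle SCHED_CAPACITY_SCALE (1024) and
 * average exactly (1342 + 706) / 2 = 1024, while respecting the
 * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 constraint noted above.
 */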

#else
static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif

/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
        return &cpu_topology[cpu].thread_sibling;
}

static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
        smp_wmb();
}
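
/*
 * Illustration (hypothetical topology, not derived from this file):
 * on a two-cluster system without MT, e.g. CPUs 0-1 with socket_id 0
 * and CPUs 2-4 with socket_id 1, once every CPU has gone through
 * store_cpu_topology() we end up with core_sibling(3) = { 2, 3, 4 }
 * and, because each core has a distinct core_id within its socket,
 * thread_sibling(3) = { 3 }.
 */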

/*
 * store_cpu_topology is called at boot, when only one CPU is running, and,
 * once several CPUs have booted, with the mutex cpu_hotplug.lock held;
 * this prevents simultaneous write access to the cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
        unsigned int mpidr;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        mpidr = read_cpuid_mpidr();

        /* create cpu topology mapping */
        if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
                /*
                 * This is a multiprocessor system:
                 * the multiprocessor format & multiprocessor mode field are set
                 */

                if (mpidr & MPIDR_MT_BITMASK) {
                        /* core performance interdependency */
                        cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
                } else {
                        /* largely independent cores */
                        cpuid_topo->thread_id = -1;
                        cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                        cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                }
        } else {
                /*
                 * This is a uniprocessor system:
                 * either we are in multiprocessor format but on a
                 * uniprocessor system, or in the old uniprocessor format
                 */
                cpuid_topo->thread_id = -1;
                cpuid_topo->core_id = 0;
                cpuid_topo->socket_id = -1;
        }

        update_siblings_masks(cpuid);

        update_cpu_capacity(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id, mpidr);
}
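
/*
 * Example MPIDR decoding (hypothetical value): mpidr = 0x80000101 has
 * bit 31 set and bit 30 clear, so (mpidr & MPIDR_SMP_BITMASK) ==
 * MPIDR_SMP_VALUE and this is a multiprocessor system. Bit 24 (MT) is
 * clear, so the cores are treated as independent:
 *   thread_id = -1
 *   core_id   = MPIDR_AFFINITY_LEVEL(mpidr, 0) = 0x01
 *   socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) = 0x01
 * i.e. core 1 of cluster 1.
 */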

static inline int cpu_corepower_flags(void)
{
        return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
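
/*
 * With CONFIG_SCHED_MC enabled, this table yields up to three scheduler
 * levels on, say, a two-cluster big.LITTLE system: GMC spanning each
 * independently power-gateable unit (a single core, per the assumption
 * near cpu_corepower_mask), MC spanning each cluster (core_sibling),
 * and DIE spanning all CPUs. Degenerate levels are collapsed by the
 * scheduler core.
 */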

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        parse_dt_topology();

        /* Set scheduler topology descriptor */
        set_sched_topology(arm_topology);
}