linux/arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

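/*
 * Fill in the physical package (socket) id for a logical CPU if cpuinfo
 * does not already carry one; presumably invoked by the ACPI processor
 * driver once the firmware-provided slot is known.
 */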
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
        if (cpu_data(num)->socket_id == -1)
                cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
        /*
         * If CPEI can be re-targeted or if this CPU is not the
         * CPEI target, then it is hotpluggable.
         */
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
#endif
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
        unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /*CONFIG_HOTPLUG_CPU*/


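/*
 * Boot-time registration: create the sysfs node objects for each online
 * node (on NUMA) and a cpu device for every present CPU.
 */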
static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used to pretty-print the cache attributes
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"       /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",             /* reserved */
        ""              /* reserved */
};

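/* One cache_info per cache leaf (a level/type pair reported by PAL). */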
struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS] __cpuinitdata;
#define LEAF_KOBJECT_PTR(x,y)   (&all_cpu_cache_info[x].cache_leaves[y])

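/*
 * Build the set of logical CPUs that share a cache leaf.  On SMP this walks
 * the sharing information returned by PAL; the uniprocessor variant below
 * simply records the probing CPU itself.
 */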
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
        return;
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

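/*
 * number_of_sets = cache size / (associativity * line size);
 * pcci_line_size is the log2 of the line size.
 */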
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map);
        len += sprintf(buf+len, "\n");
        return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

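/*
 * The resulting sysfs layout mirrors the generic per-CPU cache interface,
 * e.g. (illustrative, assuming cpu0 has at least two cache leaves):
 *
 *   /sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *   /sys/devices/system/cpu/cpu0/cache/index1/{type,level,size,...}
 */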
#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
        .show   = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
        return;
}

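/*
 * Query PAL for every cache level and type on this CPU (type 2 = data or
 * unified, type 1 = instruction, matching the cache_types table above) and
 * record one cache_info leaf per successful response.
 */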
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        long status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

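        /*
         * PAL describes only the CPU it executes on, so temporarily pin this
         * task to the target CPU while the cache information is collected.
         */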
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed_ptr(current, &oldmask);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
                                      &cache_ktype_percpu_entry, &sys_dev->kobj,
                                      "%s", "cache");
        if (unlikely(retval < 0)) {
                cpu_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &cache_ktype,
                                              &all_cpu_cache_info[cpu].kobj,
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_put(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
        return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_put(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, check and initialize its
 * cache kobject if necessary.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};

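/*
 * Register cache sysfs entries for the CPUs already online at boot and hook
 * the CPU hotplug notifier so later online/offline events are handled too.
 */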
static int __init cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
                cache_add_dev(sys_dev);
        }

        register_hotcpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);