linux/arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
        if (cpu_data(num)->socket_id == -1)
                cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);


#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
#ifdef CONFIG_ACPI
        /*
         * If CPEI can be re-targeted or if this is not
         * the CPEI target, then it is hotpluggable
         */
        if (can_cpei_retarget() || !is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
#endif
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
#ifdef CONFIG_ACPI
        unmap_cpu_from_node(num, cpu_to_node(num));
#endif
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
        return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /* CONFIG_HOTPLUG_CPU */


static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);


/*
 * Export cpu cache information through sysfs
 */
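
/*
 * Illustrative layout of what the code below creates (the exact set of
 * indexN directories depends on the cache hierarchy PAL reports per CPU):
 *
 *   /sys/devices/system/cpu/cpuN/cache/
 *       index0/{level,type,coherency_line_size,ways_of_associativity,
 *               attributes,size,number_of_sets,shared_cpu_map}
 *       index1/...
 */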

/*
 *  A bunch of string arrays for pretty printing
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"       /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",             /* reserved */
        ""              /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int     num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info    all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
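/*
 * Build the mask of logical CPUs sharing this cache leaf.  A package with
 * a single core and a single thread trivially shares the cache only with
 * itself; otherwise PAL is asked which (core, thread) pairs share the
 * cache and those IDs are mapped back to logical CPUs on the same socket.
 */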
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info * this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpumask_set_cpu(j, &this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info * this_leaf)
{
        cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
        return;
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

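/*
 * PAL reports the total size, the associativity and log2 of the line size,
 * so the set count is derived as
 *   number_of_sets = pcci_cache_size / (pcci_assoc * 2^pcci_line_size).
 */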
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        cpumask_t shared_cpu_map;

        cpumask_and(&shared_cpu_map,
                                &this_leaf->shared_cpu_map, cpu_online_mask);
        return scnprintf(buf, PAGE_SIZE, "%*pb\n",
                         cpumask_pr_args(&shared_cpu_map));
}

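/*
 * cache_types[] is indexed 1 = Instruction, 2 = Data, 3 = Unified, so a
 * data cache that PAL flags as unified (pcci_unified) prints "Unified".
 */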
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t ia64_cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
        .show   = ia64_cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
        return;
}

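/*
 * Enumerate this CPU's cache hierarchy via PAL and record one cache_info
 * leaf per (level, type) pair that PAL reports.  The PAL calls describe
 * the CPU they execute on, so the caller pins itself to that CPU first
 * (see cache_add_dev()).
 */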
static int cpu_cache_sysfs_init(unsigned int cpu)
{
        unsigned long i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        long status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

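        /*
         * PAL cache types: 1 = instruction, 2 = data (or unified when
         * cci.pcci_unified is set).  Walk every reported level, data
         * first, then instruction.
         */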
        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
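/*
 * cpu_cache_sysfs_init() issues PAL calls that describe the calling CPU,
 * so the current task is temporarily pinned to the target CPU and its
 * old affinity mask is restored once probing is done.
 */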
static int cache_add_dev(struct device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed_ptr(current, &oldmask);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
                                      &cache_ktype_percpu_entry, &sys_dev->kobj,
                                      "%s", "cache");
        if (unlikely(retval < 0)) {
                cpu_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu,i);
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &cache_ktype,
                                              &all_cpu_cache_info[cpu].kobj,
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
                        }
                        kobject_put(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
        return retval;
}

/* Remove cache interface for CPU device */
static int cache_remove_dev(struct device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_put(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, check and initialize its
 * cache kobject if necessary
 */
static int cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *sys_dev;

        sys_dev = get_cpu_device(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};

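/*
 * Add cache entries for the CPUs that are already online and register the
 * hotplug notifier inside the cpu_notifier_register_begin()/_done()
 * section so that concurrent online/offline events cannot be missed.
 */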
static int __init cache_sysfs_init(void)
{
        int i;

        cpu_notifier_register_begin();

        for_each_online_cpu(i) {
                struct device *sys_dev = get_cpu_device((unsigned int)i);
                cache_add_dev(sys_dev);
        }

        __register_hotcpu_notifier(&cache_cpu_notifier);

        cpu_notifier_register_done();

        return 0;
}

device_initcall(cache_sysfs_init);