linux/arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which are used on
 * NUMA machines with contiguous memory.
 *		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

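/*
 * Set the physical package (socket) id for a CPU whose firmware did not
 * report one.  Called from platform code (e.g. the ACPI processor driver)
 * with the slot number of the processor package; an existing socket_id
 * is never overwritten.
 */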
void arch_fix_phys_package_id(int num, u32 slot)
{
#ifdef CONFIG_SMP
	if (cpu_data(num)->socket_id == -1)
		cpu_data(num)->socket_id = slot;
#endif
}
EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);

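/*
 * A CPU is hotpluggable only if taking it offline cannot strand the
 * Corrected Platform Error Interrupt (CPEI): either the CPEI target can
 * be retargeted to another CPU, or this CPU is not the current target.
 */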
#ifdef CONFIG_HOTPLUG_CPU
int __ref arch_register_cpu(int num)
{
	/*
	 * If CPEI can be re-targeted or if this is not
	 * the CPEI target, then it is hotpluggable
	 */
	if (can_cpei_retarget() || !is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.hotpluggable = 1;
	map_cpu_to_node(num, node_cpuid[num].nid);
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
EXPORT_SYMBOL(arch_register_cpu);

void __ref arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
	unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_unregister_cpu);
#else
static int __init arch_register_cpu(int num)
{
	return register_cpu(&sysfs_cpus[num].cpu, num);
}
#endif /* CONFIG_HOTPLUG_CPU */

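/*
 * Register the online NUMA nodes and all present CPUs with the driver
 * core so that /sys/devices/system/{node,cpu}/ is populated early in boot.
 */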
static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kcalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays for pretty printing
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"		/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

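/*
 * One entry per possible CPU: the array of cache leaves discovered via
 * PAL plus the kobject backing /sys/devices/system/cpu/cpuN/cache/.
 */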
static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

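/*
 * Mark every CPU that shares this cache in shared_cpu_map.  PAL reports
 * sharing one logical processor at a time, so ia64_pal_cache_shared_info()
 * is called repeatedly with an increasing processor number until all
 * num_shared siblings have been seen.
 */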
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpumask_set_cpu(j, &this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
				this_leaf->type,
				i,
				&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

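/*
 * sets = size / (associativity * line_size).  pcci_cache_size is in
 * bytes and pcci_line_size is the log2 of the line size, hence the shift.
 */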
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	cpumask_t shared_cpu_map;

	cpumask_and(&shared_cpu_map,
				&this_leaf->shared_cpu_map, cpu_online_mask);
	return scnprintf(buf, PAGE_SIZE, "%*pb\n",
			 cpumask_pr_args(&shared_cpu_map));
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

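/*
 * define_one_ro(name) emits a read-only cache_attr wired to show_<name>;
 * the sysfs attribute file takes the same name.
 */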
#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t ia64_cache_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static const struct sysfs_ops cache_sysfs_ops = {
	.show	= ia64_cache_show
};

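/*
 * Two kobject types: cache_ktype carries the default attribute files for
 * each index<N> leaf directory; cache_ktype_percpu_entry backs the bare
 * per-CPU "cache" directory, which exports no attributes of its own.
 */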
static struct kobj_type cache_ktype = {
	.sysfs_ops	= &cache_sysfs_ops,
	.default_attrs	= cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops	= &cache_sysfs_ops,
};

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

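/*
 * Walk PAL's view of the cache hierarchy and build the cache_leaves
 * array.  For each level, type 2 (data/unified) is queried before type 1
 * (instruction); a level that lacks a given type simply returns an error
 * and is skipped.
 */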
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	long status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kcalloc(unique_caches, sizeof(struct cache_info),
			     GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
static int cache_add_dev(unsigned int cpu)
{
	struct device *sys_dev = get_cpu_device(cpu);
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	retval = cpu_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
				      &cache_ktype_percpu_entry, &sys_dev->kobj,
				      "%s", "cache");
	if (unlikely(retval < 0)) {
		cpu_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		retval = kobject_init_and_add(&(this_object->kobj),
					      &cache_ktype,
					      &all_cpu_cache_info[cpu].kobj,
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
	return retval;
}

/* Remove cache interface for CPU device */
static int cache_remove_dev(unsigned int cpu)
{
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_put(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
			0,
			sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

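/*
 * CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state, so
 * cache_add_dev() runs for every CPU already online at registration time
 * and for each CPU that comes online later; cache_remove_dev() is the
 * matching teardown hook.
 */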
static int __init cache_sysfs_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
				cache_add_dev, cache_remove_dev);
	WARN_ON(ret < 0);
	return 0;
}
device_initcall(cache_sysfs_init);