linux/arch/sparc/kernel/sysfs.c
/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>

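/* Per-cpu buffer that the sun4v hypervisor fills with MMU statistics
 * once collection has been enabled for that CPU (see
 * write_mmustat_enable() below).
 */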
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

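/* Generate a read-only sysfs show routine, plus the matching
 * DEVICE_ATTR, for one unsigned long counter in the per-cpu
 * hv_mmu_statistics buffer.
 */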
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct device *dev, \
                        struct device_attribute *attr, char *buf) \
{ \
        struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
        return sprintf(buf, "%lu\n", p->NAME); \
} \
static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL)

SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);

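/* All of the MMU statistic attributes generated above, exported as a
 * single sysfs group named "mmu_stats" under each cpu device.
 */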
static struct attribute *mmu_stat_attrs[] = {
        &dev_attr_immu_tsb_hits_ctx0_8k_tte.attr,
        &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr,
        &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr,
        &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr,
        &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr,
        &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
        &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr,
        &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
        &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
        &dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
        &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
        &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
        &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
        &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
        &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
        &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
        &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
        &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
        NULL,
};

static struct attribute_group mmu_stat_group = {
        .attrs = mmu_stat_attrs,
        .name = "mmu_stats",
};

/* XXX convert to rusty's on_one_cpu */
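/* Temporarily pin the calling task to @cpu so that @func, which makes
 * a hypervisor call affecting only the local CPU, executes there, then
 * restore the original CPU affinity.
 */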
static unsigned long run_on_cpu(unsigned long cpu,
                                unsigned long (*func)(unsigned long),
                                unsigned long arg)
{
        cpumask_t old_affinity;
        unsigned long ret;

        cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
        /* should return -EINVAL to userspace */
        if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                return 0;

        ret = func(arg);

        set_cpus_allowed_ptr(current, &old_affinity);

        return ret;
}

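/* Returns 1 if the hypervisor reports a non-zero statistics buffer
 * address for the local CPU, i.e. if collection is currently enabled.
 */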
static unsigned long read_mmustat_enable(unsigned long junk)
{
        unsigned long ra = 0;

        sun4v_mmustat_info(&ra);

        return ra != 0;
}

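/* Enable or disable collection on the local CPU by handing the
 * hypervisor the real address of this CPU's mmu_stats buffer, or 0 to
 * turn collection off.
 */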
static unsigned long write_mmustat_enable(unsigned long val)
{
        unsigned long ra, orig_ra;

        if (val)
                ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
        else
                ra = 0UL;

        return sun4v_mmustat_conf(ra, &orig_ra);
}

static ssize_t show_mmustat_enable(struct device *s,
                                struct device_attribute *attr, char *buf)
{
        unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
        return sprintf(buf, "%lx\n", val);
}

static ssize_t store_mmustat_enable(struct device *s,
                        struct device_attribute *attr, const char *buf,
                        size_t count)
{
        unsigned long val, err;
        int ret = sscanf(buf, "%lu", &val);

        if (ret != 1)
                return -EINVAL;

        err = run_on_cpu(s->id, write_mmustat_enable, val);
        if (err)
                return -EIO;

        return count;
}

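/* Per-cpu control file.  Assuming the usual sysfs layout for cpu
 * devices, it typically shows up as
 * /sys/devices/system/cpu/cpuN/mmustat_enable; writing a non-zero
 * value enables collection on that CPU, writing 0 disables it, and
 * reading it back reports the current state.
 */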
static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);

static int mmu_stats_supported;

static int register_mmu_stats(struct device *s)
{
        if (!mmu_stats_supported)
                return 0;
        device_create_file(s, &dev_attr_mmustat_enable);
        return sysfs_create_group(&s->kobj, &mmu_stat_group);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_mmu_stats(struct device *s)
{
        if (!mmu_stats_supported)
                return;
        sysfs_remove_group(&s->kobj, &mmu_stat_group);
        device_remove_file(s, &dev_attr_mmustat_enable);
}
#endif

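/* Generate sysfs show routines for simple cpuinfo_sparc fields, in
 * unsigned long and unsigned int flavors.
 */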
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
                struct device_attribute *attr, char *buf) \
{ \
        cpuinfo_sparc *c = &cpu_data(dev->id); \
        return sprintf(buf, "%lu\n", c->MEMBER); \
}

#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct device *dev, \
                struct device_attribute *attr, char *buf) \
{ \
        cpuinfo_sparc *c = &cpu_data(dev->id); \
        return sprintf(buf, "%u\n", c->MEMBER); \
}

SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);

static struct device_attribute cpu_core_attrs[] = {
        __ATTR(clock_tick,          0444, show_clock_tick, NULL),
        __ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
        __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
        __ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
        __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
        __ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
        __ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
};

static DEFINE_PER_CPU(struct cpu, cpu_devices);

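/* Create the per-cpu attribute files (and, when supported, the
 * mmu_stats group) for a CPU that has come online.
 */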
static void register_cpu_online(unsigned int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct device *s = &c->dev;
        int i;

        for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
                device_create_file(s, &cpu_core_attrs[i]);

        register_mmu_stats(s);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct device *s = &c->dev;
        int i;

        unregister_mmu_stats(s);
        for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
                device_remove_file(s, &cpu_core_attrs[i]);
}
#endif

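/* Hotplug notifier: add the attributes when a CPU comes online and,
 * with CONFIG_HOTPLUG_CPU, remove them again when it goes away.
 */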
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
                                      unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                register_cpu_online(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                unregister_cpu_online(cpu);
                break;
#endif
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
        .notifier_call  = sysfs_cpu_notify,
};

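/* MMU statistics are only available on hypervisor (sun4v) systems,
 * and only if sun4v_mmustat_info() succeeds.
 */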
static void __init check_mmu_stats(void)
{
        unsigned long dummy1, err;

        if (tlb_type != hypervisor)
                return;

        err = sun4v_mmustat_info(&dummy1);
        if (!err)
                mmu_stats_supported = 1;
}

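/* On NUMA kernels, register each node up to MAX_NUMNODES with the
 * sysfs node subsystem.
 */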
static void register_nodes(void)
{
#ifdef CONFIG_NUMA
        int i;

        for (i = 0; i < MAX_NUMNODES; i++)
                register_one_node(i);
#endif
}

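/* Boot-time setup: register NUMA nodes, probe for MMU statistics
 * support, hook up the hotplug notifier, then register every possible
 * CPU and populate the attributes of those already online.
 */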
static int __init topology_init(void)
{
        int cpu;

        register_nodes();

        check_mmu_stats();

        register_cpu_notifier(&sysfs_cpu_nb);

        for_each_possible_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);

                register_cpu(c, cpu);
                if (cpu_online(cpu))
                        register_cpu_online(cpu);
        }

        return 0;
}

subsys_initcall(topology_init);