linux/drivers/base/cacheinfo.c
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)

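/*
 * get_cpu_cacheinfo - return the cacheinfo descriptor (leaf count plus the
 * info_list array of per-leaf attributes) for @cpu. Note that info_list may
 * still be NULL until detect_cache_attributes() has run for that CPU.
 */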
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

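/*
 * On DT systems each cache leaf is matched to a device tree node: level 1
 * leaves use the CPU node itself, deeper levels follow the
 * "next-level-cache" phandle chain via of_find_next_cache_node(). Since the
 * walk advances one node per non-level-1 leaf, it effectively assumes a
 * single (unified) cache node per level above L1.
 */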
#ifdef CONFIG_OF
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if of_node is already populated */
        if (this_cpu_ci->info_list->of_node)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np); /* cpu node itself */
                if (!np)
                        break;
                this_leaf->of_node = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}

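/*
 * Two leaves are shared exactly when they resolved to the same device tree
 * node above; pointer comparison suffices because DT nodes are unique.
 */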
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT systems, assume unique level 1 cache, system-wide
         * shared caches for all other levels. This will be used only if
         * arch specific code has not populated shared_cpu_map
         */
        return !(this_leaf->level == 1);
}
#endif

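/*
 * Build shared_cpu_map for each leaf by comparing it against the leaf at
 * the same index on every other online CPU, skipping any leaf that
 * architecture code has already populated. Indexing siblings by the same
 * leaf index assumes all CPUs enumerate their caches in the same order.
 */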
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret;

        ret = cache_setup_of_node(cpu);
        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
        }

        return 0;
}

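/*
 * Inverse of cache_shared_cpu_map_setup(): remove this CPU from each
 * sibling's masks (and vice versa) and drop the of_node references taken
 * in cache_setup_of_node().
 */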
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;

                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        if (!sib_cpu_ci->info_list)
                                continue;

                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                of_node_put(this_leaf->of_node);
        }
}

static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}

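/*
 * Weak arch hooks: init_cache_level() is expected to set the number of
 * cache levels and leaves for the CPU, and populate_cache_leaves() to fill
 * in each leaf of info_list. Architectures without cacheinfo support keep
 * these stubs, so detect_cache_attributes() fails cleanly with -ENOENT.
 */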
int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

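/*
 * detect_cache_attributes - allocate info_list for the CPU and populate it
 * through the arch hooks, then let the DT code fill in of_node and
 * shared_cpu_map wherever the architecture left them empty. All state is
 * rolled back on failure so a later attempt starts from scratch.
 */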
static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for cache hierarchy, of_node and shared_cpu_map
         * will be set up here only if they are not populated already
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
                        cpu);
                goto free_ci;
        }
        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

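/*
 * Everything below implements the sysfs interface: one "cache" device per
 * CPU with one "indexY" child device per cache leaf, e.g.
 *
 *   /sys/devices/system/cpu/cpuX/cache/index0/level
 *   /sys/devices/system/cpu/cpuX/cache/index0/type
 *   /sys/devices/system/cpu/cpuX/cache/index0/shared_cpu_map
 *   ...
 */
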
/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sprintf(buf, "%u\n", this_leaf->object);         \
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

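/*
 * cpumap_print_to_pagebuf() renders the mask either as a hex bitmask
 * (shared_cpu_map) or as a range list such as "0-3" (shared_cpu_list).
 */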
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}

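/*
 * Allocation and write policies arrive as flag bits in ->attributes; an
 * empty string is returned if the architecture reported neither flag.
 */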
static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}

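/* DEVICE_ATTR_RO(name) binds name_show() as a read-only (0444) attribute */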
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

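/*
 * Hide any attribute whose backing field was never populated, so sysfs
 * exposes only what the architecture actually reported. Note that
 * ways_of_associativity is gated on size rather than on its own value,
 * since zero ways legitimately denotes full associativity.
 */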
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Placeholder for private group */
        NULL,
};

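/*
 * Arch code may override cache_get_priv_group() to append one private
 * attribute group per leaf. The group is latched into
 * cache_private_groups[1] on first use, so this mechanism assumes a single
 * private group shared by all leaves that have one.
 */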
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

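/*
 * cache_add_dev - create one "indexN" child device per cache leaf under
 * cpuX/cache, skipping leaves that arch code flagged with disable_sysfs.
 * cache_dev_map records which CPUs are fully registered so that removal
 * stays idempotent.
 */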
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
        if (!cpumask_test_cpu(cpu, &cache_dev_map))
                return;
        cpumask_clear_cpu(cpu, &cache_dev_map);

        cpu_cache_sysfs_exit(cpu);
}

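/*
 * CPU hotplug callback (this predates the cpuhp state machine): build
 * cacheinfo and its sysfs nodes when a CPU comes online, tear both down
 * once it is dead. Errors are reported via notifier_from_errno().
 */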
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = detect_cache_attributes(cpu);
                if (!rc)
                        rc = cache_add_dev(cpu);
                break;
        case CPU_DEAD:
                cache_remove_dev(cpu);
                free_cache_attributes(cpu);
                break;
        }
        return notifier_from_errno(rc);
}

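/*
 * Boot-time init: populate cacheinfo for the CPUs already online, then
 * register the hotplug callback inside the cpu_notifier_register_begin/done
 * critical section so that no CPU transition can be missed in between.
 */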
static int __init cacheinfo_sysfs_init(void)
{
        int cpu, rc = 0;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                rc = detect_cache_attributes(cpu);
                if (rc)
                        goto out;
                rc = cache_add_dev(cpu);
                if (rc) {
                        free_cache_attributes(cpu);
                        pr_err("error populating cacheinfo..cpu%d\n", cpu);
                        goto out;
                }
        }
        __hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
        cpu_notifier_register_done();
        return rc;
}

device_initcall(cacheinfo_sysfs_init);