linux/arch/powerpc/kernel/cacheinfo.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
        struct kobject *kobj; /* bare (not embedded) kobject for cache
                               * directory */
        struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
        struct kobject kobj;
        struct cache_index_dir *next; /* next index in parent directory */
        struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
        const char *name;
        const char *size_prop;

        /* Allow for both [di]-cache-line-size and
         * [di]-cache-block-size properties.  According to the PowerPC
         * Processor binding, -line-size should be provided if it
         * differs from the cache block size (that which is operated
         * on by cache instructions), so we look for -line-size first.
         * See cache_get_line_size(). */

        const char *line_size_props[2];
        const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
        {
                /* Embedded systems that use cache-size, cache-block-size,
                 * etc. for the Unified (typically L2) cache. */
                .name            = "Unified",
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        },
        {
                /* PowerPC Processor binding says the [di]-cache-*
                 * must be equal on unified caches, so just use
                 * d-cache properties. */
                .name            = "Unified",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
        {
                .name            = "Instruction",
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        },
        {
                .name            = "Data",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
        struct device_node *ofnode;    /* OF node for this cache, may be cpu */
        struct cpumask shared_cpu_map; /* online CPUs using this cache */
        int type;                      /* split cache disambiguation */
        int level;                     /* level not explicit in device tree */
        int group_id;                  /* id of the group of threads that share this cache */
        struct list_head list;         /* global list of cache objects */
        struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
        return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
        return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
                       struct device_node *ofnode, int group_id)
{
        cache->type = type;
        cache->level = level;
        cache->ofnode = of_node_get(ofnode);
        cache->group_id = group_id;
        INIT_LIST_HEAD(&cache->list);
        list_add(&cache->list, &cache_list);
}

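/* Allocate and initialise a cache object, taking a reference on its
 * OF node and adding it to the global cache_list.
 */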
static struct cache *new_cache(int type, int level,
                               struct device_node *ofnode, int group_id)
{
        struct cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (cache)
                cache_init(cache, type, level, ofnode, group_id);

        return cache;
}

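/* Warn if any cache object left on the global list still refers to
 * @cache through its next_local pointer; freeing @cache would leave
 * that local chain dangling.
 */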
static void release_cache_debugcheck(struct cache *cache)
{
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list)
                WARN_ONCE(iter->next_local == cache,
                          "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
                          iter->ofnode,
                          cache_type_string(iter),
                          cache->ofnode,
                          cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
        if (!cache)
                return;

        pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
                 cache_type_string(cache), cache->ofnode);

        release_cache_debugcheck(cache);
        list_del(&cache->list);
        of_node_put(cache->ofnode);
        kfree(cache);
}

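/* Mark @cpu in the shared_cpu_map of every cache in the local chain
 * headed by @cache.
 */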
static void cache_cpu_set(struct cache *cache, int cpu)
{
        struct cache *next = cache;

        while (next) {
                WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
                          "CPU %i already accounted in %pOFP(%s)\n",
                          cpu, next->ofnode,
                          cache_type_string(next));
                cpumask_set_cpu(cpu, &next->shared_cpu_map);
                next = next->next_local;
        }
}

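/* Read the cache size in bytes from the OF property appropriate to
 * this cache's type (e.g. "d-cache-size").
 */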
static int cache_size(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const __be32 *cache_size;

        propname = cache_type_info[cache->type].size_prop;

        cache_size = of_get_property(cache->ofnode, propname, NULL);
        if (!cache_size)
                return -ENODEV;

        *ret = of_read_number(cache_size, 1);
        return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
        unsigned int size;

        if (cache_size(cache, &size))
                return -ENODEV;

        *ret = size / 1024;
        return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
        const __be32 *line_size;
        int i, lim;

        lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[cache->type].line_size_props[i];
                line_size = of_get_property(cache->ofnode, propname, NULL);
                if (line_size)
                        break;
        }

        if (!line_size)
                return -ENODEV;

        *ret = of_read_number(line_size, 1);
        return 0;
}

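/* Read the number of congruence classes (sets) from the OF property
 * appropriate to this cache's type (e.g. "d-cache-sets").
 */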
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const __be32 *nr_sets;

        propname = cache_type_info[cache->type].nr_sets_prop;

        nr_sets = of_get_property(cache->ofnode, propname, NULL);
        if (!nr_sets)
                return -ENODEV;

        *ret = of_read_number(nr_sets, 1);
        return 0;
}

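/* Derive the associativity as size / (nr_sets * line_size); e.g. a
 * 32KB cache with 64 sets and 128-byte lines is 4-way.  A fully
 * associative cache (nr_sets == 1) is reported as 0.
 */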
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
        unsigned int line_size;
        unsigned int nr_sets;
        unsigned int size;

        if (cache_nr_sets(cache, &nr_sets))
                goto err;

        /* If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets == 1) {
                *ret = 0;
                return 0;
        }

        if (cache_get_line_size(cache, &line_size))
                goto err;
        if (cache_size(cache, &size))
                goto err;

        if (!(nr_sets > 0 && size > 0 && line_size > 0))
                goto err;

        *ret = (size / nr_sets) / line_size;
        return 0;
err:
        return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
        struct cache *iter;

        if (cache->type == CACHE_TYPE_UNIFIED ||
            cache->type == CACHE_TYPE_UNIFIED_D)
                return cache;

        list_for_each_entry(iter, &cache_list, list)
                if (iter->ofnode == cache->ofnode &&
                    iter->group_id == cache->group_id &&
                    iter->next_local == cache)
                        return iter;

        return cache;
}

/* return the first cache on a local list matching node and thread-group id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
                                                int group_id)
{
        struct cache *cache = NULL;
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list) {
                if (iter->ofnode != node ||
                    iter->group_id != group_id)
                        continue;
                cache = cache_find_first_sibling(iter);
                break;
        }

        return cache;
}

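/* A cache node that carries the "cache-unified" property describes a
 * unified cache; otherwise it is treated as a split (Harvard) cache.
 */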
static bool cache_node_is_unified(const struct device_node *np)
{
        return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can be described by two different sets of properties.
 * Most embedded systems use cache-size, etc. for the unified cache
 * size, but Open Firmware systems use d-cache-size, etc.  Check at
 * initialization which kind we have and return the appropriate
 * cache_type_info index; assume embedded if it isn't Open Firmware.
 * If a third variant exists, entries will be missing from
 * /sys/devices/system/cpu/cpu0/cache/index2/ and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
        return of_get_property(np,
                cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
                CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
                                                  int level)
{
        pr_debug("creating L%d ucache for %pOFP\n", level, node);

        return new_cache(cache_is_unified_d(node), level, node, group_id);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
                                                int level)
{
        struct cache *dcache, *icache;

        pr_debug("creating L%d dcache and icache for %pOFP\n", level,
                 node);

        dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
        icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);

        if (!dcache || !icache)
                goto err;

        dcache->next_local = icache;

        return dcache;
err:
        release_cache(dcache);
        release_cache(icache);
        return NULL;
}

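/* Create the cache object(s) described by @node: a single object for
 * a unified cache, or a linked data/instruction pair for a split one.
 */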
static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
        struct cache *cache;

        if (cache_node_is_unified(node))
                cache = cache_do_one_devnode_unified(node, group_id, level);
        else
                cache = cache_do_one_devnode_split(node, group_id, level);

        return cache;
}

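/* Return the cache object already created for (@node, @group_id) by
 * another CPU's traversal, or create it now if none exists.
 */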
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
                                                 int group_id,
                                                 int level)
{
        struct cache *cache;

        cache = cache_lookup_by_node_group(node, group_id);

        WARN_ONCE(cache && cache->level != level,
                  "cache level mismatch on lookup (got %d, expected %d)\n",
                  cache->level, level);

        if (!cache)
                cache = cache_do_one_devnode(node, group_id, level);

        return cache;
}

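/* Append @bigger to the end of @smaller's next_local chain unless it
 * is already linked there.
 */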
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
        while (smaller->next_local) {
                if (smaller->next_local == bigger)
                        return; /* already linked */
                smaller = smaller->next_local;
        }

        smaller->next_local = bigger;

        /*
         * The cache->next_local list sorts by level ascending:
         * L1d -> L1i -> L2 -> L3 ...
         */
        WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
                  (smaller->level > 1 && bigger->level != smaller->level + 1),
                  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
                  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
        WARN_ONCE(cache->level != 1,
                  "instantiating cache chain from L%d %s cache for "
                  "%pOFP instead of an L1\n", cache->level,
                  cache_type_string(cache), cache->ofnode);
        WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
                  "instantiating cache chain from node %pOFP of type '%s' "
                  "instead of a cpu node\n", cache->ofnode,
                  of_node_get_device_type(cache->ofnode));
}

/*
 * If sub-groups of threads in a core containing @cpu_id share the
 * L@level-cache (information obtained via "ibm,thread-groups"
 * device-tree property), then we identify the group by the first
 * thread-sibling in the group. We define this to be the group-id.
 *
 * In the absence of any thread-group information for L@level-cache,
 * this function returns -1.
 */
static int get_group_id(unsigned int cpu_id, int level)
{
        if (has_big_cores && level == 1)
                return cpumask_first(per_cpu(thread_group_l1_cache_map,
                                             cpu_id));
        else if (thread_group_shares_l2 && level == 2)
                return cpumask_first(per_cpu(thread_group_l2_cache_map,
                                             cpu_id));
        else if (thread_group_shares_l3 && level == 3)
                return cpumask_first(per_cpu(thread_group_l3_cache_map,
                                             cpu_id));
        return -1;
}

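/* Walk the next-level cache links from @cache's OF node (via
 * of_find_next_cache_node()), looking up or creating a cache object
 * for each level and linking it into the local chain.
 */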
static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
        struct device_node *subcache_node;
        int level = cache->level;

        do_subsidiary_caches_debugcheck(cache);

        while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
                struct cache *subcache;
                int group_id;

                level++;
                group_id = get_group_id(cpu_id, level);
                subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
                of_node_put(subcache_node);
                if (!subcache)
                        break;

                link_cache_lists(cache, subcache);
                cache = subcache;
        }
}

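/* Build (or look up) the local cache hierarchy for @cpu_id starting
 * from its CPU node, then mark the CPU in every level's
 * shared_cpu_map.  Returns the head (L1) of the chain, or NULL.
 */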
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cpu_cache = NULL;
        int group_id;

        pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                goto out;

        group_id = get_group_id(cpu_id, 1);

        cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
        if (!cpu_cache)
                goto out;

        do_subsidiary_caches(cpu_cache, cpu_id);

        cache_cpu_set(cpu_cache, cpu_id);
out:
        of_node_put(cpu_node);

        return cpu_cache;
}

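/* Create the per-cpu "cache" kobject under the CPU's sysfs device
 * directory and record it in cache_dir_pcpu.
 */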
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct device *dev;
        struct kobject *kobj = NULL;

        dev = get_cpu_device(cpu_id);
        WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
        if (!dev)
                goto err;

        kobj = kobject_create_and_add("cache", &dev->kobj);
        if (!kobj)
                goto err;

        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto err;

        cache_dir->kobj = kobj;

        WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

        per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

        return cache_dir;
err:
        kobject_put(kobj);
        return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);

        pr_debug("freeing index directory for L%d %s cache\n",
                 index->cache->level, cache_type_string(index->cache));

        kfree(index);
}

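/* sysfs ->show dispatch for index kobjects: forward to the
 * kobj_attribute's own show method.
 */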
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);

        return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(k);

        return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int size_kb;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_size_kb(cache, &size_kb))
                return -ENODEV;

        return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
        __ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int line_size;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_get_line_size(cache, &line_size))
                return -ENODEV;

        return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
        __ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int nr_sets;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_nr_sets(cache, &nr_sets))
                return -ENODEV;

        return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
        __ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int associativity;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_associativity(cache, &associativity))
                return -ENODEV;

        return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
        __ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
        __ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
        __ATTR(level, 0444, level_show, NULL);

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
        struct cache_index_dir *index;
        struct cache *cache;
        const struct cpumask *mask;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        mask = &cache->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
        __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_level_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        &cache_shared_cpu_list_attr.attr,
        NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
        &cache_size_attr,
        &cache_line_size_attr,
        &cache_nr_sets_attr,
        &cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .release = cache_index_release,
        .sysfs_ops = &cache_index_ops,
        .default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
        const char *cache_type;
        struct cache *cache;
        char *buf;
        int i;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return;

        cache = dir->cache;
        cache_type = cache_type_string(cache);

        /* We don't want to create an attribute that can't provide a
         * meaningful value.  Check the return value of each optional
         * attribute's ->show method before registering the
         * attribute.
         */
        for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
                struct kobj_attribute *attr;
                ssize_t rc;

                attr = cache_index_opt_attrs[i];

                rc = attr->show(&dir->kobj, attr, buf);
                if (rc <= 0) {
                        pr_debug("not creating %s attribute for "
                                 "%pOFP(%s) (rc = %zd)\n",
                                 attr->attr.name, cache->ofnode,
                                 cache_type, rc);
                        continue;
                }
                if (sysfs_create_file(&dir->kobj, &attr->attr))
                        pr_debug("could not create %s attribute for %pOFP(%s)\n",
                                 attr->attr.name, cache->ofnode, cache_type);
        }

        kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
                                       struct cache_dir *cache_dir)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                return;

        index_dir->cache = cache;

        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc) {
                kobject_put(&index_dir->kobj);
                return;
        }

        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;

        cacheinfo_create_index_opt_attrs(index_dir);
}

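/* Create an indexN subdirectory for each cache in the CPU's local
 * chain.  The resulting layout typically looks like:
 *
 *   /sys/devices/system/cpu/cpuN/cache/
 *     index0/   L1 data (or unified)
 *     index1/   L1 instruction
 *     index2/   L2
 *     ...
 */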
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
                                     struct cache *cache_list)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int index = 0;

        cache_dir = cacheinfo_create_cache_dir(cpu_id);
        if (!cache_dir)
                return;

        cache = cache_list;
        while (cache) {
                cacheinfo_create_index_dir(cache, index, cache_dir);
                index++;
                cache = cache->next_local;
        }
}

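/* Instantiate the cache chain for a newly onlined CPU and expose it
 * in sysfs.
 */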
void cacheinfo_cpu_online(unsigned int cpu_id)
{
        struct cache *cache;

        cache = cache_chain_instantiate(cpu_id);
        if (!cache)
                return;

        cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

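/* Find the head (L1) cache object previously created for @cpu_id's
 * CPU node and thread group, if any.
 */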
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cache;
        int group_id;

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                return NULL;

        group_id = get_group_id(cpu_id, 1);
        cache = cache_lookup_by_node_group(cpu_node, group_id);
        of_node_put(cpu_node);

        return cache;
}

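/* Drop the reference on each index kobject; cache_index_release()
 * frees the cache_index_dir when the last reference goes away.
 */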
static void remove_index_dirs(struct cache_dir *cache_dir)
{
        struct cache_index_dir *index;

        index = cache_dir->index;

        while (index) {
                struct cache_index_dir *next;

                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
        remove_index_dirs(cache_dir);

        /* Remove cache dir from sysfs */
        kobject_del(cache_dir->kobj);

        kobject_put(cache_dir->kobj);

        kfree(cache_dir);
}

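/* Clear @cpu from the shared_cpu_map of every cache in its local
 * chain, freeing any cache object whose map becomes empty.
 */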
static void cache_cpu_clear(struct cache *cache, int cpu)
{
        while (cache) {
                struct cache *next = cache->next_local;

                WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
                          "CPU %i not accounted in %pOFP(%s)\n",
                          cpu, cache->ofnode,
                          cache_type_string(cache));

                cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

                /* Release the cache object if all the cpus using it
                 * are offline */
                if (cpumask_empty(&cache->shared_cpu_map))
                        release_cache(cache);

                cache = next;
        }
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct cache *cache;

        /* Prevent userspace from seeing inconsistent state - remove
         * the sysfs hierarchy first */
        cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

        /* careful, sysfs population may have failed */
        if (cache_dir)
                remove_cache_dir(cache_dir);

        per_cpu(cache_dir_pcpu, cpu_id) = NULL;

        /* clear the CPU's bit in its cache chain, possibly freeing
         * cache objects */
        cache = cache_lookup_by_cpu(cpu_id);
        if (cache)
                cache_cpu_clear(cache, cpu_id);
}

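/* Remove the cacheinfo state for every online CPU; callers must hold
 * the CPU hotplug lock (asserted via lockdep below).
 */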
void cacheinfo_teardown(void)
{
        unsigned int cpu;

        lockdep_assert_cpus_held();

        for_each_online_cpu(cpu)
                cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
        unsigned int cpu;

        lockdep_assert_cpus_held();

        for_each_online_cpu(cpu)
                cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */