linux/arch/powerpc/kernel/cacheinfo.c
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
        struct kobject *kobj; /* bare (not embedded) kobject for cache
                               * directory */
        struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
        struct kobject kobj;
        struct cache_index_dir *next; /* next index in parent directory */
        struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
        const char *name;
        const char *size_prop;

        /* Allow for both [di]-cache-line-size and
         * [di]-cache-block-size properties.  According to the PowerPC
         * Processor binding, -line-size should be provided if it
         * differs from the cache block size (that which is operated
         * on by cache instructions), so we look for -line-size first.
         * See cache_get_line_size(). */

        const char *line_size_props[2];
        const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
        {
                /* Embedded systems that use cache-size, cache-block-size,
                 * etc. for the Unified (typically L2) cache. */
                .name            = "Unified",
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        },
        {
                /* The PowerPC Processor binding says the [di]-cache-*
                 * properties must be equal on unified caches, so just
                 * use the d-cache properties. */
                .name            = "Unified",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
        {
                .name            = "Instruction",
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        },
        {
                .name            = "Data",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};
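
/* Illustration only: a hypothetical device-tree fragment (values
 * invented for this example) that the CACHE_TYPE_DATA entry above
 * would match:
 *
 *     d-cache-size       = <0x8000>;   (32 KiB)
 *     d-cache-line-size  = <0x80>;     (128 bytes; preferred)
 *     d-cache-block-size = <0x80>;     (fallback if no -line-size)
 *     d-cache-sets       = <0x40>;     (64 sets)
 *
 * size_prop selects "d-cache-size", and line_size_props[] is scanned
 * in array order, so -line-size wins over -block-size when both are
 * present.
 */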

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
        struct device_node *ofnode;    /* OF node for this cache, may be cpu */
        struct cpumask shared_cpu_map; /* online CPUs using this cache */
        int type;                      /* split cache disambiguation */
        int level;                     /* level not explicit in device tree */
        struct list_head list;         /* global list of cache objects */
        struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
        return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
        return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
                       struct device_node *ofnode)
{
        cache->type = type;
        cache->level = level;
        cache->ofnode = of_node_get(ofnode);
        INIT_LIST_HEAD(&cache->list);
        list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
        struct cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (cache)
                cache_init(cache, type, level, ofnode);

        return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list)
                WARN_ONCE(iter->next_local == cache,
                          "cache for %s(%s) refers to cache for %s(%s)\n",
                          iter->ofnode->full_name,
                          cache_type_string(iter),
                          cache->ofnode->full_name,
                          cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
        if (!cache)
                return;

        pr_debug("freeing L%d %s cache for %s\n", cache->level,
                 cache_type_string(cache), cache->ofnode->full_name);

        release_cache_debugcheck(cache);
        list_del(&cache->list);
        of_node_put(cache->ofnode);
        kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
        struct cache *next = cache;

        while (next) {
                WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
                          "CPU %i already accounted in %s(%s)\n",
                          cpu, next->ofnode->full_name,
                          cache_type_string(next));
                cpumask_set_cpu(cpu, &next->shared_cpu_map);
                next = next->next_local;
        }
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const __be32 *cache_size;

        propname = cache_type_info[cache->type].size_prop;

        cache_size = of_get_property(cache->ofnode, propname, NULL);
        if (!cache_size)
                return -ENODEV;

        *ret = of_read_number(cache_size, 1);
        return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
        unsigned int size;

        if (cache_size(cache, &size))
                return -ENODEV;

        *ret = size / 1024;
        return 0;
}
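
/* Worked example with made-up numbers: a "d-cache-size = <0x8000>"
 * property reads back as 32768 via of_read_number(); cache_size_kb()
 * then reports 32, which size_show() below renders as "32K".
 */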

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
        const __be32 *line_size;
        int i, lim;

        lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[cache->type].line_size_props[i];
                line_size = of_get_property(cache->ofnode, propname, NULL);
                if (line_size)
                        break;
        }

        if (!line_size)
                return -ENODEV;

        *ret = of_read_number(line_size, 1);
        return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const __be32 *nr_sets;

        propname = cache_type_info[cache->type].nr_sets_prop;

        nr_sets = of_get_property(cache->ofnode, propname, NULL);
        if (!nr_sets)
                return -ENODEV;

        *ret = of_read_number(nr_sets, 1);
        return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
        unsigned int line_size;
        unsigned int nr_sets;
        unsigned int size;

        if (cache_nr_sets(cache, &nr_sets))
                goto err;

        /* If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets == 1) {
                *ret = 0;
                return 0;
        }

        if (cache_get_line_size(cache, &line_size))
                goto err;
        if (cache_size(cache, &size))
                goto err;

        if (!(nr_sets > 0 && size > 0 && line_size > 0))
                goto err;

        *ret = (size / nr_sets) / line_size;
        return 0;
err:
        return -ENODEV;
}
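
/* Worked example (numbers invented): size = 32768, nr_sets = 128 and
 * line_size = 128 give (32768 / 128) / 128 = 2, i.e. a 2-way
 * set-associative cache.  nr_sets == 1 is taken to mean fully
 * associative and is reported as 0 ways.
 */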

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
        struct cache *iter;

        if (cache->type == CACHE_TYPE_UNIFIED ||
            cache->type == CACHE_TYPE_UNIFIED_D)
                return cache;

        list_for_each_entry(iter, &cache_list, list)
                if (iter->ofnode == cache->ofnode && iter->next_local == cache)
                        return iter;

        return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
        struct cache *cache = NULL;
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list) {
                if (iter->ofnode != node)
                        continue;
                cache = cache_find_first_sibling(iter);
                break;
        }

        return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
        return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of tags.  Most embedded
 * systems use cache-size, etc. for the unified cache size, but Open
 * Firmware systems use d-cache-size, etc.  Check on initialization
 * which type we have, and return the appropriate structure type.
 * Assume it's embedded if it isn't Open Firmware.  If it's yet a
 * third type, then there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
        return of_get_property(np,
                cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
                CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
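
/* For example (both fragments hypothetical), an embedded-style node
 *
 *     cache-unified;
 *     cache-size = <0x80000>;
 *
 * yields CACHE_TYPE_UNIFIED, while an Open Firmware style node
 *
 *     cache-unified;
 *     d-cache-size = <0x80000>;
 *
 * yields CACHE_TYPE_UNIFIED_D.
 */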

static struct cache *cache_do_one_devnode_unified(struct device_node *node,
                                                  int level)
{
        pr_debug("creating L%d ucache for %s\n", level, node->full_name);

        return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
                                                int level)
{
        struct cache *dcache, *icache;

        pr_debug("creating L%d dcache and icache for %s\n", level,
                 node->full_name);

        dcache = new_cache(CACHE_TYPE_DATA, level, node);
        icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

        if (!dcache || !icache)
                goto err;

        dcache->next_local = icache;

        return dcache;
err:
        release_cache(dcache);
        release_cache(icache);
        return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
        struct cache *cache;

        if (cache_node_is_unified(node))
                cache = cache_do_one_devnode_unified(node, level);
        else
                cache = cache_do_one_devnode_split(node, level);

        return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
                                                 int level)
{
        struct cache *cache;

        cache = cache_lookup_by_node(node);

        WARN_ONCE(cache && cache->level != level,
                  "cache level mismatch on lookup (got %d, expected %d)\n",
                  cache->level, level);

        if (!cache)
                cache = cache_do_one_devnode(node, level);

        return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
        while (smaller->next_local) {
                if (smaller->next_local == bigger)
                        return; /* already linked */
                smaller = smaller->next_local;
        }

        smaller->next_local = bigger;
}
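
/* E.g. starting from the L1d -> L1i chain built by
 * cache_do_one_devnode_split(), link_cache_lists(L1d, L2) walks to
 * the tail and appends, giving L1d -> L1i -> L2; calling it again
 * with the same arguments is a no-op.
 */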

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
        WARN_ON_ONCE(cache->level != 1);
        WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
        struct device_node *subcache_node;
        int level = cache->level;

        do_subsidiary_caches_debugcheck(cache);

        while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
                struct cache *subcache;

                level++;
                subcache = cache_lookup_or_instantiate(subcache_node, level);
                of_node_put(subcache_node);
                if (!subcache)
                        break;

                link_cache_lists(cache, subcache);
                cache = subcache;
        }
}
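
/* Sketch of the device-tree shape this walk assumes (labels
 * hypothetical): of_find_next_cache_node() follows the "l2-cache"
 * (or "next-level-cache") phandle from node to node:
 *
 *     cpu@0: l2-cache = <&L2_0>;    (L1 described in the cpu node)
 *     L2_0:  l2-cache = <&L3_0>;    (level 2)
 *     L3_0:  no l2-cache phandle    (level 3; chain ends)
 */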

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cpu_cache = NULL;

        pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                goto out;

        cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
        if (!cpu_cache)
                goto out;

        do_subsidiary_caches(cpu_cache);

        cache_cpu_set(cpu_cache, cpu_id);
out:
        of_node_put(cpu_node);

        return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct device *dev;
        struct kobject *kobj = NULL;

        dev = get_cpu_device(cpu_id);
        WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
        if (!dev)
                goto err;

        kobj = kobject_create_and_add("cache", &dev->kobj);
        if (!kobj)
                goto err;

        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto err;

        cache_dir->kobj = kobj;

        WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

        per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

        return cache_dir;
err:
        kobject_put(kobj);
        return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);

        pr_debug("freeing index directory for L%d %s cache\n",
                 index->cache->level, cache_type_string(index->cache));

        kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);

        return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(k);

        return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int size_kb;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_size_kb(cache, &size_kb))
                return -ENODEV;

        return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
        __ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int line_size;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_get_line_size(cache, &line_size))
                return -ENODEV;

        return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
        __ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int nr_sets;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_nr_sets(cache, &nr_sets))
                return -ENODEV;

        return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
        __ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int associativity;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_associativity(cache, &associativity))
                return -ENODEV;

        return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
        __ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
        __ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
        __ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;
        int ret;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        /* "%*pb\n" already ends the output with a newline and
         * scnprintf() NUL-terminates, so don't append a second
         * '\n' by hand */
        ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
                        cpumask_pr_args(&cache->shared_cpu_map));
        return ret;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_level_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs()
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
        &cache_size_attr,
        &cache_line_size_attr,
        &cache_nr_sets_attr,
        &cache_assoc_attr,
};
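
/* Resulting sysfs layout for one CPU (illustrative; the index count
 * and the optional attributes depend on the device tree):
 *
 * /sys/devices/system/cpu/cpu0/cache/
 *     index0/   L1 Data:        type level shared_cpu_map [size ...]
 *     index1/   L1 Instruction: type level shared_cpu_map [size ...]
 *     index2/   L2 Unified:     type level shared_cpu_map [size ...]
 */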

static const struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .release = cache_index_release,
        .sysfs_ops = &cache_index_ops,
        .default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
        const char *cache_name;
        const char *cache_type;
        struct cache *cache;
        char *buf;
        int i;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return;

        cache = dir->cache;
        cache_name = cache->ofnode->full_name;
        cache_type = cache_type_string(cache);

        /* We don't want to create an attribute that can't provide a
         * meaningful value.  Check the return value of each optional
         * attribute's ->show method before registering the
         * attribute.
         */
        for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
                struct kobj_attribute *attr;
                ssize_t rc;

                attr = cache_index_opt_attrs[i];

                rc = attr->show(&dir->kobj, attr, buf);
                if (rc <= 0) {
                        pr_debug("not creating %s attribute for %s(%s) (rc = %zd)\n",
                                 attr->attr.name, cache_name, cache_type, rc);
                        continue;
                }
                if (sysfs_create_file(&dir->kobj, &attr->attr))
                        pr_debug("could not create %s attribute for %s(%s)\n",
                                 attr->attr.name, cache_name, cache_type);
        }

        kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
                                       struct cache_dir *cache_dir)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                return;

        index_dir->cache = cache;

        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc) {
                /* the kobject was initialized even though the add
                 * failed, so a bare kfree() would leak its name;
                 * drop the reference instead and let
                 * cache_index_release() free index_dir */
                kobject_put(&index_dir->kobj);
                return;
        }

        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;

        cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
                                     struct cache *cache_list)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int index = 0;

        cache_dir = cacheinfo_create_cache_dir(cpu_id);
        if (!cache_dir)
                return;

        cache = cache_list;
        while (cache) {
                cacheinfo_create_index_dir(cache, index, cache_dir);
                index++;
                cache = cache->next_local;
        }
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
        struct cache *cache;

        cache = cache_chain_instantiate(cpu_id);
        if (!cache)
                return;

        cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cache;

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                return NULL;

        cache = cache_lookup_by_node(cpu_node);
        of_node_put(cpu_node);

        return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
        struct cache_index_dir *index;

        index = cache_dir->index;

        while (index) {
                struct cache_index_dir *next;

                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
        remove_index_dirs(cache_dir);

        /* Remove cache dir from sysfs */
        kobject_del(cache_dir->kobj);

        kobject_put(cache_dir->kobj);

        kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
        while (cache) {
                struct cache *next = cache->next_local;

                WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
                          "CPU %i not accounted in %s(%s)\n",
                          cpu, cache->ofnode->full_name,
                          cache_type_string(cache));

                cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

                /* Release the cache object if all the cpus using it
                 * are offline */
                if (cpumask_empty(&cache->shared_cpu_map))
                        release_cache(cache);

                cache = next;
        }
}
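
/* Example (hypothetical topology): CPUs 0 and 1 share an L2 but have
 * private L1s.  Offlining CPU 0 clears bit 0 along its chain; only
 * its private L1 objects end up with an empty map and are released.
 * Offlining CPU 1 afterwards empties the L2's map too, releasing it.
 */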

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct cache *cache;

        /* Prevent userspace from seeing inconsistent state - remove
         * the sysfs hierarchy first */
        cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

        /* careful, sysfs population may have failed */
        if (cache_dir)
                remove_cache_dir(cache_dir);

        per_cpu(cache_dir_pcpu, cpu_id) = NULL;

        /* clear the CPU's bit in its cache chain, possibly freeing
         * cache objects */
        cache = cache_lookup_by_cpu(cpu_id);
        if (cache)
                cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */