linux/drivers/base/cacheinfo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

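/*
 * Each detected cache leaf ends up as a directory under sysfs:
 *	/sys/devices/system/cpu/cpuX/cache/indexY/{type,level,size,...}
 */
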
/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
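/*
 * With DT, each leaf's fw_token holds the device_node of its cache node,
 * so two leaves are shared exactly when the pointers compare equal.
 */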
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};

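/*
 * An illustrative (made-up values) DT cache node carrying the unified
 * properties above:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <512>;
 *	};
 */

/*
 * CACHE_TYPE_UNIFIED maps to row 0 ("cache-*"); CACHE_TYPE_INST (1) and
 * CACHE_TYPE_DATA (2) index the "i-cache-*" and "d-cache-*" rows directly.
 */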
static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}

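/*
 * The property readers below leave the target field untouched when the
 * property is absent, so values pre-populated by the architecture survive.
 */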
static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
                                struct device_node *np)
{
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                int ret;
                u32 line_size;
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                ret = of_property_read_u32(np, propname, &line_size);
                if (!ret) {
                        this_leaf->coherency_line_size = line_size;
                        break;
                }
        }
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

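/*
 * Ways are not read from DT but derived:
 *
 *	ways = size / (nr_sets * line_size)
 *
 * A node with cache-sets = <1> describes a fully associative cache;
 * ways_of_associativity is then left at 0, the sysfs convention for
 * full associativity (see cache_default_attrs_is_visible()).
 */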
static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets > 1 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
                                  struct device_node *np)
{
        return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
                               struct device_node *np)
{
        /*
         * init_cache_level must set up the cache level correctly,
         * overriding the architecturally specified levels, so
         * if type is NONE at this stage, it should be unified
         */
        if (this_leaf->type == CACHE_TYPE_NOCACHE &&
            cache_node_is_unified(this_leaf, np))
                this_leaf->type = CACHE_TYPE_UNIFIED;
        cache_size(this_leaf, np);
        cache_get_line_size(this_leaf, np);
        cache_nr_sets(this_leaf, np);
        cache_associativity(this_leaf);
}

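/*
 * Bind each leaf of @cpu to a DT node: level 1 leaves use the CPU node
 * itself, higher levels follow the "next-level-cache" phandle chain via
 * of_find_next_cache_node(). Properties are parsed and fw_token set along
 * the way; a missing node aborts the walk with -ENOENT.
 */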
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if fw_token is already populated */
        if (this_cpu_ci->info_list->fw_token)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np);   /* cpu node itself */
                if (!np)
                        break;
                cache_of_set_props(this_leaf, np);
                this_leaf->fw_token = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT/ACPI systems, assume unique level 1 caches and
         * system-wide shared caches for all other levels. This will be
         * used only if arch specific code has not populated shared_cpu_map.
         */
        return this_leaf->level != 1;
}
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
        return -ENOTSUPP;
}

unsigned int coherency_max_size;

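/*
 * Populate shared_cpu_map for every leaf of @cpu by comparing it with the
 * same-index leaf of each other online CPU. Also records the largest line
 * size seen in coherency_max_size.
 */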
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                ret = cache_setup_acpi(cpu);

        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue;       /* skip if itself or no cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
                /* record the maximum cache line size */
                if (this_leaf->coherency_line_size > coherency_max_size)
                        coherency_max_size = this_leaf->coherency_line_size;
        }

        return 0;
}

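/*
 * Inverse of cache_shared_cpu_map_setup(): remove @cpu from all sibling
 * maps and drop the DT node references held via fw_token.
 */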
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;

                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        if (!sib_cpu_ci->info_list)
                                continue;

                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                if (of_have_populated_dt())
                        of_node_put(this_leaf->fw_token);
        }
}

static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

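/*
 * Order of detection: the architecture's init_cache_level() sizes the
 * hierarchy, populate_cache_leaves() fills the leaves, and firmware
 * (DT or ACPI) then completes anything left unset.
 */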
static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        /*
         * populate_cache_leaves() may completely set up the cache leaves and
         * shared_cpu_map, or it may leave them partially set up.
         */
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for cache hierarchy, fw_token
         * and shared_cpu_map will be set up here only if they are
         * not populated already.
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])

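/*
 * show_one() stamps out a sysfs show() routine per numeric field; the
 * leaf is recovered from drvdata, which cache_add_dev() points at the
 * cacheinfo entry when creating each indexY device.
 */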
#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sprintf(buf, "%u\n", this_leaf->object);         \
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
                return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Placeholder for private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

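/*
 * Create one "indexY" device per leaf under cpuX/cache, attaching the
 * default attribute group plus any architecture-private group.
 */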
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                if (this_leaf->type == CACHE_TYPE_NOCACHE)
                        break;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

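/*
 * CPU hotplug callbacks: attributes and sysfs nodes are created as a CPU
 * comes online and torn down again before it goes offline.
 */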
static int cacheinfo_cpu_online(unsigned int cpu)
{
        int rc = detect_cache_attributes(cpu);

        if (rc)
                return rc;
        rc = cache_add_dev(cpu);
        if (rc)
                free_cache_attributes(cpu);
        return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
        if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
                cpu_cache_sysfs_exit(cpu);

        free_cache_attributes(cpu);
        return 0;
}

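/*
 * Registering the hotplug state also invokes the online callback for
 * every CPU that is already up, so boot CPUs get their cache devices
 * created here as well.
 */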
static int __init cacheinfo_sysfs_init(void)
{
        return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
                                 "base/cacheinfo:online",
                                 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);