linux/drivers/perf/qcom_l2_pmu.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
 */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/local64.h>
#include <asm/sysreg.h>
#include <soc/qcom/kryo-l2-accessors.h>

#define MAX_L2_CTRS             9

#define L2PMCR_NUM_EV_SHIFT     11
#define L2PMCR_NUM_EV_MASK      0x1F

#define L2PMCR                  0x400
#define L2PMCNTENCLR            0x403
#define L2PMCNTENSET            0x404
#define L2PMINTENCLR            0x405
#define L2PMINTENSET            0x406
#define L2PMOVSCLR              0x407
#define L2PMOVSSET              0x408
#define L2PMCCNTCR              0x409
#define L2PMCCNTR               0x40A
#define L2PMCCNTSR              0x40C
#define L2PMRESR                0x410
#define IA_L2PMXEVCNTCR_BASE    0x420
#define IA_L2PMXEVCNTR_BASE     0x421
#define IA_L2PMXEVFILTER_BASE   0x423
#define IA_L2PMXEVTYPER_BASE    0x424

#define IA_L2_REG_OFFSET        0x10

#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003

#define L2EVTYPER_REG_SHIFT     3

#define L2PMRESR_GROUP_BITS     8
#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)

#define L2CYCLE_CTR_BIT         31
#define L2CYCLE_CTR_RAW_CODE    0xFE

#define L2PMCR_RESET_ALL        0x6
#define L2PMCR_COUNTERS_ENABLE  0x1
#define L2PMCR_COUNTERS_DISABLE 0x0

#define L2PMRESR_EN             BIT_ULL(63)

#define L2_EVT_MASK             0x00000FFF
#define L2_EVT_CODE_MASK        0x00000FF0
#define L2_EVT_GRP_MASK         0x0000000F
#define L2_EVT_CODE_SHIFT       4
#define L2_EVT_GRP_SHIFT        0

#define L2_EVT_CODE(event)   (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
#define L2_EVT_GROUP(event)  (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)

#define L2_EVT_GROUP_MAX        7

#define L2_COUNTER_RELOAD       BIT_ULL(31)
#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)


#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
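/*
 * The per-counter registers are spaced IA_L2_REG_OFFSET apart, so e.g.
 * reg_idx(IA_L2PMXEVCNTR, 2) selects the event counter register for
 * counter 2: 0x421 + (2 * 0x10) = 0x441.
 */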

/*
 * Events
 */
#define L2_EVENT_CYCLES                    0xfe
#define L2_EVENT_DCACHE_OPS                0x400
#define L2_EVENT_ICACHE_OPS                0x401
#define L2_EVENT_TLBI                      0x402
#define L2_EVENT_BARRIERS                  0x403
#define L2_EVENT_TOTAL_READS               0x405
#define L2_EVENT_TOTAL_WRITES              0x406
#define L2_EVENT_TOTAL_REQUESTS            0x407
#define L2_EVENT_LDREX                     0x420
#define L2_EVENT_STREX                     0x421
#define L2_EVENT_CLREX                     0x422


struct cluster_pmu;

/*
 * Aggregate PMU. Implements the core pmu functions and manages
 * the hardware PMUs.
 */
struct l2cache_pmu {
        struct hlist_node node;
        u32 num_pmus;
        struct pmu pmu;
        int num_counters;
        cpumask_t cpumask;
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster;
        struct list_head clusters;
};

/*
 * The cache is made up of one or more clusters, each cluster has its own PMU.
 * Each cluster is associated with one or more CPUs.
 * This structure represents one of the hardware PMUs.
 *
 * Events can be envisioned as a 2-dimensional array. Each column represents
 * a group of events. There are 8 groups. Only one entry from each
 * group can be in use at a time.
 *
 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 * the code (array row) and G specifies the group (column).
 *
 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 * which is outside the above scheme.
 */
struct cluster_pmu {
        struct list_head next;
        struct perf_event *events[MAX_L2_CTRS];
        struct l2cache_pmu *l2cache_pmu;
        DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
        DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
        int irq;
        int cluster_id;
        /* The CPU that is used for collecting events on this cluster */
        int on_cpu;
        /* All the CPUs associated with this cluster */
        cpumask_t cluster_cpus;
        spinlock_t pmu_lock;
};
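/*
 * Worked example of the 0xCCG encoding: L2_EVENT_TOTAL_READS (0x405)
 * has code CC = 0x40 and group G = 5, so L2_EVT_CODE(0x405) == 0x40
 * and L2_EVT_GROUP(0x405) == 5. Only one group-5 event can be active
 * on a cluster at any time.
 */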

#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

static u32 l2_cycle_ctr_idx;
static u32 l2_counter_present_mask;

static inline u32 idx_to_reg_bit(u32 idx)
{
        if (idx == l2_cycle_ctr_idx)
                return BIT(L2CYCLE_CTR_BIT);

        return BIT(idx);
}

static inline struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}

static void cluster_pmu_reset(void)
{
        /* Reset all counters */
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
        kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
}

static inline void cluster_pmu_enable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
}

static inline void cluster_pmu_disable(void)
{
        kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
}

static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
{
        if (idx == l2_cycle_ctr_idx)
                kryo_l2_set_indirect_reg(L2PMCCNTR, value);
        else
                kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
}

static inline u64 cluster_pmu_counter_get_value(u32 idx)
{
        u64 value;

        if (idx == l2_cycle_ctr_idx)
                value = kryo_l2_get_indirect_reg(L2PMCCNTR);
        else
                value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));

        return value;
}

static inline void cluster_pmu_counter_enable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
{
        kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
}

static inline void cluster_pmu_set_evccntcr(u32 val)
{
        kryo_l2_set_indirect_reg(L2PMCCNTCR, val);
}

static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
}

static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
{
        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
}

static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
                               u32 event_group, u32 event_cc)
{
        u64 field;
        u64 resr_val;
        u32 shift;
        unsigned long flags;

        shift = L2PMRESR_GROUP_BITS * event_group;
        field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);

        spin_lock_irqsave(&cluster->pmu_lock, flags);

        resr_val = kryo_l2_get_indirect_reg(L2PMRESR);
        resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
        resr_val |= field;
        resr_val |= L2PMRESR_EN;
        kryo_l2_set_indirect_reg(L2PMRESR, resr_val);

        spin_unlock_irqrestore(&cluster->pmu_lock, flags);
}
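/*
 * L2PMRESR packs one 8-bit event code per group. For example, group 5
 * owns bits 47:40, so programming code 0x40 there uses shift = 8 * 5 =
 * 40 and field = 0x40ULL << 40. The read-modify-write is done under
 * pmu_lock because events in other groups share the same register.
 */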

/*
 * Hardware allows filtering of events based on the originating
 * CPU. Turn this off by setting filter bits to allow events from
 * all CPUs and subunits, as well as ID-independent events, in
 * this cluster.
 */
static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
{
        u32 val =  L2PMXEVFILTER_SUFILTER_ALL |
                   L2PMXEVFILTER_ORGFILTER_IDINDEP |
                   L2PMXEVFILTER_ORGFILTER_ALL;

        kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
}

static inline u32 cluster_pmu_getreset_ovsr(void)
{
        u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET);

        kryo_l2_set_indirect_reg(L2PMOVSCLR, result);
        return result;
}

static inline bool cluster_pmu_has_overflowed(u32 ovsr)
{
        return !!(ovsr & l2_counter_present_mask);
}

static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
{
        return !!(ovsr & idx_to_reg_bit(idx));
}

static void l2_cache_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev, now;
        u32 idx = hwc->idx;

        do {
                prev = local64_read(&hwc->prev_count);
                now = cluster_pmu_counter_get_value(idx);
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        /*
         * The cycle counter is 64-bit, but all other counters are
         * 32-bit, and we must handle 32-bit overflow explicitly.
         */
        delta = now - prev;
        if (idx != l2_cycle_ctr_idx)
                delta &= 0xffffffff;

        local64_add(delta, &event->count);
}
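/*
 * Example of the masking above: if a 32-bit counter previously read
 * 0xfffffff0 and now reads 0x00000010, now - prev is a huge 64-bit
 * value, but truncating it to 32 bits recovers the true delta of 0x20.
 */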

static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
                                       struct hw_perf_event *hwc)
{
        u32 idx = hwc->idx;
        u64 new;

        /*
         * We limit the max period to half the max counter value so
         * that even in the case of extreme interrupt latency the
         * counter will (hopefully) not wrap past its initial value.
         */
        if (idx == l2_cycle_ctr_idx)
                new = L2_CYCLE_COUNTER_RELOAD;
        else
                new = L2_COUNTER_RELOAD;

        local64_set(&hwc->prev_count, new);
        cluster_pmu_counter_set_value(idx, new);
}
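/*
 * Starting a 32-bit counter at L2_COUNTER_RELOAD (BIT(31)) leaves 2^31
 * counts of headroom before the overflow interrupt fires; likewise
 * BIT(63) leaves 2^63 cycles for the 64-bit cycle counter.
 */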

static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
                                   struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        unsigned int group;

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
                        return -EAGAIN;

                return l2_cycle_ctr_idx;
        }

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                /* The counters are all in use. */
                return -EAGAIN;

        /*
         * Check for column exclusion: event column already in use by another
         * event. This is for events which are not in the same perf event
         * group. Conflicting events within the same perf event group are
         * detected in event_init.
         */
        group = L2_EVT_GROUP(hwc->config_base);
        if (test_bit(group, cluster->used_groups))
                return -EAGAIN;

        set_bit(idx, cluster->used_counters);
        set_bit(group, cluster->used_groups);

        return idx;
}
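/*
 * For example, total-reads (0x405) and total-writes (0x406) occupy
 * groups 5 and 6 and can count concurrently, whereas two group-5 codes
 * (0x405 and, say, a hypothetical 0x415) mutually exclude each other.
 */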

static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
                                      struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cluster->used_counters);
        if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
                clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
}

static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
{
        struct cluster_pmu *cluster = data;
        int num_counters = cluster->l2cache_pmu->num_counters;
        u32 ovsr;
        int idx;

        ovsr = cluster_pmu_getreset_ovsr();
        if (!cluster_pmu_has_overflowed(ovsr))
                return IRQ_NONE;

        for_each_set_bit(idx, cluster->used_counters, num_counters) {
                struct perf_event *event = cluster->events[idx];
                struct hw_perf_event *hwc;

                if (WARN_ON_ONCE(!event))
                        continue;

                if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
                        continue;

                l2_cache_event_update(event);
                hwc = &event->hw;

                l2_cache_cluster_set_period(cluster, hwc);
        }

        return IRQ_HANDLED;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static void l2_cache_pmu_enable(struct pmu *pmu)
{
        /*
         * Although there is only one PMU (per socket) controlling multiple
         * physical PMUs (per cluster), because we do not support per-task mode
         * each event is associated with a CPU. Each event has pmu_enable
         * called on its CPU, so here it is only necessary to enable the
         * counters for the current CPU.
         */

        cluster_pmu_enable();
}

static void l2_cache_pmu_disable(struct pmu *pmu)
{
        cluster_pmu_disable();
}

static int l2_cache_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        struct perf_event *sibling;
        struct l2cache_pmu *l2cache_pmu;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        l2cache_pmu = to_l2cache_pmu(event->pmu);

        if (hwc->sample_period) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
             ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
            (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Invalid config %llx\n",
                                    event->attr.config);
                return -EINVAL;
        }

        /* Don't allow groups with mixed PMUs, except for s/w events */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                         "Can't create mixed PMU group\n");
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling)) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                 "Can't create mixed PMU group\n");
                        return -EINVAL;
                }
        }

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                /* CPU has not been initialised */
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                        "CPU%d not associated with L2 cluster\n", event->cpu);
                return -EINVAL;
        }

        /* Ensure all events in a group are on the same cpu */
        if ((event->group_leader != event) &&
            (cluster->on_cpu != event->group_leader->cpu)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                         "Can't create group on CPUs %d and %d",
                         event->cpu, event->group_leader->cpu);
                return -EINVAL;
        }

        if ((event != event->group_leader) &&
            !is_software_event(event->group_leader) &&
            (L2_EVT_GROUP(event->group_leader->attr.config) ==
             L2_EVT_GROUP(event->attr.config))) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                         "Column exclusion: conflicting events %llx %llx\n",
                       event->group_leader->attr.config,
                       event->attr.config);
                return -EINVAL;
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if ((sibling != event) &&
                    !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
                     L2_EVT_GROUP(event->attr.config))) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                             "Column exclusion: conflicting events %llx %llx\n",
                                            sibling->attr.config,
                                            event->attr.config);
                        return -EINVAL;
                }
        }

        hwc->idx = -1;
        hwc->config_base = event->attr.config;

        /*
         * Ensure all events are on the same cpu so all events are in the
         * same cpu context, to avoid races on pmu_enable etc.
         */
        event->cpu = cluster->on_cpu;

        return 0;
}

static void l2_cache_event_start(struct perf_event *event, int flags)
{
        struct cluster_pmu *cluster;
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 config;
        u32 event_cc, event_group;

        hwc->state = 0;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_cluster_set_period(cluster, hwc);

        if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
                cluster_pmu_set_evccntcr(0);
        } else {
                config = hwc->config_base;
                event_cc    = L2_EVT_CODE(config);
                event_group = L2_EVT_GROUP(config);

                cluster_pmu_set_evcntcr(idx, 0);
                cluster_pmu_set_evtyper(idx, event_group);
                cluster_pmu_set_resr(cluster, event_group, event_cc);
                cluster_pmu_set_evfilter_sys_mode(idx);
        }

        cluster_pmu_counter_enable_interrupt(idx);
        cluster_pmu_counter_enable(idx);
}

static void l2_cache_event_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (hwc->state & PERF_HES_STOPPED)
                return;

        cluster_pmu_counter_disable_interrupt(idx);
        cluster_pmu_counter_disable(idx);

        if (flags & PERF_EF_UPDATE)
                l2_cache_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int l2_cache_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;
        struct cluster_pmu *cluster;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        idx = l2_cache_get_event_idx(cluster, event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        cluster->events[idx] = event;
        local64_set(&hwc->prev_count, 0);

        if (flags & PERF_EF_START)
                l2_cache_event_start(event, flags);

        /* Propagate changes to the userspace mapping. */
        perf_event_update_userpage(event);

        return err;
}

static void l2_cache_event_del(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct cluster_pmu *cluster;
        int idx = hwc->idx;

        cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);

        l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
        cluster->events[idx] = NULL;
        l2_cache_clear_event_idx(cluster, event);

        perf_event_update_userpage(event);
}

static void l2_cache_event_read(struct perf_event *event)
{
        l2_cache_event_update(event);
}

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}

static struct device_attribute l2_cache_pmu_cpumask_attr =
                __ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);

static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
        &l2_cache_pmu_cpumask_attr.attr,
        NULL,
};

static const struct attribute_group l2_cache_pmu_cpumask_group = {
        .attrs = l2_cache_pmu_cpumask_attrs,
};

/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code,   "config:4-11");
PMU_FORMAT_ATTR(l2_group,  "config:0-3");
PMU_FORMAT_ATTR(event,     "config:0-11");

static struct attribute *l2_cache_pmu_formats[] = {
        &format_attr_l2_code.attr,
        &format_attr_l2_group.attr,
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group l2_cache_pmu_format_group = {
        .name = "format",
        .attrs = l2_cache_pmu_formats,
};
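/*
 * With these format strings, userspace can request an event by name or
 * by raw CCG code; illustrative invocations (not part of this driver):
 *
 *   perf stat -e l2cache_0/total-reads/ -a -- sleep 1
 *   perf stat -e l2cache_0/l2_code=0x40,l2_group=0x5/ -a -- sleep 1
 *
 * Both select config 0x405 on this PMU.
 */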

static ssize_t l2cache_pmu_event_show(struct device *dev,
                                      struct device_attribute *attr, char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define L2CACHE_EVENT_ATTR(_name, _id)                      \
        PMU_EVENT_ATTR_ID(_name, l2cache_pmu_event_show, _id)

static struct attribute *l2_cache_pmu_events[] = {
        L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
        L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
        L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
        L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
        L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
        L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
        L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
        L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
        L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
        L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
        L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
        NULL
};

static const struct attribute_group l2_cache_pmu_events_group = {
        .name = "events",
        .attrs = l2_cache_pmu_events,
};

static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
        &l2_cache_pmu_format_group,
        &l2_cache_pmu_cpumask_group,
        &l2_cache_pmu_events_group,
        NULL,
};

/*
 * Generic device handlers
 */

static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
        { "QCOM8130", },
        { }
};

static int get_num_counters(void)
{
        int val;

        val = kryo_l2_get_indirect_reg(L2PMCR);

        /*
         * Read number of counters from L2PMCR and add 1
         * for the cycle counter.
         */
        return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
}
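/*
 * For example, an L2PMCR whose NUM_EV field (bits 15:11) reads 8 yields
 * 8 event counters plus the cycle counter, i.e. 9 in total, matching
 * MAX_L2_CTRS.
 */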

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        u64 mpidr;
        int cpu_cluster_id;
        struct cluster_pmu *cluster;

        /*
         * This assumes that the cluster_id is in MPIDR[aff1] for
         * single-threaded cores, and MPIDR[aff2] for multi-threaded
         * cores. This logic will have to be updated if this changes.
         */
        mpidr = read_cpuid_mpidr();
        if (mpidr & MPIDR_MT_BITMASK)
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
        else
                cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n", cpu,
                         cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }

        /*
         * The list iterator is never NULL after a completed loop, so
         * return NULL explicitly when no cluster matched rather than
         * returning the (bogus) iterator value.
         */
        return NULL;
}
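/*
 * Example: a single-threaded CPU with MPIDR 0x00000101 (Aff1 = 1,
 * Aff0 = 1) yields cpu_cluster_id 1, so it matches the cluster whose
 * ACPI _UID parsed to fw_cluster_id 1 at probe time.
 */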

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come online */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster) {
                        /* Only if broken firmware doesn't list every cluster */
                        WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
                        return 0;
                }
        }

        /* If another CPU is managing this cluster, we're done */
        if (cluster->on_cpu != -1)
                return 0;

        /*
         * All CPUs on this cluster were down, use this one.
         * Reset to put it into sane state.
         */
        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
        cluster_pmu_reset();

        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
        enable_irq(cluster->irq);

        return 0;
}

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster)
                return 0;

        /* If this CPU is not managing the cluster, we're done */
        if (cluster->on_cpu != cpu)
                return 0;

        /* Give up ownership of cluster */
        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
        cluster->on_cpu = -1;

        /* Any other CPU for this cluster which is still online */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids) {
                disable_irq(cluster->irq);
                return 0;
        }

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));

        return 0;
}

static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
        struct platform_device *pdev = to_platform_device(dev->parent);
        struct platform_device *sdev = to_platform_device(dev);
        struct l2cache_pmu *l2cache_pmu = data;
        struct cluster_pmu *cluster;
        struct acpi_device *device;
        unsigned long fw_cluster_id;
        int err;
        int irq;

        if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
                return -ENODEV;

        if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) {
                dev_err(&pdev->dev, "unable to read ACPI uid\n");
                return -ENODEV;
        }

        cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
        if (!cluster)
                return -ENOMEM;

        INIT_LIST_HEAD(&cluster->next);
        list_add(&cluster->next, &l2cache_pmu->clusters);
        cluster->cluster_id = fw_cluster_id;

        irq = platform_get_irq(sdev, 0);
        if (irq < 0)
                return irq;
        cluster->irq = irq;

        cluster->l2cache_pmu = l2cache_pmu;
        cluster->on_cpu = -1;

        err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
                               IRQF_NOBALANCING | IRQF_NO_THREAD |
                               IRQF_NO_AUTOEN,
                               "l2-cache-pmu", cluster);
        if (err) {
                dev_err(&pdev->dev,
                        "Unable to request IRQ%d for L2 PMU counters\n", irq);
                return err;
        }

        dev_info(&pdev->dev,
                "Registered L2 cache PMU cluster %ld\n", fw_cluster_id);

        spin_lock_init(&cluster->pmu_lock);

        l2cache_pmu->num_pmus++;

        return 0;
}

static int l2_cache_pmu_probe(struct platform_device *pdev)
{
        int err;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu =
                devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
        if (!l2cache_pmu)
                return -ENOMEM;

        INIT_LIST_HEAD(&l2cache_pmu->clusters);

        platform_set_drvdata(pdev, l2cache_pmu);
        l2cache_pmu->pmu = (struct pmu) {
                /* suffix is instance id for future use with multiple sockets */
                .name           = "l2cache_0",
                .task_ctx_nr    = perf_invalid_context,
                .pmu_enable     = l2_cache_pmu_enable,
                .pmu_disable    = l2_cache_pmu_disable,
                .event_init     = l2_cache_event_init,
                .add            = l2_cache_event_add,
                .del            = l2_cache_event_del,
                .start          = l2_cache_event_start,
                .stop           = l2_cache_event_stop,
                .read           = l2_cache_event_read,
                .attr_groups    = l2_cache_pmu_attr_grps,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };

        l2cache_pmu->num_counters = get_num_counters();
        l2cache_pmu->pdev = pdev;
        l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
                                                     struct cluster_pmu *);
        if (!l2cache_pmu->pmu_cluster)
                return -ENOMEM;

        l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
        l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
                BIT(L2CYCLE_CTR_BIT);
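        /*
         * With the typical 9 counters this gives a cycle-counter index
         * of 8 and a present mask of GENMASK(7, 0) | BIT(31): one bit
         * for each of the eight event counters plus the dedicated
         * cycle counter bit.
         */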

        cpumask_clear(&l2cache_pmu->cpumask);

        /* Read cluster info and initialize each cluster */
        err = device_for_each_child(&pdev->dev, l2cache_pmu,
                                    l2_cache_pmu_probe_cluster);
        if (err)
                return err;

        if (l2cache_pmu->num_pmus == 0) {
                dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
                return -ENODEV;
        }

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                       &l2cache_pmu->node);
        if (err) {
                dev_err(&pdev->dev, "Error %d registering hotplug", err);
                return err;
        }

        err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
        if (err) {
                dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
                goto out_unregister;
        }

        dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
                 l2cache_pmu->num_pmus);

        return err;

out_unregister:
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return err;
}

static int l2_cache_pmu_remove(struct platform_device *pdev)
{
        struct l2cache_pmu *l2cache_pmu =
                to_l2cache_pmu(platform_get_drvdata(pdev));

        perf_pmu_unregister(&l2cache_pmu->pmu);
        cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                    &l2cache_pmu->node);
        return 0;
}

static struct platform_driver l2_cache_pmu_driver = {
        .driver = {
                .name = "qcom-l2cache-pmu",
                .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
                .suppress_bind_attrs = true,
        },
        .probe = l2_cache_pmu_probe,
        .remove = l2_cache_pmu_remove,
};

static int __init register_l2_cache_pmu_driver(void)
{
        int err;

        err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
                                      "AP_PERF_ARM_QCOM_L2_ONLINE",
                                      l2cache_pmu_online_cpu,
                                      l2cache_pmu_offline_cpu);
        if (err)
                return err;

        return platform_driver_register(&l2_cache_pmu_driver);
}
device_initcall(register_l2_cache_pmu_driver);