linux/arch/arm/kernel/perf_event_v6.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *      - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *        effectively stops the counter from counting.
 *      - disable the counter's interrupt generation (each counter has its
 *        own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *      - enable the counter's interrupt generation.
 *      - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

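/*
 * Event numbers for the ARM1136/1156/1176 PMU. These values are programmed
 * into the EVTCOUNTx fields of the PMCR to select what a counter counts.
 * ARMV6_PERFCTR_CPU_CYCLES (0xFF) is routed to the dedicated cycle counter
 * rather than a programmable counter, and ARMV6_PERFCTR_NOP (0x20) selects
 * the ETMEXTOUT[0] signal, which is used to quietly park a disabled counter.
 */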
enum armv6_perf_types {
        ARMV6_PERFCTR_ICACHE_MISS           = 0x0,
        ARMV6_PERFCTR_IBUF_STALL            = 0x1,
        ARMV6_PERFCTR_DDEP_STALL            = 0x2,
        ARMV6_PERFCTR_ITLB_MISS             = 0x3,
        ARMV6_PERFCTR_DTLB_MISS             = 0x4,
        ARMV6_PERFCTR_BR_EXEC               = 0x5,
        ARMV6_PERFCTR_BR_MISPREDICT         = 0x6,
        ARMV6_PERFCTR_INSTR_EXEC            = 0x7,
        ARMV6_PERFCTR_DCACHE_HIT            = 0x9,
        ARMV6_PERFCTR_DCACHE_ACCESS         = 0xA,
        ARMV6_PERFCTR_DCACHE_MISS           = 0xB,
        ARMV6_PERFCTR_DCACHE_WBACK          = 0xC,
        ARMV6_PERFCTR_SW_PC_CHANGE          = 0xD,
        ARMV6_PERFCTR_MAIN_TLB_MISS         = 0xF,
        ARMV6_PERFCTR_EXPL_D_ACCESS         = 0x10,
        ARMV6_PERFCTR_LSU_FULL_STALL        = 0x11,
        ARMV6_PERFCTR_WBUF_DRAINED          = 0x12,
        ARMV6_PERFCTR_CPU_CYCLES            = 0xFF,
        ARMV6_PERFCTR_NOP                   = 0x20,
};

enum armv6_counters {
        ARMV6_CYCLE_COUNTER = 0,
        ARMV6_COUNTER0,
        ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations, but
 * the caches are Harvard (split), so there is no way to combine instruction
 * and data accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_DCACHE_MISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_DTLB_MISS,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_ITLB_MISS,
};

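/*
 * Event numbers for the ARM11MPCore PMU. The encodings differ from the
 * single-core ARMv6 set above; in particular, D-cache read and write
 * accesses/misses are reported as separate events here.
 */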
enum armv6mpcore_perf_types {
        ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
        ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
        ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
        ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
        ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
        ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
        ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
        ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
        ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
        ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
        ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
        ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
        ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
        ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
        ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
        ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
        ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
        ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
        ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
        ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations, but
 * the caches are Harvard (split), so there is no way to combine instruction
 * and data accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6MPCORE_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_DTLB_MISS,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

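/*
 * The ARMv6 performance monitor control register (PMCR) is accessed through
 * CP15 c15, c12, 0. All counter control (the global enable, the resets, the
 * per-counter interrupt enables and the event selection fields) lives in
 * this single register; the bit definitions follow below.
 */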
static inline unsigned long
armv6_pmcr_read(void)
{
        u32 val;
        asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
        return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
        asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE               (1 << 0)
#define ARMV6_PMCR_CTR01_RESET          (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET         (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV           (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN           (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN           (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW      (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW      (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW      (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT     12
#define ARMV6_PMCR_EVT_COUNT1_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
        (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
         ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
        return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
                                  enum armv6_counters counter)
{
        int ret = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
        else if (ARMV6_COUNTER0 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
        else if (ARMV6_COUNTER1 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return ret;
}

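/*
 * The cycle counter and the two event counters are read and written through
 * CP15 c15, c12, {1, 2, 3} respectively.
 */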
static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;
        unsigned long value = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = 0;
                evt     = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_EVT_COUNT0_MASK;
                evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
                          ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_EVT_COUNT1_MASK;
                evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
                          ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the event
         * that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        if (!armv6_pmcr_has_overflowed(pmcr))
                return IRQ_NONE;

        regs = get_irq_regs();

        /*
         * The interrupts are cleared by writing the overflow flags back to
         * the control register. All of the other bits don't have any effect
         * if they are rewritten, so write the whole value back.
         */
        armv6_pmcr_write(pmcr);

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

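/*
 * Starting and stopping the PMU just toggles the global enable bit in the
 * PMCR; the individual event selections and interrupt enables are left
 * untouched.
 */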
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val |= ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        /* Always place a cycle counter into the cycle counter. */
        if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV6_CYCLE_COUNTER;
        } else {
                /*
                 * For anything other than a cycle counter, try and use
                 * counter0 and counter1.
                 */
                if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
                        return ARMV6_COUNTER1;

                if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
                        return ARMV6_COUNTER0;

                /* The counters are all in use. */
                return -EAGAIN;
        }
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                     struct perf_event *event)
{
        clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
                evt     = 0;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
                evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
                evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the number
         * of ETM bus signal assertion cycles. The external reporting should
         * be disabled and so this should never increment.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, flags, evt = 0;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Unlike UP ARMv6, we don't have a way of stopping the counters. We
         * simply disable the interrupt reporting.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6_perf_map,
                                &armv6_perf_cache_map, 0xFF);
}

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq     = armv6pmu_handle_irq;
        cpu_pmu->enable         = armv6pmu_enable_event;
        cpu_pmu->disable        = armv6pmu_disable_event;
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
        cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6_map_event;
        cpu_pmu->num_events     = 3;
}

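/*
 * The ARM1136, ARM1156 and ARM1176 PMUs are programmed identically; the
 * per-variant init functions below only differ in the PMU name they
 * register.
 */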
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1136";
        return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1156";
        return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1176";
        return 0;
}

/*
 * ARMv6MPCore is almost identical to single-core ARMv6, except that some of
 * the events have different encodings and there is no *hack* to stop the
 * programmable counters. To stop a counter we simply disable its interrupt
 * reporting and update the event. When unthrottling we reset the period and
 * re-enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6mpcore_perf_map,
                                &armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->name           = "armv6_11mpcore";
        cpu_pmu->handle_irq     = armv6pmu_handle_irq;
        cpu_pmu->enable         = armv6pmu_enable_event;
        cpu_pmu->disable        = armv6mpcore_pmu_disable_event;
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
        cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6mpcore_map_event;
        cpu_pmu->num_events     = 3;

        return 0;
}

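/*
 * The driver binds either via one of the device tree compatible strings
 * below or, failing that, by matching the CPUID against the probe table.
 * A board's device tree would describe the PMU with something like the
 * following (illustrative only; the interrupt specifier depends on the
 * platform):
 *
 *      pmu {
 *              compatible = "arm,arm1176-pmu";
 *              interrupts = <...>;
 *      };
 */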
static const struct of_device_id armv6_pmu_of_device_ids[] = {
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6_1176_pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6_1136_pmu_init},
        { /* sentinel value */ }
};

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
        { /* sentinel value */ }
};

static int armv6_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
                                    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
        .driver         = {
                .name   = "armv6-pmu",
                .of_match_table = armv6_pmu_of_device_ids,
        },
        .probe          = armv6_pmu_device_probe,
};

builtin_platform_driver(armv6_pmu_driver);
#endif  /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */