linux/arch/arm/kernel/perf_event_v6.c
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *      - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *        effectively stops the counter from counting.
 *      - disable the counter's interrupt generation (each counter has its
 *        own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *      - enable the counter's interrupt generation.
 *      - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum armv6_perf_types {
        ARMV6_PERFCTR_ICACHE_MISS           = 0x0,
        ARMV6_PERFCTR_IBUF_STALL            = 0x1,
        ARMV6_PERFCTR_DDEP_STALL            = 0x2,
        ARMV6_PERFCTR_ITLB_MISS             = 0x3,
        ARMV6_PERFCTR_DTLB_MISS             = 0x4,
        ARMV6_PERFCTR_BR_EXEC               = 0x5,
        ARMV6_PERFCTR_BR_MISPREDICT         = 0x6,
        ARMV6_PERFCTR_INSTR_EXEC            = 0x7,
        ARMV6_PERFCTR_DCACHE_HIT            = 0x9,
        ARMV6_PERFCTR_DCACHE_ACCESS         = 0xA,
        ARMV6_PERFCTR_DCACHE_MISS           = 0xB,
        ARMV6_PERFCTR_DCACHE_WBACK          = 0xC,
        ARMV6_PERFCTR_SW_PC_CHANGE          = 0xD,
        ARMV6_PERFCTR_MAIN_TLB_MISS         = 0xF,
        ARMV6_PERFCTR_EXPL_D_ACCESS         = 0x10,
        ARMV6_PERFCTR_LSU_FULL_STALL        = 0x11,
        ARMV6_PERFCTR_WBUF_DRAINED          = 0x12,
        ARMV6_PERFCTR_CPU_CYCLES            = 0xFF,
        ARMV6_PERFCTR_NOP                   = 0x20,
};

enum armv6_counters {
        ARMV6_CYCLE_COUNTER = 0,
        ARMV6_COUNTER0,
        ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_DCACHE_MISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_DTLB_MISS,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_ITLB_MISS,
};

enum armv6mpcore_perf_types {
        ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
        ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
        ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
        ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
        ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
        ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
        ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
        ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
        ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
        ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
        ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
        ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
        ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
        ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
        ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
        ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
        ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
        ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
        ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
        ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6MPCORE_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,

        /*
         * The ARM performance counters can count micro DTLB misses, micro ITLB
         * misses and main TLB misses. There isn't an event for TLB misses, so
         * use the micro misses here and if users want the main TLB misses they
         * can use a raw counter.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_DTLB_MISS,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

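/*
 * The PMU is programmed through the CP15 Performance Monitor Control
 * Register (c15, c12, 0): a global enable bit, per-counter interrupt
 * enables, overflow flags and the event selection fields for the two
 * programmable counters (see the ARMV6_PMCR_* definitions below).
 */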
static inline unsigned long
armv6_pmcr_read(void)
{
        u32 val;
        asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
        return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
        asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE               (1 << 0)
#define ARMV6_PMCR_CTR01_RESET          (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET         (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV           (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN           (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN           (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW      (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW      (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW      (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT     12
#define ARMV6_PMCR_EVT_COUNT1_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
        (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
         ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
        return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
                                  enum armv6_counters counter)
{
        int ret = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
        else if (ARMV6_COUNTER0 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
        else if (ARMV6_COUNTER1 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return ret;
}

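/*
 * The counters themselves are read and written through CP15 c15, c12:
 * opcode2 1 is the cycle counter, opcode2 2 and 3 are the two
 * programmable event counters.
 */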
static inline u32 armv6pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;
        unsigned long value = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

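/*
 * Enabling an event programs its event code into the relevant EVT_COUNT
 * field of the PMCR and sets that counter's interrupt enable. The cycle
 * counter has no event field, so only its interrupt is enabled.
 */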
static void armv6pmu_enable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = 0;
                evt     = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_EVT_COUNT0_MASK;
                evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
                          ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_EVT_COUNT1_MASK;
                evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
                          ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the event
         * that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

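/*
 * All three counters share a single overflow interrupt: ack it, then
 * update, re-arm and report each counter that has actually overflowed.
 */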
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
                    void *dev)
{
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        if (!armv6_pmcr_has_overflowed(pmcr))
                return IRQ_NONE;

        regs = get_irq_regs();

        /*
         * The interrupts are cleared by writing the overflow flags back to
         * the control register. All of the other bits don't have any effect
         * if they are rewritten, so write the whole value back.
         */
        armv6_pmcr_write(pmcr);

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

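/*
 * start/stop toggle the global enable bit in the PMCR; the individual
 * counter configuration and interrupt enables are left untouched.
 */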
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val |= ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

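/*
 * Counter allocation: the cycle-count event can only go on the dedicated
 * cycle counter, everything else takes whichever of the two programmable
 * counters is still free.
 */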
static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        /* Always place a cycle counter into the cycle counter. */
        if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV6_CYCLE_COUNTER;
        } else {
                /*
                 * For anything other than a cycle counter, try and use
                 * counter0 and counter1.
                 */
                if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
                        return ARMV6_COUNTER1;

                if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
                        return ARMV6_COUNTER0;

                /* The counters are all in use. */
                return -EAGAIN;
        }
}

static void armv6pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
                evt     = 0;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
                evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
                evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the number
         * of ETM bus signal assertion cycles. The external reporting should
         * be disabled and so this should never increment.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, flags, evt = 0;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask    = ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask    = ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Unlike UP ARMv6, we don't have a way of stopping the counters. We
         * simply disable the interrupt reporting.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6_perf_map,
                                &armv6_perf_cache_map, 0xFF);
}

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq     = armv6pmu_handle_irq;
        cpu_pmu->enable         = armv6pmu_enable_event;
        cpu_pmu->disable        = armv6pmu_disable_event;
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6_map_event;
        cpu_pmu->num_events     = 3;
        cpu_pmu->max_period     = (1LLU << 32) - 1;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1136";
        return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1156";
        return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
        armv6pmu_init(cpu_pmu);
        cpu_pmu->name           = "armv6_1176";
        return 0;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv6mpcore_perf_map,
                                &armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->name           = "armv6_11mpcore";
        cpu_pmu->handle_irq     = armv6pmu_handle_irq;
        cpu_pmu->enable         = armv6pmu_enable_event;
        cpu_pmu->disable        = armv6mpcore_pmu_disable_event;
        cpu_pmu->read_counter   = armv6pmu_read_counter;
        cpu_pmu->write_counter  = armv6pmu_write_counter;
        cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
        cpu_pmu->start          = armv6pmu_start;
        cpu_pmu->stop           = armv6pmu_stop;
        cpu_pmu->map_event      = armv6mpcore_map_event;
        cpu_pmu->num_events     = 3;
        cpu_pmu->max_period     = (1LLU << 32) - 1;

        return 0;
}

static struct of_device_id armv6_pmu_of_device_ids[] = {
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6_1176_pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6_1136_pmu_init},
        { /* sentinel value */ }
};

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
        ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
        { /* sentinel value */ }
};

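/*
 * Bind either via a devicetree match or, failing that, by matching the
 * CPU part number against the probe table above.
 */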
static int armv6_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
                                    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
        .driver         = {
                .name   = "armv6-pmu",
                .of_match_table = armv6_pmu_of_device_ids,
        },
        .probe          = armv6_pmu_device_probe,
};

static int __init register_armv6_pmu_driver(void)
{
        return platform_driver_register(&armv6_pmu_driver);
}
device_initcall(register_armv6_pmu_driver);
#endif  /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */