linux/arch/arm/kernel/perf_event_v7.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   4 *
   5 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   6 * 2010 (c) MontaVista Software, LLC.
   7 *
   8 * Copied from ARMv6 code, with the low level code inspired
   9 *  by the ARMv7 Oprofile code.
  10 *
  11 * Cortex-A8 has up to 4 configurable performance counters and
  12 *  a single cycle counter.
  13 * Cortex-A9 has up to 31 configurable performance counters and
  14 *  a single cycle counter.
  15 *
  16 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  17 *  counter and all 4 performance counters together can be reset separately.
  18 */
  19
  20#ifdef CONFIG_CPU_V7
  21
  22#include <asm/cp15.h>
  23#include <asm/cputype.h>
  24#include <asm/irq_regs.h>
  25#include <asm/vfp.h>
  26#include "../vfp/vfpinstr.h"
  27
  28#include <linux/of.h>
  29#include <linux/perf/arm_pmu.h>
  30#include <linux/platform_device.h>
  31
  32/*
  33 * Common ARMv7 event types
  34 *
  35 * Note: An implementation may not be able to count all of these events
  36 * but the encodings are considered to be `reserved' in the case that
  37 * they are not available.
  38 */
  39#define ARMV7_PERFCTR_PMNC_SW_INCR                      0x00
  40#define ARMV7_PERFCTR_L1_ICACHE_REFILL                  0x01
  41#define ARMV7_PERFCTR_ITLB_REFILL                       0x02
  42#define ARMV7_PERFCTR_L1_DCACHE_REFILL                  0x03
  43#define ARMV7_PERFCTR_L1_DCACHE_ACCESS                  0x04
  44#define ARMV7_PERFCTR_DTLB_REFILL                       0x05
  45#define ARMV7_PERFCTR_MEM_READ                          0x06
  46#define ARMV7_PERFCTR_MEM_WRITE                         0x07
  47#define ARMV7_PERFCTR_INSTR_EXECUTED                    0x08
  48#define ARMV7_PERFCTR_EXC_TAKEN                         0x09
  49#define ARMV7_PERFCTR_EXC_EXECUTED                      0x0A
  50#define ARMV7_PERFCTR_CID_WRITE                         0x0B
  51
  52/*
  53 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  54 * It counts:
  55 *  - all (taken) branch instructions,
  56 *  - instructions that explicitly write the PC,
  57 *  - exception generating instructions.
  58 */
  59#define ARMV7_PERFCTR_PC_WRITE                          0x0C
  60#define ARMV7_PERFCTR_PC_IMM_BRANCH                     0x0D
  61#define ARMV7_PERFCTR_PC_PROC_RETURN                    0x0E
  62#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS              0x0F
  63#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED                0x10
  64#define ARMV7_PERFCTR_CLOCK_CYCLES                      0x11
  65#define ARMV7_PERFCTR_PC_BRANCH_PRED                    0x12
  66
  67/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  68#define ARMV7_PERFCTR_MEM_ACCESS                        0x13
  69#define ARMV7_PERFCTR_L1_ICACHE_ACCESS                  0x14
  70#define ARMV7_PERFCTR_L1_DCACHE_WB                      0x15
  71#define ARMV7_PERFCTR_L2_CACHE_ACCESS                   0x16
  72#define ARMV7_PERFCTR_L2_CACHE_REFILL                   0x17
  73#define ARMV7_PERFCTR_L2_CACHE_WB                       0x18
  74#define ARMV7_PERFCTR_BUS_ACCESS                        0x19
  75#define ARMV7_PERFCTR_MEM_ERROR                         0x1A
  76#define ARMV7_PERFCTR_INSTR_SPEC                        0x1B
  77#define ARMV7_PERFCTR_TTBR_WRITE                        0x1C
  78#define ARMV7_PERFCTR_BUS_CYCLES                        0x1D
  79
  80#define ARMV7_PERFCTR_CPU_CYCLES                        0xFF
  81
  82/* ARMv7 Cortex-A8 specific event types */
  83#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS                0x43
  84#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL                0x44
  85#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS               0x50
  86#define ARMV7_A8_PERFCTR_STALL_ISIDE                    0x56
  87
  88/* ARMv7 Cortex-A9 specific event types */
  89#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME              0x68
  90#define ARMV7_A9_PERFCTR_STALL_ICACHE                   0x60
  91#define ARMV7_A9_PERFCTR_STALL_DISPATCH                 0x66
  92
  93/* ARMv7 Cortex-A5 specific event types */
  94#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL              0xc2
  95#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP         0xc3
  96
  97/* ARMv7 Cortex-A15 specific event types */
  98#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ         0x40
  99#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE        0x41
 100#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ         0x42
 101#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE        0x43
 102
 103#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ           0x4C
 104#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE          0x4D
 105
 106#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ          0x50
 107#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE         0x51
 108#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ          0x52
 109#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE         0x53
 110
 111#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC                 0x76
 112
 113/* ARMv7 Cortex-A12 specific event types */
 114#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ         0x40
 115#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE        0x41
 116
 117#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ          0x50
 118#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE         0x51
 119
 120#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC                 0x76
 121
 122#define ARMV7_A12_PERFCTR_PF_TLB_REFILL                 0xe7
 123
 124/* ARMv7 Krait specific event types */
 125#define KRAIT_PMRESR0_GROUP0                            0xcc
 126#define KRAIT_PMRESR1_GROUP0                            0xd0
 127#define KRAIT_PMRESR2_GROUP0                            0xd4
 128#define KRAIT_VPMRESR0_GROUP0                           0xd8
 129
 130#define KRAIT_PERFCTR_L1_ICACHE_ACCESS                  0x10011
 131#define KRAIT_PERFCTR_L1_ICACHE_MISS                    0x10010
 132
 133#define KRAIT_PERFCTR_L1_ITLB_ACCESS                    0x12222
 134#define KRAIT_PERFCTR_L1_DTLB_ACCESS                    0x12210
 135
 136/* ARMv7 Scorpion specific event types */
 137#define SCORPION_LPM0_GROUP0                            0x4c
 138#define SCORPION_LPM1_GROUP0                            0x50
 139#define SCORPION_LPM2_GROUP0                            0x54
 140#define SCORPION_L2LPM_GROUP0                           0x58
 141#define SCORPION_VLPM_GROUP0                            0x5c
 142
 143#define SCORPION_ICACHE_ACCESS                          0x10053
 144#define SCORPION_ICACHE_MISS                            0x10052
 145
 146#define SCORPION_DTLB_ACCESS                            0x12013
 147#define SCORPION_DTLB_MISS                              0x12012
 148
 149#define SCORPION_ITLB_MISS                              0x12021
 150
 151/*
 152 * Cortex-A8 HW events mapping
 153 *
 154 * The hardware events that we support. We do support cache operations but
 155 * we have Harvard caches and no way to combine instruction and data
 156 * accesses/misses in hardware.
 157 */
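/*
 * For example, a generic PERF_COUNT_HW_CACHE_MISSES event on the A8 is
 * counted as ARMV7_PERFCTR_L1_DCACHE_REFILL (0x03) via this table and
 * armv7_a8_map_event() below.
 */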
 158static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 159        PERF_MAP_ALL_UNSUPPORTED,
 160        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 161        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
 162        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 163        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 164        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
 165        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 166        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
 167};
 168
 169static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 170                                          [PERF_COUNT_HW_CACHE_OP_MAX]
 171                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 172        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 173
 174        /*
 175         * The performance counters don't differentiate between read and write
 176         * accesses/misses so this isn't strictly correct, but it's the best we
 177         * can do. Writes and reads get combined.
 178         */
 179        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 180        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 181        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 182        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 183
 184        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 185        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 186
 187        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 188        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 189        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 190        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 191
 192        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
 193        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
 194
 195        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 196        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 197
 198        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 199        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 200        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 201        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 202};
 203
 204/*
 205 * Cortex-A9 HW events mapping
 206 */
 207static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 208        PERF_MAP_ALL_UNSUPPORTED,
 209        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 210        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 211        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 212        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 213        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
 214        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 215        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
 216        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
 217};
 218
 219static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 220                                          [PERF_COUNT_HW_CACHE_OP_MAX]
 221                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 222        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 223
 224        /*
 225         * The performance counters don't differentiate between read and write
 226         * accesses/misses so this isn't strictly correct, but it's the best we
 227         * can do. Writes and reads get combined.
 228         */
 229        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 230        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 231        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 232        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 233
 234        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 235
 236        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
 237        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
 238
 239        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 240        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 241
 242        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 243        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 244        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 245        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 246};
 247
 248/*
 249 * Cortex-A5 HW events mapping
 250 */
 251static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 252        PERF_MAP_ALL_UNSUPPORTED,
 253        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 254        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
 255        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 256        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 257        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
 258        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 259};
 260
 261static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 262                                        [PERF_COUNT_HW_CACHE_OP_MAX]
 263                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 264        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 265
 266        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 267        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 268        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 271        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 272
 273        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 274        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 275        /*
 276         * The prefetch counters don't differentiate between the I side and the
 277         * D side.
 278         */
 279        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 280        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 281
 282        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
 283        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
 284
 285        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 286        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 287
 288        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 289        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 291        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 292};
 293
 294/*
 295 * Cortex-A15 HW events mapping
 296 */
 297static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 298        PERF_MAP_ALL_UNSUPPORTED,
 299        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 300        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
 301        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 304        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 305        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
 306};
 307
 308static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 309                                        [PERF_COUNT_HW_CACHE_OP_MAX]
 310                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 311        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 312
 313        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 314        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 315        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 316        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 317
 318        /*
 319         * Not all performance counters differentiate between read and write
 320         * accesses/misses so we're not always strictly correct, but it's the
 321         * best we can do. Writes and reads get combined in these cases.
 322         */
 323        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 324        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 325
 326        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 327        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 328        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 329        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 330
 331        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 332        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 333
 334        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 335        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 336
 337        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 338        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 339        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 340        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 341};
 342
 343/*
 344 * Cortex-A7 HW events mapping
 345 */
 346static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 347        PERF_MAP_ALL_UNSUPPORTED,
 348        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 349        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
 350        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 351        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 352        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
 353        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 354        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
 355};
 356
 357static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 358                                        [PERF_COUNT_HW_CACHE_OP_MAX]
 359                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 360        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 361
 362        /*
 363         * The performance counters don't differentiate between read and write
 364         * accesses/misses so this isn't strictly correct, but it's the best we
 365         * can do. Writes and reads get combined.
 366         */
 367        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 368        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 369        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 370        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 371
 372        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 373        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 374
 375        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_PERFCTR_L2_CACHE_ACCESS,
 376        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_PERFCTR_L2_CACHE_REFILL,
 377        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L2_CACHE_ACCESS,
 378        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,
 379
 380        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
 381        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
 382
 383        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 384        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 385
 386        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 387        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 388        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 389        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 390};
 391
 392/*
 393 * Cortex-A12 HW events mapping
 394 */
 395static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 396        PERF_MAP_ALL_UNSUPPORTED,
 397        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
 398        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
 399        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 400        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 401        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 402        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 403        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
 404};
 405
 406static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 407                                        [PERF_COUNT_HW_CACHE_OP_MAX]
 408                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 409        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 410
 411        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 412        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 413        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 414        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 415
 416        /*
 417         * Not all performance counters differentiate between read and write
 418         * accesses/misses so we're not always strictly correct, but it's the
 419         * best we can do. Writes and reads get combined in these cases.
 420         */
 421        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 422        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
 423
 424        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 425        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_PERFCTR_L2_CACHE_REFILL,
 426        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 427        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,
 428
 429        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
 430        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
 431        [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]       = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 432
 433        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
 434        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
 435
 436        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 437        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 438        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 439        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 440};
 441
 442/*
 443 * Krait HW events mapping
 444 */
 445static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 446        PERF_MAP_ALL_UNSUPPORTED,
 447        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
 448        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
 449        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 450        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 451        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
 452};
 453
 454static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 455        PERF_MAP_ALL_UNSUPPORTED,
 456        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
 457        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
 458        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 459        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
 460};
 461
 462static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 463                                          [PERF_COUNT_HW_CACHE_OP_MAX]
 464                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 465        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 466
 467        /*
 468         * The performance counters don't differentiate between read and write
 469         * accesses/misses so this isn't strictly correct, but it's the best we
 470         * can do. Writes and reads get combined.
 471         */
 472        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 473        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 474        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 475        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 476
 477        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 478        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = KRAIT_PERFCTR_L1_ICACHE_MISS,
 479
 480        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
 481        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]        = KRAIT_PERFCTR_L1_DTLB_ACCESS,
 482
 483        [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
 484        [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]        = KRAIT_PERFCTR_L1_ITLB_ACCESS,
 485
 486        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
 487        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 488        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 489        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 490};
 491
 492/*
 493 * Scorpion HW events mapping
 494 */
 495static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
 496        PERF_MAP_ALL_UNSUPPORTED,
 497        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
 498        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
 499        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 500        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 501        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
 502};
 503
 504static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 505                                            [PERF_COUNT_HW_CACHE_OP_MAX]
 506                                            [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 507        PERF_CACHE_MAP_ALL_UNSUPPORTED,
 508        /*
 509         * The performance counters don't differentiate between read and write
 510         * accesses/misses so this isn't strictly correct, but it's the best we
 511         * can do. Writes and reads get combined.
 512         */
 513        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 514        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 515        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 516        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 517        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
 518        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
 519        /*
 520         * Only ITLB misses and DTLB refills are supported.  If users want the
 521         * DTLB misses rather than refills, a raw counter must be used.
 522         */
 523        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 524        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 525        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 526        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 527        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 528        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 529        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 530        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 531        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 532        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 533};
 534
 535PMU_FORMAT_ATTR(event, "config:0-7");
 536
 537static struct attribute *armv7_pmu_format_attrs[] = {
 538        &format_attr_event.attr,
 539        NULL,
 540};
 541
 542static struct attribute_group armv7_pmu_format_attr_group = {
 543        .name = "format",
 544        .attrs = armv7_pmu_format_attrs,
 545};
 546
 547#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
 548#define ARMV7_EVENT_ATTR(name, config) \
 549        PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
 550                              "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
 551
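/*
 * A sketch of the expansion: because ARMV7_EVENT_ATTR_RESOLVE() stringifies
 * its argument only after it has been macro-expanded, an invocation such as
 *
 *      ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 *
 * yields a sysfs event string of roughly "event=0x00", i.e. the numeric
 * encoding rather than the macro name.
 */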
 552ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 553ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
 554ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
 555ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
 556ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
 557ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
 558ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
 559ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
 560ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
 561ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
 562ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
 563ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
 564ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
 565ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
 566ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
 567ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
 568ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
 569ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
 570ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
 571
 572static struct attribute *armv7_pmuv1_event_attrs[] = {
 573        &armv7_event_attr_sw_incr.attr.attr,
 574        &armv7_event_attr_l1i_cache_refill.attr.attr,
 575        &armv7_event_attr_l1i_tlb_refill.attr.attr,
 576        &armv7_event_attr_l1d_cache_refill.attr.attr,
 577        &armv7_event_attr_l1d_cache.attr.attr,
 578        &armv7_event_attr_l1d_tlb_refill.attr.attr,
 579        &armv7_event_attr_ld_retired.attr.attr,
 580        &armv7_event_attr_st_retired.attr.attr,
 581        &armv7_event_attr_inst_retired.attr.attr,
 582        &armv7_event_attr_exc_taken.attr.attr,
 583        &armv7_event_attr_exc_return.attr.attr,
 584        &armv7_event_attr_cid_write_retired.attr.attr,
 585        &armv7_event_attr_pc_write_retired.attr.attr,
 586        &armv7_event_attr_br_immed_retired.attr.attr,
 587        &armv7_event_attr_br_return_retired.attr.attr,
 588        &armv7_event_attr_unaligned_ldst_retired.attr.attr,
 589        &armv7_event_attr_br_mis_pred.attr.attr,
 590        &armv7_event_attr_cpu_cycles.attr.attr,
 591        &armv7_event_attr_br_pred.attr.attr,
 592        NULL,
 593};
 594
 595static struct attribute_group armv7_pmuv1_events_attr_group = {
 596        .name = "events",
 597        .attrs = armv7_pmuv1_event_attrs,
 598};
 599
 600ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 601ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 602ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
 603ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
 604ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
 605ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
 606ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
 607ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
 608ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
 609ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
 610ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
 611
 612static struct attribute *armv7_pmuv2_event_attrs[] = {
 613        &armv7_event_attr_sw_incr.attr.attr,
 614        &armv7_event_attr_l1i_cache_refill.attr.attr,
 615        &armv7_event_attr_l1i_tlb_refill.attr.attr,
 616        &armv7_event_attr_l1d_cache_refill.attr.attr,
 617        &armv7_event_attr_l1d_cache.attr.attr,
 618        &armv7_event_attr_l1d_tlb_refill.attr.attr,
 619        &armv7_event_attr_ld_retired.attr.attr,
 620        &armv7_event_attr_st_retired.attr.attr,
 621        &armv7_event_attr_inst_retired.attr.attr,
 622        &armv7_event_attr_exc_taken.attr.attr,
 623        &armv7_event_attr_exc_return.attr.attr,
 624        &armv7_event_attr_cid_write_retired.attr.attr,
 625        &armv7_event_attr_pc_write_retired.attr.attr,
 626        &armv7_event_attr_br_immed_retired.attr.attr,
 627        &armv7_event_attr_br_return_retired.attr.attr,
 628        &armv7_event_attr_unaligned_ldst_retired.attr.attr,
 629        &armv7_event_attr_br_mis_pred.attr.attr,
 630        &armv7_event_attr_cpu_cycles.attr.attr,
 631        &armv7_event_attr_br_pred.attr.attr,
 632        &armv7_event_attr_mem_access.attr.attr,
 633        &armv7_event_attr_l1i_cache.attr.attr,
 634        &armv7_event_attr_l1d_cache_wb.attr.attr,
 635        &armv7_event_attr_l2d_cache.attr.attr,
 636        &armv7_event_attr_l2d_cache_refill.attr.attr,
 637        &armv7_event_attr_l2d_cache_wb.attr.attr,
 638        &armv7_event_attr_bus_access.attr.attr,
 639        &armv7_event_attr_memory_error.attr.attr,
 640        &armv7_event_attr_inst_spec.attr.attr,
 641        &armv7_event_attr_ttbr_write_retired.attr.attr,
 642        &armv7_event_attr_bus_cycles.attr.attr,
 643        NULL,
 644};
 645
 646static struct attribute_group armv7_pmuv2_events_attr_group = {
 647        .name = "events",
 648        .attrs = armv7_pmuv2_event_attrs,
 649};
 650
 651/*
 652 * Perf Events' indices
 653 */
 654#define ARMV7_IDX_CYCLE_COUNTER 0
 655#define ARMV7_IDX_COUNTER0      1
 656#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
 657        (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 658
 659#define ARMV7_MAX_COUNTERS      32
 660#define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)
 661
 662/*
 663 * ARMv7 low level PMNC access
 664 */
 665
 666/*
 667 * Perf Event to low level counters mapping
 668 */
 669#define ARMV7_IDX_TO_COUNTER(x) \
 670        (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
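/*
 * For illustration: event counter indices map down by one, so
 * ARMV7_IDX_COUNTER0 (1) selects hardware counter 0, while the cycle
 * counter index (0) wraps to 31, matching the bit used for the cycle
 * counter in the enable/overflow registers below.
 */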
 671
 672/*
 673 * Per-CPU PMNC: config reg
 674 */
 675#define ARMV7_PMNC_E            (1 << 0) /* Enable all counters */
 676#define ARMV7_PMNC_P            (1 << 1) /* Reset all counters */
 677#define ARMV7_PMNC_C            (1 << 2) /* Cycle counter reset */
 678#define ARMV7_PMNC_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
 679#define ARMV7_PMNC_X            (1 << 4) /* Export to ETM */
 680#define ARMV7_PMNC_DP           (1 << 5) /* Disable CCNT if non-invasive debug*/
 681#define ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
 682#define ARMV7_PMNC_N_MASK       0x1f
 683#define ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */
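/*
 * The N field (bits [15:11], i.e. ARMV7_PMNC_N_SHIFT/ARMV7_PMNC_N_MASK)
 * holds the number of event counters implemented; it is read by
 * armv7_read_num_pmnc_events() below to size cpu_pmu->num_events.
 */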
 684
 685/*
 686 * FLAG: counters overflow flag status reg
 687 */
 688#define ARMV7_FLAG_MASK         0xffffffff      /* Mask for writable bits */
 689#define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK
 690
 691/*
 692 * PMXEVTYPER: Event selection reg
 693 */
 694#define ARMV7_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
 695#define ARMV7_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
 696
 697/*
 698 * Event filters for PMUv2
 699 */
 700#define ARMV7_EXCLUDE_PL1       (1 << 31)
 701#define ARMV7_EXCLUDE_USER      (1 << 30)
 702#define ARMV7_INCLUDE_HYP       (1 << 27)
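/*
 * Roughly how these fit together: armv7pmu_enable_event() writes
 * hwc->config_base via armv7_pmnc_write_evtsel(), so the evtsel value is
 * the 8-bit event number in [7:0] plus, on PMUv2 parts, any
 * ARMV7_EXCLUDE_PL1, ARMV7_EXCLUDE_USER or ARMV7_INCLUDE_HYP filter bits
 * set up by armv7pmu_set_event_filter(); ARMV7_EVTYPE_MASK (0xc80000ff)
 * keeps exactly those writable fields.
 */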
 703
 704/*
 705 * Secure debug enable reg
 706 */
 707#define ARMV7_SDER_SUNIDEN      BIT(1) /* Permit non-invasive debug */
 708
 709static inline u32 armv7_pmnc_read(void)
 710{
 711        u32 val;
 712        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 713        return val;
 714}
 715
 716static inline void armv7_pmnc_write(u32 val)
 717{
 718        val &= ARMV7_PMNC_MASK;
 719        isb();
 720        asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 721}
 722
 723static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 724{
 725        return pmnc & ARMV7_OVERFLOWED_MASK;
 726}
 727
 728static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 729{
 730        return idx >= ARMV7_IDX_CYCLE_COUNTER &&
 731                idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
 732}
 733
 734static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 735{
 736        return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
 737}
 738
 739static inline void armv7_pmnc_select_counter(int idx)
 740{
 741        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 742        asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 743        isb();
 744}
 745
 746static inline u32 armv7pmu_read_counter(struct perf_event *event)
 747{
 748        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 749        struct hw_perf_event *hwc = &event->hw;
 750        int idx = hwc->idx;
 751        u32 value = 0;
 752
 753        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 754                pr_err("CPU%u reading wrong counter %d\n",
 755                        smp_processor_id(), idx);
 756        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 757                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 758        } else {
 759                armv7_pmnc_select_counter(idx);
 760                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 761        }
 762
 763        return value;
 764}
 765
 766static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
 767{
 768        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 769        struct hw_perf_event *hwc = &event->hw;
 770        int idx = hwc->idx;
 771
 772        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 773                pr_err("CPU%u writing wrong counter %d\n",
 774                        smp_processor_id(), idx);
 775        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 776                asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 777        } else {
 778                armv7_pmnc_select_counter(idx);
 779                asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 780        }
 781}
 782
 783static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 784{
 785        armv7_pmnc_select_counter(idx);
 786        val &= ARMV7_EVTYPE_MASK;
 787        asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 788}
 789
 790static inline void armv7_pmnc_enable_counter(int idx)
 791{
 792        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 793        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 794}
 795
 796static inline void armv7_pmnc_disable_counter(int idx)
 797{
 798        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 799        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 800}
 801
 802static inline void armv7_pmnc_enable_intens(int idx)
 803{
 804        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 805        asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 806}
 807
 808static inline void armv7_pmnc_disable_intens(int idx)
 809{
 810        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 811        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 812        isb();
 813        /* Clear the overflow flag in case an interrupt is pending. */
 814        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 815        isb();
 816}
 817
 818static inline u32 armv7_pmnc_getreset_flags(void)
 819{
 820        u32 val;
 821
 822        /* Read */
 823        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 824
 825        /* Write to clear flags */
 826        val &= ARMV7_FLAG_MASK;
 827        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 828
 829        return val;
 830}
 831
 832#ifdef DEBUG
 833static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 834{
 835        u32 val;
 836        unsigned int cnt;
 837
 838        pr_info("PMNC registers dump:\n");
 839
 840        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 841        pr_info("PMNC  =0x%08x\n", val);
 842
 843        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 844        pr_info("CNTENS=0x%08x\n", val);
 845
 846        asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 847        pr_info("INTENS=0x%08x\n", val);
 848
 849        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 850        pr_info("FLAGS =0x%08x\n", val);
 851
 852        asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 853        pr_info("SELECT=0x%08x\n", val);
 854
 855        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 856        pr_info("CCNT  =0x%08x\n", val);
 857
 858        for (cnt = ARMV7_IDX_COUNTER0;
 859                        cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
 860                armv7_pmnc_select_counter(cnt);
 861                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 862                pr_info("CNT[%d] count =0x%08x\n",
 863                        ARMV7_IDX_TO_COUNTER(cnt), val);
 864                asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 865                pr_info("CNT[%d] evtsel=0x%08x\n",
 866                        ARMV7_IDX_TO_COUNTER(cnt), val);
 867        }
 868}
 869#endif
 870
 871static void armv7pmu_enable_event(struct perf_event *event)
 872{
 873        unsigned long flags;
 874        struct hw_perf_event *hwc = &event->hw;
 875        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 876        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 877        int idx = hwc->idx;
 878
 879        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 880                pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 881                        smp_processor_id(), idx);
 882                return;
 883        }
 884
 885        /*
 886         * Enable counter and interrupt, and set the counter to count
 887         * the event that we're interested in.
 888         */
 889        raw_spin_lock_irqsave(&events->pmu_lock, flags);
 890
 891        /*
 892         * Disable counter
 893         */
 894        armv7_pmnc_disable_counter(idx);
 895
 896        /*
 897         * Set event (if destined for PMNx counters)
 898         * We only need to set the event for the cycle counter if we
 899         * have the ability to perform event filtering.
 900         */
 901        if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 902                armv7_pmnc_write_evtsel(idx, hwc->config_base);
 903
 904        /*
 905         * Enable interrupt for this counter
 906         */
 907        armv7_pmnc_enable_intens(idx);
 908
 909        /*
 910         * Enable counter
 911         */
 912        armv7_pmnc_enable_counter(idx);
 913
 914        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 915}
 916
 917static void armv7pmu_disable_event(struct perf_event *event)
 918{
 919        unsigned long flags;
 920        struct hw_perf_event *hwc = &event->hw;
 921        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 922        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 923        int idx = hwc->idx;
 924
 925        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 926                pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 927                        smp_processor_id(), idx);
 928                return;
 929        }
 930
 931        /*
 932         * Disable counter and interrupt
 933         */
 934        raw_spin_lock_irqsave(&events->pmu_lock, flags);
 935
 936        /*
 937         * Disable counter
 938         */
 939        armv7_pmnc_disable_counter(idx);
 940
 941        /*
 942         * Disable interrupt for this counter
 943         */
 944        armv7_pmnc_disable_intens(idx);
 945
 946        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 947}
 948
 949static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 950{
 951        u32 pmnc;
 952        struct perf_sample_data data;
 953        struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 954        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 955        struct pt_regs *regs;
 956        int idx;
 957
 958        /*
 959         * Get and reset the IRQ flags
 960         */
 961        pmnc = armv7_pmnc_getreset_flags();
 962
 963        /*
 964         * Did an overflow occur?
 965         */
 966        if (!armv7_pmnc_has_overflowed(pmnc))
 967                return IRQ_NONE;
 968
 969        /*
 970         * Handle the counter(s) overflow(s)
 971         */
 972        regs = get_irq_regs();
 973
 974        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 975                struct perf_event *event = cpuc->events[idx];
 976                struct hw_perf_event *hwc;
 977
 978                /* Ignore if we don't have an event. */
 979                if (!event)
 980                        continue;
 981
 982                /*
 983                 * We have a single interrupt for all counters. Check that
 984                 * each counter has overflowed before we process it.
 985                 */
 986                if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
 987                        continue;
 988
 989                hwc = &event->hw;
 990                armpmu_event_update(event);
 991                perf_sample_data_init(&data, 0, hwc->last_period);
 992                if (!armpmu_event_set_period(event))
 993                        continue;
 994
 995                if (perf_event_overflow(event, &data, regs))
 996                        cpu_pmu->disable(event);
 997        }
 998
 999        /*
1000         * Handle the pending perf events.
1001         *
1002         * Note: this call *must* be run with interrupts disabled. For
1003         * platforms that can have the PMU interrupts raised as an NMI, this
1004         * will not work.
1005         */
1006        irq_work_run();
1007
1008        return IRQ_HANDLED;
1009}
1010
1011static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1012{
1013        unsigned long flags;
1014        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1015
1016        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1017        /* Enable all counters */
1018        armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1019        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1020}
1021
1022static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1023{
1024        unsigned long flags;
1025        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1026
1027        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1028        /* Disable all counters */
1029        armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1030        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1031}
1032
1033static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1034                                  struct perf_event *event)
1035{
1036        int idx;
1037        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1038        struct hw_perf_event *hwc = &event->hw;
1039        unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1040
1041        /* Always place a cycle-counter event in the cycle counter. */
1042        if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1043                if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1044                        return -EAGAIN;
1045
1046                return ARMV7_IDX_CYCLE_COUNTER;
1047        }
1048
1049        /*
1050         * For anything other than a cycle counter, try to use
1051         * the event counters.
1052         */
1053        for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1054                if (!test_and_set_bit(idx, cpuc->used_mask))
1055                        return idx;
1056        }
1057
1058        /* The counters are all in use. */
1059        return -EAGAIN;
1060}
1061
1062/*
1063 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1064 */
1065static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1066                                     struct perf_event_attr *attr)
1067{
1068        unsigned long config_base = 0;
1069
1070        if (attr->exclude_idle)
1071                return -EPERM;
1072        if (attr->exclude_user)
1073                config_base |= ARMV7_EXCLUDE_USER;
1074        if (attr->exclude_kernel)
1075                config_base |= ARMV7_EXCLUDE_PL1;
1076        if (!attr->exclude_hv)
1077                config_base |= ARMV7_INCLUDE_HYP;
1078
1079        /*
1080         * Install the filter into config_base as this is used to
1081         * construct the event type.
1082         */
1083        event->config_base = config_base;
1084
1085        return 0;
1086}
1087
1088static void armv7pmu_reset(void *info)
1089{
1090        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1091        u32 idx, nb_cnt = cpu_pmu->num_events, val;
1092
1093        if (cpu_pmu->secure_access) {
1094                asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1095                val |= ARMV7_SDER_SUNIDEN;
1096                asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1097        }
1098
1099        /* The counter and interrupt enable registers are unknown at reset. */
1100        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1101                armv7_pmnc_disable_counter(idx);
1102                armv7_pmnc_disable_intens(idx);
1103        }
1104
1105        /* Initialize & Reset PMNC: C and P bits */
1106        armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1107}
1108
1109static int armv7_a8_map_event(struct perf_event *event)
1110{
1111        return armpmu_map_event(event, &armv7_a8_perf_map,
1112                                &armv7_a8_perf_cache_map, 0xFF);
1113}
1114
1115static int armv7_a9_map_event(struct perf_event *event)
1116{
1117        return armpmu_map_event(event, &armv7_a9_perf_map,
1118                                &armv7_a9_perf_cache_map, 0xFF);
1119}
1120
1121static int armv7_a5_map_event(struct perf_event *event)
1122{
1123        return armpmu_map_event(event, &armv7_a5_perf_map,
1124                                &armv7_a5_perf_cache_map, 0xFF);
1125}
1126
1127static int armv7_a15_map_event(struct perf_event *event)
1128{
1129        return armpmu_map_event(event, &armv7_a15_perf_map,
1130                                &armv7_a15_perf_cache_map, 0xFF);
1131}
1132
1133static int armv7_a7_map_event(struct perf_event *event)
1134{
1135        return armpmu_map_event(event, &armv7_a7_perf_map,
1136                                &armv7_a7_perf_cache_map, 0xFF);
1137}
1138
1139static int armv7_a12_map_event(struct perf_event *event)
1140{
1141        return armpmu_map_event(event, &armv7_a12_perf_map,
1142                                &armv7_a12_perf_cache_map, 0xFF);
1143}
1144
1145static int krait_map_event(struct perf_event *event)
1146{
1147        return armpmu_map_event(event, &krait_perf_map,
1148                                &krait_perf_cache_map, 0xFFFFF);
1149}
1150
1151static int krait_map_event_no_branch(struct perf_event *event)
1152{
1153        return armpmu_map_event(event, &krait_perf_map_no_branch,
1154                                &krait_perf_cache_map, 0xFFFFF);
1155}
1156
1157static int scorpion_map_event(struct perf_event *event)
1158{
1159        return armpmu_map_event(event, &scorpion_perf_map,
1160                                &scorpion_perf_cache_map, 0xFFFFF);
1161}
1162
1163static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1164{
1165        cpu_pmu->handle_irq     = armv7pmu_handle_irq;
1166        cpu_pmu->enable         = armv7pmu_enable_event;
1167        cpu_pmu->disable        = armv7pmu_disable_event;
1168        cpu_pmu->read_counter   = armv7pmu_read_counter;
1169        cpu_pmu->write_counter  = armv7pmu_write_counter;
1170        cpu_pmu->get_event_idx  = armv7pmu_get_event_idx;
1171        cpu_pmu->start          = armv7pmu_start;
1172        cpu_pmu->stop           = armv7pmu_stop;
1173        cpu_pmu->reset          = armv7pmu_reset;
1174        cpu_pmu->max_period     = (1LLU << 32) - 1;
1175}
1176
1177static void armv7_read_num_pmnc_events(void *info)
1178{
1179        int *nb_cnt = info;
1180
1181        /* Read the nb of CNTx counters supported from PMNC */
1182        *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1183
1184        /* Add the CPU cycles counter */
1185        *nb_cnt += 1;
1186}
1187
1188static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1189{
1190        return smp_call_function_any(&arm_pmu->supported_cpus,
1191                                     armv7_read_num_pmnc_events,
1192                                     &arm_pmu->num_events, 1);
1193}
1194
1195static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1196{
1197        armv7pmu_init(cpu_pmu);
1198        cpu_pmu->name           = "armv7_cortex_a8";
1199        cpu_pmu->map_event      = armv7_a8_map_event;
1200        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1201                &armv7_pmuv1_events_attr_group;
1202        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1203                &armv7_pmu_format_attr_group;
1204        return armv7_probe_num_events(cpu_pmu);
1205}
1206
1207static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1208{
1209        armv7pmu_init(cpu_pmu);
1210        cpu_pmu->name           = "armv7_cortex_a9";
1211        cpu_pmu->map_event      = armv7_a9_map_event;
1212        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1213                &armv7_pmuv1_events_attr_group;
1214        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1215                &armv7_pmu_format_attr_group;
1216        return armv7_probe_num_events(cpu_pmu);
1217}
1218
1219static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1220{
1221        armv7pmu_init(cpu_pmu);
1222        cpu_pmu->name           = "armv7_cortex_a5";
1223        cpu_pmu->map_event      = armv7_a5_map_event;
1224        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1225                &armv7_pmuv1_events_attr_group;
1226        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1227                &armv7_pmu_format_attr_group;
1228        return armv7_probe_num_events(cpu_pmu);
1229}
1230
1231static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1232{
1233        armv7pmu_init(cpu_pmu);
1234        cpu_pmu->name           = "armv7_cortex_a15";
1235        cpu_pmu->map_event      = armv7_a15_map_event;
1236        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1237        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1238                &armv7_pmuv2_events_attr_group;
1239        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1240                &armv7_pmu_format_attr_group;
1241        return armv7_probe_num_events(cpu_pmu);
1242}
1243
1244static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1245{
1246        armv7pmu_init(cpu_pmu);
1247        cpu_pmu->name           = "armv7_cortex_a7";
1248        cpu_pmu->map_event      = armv7_a7_map_event;
1249        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1250        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1251                &armv7_pmuv2_events_attr_group;
1252        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1253                &armv7_pmu_format_attr_group;
1254        return armv7_probe_num_events(cpu_pmu);
1255}
1256
1257static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1258{
1259        armv7pmu_init(cpu_pmu);
1260        cpu_pmu->name           = "armv7_cortex_a12";
1261        cpu_pmu->map_event      = armv7_a12_map_event;
1262        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1263        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1264                &armv7_pmuv2_events_attr_group;
1265        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1266                &armv7_pmu_format_attr_group;
1267        return armv7_probe_num_events(cpu_pmu);
1268}
1269
1270static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1271{
1272        int ret = armv7_a12_pmu_init(cpu_pmu);
1273        cpu_pmu->name = "armv7_cortex_a17";
1274        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1275                &armv7_pmuv2_events_attr_group;
1276        cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1277                &armv7_pmu_format_attr_group;
1278        return ret;
1279}
1280
1281/*
1282 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1283 *
1284 *            31   30     24     16     8      0
1285 *            +--------------------------------+
1286 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1287 *            +--------------------------------+
1288 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1289 *            +--------------------------------+
1290 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1291 *            +--------------------------------+
1292 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1293 *            +--------------------------------+
1294 *              EN | G=3  | G=2  | G=1  | G=0
1295 *
1296 *  Event Encoding:
1297 *
1298 *      hwc->config_base = 0xNRCCG
1299 *
1300 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1301 *      R  = region register
1302 *      CC = class of events the group G is choosing from
1303 *      G  = group or particular event
1304 *
1305 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1306 *
1307 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1308 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1309 *  events (interrupts for example). An event code is broken down into
1310 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1311 *  example).
1312 */
1313
1314#define KRAIT_EVENT             (1 << 16)
1315#define VENUM_EVENT             (2 << 16)
1316#define KRAIT_EVENT_MASK        (KRAIT_EVENT | VENUM_EVENT)
1317#define PMRESRn_EN              BIT(31)
1318
1319#define EVENT_REGION(event)     (((event) >> 12) & 0xf)         /* R */
1320#define EVENT_GROUP(event)      ((event) & 0xf)                 /* G */
1321#define EVENT_CODE(event)       (((event) >> 4) & 0xff)         /* CC */
1322#define EVENT_VENUM(event)      (!!((event) & VENUM_EVENT))     /* N=2 */
1323#define EVENT_CPU(event)        (!!((event) & KRAIT_EVENT))     /* N=1 */
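
/*
 * Worked example (illustrative): for the encoding 0x12021 mentioned above,
 * the helpers decode as
 *
 *      EVENT_CPU(0x12021)    = 1       N = 1, a Krait CPU event
 *      EVENT_REGION(0x12021) = 2       R = 2, i.e. PMRESR2
 *      EVENT_CODE(0x12021)   = 0x02    CC = 2
 *      EVENT_GROUP(0x12021)  = 1       G = 1
 */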
1324
1325static u32 krait_read_pmresrn(int n)
1326{
1327        u32 val;
1328
1329        switch (n) {
1330        case 0:
1331                asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1332                break;
1333        case 1:
1334                asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1335                break;
1336        case 2:
1337                asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1338                break;
1339        default:
1340                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1341        }
1342
1343        return val;
1344}
1345
1346static void krait_write_pmresrn(int n, u32 val)
1347{
1348        switch (n) {
1349        case 0:
1350                asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1351                break;
1352        case 1:
1353                asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1354                break;
1355        case 2:
1356                asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1357                break;
1358        default:
1359                BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1360        }
1361}
1362
1363static u32 venum_read_pmresr(void)
1364{
1365        u32 val;
1366        asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1367        return val;
1368}
1369
1370static void venum_write_pmresr(u32 val)
1371{
1372        asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1373}
1374
1375static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1376{
1377        u32 venum_new_val;
1378        u32 fp_new_val;
1379
1380        BUG_ON(preemptible());
1381        /* CPACR Enable CP10 and CP11 access */
1382        *venum_orig_val = get_copro_access();
1383        venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1384        set_copro_access(venum_new_val);
1385
1386        /* Enable FPEXC */
1387        *fp_orig_val = fmrx(FPEXC);
1388        fp_new_val = *fp_orig_val | FPEXC_EN;
1389        fmxr(FPEXC, fp_new_val);
1390}
1391
1392static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1393{
1394        BUG_ON(preemptible());
1395        /* Restore FPEXC */
1396        fmxr(FPEXC, fp_orig_val);
1397        isb();
1398        /* Restore CPACR */
1399        set_copro_access(venum_orig_val);
1400}
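
/*
 * Usage note: VPMRESR is accessed through the Venum (VFP) coprocessor, so
 * reads and writes of it are bracketed by venum_pre_pmresr() and
 * venum_post_pmresr() with preemption disabled, as the BUG_ON()s above
 * require; see krait_evt_setup() below for the pattern.
 */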
1401
1402static u32 krait_get_pmresrn_event(unsigned int region)
1403{
1404        static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1405                                             KRAIT_PMRESR1_GROUP0,
1406                                             KRAIT_PMRESR2_GROUP0 };
1407        return pmresrn_table[region];
1408}
1409
1410static void krait_evt_setup(int idx, u32 config_base)
1411{
1412        u32 val;
1413        u32 mask;
1414        u32 vval, fval;
1415        unsigned int region = EVENT_REGION(config_base);
1416        unsigned int group = EVENT_GROUP(config_base);
1417        unsigned int code = EVENT_CODE(config_base);
1418        unsigned int group_shift;
1419        bool venum_event = EVENT_VENUM(config_base);
1420
1421        group_shift = group * 8;
1422        mask = 0xff << group_shift;
1423
1424        /* Configure evtsel for the region and group */
1425        if (venum_event)
1426                val = KRAIT_VPMRESR0_GROUP0;
1427        else
1428                val = krait_get_pmresrn_event(region);
1429        val += group;
1430        /* Mix in mode-exclusion bits */
1431        val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1432        armv7_pmnc_write_evtsel(idx, val);
1433
1434        if (venum_event) {
1435                venum_pre_pmresr(&vval, &fval);
1436                val = venum_read_pmresr();
1437                val &= ~mask;
1438                val |= code << group_shift;
1439                val |= PMRESRn_EN;
1440                venum_write_pmresr(val);
1441                venum_post_pmresr(vval, fval);
1442        } else {
1443                val = krait_read_pmresrn(region);
1444                val &= ~mask;
1445                val |= code << group_shift;
1446                val |= PMRESRn_EN;
1447                krait_write_pmresrn(region, val);
1448        }
1449}
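
/*
 * Illustrative example: with config_base = 0x12021 (PMRESR2, group 1,
 * code 2) the setup above programs
 *
 *      evtsel  = KRAIT_PMRESR2_GROUP0 + 1, plus any mode-exclusion bits
 *                carried in config_base
 *      PMRESR2 = (PMRESR2 & ~(0xff << 8)) | (0x02 << 8) | PMRESRn_EN
 */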
1450
1451static u32 clear_pmresrn_group(u32 val, int group)
1452{
1453        u32 mask;
1454        int group_shift;
1455
1456        group_shift = group * 8;
1457        mask = 0xff << group_shift;
1458        val &= ~mask;
1459
1460        /* Keep the enable bit if any other group in this region is still in use */
1461        if (val & ~PMRESRn_EN)
1462                return val | PMRESRn_EN;
1463
1464        return 0;
1465}
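
/*
 * For example (illustrative): if only group 1 was programmed, clearing it
 * leaves no other event bits set and the whole register collapses to 0;
 * if another group is still in use, its code survives and PMRESRn_EN is
 * kept so that group keeps counting.
 */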
1466
1467static void krait_clearpmu(u32 config_base)
1468{
1469        u32 val;
1470        u32 vval, fval;
1471        unsigned int region = EVENT_REGION(config_base);
1472        unsigned int group = EVENT_GROUP(config_base);
1473        bool venum_event = EVENT_VENUM(config_base);
1474
1475        if (venum_event) {
1476                venum_pre_pmresr(&vval, &fval);
1477                val = venum_read_pmresr();
1478                val = clear_pmresrn_group(val, group);
1479                venum_write_pmresr(val);
1480                venum_post_pmresr(vval, fval);
1481        } else {
1482                val = krait_read_pmresrn(region);
1483                val = clear_pmresrn_group(val, group);
1484                krait_write_pmresrn(region, val);
1485        }
1486}
1487
1488static void krait_pmu_disable_event(struct perf_event *event)
1489{
1490        unsigned long flags;
1491        struct hw_perf_event *hwc = &event->hw;
1492        int idx = hwc->idx;
1493        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1494        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1495
1496        /* Disable counter and interrupt */
1497        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1498
1499        /* Disable counter */
1500        armv7_pmnc_disable_counter(idx);
1501
1502        /*
1503         * Clear pmresr code (if destined for PMNx counters)
1504         */
1505        if (hwc->config_base & KRAIT_EVENT_MASK)
1506                krait_clearpmu(hwc->config_base);
1507
1508        /* Disable interrupt for this counter */
1509        armv7_pmnc_disable_intens(idx);
1510
1511        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1512}
1513
1514static void krait_pmu_enable_event(struct perf_event *event)
1515{
1516        unsigned long flags;
1517        struct hw_perf_event *hwc = &event->hw;
1518        int idx = hwc->idx;
1519        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1520        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1521
1522        /*
1523         * Enable counter and interrupt, and set the counter to count
1524         * the event that we're interested in.
1525         */
1526        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1527
1528        /* Disable counter */
1529        armv7_pmnc_disable_counter(idx);
1530
1531        /*
1532         * Set event (if destined for PMNx counters)
1533         * We set the event for the cycle counter because we
1534         * have the ability to perform event filtering.
1535         */
1536        if (hwc->config_base & KRAIT_EVENT_MASK)
1537                krait_evt_setup(idx, hwc->config_base);
1538        else
1539                armv7_pmnc_write_evtsel(idx, hwc->config_base);
1540
1541        /* Enable interrupt for this counter */
1542        armv7_pmnc_enable_intens(idx);
1543
1544        /* Enable counter */
1545        armv7_pmnc_enable_counter(idx);
1546
1547        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1548}
1549
1550static void krait_pmu_reset(void *info)
1551{
1552        u32 vval, fval;
1553        struct arm_pmu *cpu_pmu = info;
1554        u32 idx, nb_cnt = cpu_pmu->num_events;
1555
1556        armv7pmu_reset(info);
1557
1558        /* Clear all pmresrs */
1559        krait_write_pmresrn(0, 0);
1560        krait_write_pmresrn(1, 0);
1561        krait_write_pmresrn(2, 0);
1562
1563        venum_pre_pmresr(&vval, &fval);
1564        venum_write_pmresr(0);
1565        venum_post_pmresr(vval, fval);
1566
1567        /* Reset PMxEVCNTCR to a sane default */
1568        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1569                armv7_pmnc_select_counter(idx);
1570                asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1571        }
1572
1573}
1574
1575static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1576                              unsigned int group)
1577{
1578        int bit;
1579        struct hw_perf_event *hwc = &event->hw;
1580        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1581
1582        if (hwc->config_base & VENUM_EVENT)
1583                bit = KRAIT_VPMRESR0_GROUP0;
1584        else
1585                bit = krait_get_pmresrn_event(region);
1586        bit -= krait_get_pmresrn_event(0);
1587        bit += group;
1588        /*
1589         * Lower bits are reserved for use by the counters (see
1590         * armv7pmu_get_event_idx() for more info)
1591         */
1592        bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1593
1594        return bit;
1595}
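
/*
 * In other words (illustrative): each region/group pair maps to one bit of
 * cpuc->used_mask above the range reserved for the hardware counters, e.g.
 * a CPU event in PMRESR1 group 2 claims bit
 *
 *      (KRAIT_PMRESR1_GROUP0 - KRAIT_PMRESR0_GROUP0) + 2
 *              + ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1
 */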
1596
1597/*
1598 * We check for column exclusion constraints here.
1599 * Two events can't use the same group within a pmresr register.
1600 */
1601static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1602                                   struct perf_event *event)
1603{
1604        int idx;
1605        int bit = -1;
1606        struct hw_perf_event *hwc = &event->hw;
1607        unsigned int region = EVENT_REGION(hwc->config_base);
1608        unsigned int code = EVENT_CODE(hwc->config_base);
1609        unsigned int group = EVENT_GROUP(hwc->config_base);
1610        bool venum_event = EVENT_VENUM(hwc->config_base);
1611        bool krait_event = EVENT_CPU(hwc->config_base);
1612
1613        if (venum_event || krait_event) {
1614                /* Ignore invalid events */
1615                if (group > 3 || region > 2)
1616                        return -EINVAL;
1617                if (venum_event && (code & 0xe0))
1618                        return -EINVAL;
1619
1620                bit = krait_event_to_bit(event, region, group);
1621                if (test_and_set_bit(bit, cpuc->used_mask))
1622                        return -EAGAIN;
1623        }
1624
1625        idx = armv7pmu_get_event_idx(cpuc, event);
1626        if (idx < 0 && bit >= 0)
1627                clear_bit(bit, cpuc->used_mask);
1628
1629        return idx;
1630}
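
/*
 * Column-exclusion example (illustrative): two events that both encode
 * PMRESR0 group 0 map to the same used_mask bit, so the second call to
 * test_and_set_bit() above fails and that event is rejected with -EAGAIN.
 */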
1631
1632static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1633                                      struct perf_event *event)
1634{
1635        int bit;
1636        struct hw_perf_event *hwc = &event->hw;
1637        unsigned int region = EVENT_REGION(hwc->config_base);
1638        unsigned int group = EVENT_GROUP(hwc->config_base);
1639        bool venum_event = EVENT_VENUM(hwc->config_base);
1640        bool krait_event = EVENT_CPU(hwc->config_base);
1641
1642        if (venum_event || krait_event) {
1643                bit = krait_event_to_bit(event, region, group);
1644                clear_bit(bit, cpuc->used_mask);
1645        }
1646}
1647
1648static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1649{
1650        armv7pmu_init(cpu_pmu);
1651        cpu_pmu->name           = "armv7_krait";
1652        /* Some early versions of Krait don't support PC write events */
1653        if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1654                                  "qcom,no-pc-write"))
1655                cpu_pmu->map_event = krait_map_event_no_branch;
1656        else
1657                cpu_pmu->map_event = krait_map_event;
1658        cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1659        cpu_pmu->reset          = krait_pmu_reset;
1660        cpu_pmu->enable         = krait_pmu_enable_event;
1661        cpu_pmu->disable        = krait_pmu_disable_event;
1662        cpu_pmu->get_event_idx  = krait_pmu_get_event_idx;
1663        cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1664        return armv7_probe_num_events(cpu_pmu);
1665}
1666
1667/*
1668 * Scorpion Local Performance Monitor Register (LPMn)
1669 *
1670 *            31   30     24     16     8      0
1671 *            +--------------------------------+
1672 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1673 *            +--------------------------------+
1674 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1675 *            +--------------------------------+
1676 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1677 *            +--------------------------------+
1678 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1679 *            +--------------------------------+
1680 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1681 *            +--------------------------------+
1682 *              EN | G=3  | G=2  | G=1  | G=0
1683 *
1684 *
1685 *  Event Encoding:
1686 *
1687 *      hwc->config_base = 0xNRCCG
1688 *
1689 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1690 *      R  = region register
1691 *      CC = class of events the group G is choosing from
1692 *      G  = group or particular event
1693 *
1694 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1695 *
1696 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1697 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1698 *  events (interrupts for example). An event code is broken down into
1699 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1700 *  example).
1701 */
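
/*
 * The same EVENT_REGION()/EVENT_CODE()/EVENT_GROUP() helpers defined for
 * Krait decode these encodings too; for instance (illustrative) 0x13052
 * would select the L2LPM register (R = 3), code 0x05, group 2.
 */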
1702
1703static u32 scorpion_read_pmresrn(int n)
1704{
1705        u32 val;
1706
1707        switch (n) {
1708        case 0:
1709                asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1710                break;
1711        case 1:
1712                asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1713                break;
1714        case 2:
1715                asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1716                break;
1717        case 3:
1718                asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1719                break;
1720        default:
1721                BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1722        }
1723
1724        return val;
1725}
1726
1727static void scorpion_write_pmresrn(int n, u32 val)
1728{
1729        switch (n) {
1730        case 0:
1731                asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1732                break;
1733        case 1:
1734                asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1735                break;
1736        case 2:
1737                asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1738                break;
1739        case 3:
1740                asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1741                break;
1742        default:
1743                BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1744        }
1745}
1746
1747static u32 scorpion_get_pmresrn_event(unsigned int region)
1748{
1749        static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1750                                             SCORPION_LPM1_GROUP0,
1751                                             SCORPION_LPM2_GROUP0,
1752                                             SCORPION_L2LPM_GROUP0 };
1753        return pmresrn_table[region];
1754}
1755
1756static void scorpion_evt_setup(int idx, u32 config_base)
1757{
1758        u32 val;
1759        u32 mask;
1760        u32 vval, fval;
1761        unsigned int region = EVENT_REGION(config_base);
1762        unsigned int group = EVENT_GROUP(config_base);
1763        unsigned int code = EVENT_CODE(config_base);
1764        unsigned int group_shift;
1765        bool venum_event = EVENT_VENUM(config_base);
1766
1767        group_shift = group * 8;
1768        mask = 0xff << group_shift;
1769
1770        /* Configure evtsel for the region and group */
1771        if (venum_event)
1772                val = SCORPION_VLPM_GROUP0;
1773        else
1774                val = scorpion_get_pmresrn_event(region);
1775        val += group;
1776        /* Mix in mode-exclusion bits */
1777        val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1778        armv7_pmnc_write_evtsel(idx, val);
1779
1780        asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1781
1782        if (venum_event) {
1783                venum_pre_pmresr(&vval, &fval);
1784                val = venum_read_pmresr();
1785                val &= ~mask;
1786                val |= code << group_shift;
1787                val |= PMRESRn_EN;
1788                venum_write_pmresr(val);
1789                venum_post_pmresr(vval, fval);
1790        } else {
1791                val = scorpion_read_pmresrn(region);
1792                val &= ~mask;
1793                val |= code << group_shift;
1794                val |= PMRESRn_EN;
1795                scorpion_write_pmresrn(region, val);
1796        }
1797}
1798
1799static void scorpion_clearpmu(u32 config_base)
1800{
1801        u32 val;
1802        u32 vval, fval;
1803        unsigned int region = EVENT_REGION(config_base);
1804        unsigned int group = EVENT_GROUP(config_base);
1805        bool venum_event = EVENT_VENUM(config_base);
1806
1807        if (venum_event) {
1808                venum_pre_pmresr(&vval, &fval);
1809                val = venum_read_pmresr();
1810                val = clear_pmresrn_group(val, group);
1811                venum_write_pmresr(val);
1812                venum_post_pmresr(vval, fval);
1813        } else {
1814                val = scorpion_read_pmresrn(region);
1815                val = clear_pmresrn_group(val, group);
1816                scorpion_write_pmresrn(region, val);
1817        }
1818}
1819
1820static void scorpion_pmu_disable_event(struct perf_event *event)
1821{
1822        unsigned long flags;
1823        struct hw_perf_event *hwc = &event->hw;
1824        int idx = hwc->idx;
1825        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1826        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1827
1828        /* Disable counter and interrupt */
1829        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1830
1831        /* Disable counter */
1832        armv7_pmnc_disable_counter(idx);
1833
1834        /*
1835         * Clear pmresr code (if destined for PMNx counters)
1836         */
1837        if (hwc->config_base & KRAIT_EVENT_MASK)
1838                scorpion_clearpmu(hwc->config_base);
1839
1840        /* Disable interrupt for this counter */
1841        armv7_pmnc_disable_intens(idx);
1842
1843        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1844}
1845
1846static void scorpion_pmu_enable_event(struct perf_event *event)
1847{
1848        unsigned long flags;
1849        struct hw_perf_event *hwc = &event->hw;
1850        int idx = hwc->idx;
1851        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1852        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1853
1854        /*
1855         * Enable counter and interrupt, and set the counter to count
1856         * the event that we're interested in.
1857         */
1858        raw_spin_lock_irqsave(&events->pmu_lock, flags);
1859
1860        /* Disable counter */
1861        armv7_pmnc_disable_counter(idx);
1862
1863        /*
1864         * Set event (if destined for PMNx counters)
1865         * We don't set the event for the cycle counter because we
1866         * don't have the ability to perform event filtering.
1867         */
1868        if (hwc->config_base & KRAIT_EVENT_MASK)
1869                scorpion_evt_setup(idx, hwc->config_base);
1870        else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1871                armv7_pmnc_write_evtsel(idx, hwc->config_base);
1872
1873        /* Enable interrupt for this counter */
1874        armv7_pmnc_enable_intens(idx);
1875
1876        /* Enable counter */
1877        armv7_pmnc_enable_counter(idx);
1878
1879        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1880}
1881
1882static void scorpion_pmu_reset(void *info)
1883{
1884        u32 vval, fval;
1885        struct arm_pmu *cpu_pmu = info;
1886        u32 idx, nb_cnt = cpu_pmu->num_events;
1887
1888        armv7pmu_reset(info);
1889
1890        /* Clear all pmresrs */
1891        scorpion_write_pmresrn(0, 0);
1892        scorpion_write_pmresrn(1, 0);
1893        scorpion_write_pmresrn(2, 0);
1894        scorpion_write_pmresrn(3, 0);
1895
1896        venum_pre_pmresr(&vval, &fval);
1897        venum_write_pmresr(0);
1898        venum_post_pmresr(vval, fval);
1899
1900        /* Reset PMxEVCNTCR to a sane default */
1901        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1902                armv7_pmnc_select_counter(idx);
1903                asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1904        }
1905}
1906
1907static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1908                              unsigned int group)
1909{
1910        int bit;
1911        struct hw_perf_event *hwc = &event->hw;
1912        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1913
1914        if (hwc->config_base & VENUM_EVENT)
1915                bit = SCORPION_VLPM_GROUP0;
1916        else
1917                bit = scorpion_get_pmresrn_event(region);
1918        bit -= scorpion_get_pmresrn_event(0);
1919        bit += group;
1920        /*
1921         * Lower bits are reserved for use by the counters (see
1922         * armv7pmu_get_event_idx() for more info)
1923         */
1924        bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1925
1926        return bit;
1927}
1928
1929/*
1930 * We check for column exclusion constraints here.
1931 * Two events can't use the same group within a pmresr register.
1932 */
1933static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1934                                   struct perf_event *event)
1935{
1936        int idx;
1937        int bit = -1;
1938        struct hw_perf_event *hwc = &event->hw;
1939        unsigned int region = EVENT_REGION(hwc->config_base);
1940        unsigned int group = EVENT_GROUP(hwc->config_base);
1941        bool venum_event = EVENT_VENUM(hwc->config_base);
1942        bool scorpion_event = EVENT_CPU(hwc->config_base);
1943
1944        if (venum_event || scorpion_event) {
1945                /* Ignore invalid events */
1946                if (group > 3 || region > 3)
1947                        return -EINVAL;
1948
1949                bit = scorpion_event_to_bit(event, region, group);
1950                if (test_and_set_bit(bit, cpuc->used_mask))
1951                        return -EAGAIN;
1952        }
1953
1954        idx = armv7pmu_get_event_idx(cpuc, event);
1955        if (idx < 0 && bit >= 0)
1956                clear_bit(bit, cpuc->used_mask);
1957
1958        return idx;
1959}
1960
1961static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1962                                      struct perf_event *event)
1963{
1964        int bit;
1965        struct hw_perf_event *hwc = &event->hw;
1966        unsigned int region = EVENT_REGION(hwc->config_base);
1967        unsigned int group = EVENT_GROUP(hwc->config_base);
1968        bool venum_event = EVENT_VENUM(hwc->config_base);
1969        bool scorpion_event = EVENT_CPU(hwc->config_base);
1970
1971        if (venum_event || scorpion_event) {
1972                bit = scorpion_event_to_bit(event, region, group);
1973                clear_bit(bit, cpuc->used_mask);
1974        }
1975}
1976
1977static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1978{
1979        armv7pmu_init(cpu_pmu);
1980        cpu_pmu->name           = "armv7_scorpion";
1981        cpu_pmu->map_event      = scorpion_map_event;
1982        cpu_pmu->reset          = scorpion_pmu_reset;
1983        cpu_pmu->enable         = scorpion_pmu_enable_event;
1984        cpu_pmu->disable        = scorpion_pmu_disable_event;
1985        cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
1986        cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1987        return armv7_probe_num_events(cpu_pmu);
1988}
1989
1990static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1991{
1992        armv7pmu_init(cpu_pmu);
1993        cpu_pmu->name           = "armv7_scorpion_mp";
1994        cpu_pmu->map_event      = scorpion_map_event;
1995        cpu_pmu->reset          = scorpion_pmu_reset;
1996        cpu_pmu->enable         = scorpion_pmu_enable_event;
1997        cpu_pmu->disable        = scorpion_pmu_disable_event;
1998        cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
1999        cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
2000        return armv7_probe_num_events(cpu_pmu);
2001}
2002
2003static const struct of_device_id armv7_pmu_of_device_ids[] = {
2004        {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
2005        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
2006        {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
2007        {.compatible = "arm,cortex-a9-pmu",     .data = armv7_a9_pmu_init},
2008        {.compatible = "arm,cortex-a8-pmu",     .data = armv7_a8_pmu_init},
2009        {.compatible = "arm,cortex-a7-pmu",     .data = armv7_a7_pmu_init},
2010        {.compatible = "arm,cortex-a5-pmu",     .data = armv7_a5_pmu_init},
2011        {.compatible = "qcom,krait-pmu",        .data = krait_pmu_init},
2012        {.compatible = "qcom,scorpion-pmu",     .data = scorpion_pmu_init},
2013        {.compatible = "qcom,scorpion-mp-pmu",  .data = scorpion_mp_pmu_init},
2014        {},
2015};
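
/*
 * Hypothetical devicetree node that would match this table and bind the
 * driver (the interrupt specifier shown is board-specific):
 *
 *      pmu {
 *              compatible = "arm,cortex-a9-pmu";
 *              interrupts = <0 120 4>;
 *      };
 */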
2016
2017static const struct pmu_probe_info armv7_pmu_probe_table[] = {
2018        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
2019        ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
2020        { /* sentinel value */ }
2021};
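
/*
 * Note on the probe flow: when no devicetree node describes the PMU,
 * arm_pmu_device_probe() falls back to matching the CPU's MIDR against
 * this table, which here covers only the Cortex-A8 and Cortex-A9 parts.
 */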
2022
2023
2024static int armv7_pmu_device_probe(struct platform_device *pdev)
2025{
2026        return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2027                                    armv7_pmu_probe_table);
2028}
2029
2030static struct platform_driver armv7_pmu_driver = {
2031        .driver         = {
2032                .name   = "armv7-pmu",
2033                .of_match_table = armv7_pmu_of_device_ids,
2034        },
2035        .probe          = armv7_pmu_device_probe,
2036};
2037
2038builtin_platform_driver(armv7_pmu_driver);
2039#endif  /* CONFIG_CPU_V7 */
2040