linux/arch/arm/kernel/perf_event_xscale.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *      - xscale1pmu: 2 event counters and a cycle counter
 *      - xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum xscale_perf_types {
        XSCALE_PERFCTR_ICACHE_MISS              = 0x00,
        XSCALE_PERFCTR_ICACHE_NO_DELIVER        = 0x01,
        XSCALE_PERFCTR_DATA_STALL               = 0x02,
        XSCALE_PERFCTR_ITLB_MISS                = 0x03,
        XSCALE_PERFCTR_DTLB_MISS                = 0x04,
        XSCALE_PERFCTR_BRANCH                   = 0x05,
        XSCALE_PERFCTR_BRANCH_MISS              = 0x06,
        XSCALE_PERFCTR_INSTRUCTION              = 0x07,
        XSCALE_PERFCTR_DCACHE_FULL_STALL        = 0x08,
        XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
        XSCALE_PERFCTR_DCACHE_ACCESS            = 0x0A,
        XSCALE_PERFCTR_DCACHE_MISS              = 0x0B,
        XSCALE_PERFCTR_DCACHE_WRITE_BACK        = 0x0C,
        XSCALE_PERFCTR_PC_CHANGED               = 0x0D,
        XSCALE_PERFCTR_BCU_REQUEST              = 0x10,
        XSCALE_PERFCTR_BCU_FULL                 = 0x11,
        XSCALE_PERFCTR_BCU_DRAIN                = 0x12,
        XSCALE_PERFCTR_BCU_ECC_NO_ELOG          = 0x14,
        XSCALE_PERFCTR_BCU_1_BIT_ERR            = 0x15,
        XSCALE_PERFCTR_RMW                      = 0x16,
        /* XSCALE_PERFCTR_CCNT is not hardware defined */
        XSCALE_PERFCTR_CCNT                     = 0xFE,
        XSCALE_PERFCTR_UNUSED                   = 0xFF,
};

enum xscale_counters {
        XSCALE_CYCLE_COUNTER    = 0,
        XSCALE_COUNTER0,
        XSCALE_COUNTER1,
        XSCALE_COUNTER2,
        XSCALE_COUNTER3,
};
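
/*
 * Which counters exist depends on the variant: xscale1 has the cycle
 * counter plus COUNTER0/COUNTER1 (num_events = 3 in xscale1pmu_init()
 * below), while xscale2 adds COUNTER2/COUNTER3 (num_events = 5).
 */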

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = XSCALE_PERFCTR_CCNT,
        [PERF_COUNT_HW_INSTRUCTIONS]            = XSCALE_PERFCTR_INSTRUCTION,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = XSCALE_PERFCTR_BRANCH,
        [PERF_COUNT_HW_BRANCH_MISSES]           = XSCALE_PERFCTR_BRANCH_MISS,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                           [PERF_COUNT_HW_CACHE_OP_MAX]
                                           [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = XSCALE_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = XSCALE_PERFCTR_DCACHE_MISS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = XSCALE_PERFCTR_DCACHE_MISS,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = XSCALE_PERFCTR_ICACHE_MISS,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = XSCALE_PERFCTR_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = XSCALE_PERFCTR_DTLB_MISS,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = XSCALE_PERFCTR_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = XSCALE_PERFCTR_ITLB_MISS,
};
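
/*
 * Worked example (a sketch of the generic arm_pmu mapping flow, not code
 * in this file): a PERF_TYPE_HW_CACHE event with
 * config == C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 * resolves through this table to XSCALE_PERFCTR_DCACHE_MISS (0x0B), and
 * the chosen event number ends up in hwc->config_base for the
 * enable/disable paths below.
 */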

#define XSCALE_PMU_ENABLE       0x001
#define XSCALE_PMN_RESET        0x002
#define XSCALE_CCNT_RESET       0x004
#define XSCALE_PMU_RESET        (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64        0x008

#define XSCALE1_OVERFLOWED_MASK 0x700
#define XSCALE1_CCOUNT_OVERFLOW 0x400
#define XSCALE1_COUNT0_OVERFLOW 0x100
#define XSCALE1_COUNT1_OVERFLOW 0x200
#define XSCALE1_CCOUNT_INT_EN   0x040
#define XSCALE1_COUNT0_INT_EN   0x010
#define XSCALE1_COUNT1_INT_EN   0x020
#define XSCALE1_COUNT0_EVT_SHFT 12
#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT 20
#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
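
/*
 * Illustrative PMNC value derived from the masks above (not taken from a
 * reference manual): programming counter 0 to count
 * XSCALE_PERFCTR_DCACHE_MISS (0x0B) with its interrupt enabled while the
 * PMU is running gives
 *
 *     (0x0B << XSCALE1_COUNT0_EVT_SHFT) | XSCALE1_COUNT0_INT_EN |
 *             XSCALE_PMU_ENABLE == 0x0000b011
 *
 * which is the kind of value xscale1pmu_enable_event() composes below.
 */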

static inline u32
xscale1pmu_read_pmnc(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
        return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
        /* The upper 4 bits and bits 7 and 11 are write-as-0. */
        val &= 0xffff77f;
        asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
                                        enum xscale_counters counter)
{
        int ret = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
        }

        return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        unsigned long pmnc;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * NOTE: there's an A stepping erratum whereby, if an overflow
         *       bit is already set when another overflow occurs, the
         *       previously set bit gets cleared. There's no workaround;
         *       this is fixed in the B stepping and later.
         */
        pmnc = xscale1pmu_read_pmnc();

        /*
         * Write the value back to clear the overflow flags. Overflow
         * flags remain in pmnc for use below. We also disable the PMU
         * while we process the interrupt.
         */
        xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

        if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
                return IRQ_NONE;

        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!event)
                        continue;

                if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        irq_work_run();

        /*
         * Re-enable the PMU.
         */
        pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(pmnc);

        return IRQ_HANDLED;
}

static void xscale1pmu_enable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                mask = 0;
                evt = XSCALE1_CCOUNT_INT_EN;
                break;
        case XSCALE_COUNTER0:
                mask = XSCALE1_COUNT0_EVT_MASK;
                evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
                        XSCALE1_COUNT0_INT_EN;
                break;
        case XSCALE_COUNTER1:
                mask = XSCALE1_COUNT1_EVT_MASK;
                evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
                        XSCALE1_COUNT1_INT_EN;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~mask;
        val |= evt;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_disable_event(struct perf_event *event)
{
        unsigned long val, mask, evt, flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                mask = XSCALE1_CCOUNT_INT_EN;
                evt = 0;
                break;
        case XSCALE_COUNTER0:
                mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
                evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
                break;
        case XSCALE_COUNTER1:
                mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
                evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~mask;
        val |= evt;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

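/*
 * Counter allocation is a simple test_and_set_bit() walk over
 * cpuc->used_mask: the cycle counter is dedicated to CCNT events, and
 * everything else takes the first free event counter.
 */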
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->config_base == XSCALE_PERFCTR_CCNT) {
                if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return XSCALE_CYCLE_COUNTER;
        } else {
                if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
                        return XSCALE_COUNTER1;

                if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
                        return XSCALE_COUNTER0;

                return -EAGAIN;
        }
}

static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                     struct perf_event *event)
{
        clear_bit(event->hw.idx, cpuc->used_mask);
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val |= XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale1pmu_read_pmnc();
        val &= ~XSCALE_PMU_ENABLE;
        xscale1pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;
        u32 val = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
                break;
        }

        return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
                break;
        }
}

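/*
 * The 0xFF mask below applies to raw events (PERF_TYPE_RAW, e.g.
 * "perf stat -e rNN"), truncating the user-supplied config to the 8-bit
 * event number the hardware can encode.
 */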
static int xscale_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &xscale_perf_map,
                                &xscale_perf_cache_map, 0xFF);
}

static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->name           = "armv5_xscale1";
        cpu_pmu->handle_irq     = xscale1pmu_handle_irq;
        cpu_pmu->enable         = xscale1pmu_enable_event;
        cpu_pmu->disable        = xscale1pmu_disable_event;
        cpu_pmu->read_counter   = xscale1pmu_read_counter;
        cpu_pmu->write_counter  = xscale1pmu_write_counter;
        cpu_pmu->get_event_idx  = xscale1pmu_get_event_idx;
        cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
        cpu_pmu->start          = xscale1pmu_start;
        cpu_pmu->stop           = xscale1pmu_stop;
        cpu_pmu->map_event      = xscale_map_event;
        cpu_pmu->num_events     = 3;

        return 0;
}

#define XSCALE2_OVERFLOWED_MASK 0x01f
#define XSCALE2_CCOUNT_OVERFLOW 0x001
#define XSCALE2_COUNT0_OVERFLOW 0x002
#define XSCALE2_COUNT1_OVERFLOW 0x004
#define XSCALE2_COUNT2_OVERFLOW 0x008
#define XSCALE2_COUNT3_OVERFLOW 0x010
#define XSCALE2_CCOUNT_INT_EN   0x001
#define XSCALE2_COUNT0_INT_EN   0x002
#define XSCALE2_COUNT1_INT_EN   0x004
#define XSCALE2_COUNT2_INT_EN   0x008
#define XSCALE2_COUNT3_INT_EN   0x010
#define XSCALE2_COUNT0_EVT_SHFT 0
#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT 8
#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT 16
#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT 24
#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
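
/*
 * On xscale2 the four 8-bit event numbers are packed into a single
 * event-select (EVTSEL) register, one byte per counter. Illustration
 * derived from the shifts above: selecting XSCALE_PERFCTR_BRANCH_MISS
 * (0x06) on counter 2 while the other three counters are parked on
 * XSCALE_PERFCTR_UNUSED (0xFF) yields an EVTSEL of 0xff06ffff.
 */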

static inline u32
xscale2pmu_read_pmnc(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
        /* bits 1-2 and 4-23 are read-unpredictable */
        return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
        /* bits 4-23 are write-as-0, bits 24-31 are write-ignored */
        val &= 0xf;
        asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
        return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
        asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
        return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
        asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
        u32 val;
        asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
        return val;
}

static inline void
xscale2pmu_write_int_enable(u32 val)
{
        asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
                                        enum xscale_counters counter)
{
        int ret = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
                break;
        case XSCALE_COUNTER2:
                ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
                break;
        case XSCALE_COUNTER3:
                ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
        }

        return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        unsigned long pmnc, of_flags;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /* Disable the PMU. */
        pmnc = xscale2pmu_read_pmnc();
        xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

        /* Check the overflow flag register. */
        of_flags = xscale2pmu_read_overflow_flags();
        if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
                return IRQ_NONE;

        /* Clear the overflow bits. */
        xscale2pmu_write_overflow_flags(of_flags);

        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!event)
                        continue;

                if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        irq_work_run();

        /*
         * Re-enable the PMU.
         */
        pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(pmnc);

        return IRQ_HANDLED;
}

static void xscale2pmu_enable_event(struct perf_event *event)
{
        unsigned long flags, ien, evtsel;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        ien = xscale2pmu_read_int_enable();
        evtsel = xscale2pmu_read_event_select();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                ien |= XSCALE2_CCOUNT_INT_EN;
                break;
        case XSCALE_COUNTER0:
                ien |= XSCALE2_COUNT0_INT_EN;
                evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
                break;
        case XSCALE_COUNTER1:
                ien |= XSCALE2_COUNT1_INT_EN;
                evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
                break;
        case XSCALE_COUNTER2:
                ien |= XSCALE2_COUNT2_INT_EN;
                evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
                break;
        case XSCALE_COUNTER3:
                ien |= XSCALE2_COUNT3_INT_EN;
                evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        xscale2pmu_write_event_select(evtsel);
        xscale2pmu_write_int_enable(ien);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_disable_event(struct perf_event *event)
{
        unsigned long flags, ien, evtsel, of_flags;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
        int idx = hwc->idx;

        ien = xscale2pmu_read_int_enable();
        evtsel = xscale2pmu_read_event_select();

        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                ien &= ~XSCALE2_CCOUNT_INT_EN;
                of_flags = XSCALE2_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ien &= ~XSCALE2_COUNT0_INT_EN;
                evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
                of_flags = XSCALE2_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ien &= ~XSCALE2_COUNT1_INT_EN;
                evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
                of_flags = XSCALE2_COUNT1_OVERFLOW;
                break;
        case XSCALE_COUNTER2:
                ien &= ~XSCALE2_COUNT2_INT_EN;
                evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
                of_flags = XSCALE2_COUNT2_OVERFLOW;
                break;
        case XSCALE_COUNTER3:
                ien &= ~XSCALE2_COUNT3_INT_EN;
                evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
                of_flags = XSCALE2_COUNT3_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        xscale2pmu_write_event_select(evtsel);
        xscale2pmu_write_int_enable(ien);
        xscale2pmu_write_overflow_flags(of_flags);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

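/*
 * Reuse the xscale1 allocator for the cycle counter and COUNTER0/1,
 * then try the two extra xscale2 counters.
 */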
static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                struct perf_event *event)
{
        int idx = xscale1pmu_get_event_idx(cpuc, event);

        if (idx >= 0)
                goto out;

        if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
                idx = XSCALE_COUNTER3;
        else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
                idx = XSCALE_COUNTER2;
out:
        return idx;
}

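/*
 * XSCALE_PMU_CNT64 is assumed here to be the PMNC clock-divider bit
 * (CCNT advancing once every 64 cycles when set); clearing it on start
 * keeps the cycle counter ticking every processor cycle.
 */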
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
        val |= XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = xscale2pmu_read_pmnc();
        val &= ~XSCALE_PMU_ENABLE;
        xscale2pmu_write_pmnc(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;
        u32 val = 0;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER2:
                asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
                break;
        case XSCALE_COUNTER3:
                asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
                break;
        }

        return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        switch (counter) {
        case XSCALE_CYCLE_COUNTER:
                asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER0:
                asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER1:
                asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER2:
                asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
                break;
        case XSCALE_COUNTER3:
                asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
                break;
        }
}

static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->name           = "armv5_xscale2";
        cpu_pmu->handle_irq     = xscale2pmu_handle_irq;
        cpu_pmu->enable         = xscale2pmu_enable_event;
        cpu_pmu->disable        = xscale2pmu_disable_event;
        cpu_pmu->read_counter   = xscale2pmu_read_counter;
        cpu_pmu->write_counter  = xscale2pmu_write_counter;
        cpu_pmu->get_event_idx  = xscale2pmu_get_event_idx;
        cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
        cpu_pmu->start          = xscale2pmu_start;
        cpu_pmu->stop           = xscale2pmu_stop;
        cpu_pmu->map_event      = xscale_map_event;
        cpu_pmu->num_events     = 5;

        return 0;
}

static const struct pmu_probe_info xscale_pmu_probe_table[] = {
        XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
        XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
        { /* sentinel value */ }
};
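
/*
 * arm_pmu_device_probe() matches the running CPU against this table by
 * CPUID, so a board only needs to register an "xscale-pmu" platform
 * device carrying the PMU interrupt; no DT match table is provided.
 */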

static int xscale_pmu_device_probe(struct platform_device *pdev)
{
        return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}

static struct platform_driver xscale_pmu_driver = {
        .driver         = {
                .name   = "xscale-pmu",
        },
        .probe          = xscale_pmu_device_probe,
};

builtin_platform_driver(xscale_pmu_driver);
#endif  /* CONFIG_CPU_XSCALE */
