linux/arch/sparc/kernel/perf_event.c
   1/* Performance event support for sparc64.
   2 *
   3 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
   4 *
   5 * This code is based almost entirely upon the x86 perf event
   6 * code, which is:
   7 *
   8 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
   9 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  10 *  Copyright (C) 2009 Jaswinder Singh Rajput
  11 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
  12 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  13 */
  14
  15#include <linux/perf_event.h>
  16#include <linux/kprobes.h>
  17#include <linux/ftrace.h>
  18#include <linux/kernel.h>
  19#include <linux/kdebug.h>
  20#include <linux/mutex.h>
  21
  22#include <asm/stacktrace.h>
  23#include <asm/cpudata.h>
  24#include <asm/uaccess.h>
  25#include <asm/atomic.h>
  26#include <asm/nmi.h>
  27#include <asm/pcr.h>
  28
  29#include "kstack.h"
  30
  31/* Sparc64 chips have two performance counters, 32-bits each, with
  32 * overflow interrupts generated on transition from 0xffffffff to 0.
  33 * The counters are accessed in one go using a 64-bit register.
  34 *
  35 * Both counters are controlled using a single control register.  The
  36 * only way to stop all sampling is to clear all of the context (user,
  37 * supervisor, hypervisor) sampling enable bits.  But these bits apply
  38 * to both counters, thus the two counters can't be enabled/disabled
  39 * individually.
  40 *
  41 * The control register has two event fields, one for each of the two
  42 * counters.  It's thus nearly impossible to have one counter going
  43 * while keeping the other one stopped.  Therefore it is possible to
  44 * get overflow interrupts for counters not currently "in use" and
  45 * that condition must be checked in the overflow interrupt handler.
  46 *
  47 * So we use a hack, in that we program inactive counters with the
  48 * "sw_count0" and "sw_count1" events.  These count how many times
  49 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
  50 * unusual way to encode a NOP and therefore will not trigger in
  51 * normal code.
  52 */
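/* The "nop" programming of an idle counter is done with the per-chip
 * upper_nop and lower_nop encodings in struct sparc_pmu below; see
 * nop_for_index() and sparc_pmu_disable_event().
 */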
  53
  54#define MAX_HWEVENTS                    2
  55#define MAX_PERIOD                      ((1UL << 32) - 1)
  56
  57#define PIC_UPPER_INDEX                 0
  58#define PIC_LOWER_INDEX                 1
  59#define PIC_NO_INDEX                    -1
  60
  61struct cpu_hw_events {
  62        /* Number of events currently scheduled onto this cpu.
  63         * This tells how many entries in the arrays below
  64         * are valid.
  65         */
  66        int                     n_events;
  67
  68        /* Number of new events added since the last hw_perf_disable().
  69         * This works because the perf event layer always adds new
  70         * events inside of a perf_{disable,enable}() sequence.
  71         */
  72        int                     n_added;
  73
   74        /* Array of events currently scheduled on this cpu.  */
  75        struct perf_event       *event[MAX_HWEVENTS];
  76
  77        /* Array of encoded longs, specifying the %pcr register
   78         * encoding and the mask of PIC counters this event can
  79         * be scheduled on.  See perf_event_encode() et al.
  80         */
  81        unsigned long           events[MAX_HWEVENTS];
  82
  83        /* The current counter index assigned to an event.  When the
  84         * event hasn't been programmed into the cpu yet, this will
  85         * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
  86         * we ought to schedule the event.
  87         */
  88        int                     current_idx[MAX_HWEVENTS];
  89
  90        /* Software copy of %pcr register on this cpu.  */
  91        u64                     pcr;
  92
   93        /* Enable/disable state.  */
  94        int                     enabled;
  95
  96        unsigned int            group_flag;
  97};
  98DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
  99
 100/* An event map describes the characteristics of a performance
 101 * counter event.  In particular it gives the encoding as well as
 102 * a mask telling which counters the event can be measured on.
 103 */
 104struct perf_event_map {
 105        u16     encoding;
 106        u8      pic_mask;
 107#define PIC_NONE        0x00
 108#define PIC_UPPER       0x01
 109#define PIC_LOWER       0x02
 110};
 111
 112/* Encode a perf_event_map entry into a long.  */
 113static unsigned long perf_event_encode(const struct perf_event_map *pmap)
 114{
 115        return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
 116}
 117
 118static u8 perf_event_get_msk(unsigned long val)
 119{
 120        return val & 0xff;
 121}
 122
 123static u64 perf_event_get_enc(unsigned long val)
 124{
 125        return val >> 16;
 126}
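/* For example, the ultra3 PERF_COUNT_HW_CACHE_MISSES entry
 * { 0x0009, PIC_UPPER } encodes to (0x0009UL << 16) | 0x01, from which
 * perf_event_get_enc() recovers 0x9 and perf_event_get_msk() recovers
 * PIC_UPPER.
 */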
 127
 128#define C(x) PERF_COUNT_HW_CACHE_##x
 129
 130#define CACHE_OP_UNSUPPORTED    0xfffe
 131#define CACHE_OP_NONSENSE       0xffff
 132
 133typedef struct perf_event_map cache_map_t
 134                                [PERF_COUNT_HW_CACHE_MAX]
 135                                [PERF_COUNT_HW_CACHE_OP_MAX]
 136                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 137
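/* Per-chip PMU description.  event_map() translates a generic
 * PERF_COUNT_HW_* id (bounded by max_events) into a perf_event_map,
 * and cache_map does the same for PERF_TYPE_HW_CACHE events (NULL if
 * unsupported).  upper_shift/lower_shift give the bit position of each
 * counter's event-select field within %pcr and event_mask its width.
 * hv_bit and irq_bit are the hypervisor-trace and overflow-interrupt
 * enable bits, and upper_nop/lower_nop are the encodings used to park
 * an idle counter on the NOP-counting event described above.
 */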
 138struct sparc_pmu {
 139        const struct perf_event_map     *(*event_map)(int);
 140        const cache_map_t               *cache_map;
 141        int                             max_events;
 142        int                             upper_shift;
 143        int                             lower_shift;
 144        int                             event_mask;
 145        int                             hv_bit;
 146        int                             irq_bit;
 147        int                             upper_nop;
 148        int                             lower_nop;
 149};
 150
 151static const struct perf_event_map ultra3_perfmon_event_map[] = {
 152        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
 153        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
 154        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
 155        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
 156};
 157
 158static const struct perf_event_map *ultra3_event_map(int event_id)
 159{
 160        return &ultra3_perfmon_event_map[event_id];
 161}
 162
 163static const cache_map_t ultra3_cache_map = {
 164[C(L1D)] = {
 165        [C(OP_READ)] = {
 166                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
 167                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
 168        },
 169        [C(OP_WRITE)] = {
 170                [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
 171                [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
 172        },
 173        [C(OP_PREFETCH)] = {
 174                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 175                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 176        },
 177},
 178[C(L1I)] = {
 179        [C(OP_READ)] = {
 180                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
 181                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
 182        },
 183        [ C(OP_WRITE) ] = {
 184                [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
 185                [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
 186        },
 187        [ C(OP_PREFETCH) ] = {
 188                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 189                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 190        },
 191},
 192[C(LL)] = {
 193        [C(OP_READ)] = {
 194                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
 195                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
 196        },
 197        [C(OP_WRITE)] = {
 198                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
 199                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
 200        },
 201        [C(OP_PREFETCH)] = {
 202                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 203                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 204        },
 205},
 206[C(DTLB)] = {
 207        [C(OP_READ)] = {
 208                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 209                [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
 210        },
 211        [ C(OP_WRITE) ] = {
 212                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 213                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 214        },
 215        [ C(OP_PREFETCH) ] = {
 216                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 217                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 218        },
 219},
 220[C(ITLB)] = {
 221        [C(OP_READ)] = {
 222                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 223                [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
 224        },
 225        [ C(OP_WRITE) ] = {
 226                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 227                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 228        },
 229        [ C(OP_PREFETCH) ] = {
 230                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 231                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 232        },
 233},
 234[C(BPU)] = {
 235        [C(OP_READ)] = {
 236                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 237                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 238        },
 239        [ C(OP_WRITE) ] = {
 240                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 241                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 242        },
 243        [ C(OP_PREFETCH) ] = {
 244                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 245                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 246        },
 247},
 248};
 249
 250static const struct sparc_pmu ultra3_pmu = {
 251        .event_map      = ultra3_event_map,
 252        .cache_map      = &ultra3_cache_map,
 253        .max_events     = ARRAY_SIZE(ultra3_perfmon_event_map),
 254        .upper_shift    = 11,
 255        .lower_shift    = 4,
 256        .event_mask     = 0x3f,
 257        .upper_nop      = 0x1c,
 258        .lower_nop      = 0x14,
 259};
 260
 261/* Niagara1 is very limited.  The upper PIC is hard-locked to count
  262 * only instructions, so it is free-running, which creates all kinds of
 263 * problems.  Some hardware designs make one wonder if the creator
 264 * even looked at how this stuff gets used by software.
 265 */
 266static const struct perf_event_map niagara1_perfmon_event_map[] = {
 267        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
 268        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
 269        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
 270        [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
 271};
 272
 273static const struct perf_event_map *niagara1_event_map(int event_id)
 274{
 275        return &niagara1_perfmon_event_map[event_id];
 276}
 277
 278static const cache_map_t niagara1_cache_map = {
 279[C(L1D)] = {
 280        [C(OP_READ)] = {
 281                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 282                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
 283        },
 284        [C(OP_WRITE)] = {
 285                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 286                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
 287        },
 288        [C(OP_PREFETCH)] = {
 289                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 290                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 291        },
 292},
 293[C(L1I)] = {
 294        [C(OP_READ)] = {
 295                [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
 296                [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
 297        },
 298        [ C(OP_WRITE) ] = {
 299                [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
 300                [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
 301        },
 302        [ C(OP_PREFETCH) ] = {
 303                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 304                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 305        },
 306},
 307[C(LL)] = {
 308        [C(OP_READ)] = {
 309                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 310                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
 311        },
 312        [C(OP_WRITE)] = {
 313                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 314                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
 315        },
 316        [C(OP_PREFETCH)] = {
 317                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 318                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 319        },
 320},
 321[C(DTLB)] = {
 322        [C(OP_READ)] = {
 323                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 324                [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
 325        },
 326        [ C(OP_WRITE) ] = {
 327                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 328                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 329        },
 330        [ C(OP_PREFETCH) ] = {
 331                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 332                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 333        },
 334},
 335[C(ITLB)] = {
 336        [C(OP_READ)] = {
 337                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 338                [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
 339        },
 340        [ C(OP_WRITE) ] = {
 341                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 342                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 343        },
 344        [ C(OP_PREFETCH) ] = {
 345                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 346                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 347        },
 348},
 349[C(BPU)] = {
 350        [C(OP_READ)] = {
 351                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 352                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 353        },
 354        [ C(OP_WRITE) ] = {
 355                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 356                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 357        },
 358        [ C(OP_PREFETCH) ] = {
 359                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 360                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 361        },
 362},
 363};
 364
 365static const struct sparc_pmu niagara1_pmu = {
 366        .event_map      = niagara1_event_map,
 367        .cache_map      = &niagara1_cache_map,
 368        .max_events     = ARRAY_SIZE(niagara1_perfmon_event_map),
 369        .upper_shift    = 0,
 370        .lower_shift    = 4,
 371        .event_mask     = 0x7,
 372        .upper_nop      = 0x0,
 373        .lower_nop      = 0x0,
 374};
 375
 376static const struct perf_event_map niagara2_perfmon_event_map[] = {
 377        [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
 378        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
 379        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
 380        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
 381        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
 382        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
 383};
 384
 385static const struct perf_event_map *niagara2_event_map(int event_id)
 386{
 387        return &niagara2_perfmon_event_map[event_id];
 388}
 389
 390static const cache_map_t niagara2_cache_map = {
 391[C(L1D)] = {
 392        [C(OP_READ)] = {
 393                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
 394                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
 395        },
 396        [C(OP_WRITE)] = {
 397                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
 398                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
 399        },
 400        [C(OP_PREFETCH)] = {
 401                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 402                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 403        },
 404},
 405[C(L1I)] = {
 406        [C(OP_READ)] = {
 407                [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
 408                [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
 409        },
 410        [ C(OP_WRITE) ] = {
 411                [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
 412                [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
 413        },
 414        [ C(OP_PREFETCH) ] = {
 415                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 416                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 417        },
 418},
 419[C(LL)] = {
 420        [C(OP_READ)] = {
 421                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
 422                [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
 423        },
 424        [C(OP_WRITE)] = {
 425                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
 426                [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
 427        },
 428        [C(OP_PREFETCH)] = {
 429                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 430                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 431        },
 432},
 433[C(DTLB)] = {
 434        [C(OP_READ)] = {
 435                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 436                [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
 437        },
 438        [ C(OP_WRITE) ] = {
 439                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 440                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 441        },
 442        [ C(OP_PREFETCH) ] = {
 443                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 444                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 445        },
 446},
 447[C(ITLB)] = {
 448        [C(OP_READ)] = {
 449                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 450                [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
 451        },
 452        [ C(OP_WRITE) ] = {
 453                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 454                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 455        },
 456        [ C(OP_PREFETCH) ] = {
 457                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 458                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 459        },
 460},
 461[C(BPU)] = {
 462        [C(OP_READ)] = {
 463                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
 464                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
 465        },
 466        [ C(OP_WRITE) ] = {
 467                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 468                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 469        },
 470        [ C(OP_PREFETCH) ] = {
 471                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
 472                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
 473        },
 474},
 475};
 476
 477static const struct sparc_pmu niagara2_pmu = {
 478        .event_map      = niagara2_event_map,
 479        .cache_map      = &niagara2_cache_map,
 480        .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
 481        .upper_shift    = 19,
 482        .lower_shift    = 6,
 483        .event_mask     = 0xfff,
 484        .hv_bit         = 0x8,
 485        .irq_bit        = 0x30,
 486        .upper_nop      = 0x220,
 487        .lower_nop      = 0x220,
 488};
 489
 490static const struct sparc_pmu *sparc_pmu __read_mostly;
 491
 492static u64 event_encoding(u64 event_id, int idx)
 493{
 494        if (idx == PIC_UPPER_INDEX)
 495                event_id <<= sparc_pmu->upper_shift;
 496        else
 497                event_id <<= sparc_pmu->lower_shift;
 498        return event_id;
 499}
 500
 501static u64 mask_for_index(int idx)
 502{
 503        return event_encoding(sparc_pmu->event_mask, idx);
 504}
 505
 506static u64 nop_for_index(int idx)
 507{
 508        return event_encoding(idx == PIC_UPPER_INDEX ?
 509                              sparc_pmu->upper_nop :
 510                              sparc_pmu->lower_nop, idx);
 511}
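/* With the ultra3 settings, for example, mask_for_index(PIC_UPPER_INDEX)
 * evaluates to 0x3f << 11 and nop_for_index(PIC_UPPER_INDEX) to
 * 0x1c << 11, i.e. the NOP-counting event placed into the upper
 * counter's event-select field.
 */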
 512
 513static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 514{
 515        u64 val, mask = mask_for_index(idx);
 516
 517        val = cpuc->pcr;
 518        val &= ~mask;
 519        val |= hwc->config;
 520        cpuc->pcr = val;
 521
 522        pcr_ops->write(cpuc->pcr);
 523}
 524
 525static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 526{
 527        u64 mask = mask_for_index(idx);
 528        u64 nop = nop_for_index(idx);
 529        u64 val;
 530
 531        val = cpuc->pcr;
 532        val &= ~mask;
 533        val |= nop;
 534        cpuc->pcr = val;
 535
 536        pcr_ops->write(cpuc->pcr);
 537}
 538
 539static u32 read_pmc(int idx)
 540{
 541        u64 val;
 542
 543        read_pic(val);
 544        if (idx == PIC_UPPER_INDEX)
 545                val >>= 32;
 546
 547        return val & 0xffffffff;
 548}
 549
 550static void write_pmc(int idx, u64 val)
 551{
 552        u64 shift, mask, pic;
 553
 554        shift = 0;
 555        if (idx == PIC_UPPER_INDEX)
 556                shift = 32;
 557
 558        mask = ((u64) 0xffffffff) << shift;
 559        val <<= shift;
 560
 561        read_pic(pic);
 562        pic &= ~mask;
 563        pic |= val;
 564        write_pic(pic);
 565}
 566
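/* Fold the latest 32-bit hardware count into the 64-bit event count.
 * The shift by (64 - 32) truncates both values to the counter width
 * and sign-extends their difference, so the delta stays correct even
 * though prev_count holds a full 64-bit value such as the (u64)-left
 * written by sparc_perf_event_set_period(); e.g. prev 0xfffffff0 and
 * new 0x00000010 yield a delta of 0x20.
 */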
 567static u64 sparc_perf_event_update(struct perf_event *event,
 568                                   struct hw_perf_event *hwc, int idx)
 569{
 570        int shift = 64 - 32;
 571        u64 prev_raw_count, new_raw_count;
 572        s64 delta;
 573
 574again:
 575        prev_raw_count = local64_read(&hwc->prev_count);
 576        new_raw_count = read_pmc(idx);
 577
 578        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 579                             new_raw_count) != prev_raw_count)
 580                goto again;
 581
 582        delta = (new_raw_count << shift) - (prev_raw_count << shift);
 583        delta >>= shift;
 584
 585        local64_add(delta, &event->count);
 586        local64_sub(delta, &hwc->period_left);
 587
 588        return new_raw_count;
 589}
 590
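/* Program the counter so the next overflow interrupt fires after
 * "left" more events: writing (u64)(-left) & 0xffffffff makes the
 * 32-bit PIC hit the 0xffffffff -> 0 transition after exactly "left"
 * increments (e.g. left == 0x8000 writes 0xffff8000).
 */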
 591static int sparc_perf_event_set_period(struct perf_event *event,
 592                                       struct hw_perf_event *hwc, int idx)
 593{
 594        s64 left = local64_read(&hwc->period_left);
 595        s64 period = hwc->sample_period;
 596        int ret = 0;
 597
 598        if (unlikely(left <= -period)) {
 599                left = period;
 600                local64_set(&hwc->period_left, left);
 601                hwc->last_period = period;
 602                ret = 1;
 603        }
 604
 605        if (unlikely(left <= 0)) {
 606                left += period;
 607                local64_set(&hwc->period_left, left);
 608                hwc->last_period = period;
 609                ret = 1;
 610        }
 611        if (left > MAX_PERIOD)
 612                left = MAX_PERIOD;
 613
 614        local64_set(&hwc->prev_count, (u64)-left);
 615
 616        write_pmc(idx, (u64)(-left) & 0xffffffff);
 617
 618        perf_event_update_userpage(event);
 619
 620        return ret;
 621}
 622
 623/* If performance event entries have been added, move existing
 624 * events around (if necessary) and then assign new entries to
 625 * counters.
 626 */
 627static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
 628{
 629        int i;
 630
 631        if (!cpuc->n_added)
 632                goto out;
 633
 634        /* Read in the counters which are moving.  */
 635        for (i = 0; i < cpuc->n_events; i++) {
 636                struct perf_event *cp = cpuc->event[i];
 637
 638                if (cpuc->current_idx[i] != PIC_NO_INDEX &&
 639                    cpuc->current_idx[i] != cp->hw.idx) {
 640                        sparc_perf_event_update(cp, &cp->hw,
 641                                                cpuc->current_idx[i]);
 642                        cpuc->current_idx[i] = PIC_NO_INDEX;
 643                }
 644        }
 645
 646        /* Assign to counters all unassigned events.  */
 647        for (i = 0; i < cpuc->n_events; i++) {
 648                struct perf_event *cp = cpuc->event[i];
 649                struct hw_perf_event *hwc = &cp->hw;
 650                int idx = hwc->idx;
 651                u64 enc;
 652
 653                if (cpuc->current_idx[i] != PIC_NO_INDEX)
 654                        continue;
 655
 656                sparc_perf_event_set_period(cp, hwc, idx);
 657                cpuc->current_idx[i] = idx;
 658
 659                enc = perf_event_get_enc(cpuc->events[i]);
 660                pcr &= ~mask_for_index(idx);
 661                if (hwc->state & PERF_HES_STOPPED)
 662                        pcr |= nop_for_index(idx);
 663                else
 664                        pcr |= event_encoding(enc, idx);
 665        }
 666out:
 667        return pcr;
 668}
 669
 670static void sparc_pmu_enable(struct pmu *pmu)
 671{
 672        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 673        u64 pcr;
 674
 675        if (cpuc->enabled)
 676                return;
 677
 678        cpuc->enabled = 1;
 679        barrier();
 680
 681        pcr = cpuc->pcr;
 682        if (!cpuc->n_events) {
 683                pcr = 0;
 684        } else {
 685                pcr = maybe_change_configuration(cpuc, pcr);
 686
 687                /* We require that all of the events have the same
 688                 * configuration, so just fetch the settings from the
 689                 * first entry.
 690                 */
 691                cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
 692        }
 693
 694        pcr_ops->write(cpuc->pcr);
 695}
 696
 697static void sparc_pmu_disable(struct pmu *pmu)
 698{
 699        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 700        u64 val;
 701
 702        if (!cpuc->enabled)
 703                return;
 704
 705        cpuc->enabled = 0;
 706        cpuc->n_added = 0;
 707
 708        val = cpuc->pcr;
 709        val &= ~(PCR_UTRACE | PCR_STRACE |
 710                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
 711        cpuc->pcr = val;
 712
 713        pcr_ops->write(cpuc->pcr);
 714}
 715
 716static int active_event_index(struct cpu_hw_events *cpuc,
 717                              struct perf_event *event)
 718{
 719        int i;
 720
 721        for (i = 0; i < cpuc->n_events; i++) {
 722                if (cpuc->event[i] == event)
 723                        break;
 724        }
 725        BUG_ON(i == cpuc->n_events);
 726        return cpuc->current_idx[i];
 727}
 728
 729static void sparc_pmu_start(struct perf_event *event, int flags)
 730{
 731        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 732        int idx = active_event_index(cpuc, event);
 733
 734        if (flags & PERF_EF_RELOAD) {
 735                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 736                sparc_perf_event_set_period(event, &event->hw, idx);
 737        }
 738
 739        event->hw.state = 0;
 740
 741        sparc_pmu_enable_event(cpuc, &event->hw, idx);
 742}
 743
 744static void sparc_pmu_stop(struct perf_event *event, int flags)
 745{
 746        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 747        int idx = active_event_index(cpuc, event);
 748
 749        if (!(event->hw.state & PERF_HES_STOPPED)) {
 750                sparc_pmu_disable_event(cpuc, &event->hw, idx);
 751                event->hw.state |= PERF_HES_STOPPED;
 752        }
 753
 754        if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
 755                sparc_perf_event_update(event, &event->hw, idx);
 756                event->hw.state |= PERF_HES_UPTODATE;
 757        }
 758}
 759
 760static void sparc_pmu_del(struct perf_event *event, int _flags)
 761{
 762        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 763        unsigned long flags;
 764        int i;
 765
 766        local_irq_save(flags);
 767        perf_pmu_disable(event->pmu);
 768
 769        for (i = 0; i < cpuc->n_events; i++) {
 770                if (event == cpuc->event[i]) {
 771                        /* Absorb the final count and turn off the
 772                         * event.
 773                         */
 774                        sparc_pmu_stop(event, PERF_EF_UPDATE);
 775
 776                        /* Shift remaining entries down into
 777                         * the existing slot.
 778                         */
 779                        while (++i < cpuc->n_events) {
 780                                cpuc->event[i - 1] = cpuc->event[i];
 781                                cpuc->events[i - 1] = cpuc->events[i];
 782                                cpuc->current_idx[i - 1] =
 783                                        cpuc->current_idx[i];
 784                        }
 785
 786                        perf_event_update_userpage(event);
 787
 788                        cpuc->n_events--;
 789                        break;
 790                }
 791        }
 792
 793        perf_pmu_enable(event->pmu);
 794        local_irq_restore(flags);
 795}
 796
 797static void sparc_pmu_read(struct perf_event *event)
 798{
 799        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 800        int idx = active_event_index(cpuc, event);
 801        struct hw_perf_event *hwc = &event->hw;
 802
 803        sparc_perf_event_update(event, hwc, idx);
 804}
 805
 806static atomic_t active_events = ATOMIC_INIT(0);
 807static DEFINE_MUTEX(pmc_grab_mutex);
 808
 809static void perf_stop_nmi_watchdog(void *unused)
 810{
 811        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 812
 813        stop_nmi_watchdog(NULL);
 814        cpuc->pcr = pcr_ops->read();
 815}
 816
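/* The performance counters are shared with the NMI watchdog, so the
 * watchdog is stopped on all cpus while any perf events exist and is
 * restarted when the last event is destroyed (hw_perf_event_destroy()
 * -> perf_event_release_pmc()).
 */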
 817void perf_event_grab_pmc(void)
 818{
 819        if (atomic_inc_not_zero(&active_events))
 820                return;
 821
 822        mutex_lock(&pmc_grab_mutex);
 823        if (atomic_read(&active_events) == 0) {
 824                if (atomic_read(&nmi_active) > 0) {
 825                        on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
 826                        BUG_ON(atomic_read(&nmi_active) != 0);
 827                }
 828                atomic_inc(&active_events);
 829        }
 830        mutex_unlock(&pmc_grab_mutex);
 831}
 832
 833void perf_event_release_pmc(void)
 834{
 835        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 836                if (atomic_read(&nmi_active) == 0)
 837                        on_each_cpu(start_nmi_watchdog, NULL, 1);
 838                mutex_unlock(&pmc_grab_mutex);
 839        }
 840}
 841
 842static const struct perf_event_map *sparc_map_cache_event(u64 config)
 843{
 844        unsigned int cache_type, cache_op, cache_result;
 845        const struct perf_event_map *pmap;
 846
 847        if (!sparc_pmu->cache_map)
 848                return ERR_PTR(-ENOENT);
 849
 850        cache_type = (config >>  0) & 0xff;
 851        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 852                return ERR_PTR(-EINVAL);
 853
 854        cache_op = (config >>  8) & 0xff;
 855        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 856                return ERR_PTR(-EINVAL);
 857
 858        cache_result = (config >> 16) & 0xff;
 859        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 860                return ERR_PTR(-EINVAL);
 861
 862        pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
 863
 864        if (pmap->encoding == CACHE_OP_UNSUPPORTED)
 865                return ERR_PTR(-ENOENT);
 866
 867        if (pmap->encoding == CACHE_OP_NONSENSE)
 868                return ERR_PTR(-EINVAL);
 869
 870        return pmap;
 871}
 872
 873static void hw_perf_event_destroy(struct perf_event *event)
 874{
 875        perf_event_release_pmc();
 876}
 877
 878/* Make sure all events can be scheduled into the hardware at
 879 * the same time.  This is simplified by the fact that we only
 880 * need to support 2 simultaneous HW events.
 881 *
 882 * As a side effect, the evts[]->hw.idx values will be assigned
 883 * on success.  These are pending indexes.  When the events are
 884 * actually programmed into the chip, these values will propagate
 885 * to the per-cpu cpuc->current_idx[] slots, see the code in
 886 * maybe_change_configuration() for details.
 887 */
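/* For example, scheduling the ultra3 CACHE_REFERENCES event (PIC_LOWER
 * only) together with CACHE_MISSES (PIC_UPPER only) hits the "fixed to
 * different counters" case below: the former ends up with
 * hw.idx == PIC_LOWER_INDEX and the latter with PIC_UPPER_INDEX.
 */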
 888static int sparc_check_constraints(struct perf_event **evts,
 889                                   unsigned long *events, int n_ev)
 890{
 891        u8 msk0 = 0, msk1 = 0;
 892        int idx0 = 0;
 893
 894        /* This case is possible when we are invoked from
 895         * hw_perf_group_sched_in().
 896         */
 897        if (!n_ev)
 898                return 0;
 899
 900        if (n_ev > MAX_HWEVENTS)
 901                return -1;
 902
 903        msk0 = perf_event_get_msk(events[0]);
 904        if (n_ev == 1) {
 905                if (msk0 & PIC_LOWER)
 906                        idx0 = 1;
 907                goto success;
 908        }
 909        BUG_ON(n_ev != 2);
 910        msk1 = perf_event_get_msk(events[1]);
 911
 912        /* If both events can go on any counter, OK.  */
 913        if (msk0 == (PIC_UPPER | PIC_LOWER) &&
 914            msk1 == (PIC_UPPER | PIC_LOWER))
 915                goto success;
 916
 917        /* If one event is limited to a specific counter,
 918         * and the other can go on both, OK.
 919         */
 920        if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
 921            msk1 == (PIC_UPPER | PIC_LOWER)) {
 922                if (msk0 & PIC_LOWER)
 923                        idx0 = 1;
 924                goto success;
 925        }
 926
 927        if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
 928            msk0 == (PIC_UPPER | PIC_LOWER)) {
 929                if (msk1 & PIC_UPPER)
 930                        idx0 = 1;
 931                goto success;
 932        }
 933
 934        /* If the events are fixed to different counters, OK.  */
 935        if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
 936            (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
 937                if (msk0 & PIC_LOWER)
 938                        idx0 = 1;
 939                goto success;
 940        }
 941
 942        /* Otherwise, there is a conflict.  */
 943        return -1;
 944
 945success:
 946        evts[0]->hw.idx = idx0;
 947        if (n_ev == 2)
 948                evts[1]->hw.idx = idx0 ^ 1;
 949        return 0;
 950}
 951
 952static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 953{
 954        int eu = 0, ek = 0, eh = 0;
 955        struct perf_event *event;
 956        int i, n, first;
 957
 958        n = n_prev + n_new;
 959        if (n <= 1)
 960                return 0;
 961
 962        first = 1;
 963        for (i = 0; i < n; i++) {
 964                event = evts[i];
 965                if (first) {
 966                        eu = event->attr.exclude_user;
 967                        ek = event->attr.exclude_kernel;
 968                        eh = event->attr.exclude_hv;
 969                        first = 0;
 970                } else if (event->attr.exclude_user != eu ||
 971                           event->attr.exclude_kernel != ek ||
 972                           event->attr.exclude_hv != eh) {
 973                        return -EAGAIN;
 974                }
 975        }
 976
 977        return 0;
 978}
 979
 980static int collect_events(struct perf_event *group, int max_count,
 981                          struct perf_event *evts[], unsigned long *events,
 982                          int *current_idx)
 983{
 984        struct perf_event *event;
 985        int n = 0;
 986
 987        if (!is_software_event(group)) {
 988                if (n >= max_count)
 989                        return -1;
 990                evts[n] = group;
 991                events[n] = group->hw.event_base;
 992                current_idx[n++] = PIC_NO_INDEX;
 993        }
 994        list_for_each_entry(event, &group->sibling_list, group_entry) {
 995                if (!is_software_event(event) &&
 996                    event->state != PERF_EVENT_STATE_OFF) {
 997                        if (n >= max_count)
 998                                return -1;
 999                        evts[n] = event;
1000                        events[n] = event->hw.event_base;
1001                        current_idx[n++] = PIC_NO_INDEX;
1002                }
1003        }
1004        return n;
1005}
1006
1007static int sparc_pmu_add(struct perf_event *event, int ef_flags)
1008{
1009        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1010        int n0, ret = -EAGAIN;
1011        unsigned long flags;
1012
1013        local_irq_save(flags);
1014        perf_pmu_disable(event->pmu);
1015
1016        n0 = cpuc->n_events;
1017        if (n0 >= MAX_HWEVENTS)
1018                goto out;
1019
1020        cpuc->event[n0] = event;
1021        cpuc->events[n0] = event->hw.event_base;
1022        cpuc->current_idx[n0] = PIC_NO_INDEX;
1023
1024        event->hw.state = PERF_HES_UPTODATE;
1025        if (!(ef_flags & PERF_EF_START))
1026                event->hw.state |= PERF_HES_STOPPED;
1027
1028        /*
 1029         * If a group event scheduling transaction was started,
 1030         * skip the schedulability test here; it will be performed
 1031         * at commit time (->commit_txn) as a whole.
1032         */
1033        if (cpuc->group_flag & PERF_EVENT_TXN)
1034                goto nocheck;
1035
1036        if (check_excludes(cpuc->event, n0, 1))
1037                goto out;
1038        if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1039                goto out;
1040
1041nocheck:
1042        cpuc->n_events++;
1043        cpuc->n_added++;
1044
1045        ret = 0;
1046out:
1047        perf_pmu_enable(event->pmu);
1048        local_irq_restore(flags);
1049        return ret;
1050}
1051
1052static int sparc_pmu_event_init(struct perf_event *event)
1053{
1054        struct perf_event_attr *attr = &event->attr;
1055        struct perf_event *evts[MAX_HWEVENTS];
1056        struct hw_perf_event *hwc = &event->hw;
1057        unsigned long events[MAX_HWEVENTS];
1058        int current_idx_dmy[MAX_HWEVENTS];
1059        const struct perf_event_map *pmap;
1060        int n;
1061
1062        if (atomic_read(&nmi_active) < 0)
1063                return -ENODEV;
1064
1065        switch (attr->type) {
1066        case PERF_TYPE_HARDWARE:
1067                if (attr->config >= sparc_pmu->max_events)
1068                        return -EINVAL;
1069                pmap = sparc_pmu->event_map(attr->config);
1070                break;
1071
1072        case PERF_TYPE_HW_CACHE:
1073                pmap = sparc_map_cache_event(attr->config);
1074                if (IS_ERR(pmap))
1075                        return PTR_ERR(pmap);
1076                break;
1077
1078        case PERF_TYPE_RAW:
1079                pmap = NULL;
1080                break;
1081
1082        default:
1083                return -ENOENT;
1084
1085        }
1086
1087        if (pmap) {
1088                hwc->event_base = perf_event_encode(pmap);
1089        } else {
1090                /*
1091                 * User gives us "(encoding << 16) | pic_mask" for
1092                 * PERF_TYPE_RAW events.
1093                 */
1094                hwc->event_base = attr->config;
1095        }
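        /* For PERF_TYPE_RAW on niagara2, for instance, a config of
         * 0x02ff0003 selects encoding 0x02ff with a PIC_UPPER | PIC_LOWER
         * mask, matching the PERF_COUNT_HW_CPU_CYCLES entry in
         * niagara2_perfmon_event_map[].
         */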
1096
1097        /* We save the enable bits in the config_base.  */
1098        hwc->config_base = sparc_pmu->irq_bit;
1099        if (!attr->exclude_user)
1100                hwc->config_base |= PCR_UTRACE;
1101        if (!attr->exclude_kernel)
1102                hwc->config_base |= PCR_STRACE;
1103        if (!attr->exclude_hv)
1104                hwc->config_base |= sparc_pmu->hv_bit;
1105
1106        n = 0;
1107        if (event->group_leader != event) {
1108                n = collect_events(event->group_leader,
1109                                   MAX_HWEVENTS - 1,
1110                                   evts, events, current_idx_dmy);
1111                if (n < 0)
1112                        return -EINVAL;
1113        }
1114        events[n] = hwc->event_base;
1115        evts[n] = event;
1116
1117        if (check_excludes(evts, n, 1))
1118                return -EINVAL;
1119
1120        if (sparc_check_constraints(evts, events, n + 1))
1121                return -EINVAL;
1122
1123        hwc->idx = PIC_NO_INDEX;
1124
1125        /* Try to do all error checking before this point, as unwinding
1126         * state after grabbing the PMC is difficult.
1127         */
1128        perf_event_grab_pmc();
1129        event->destroy = hw_perf_event_destroy;
1130
1131        if (!hwc->sample_period) {
1132                hwc->sample_period = MAX_PERIOD;
1133                hwc->last_period = hwc->sample_period;
1134                local64_set(&hwc->period_left, hwc->sample_period);
1135        }
1136
1137        return 0;
1138}
1139
1140/*
 1141 * Start a group event scheduling transaction.
 1142 * Set the flag so that pmu::enable() does not perform the
 1143 * schedulability test; it will be performed at commit time.
1144 */
1145static void sparc_pmu_start_txn(struct pmu *pmu)
1146{
1147        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1148
1149        perf_pmu_disable(pmu);
1150        cpuhw->group_flag |= PERF_EVENT_TXN;
1151}
1152
1153/*
 1154 * Cancel a group event scheduling transaction.
 1155 * Clear the flag, and pmu::enable() will perform the
 1156 * schedulability test.
1157 */
1158static void sparc_pmu_cancel_txn(struct pmu *pmu)
1159{
1160        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1161
1162        cpuhw->group_flag &= ~PERF_EVENT_TXN;
1163        perf_pmu_enable(pmu);
1164}
1165
1166/*
 1167 * Commit a group event scheduling transaction.
 1168 * Perform the group schedulability test as a whole.
 1169 * Return 0 on success.
1170 */
1171static int sparc_pmu_commit_txn(struct pmu *pmu)
1172{
1173        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1174        int n;
1175
1176        if (!sparc_pmu)
1177                return -EINVAL;
1178
 1180        n = cpuc->n_events;
1181        if (check_excludes(cpuc->event, 0, n))
1182                return -EINVAL;
1183        if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1184                return -EAGAIN;
1185
1186        cpuc->group_flag &= ~PERF_EVENT_TXN;
1187        perf_pmu_enable(pmu);
1188        return 0;
1189}
1190
1191static struct pmu pmu = {
1192        .pmu_enable     = sparc_pmu_enable,
1193        .pmu_disable    = sparc_pmu_disable,
1194        .event_init     = sparc_pmu_event_init,
1195        .add            = sparc_pmu_add,
1196        .del            = sparc_pmu_del,
1197        .start          = sparc_pmu_start,
1198        .stop           = sparc_pmu_stop,
1199        .read           = sparc_pmu_read,
1200        .start_txn      = sparc_pmu_start_txn,
1201        .cancel_txn     = sparc_pmu_cancel_txn,
1202        .commit_txn     = sparc_pmu_commit_txn,
1203};
1204
1205void perf_event_print_debug(void)
1206{
1207        unsigned long flags;
1208        u64 pcr, pic;
1209        int cpu;
1210
1211        if (!sparc_pmu)
1212                return;
1213
1214        local_irq_save(flags);
1215
1216        cpu = smp_processor_id();
1217
1218        pcr = pcr_ops->read();
1219        read_pic(pic);
1220
1221        pr_info("\n");
1222        pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
1223                cpu, pcr, pic);
1224
1225        local_irq_restore(flags);
1226}
1227
1228static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
1229                                            unsigned long cmd, void *__args)
1230{
1231        struct die_args *args = __args;
1232        struct perf_sample_data data;
1233        struct cpu_hw_events *cpuc;
1234        struct pt_regs *regs;
1235        int i;
1236
1237        if (!atomic_read(&active_events))
1238                return NOTIFY_DONE;
1239
1240        switch (cmd) {
1241        case DIE_NMI:
1242                break;
1243
1244        default:
1245                return NOTIFY_DONE;
1246        }
1247
1248        regs = args->regs;
1249
1250        perf_sample_data_init(&data, 0);
1251
1252        cpuc = &__get_cpu_var(cpu_hw_events);
1253
1254        /* If the PMU has the TOE IRQ enable bits, we need to do a
1255         * dummy write to the %pcr to clear the overflow bits and thus
1256         * the interrupt.
1257         *
1258         * Do this before we peek at the counters to determine
1259         * overflow so we don't lose any events.
1260         */
1261        if (sparc_pmu->irq_bit)
1262                pcr_ops->write(cpuc->pcr);
1263
1264        for (i = 0; i < cpuc->n_events; i++) {
1265                struct perf_event *event = cpuc->event[i];
1266                int idx = cpuc->current_idx[i];
1267                struct hw_perf_event *hwc;
1268                u64 val;
1269
1270                hwc = &event->hw;
1271                val = sparc_perf_event_update(event, hwc, idx);
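                /* A counter still in the upper half of its range has
                 * not wrapped since it was last programmed, so this
                 * event did not cause the overflow interrupt.
                 */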
1272                if (val & (1ULL << 31))
1273                        continue;
1274
1275                data.period = event->hw.last_period;
1276                if (!sparc_perf_event_set_period(event, hwc, idx))
1277                        continue;
1278
1279                if (perf_event_overflow(event, 1, &data, regs))
1280                        sparc_pmu_stop(event, 0);
1281        }
1282
1283        return NOTIFY_STOP;
1284}
1285
1286static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1287        .notifier_call          = perf_event_nmi_handler,
1288};
1289
1290static bool __init supported_pmu(void)
1291{
1292        if (!strcmp(sparc_pmu_type, "ultra3") ||
1293            !strcmp(sparc_pmu_type, "ultra3+") ||
1294            !strcmp(sparc_pmu_type, "ultra3i") ||
1295            !strcmp(sparc_pmu_type, "ultra4+")) {
1296                sparc_pmu = &ultra3_pmu;
1297                return true;
1298        }
1299        if (!strcmp(sparc_pmu_type, "niagara")) {
1300                sparc_pmu = &niagara1_pmu;
1301                return true;
1302        }
1303        if (!strcmp(sparc_pmu_type, "niagara2")) {
1304                sparc_pmu = &niagara2_pmu;
1305                return true;
1306        }
1307        return false;
1308}
1309
1310int __init init_hw_perf_events(void)
1311{
1312        pr_info("Performance events: ");
1313
1314        if (!supported_pmu()) {
1315                pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
1316                return 0;
1317        }
1318
1319        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1320
1321        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1322        register_die_notifier(&perf_event_nmi_notifier);
1323
1324        return 0;
1325}
1326early_initcall(init_hw_perf_events);
1327
1328void perf_callchain_kernel(struct perf_callchain_entry *entry,
1329                           struct pt_regs *regs)
1330{
1331        unsigned long ksp, fp;
1332#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1333        int graph = 0;
1334#endif
1335
1336        stack_trace_flush();
1337
1338        perf_callchain_store(entry, regs->tpc);
1339
1340        ksp = regs->u_regs[UREG_I6];
1341        fp = ksp + STACK_BIAS;
1342        do {
1343                struct sparc_stackf *sf;
1344                struct pt_regs *regs;
1345                unsigned long pc;
1346
1347                if (!kstack_valid(current_thread_info(), fp))
1348                        break;
1349
1350                sf = (struct sparc_stackf *) fp;
1351                regs = (struct pt_regs *) (sf + 1);
1352
1353                if (kstack_is_trap_frame(current_thread_info(), regs)) {
1354                        if (user_mode(regs))
1355                                break;
1356                        pc = regs->tpc;
1357                        fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1358                } else {
1359                        pc = sf->callers_pc;
1360                        fp = (unsigned long)sf->fp + STACK_BIAS;
1361                }
1362                perf_callchain_store(entry, pc);
1363#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1364                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1365                        int index = current->curr_ret_stack;
1366                        if (current->ret_stack && index >= graph) {
1367                                pc = current->ret_stack[index - graph].ret;
1368                                perf_callchain_store(entry, pc);
1369                                graph++;
1370                        }
1371                }
1372#endif
1373        } while (entry->nr < PERF_MAX_STACK_DEPTH);
1374}
1375
1376static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1377                                   struct pt_regs *regs)
1378{
1379        unsigned long ufp;
1380
1381        perf_callchain_store(entry, regs->tpc);
1382
1383        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1384        do {
1385                struct sparc_stackf *usf, sf;
1386                unsigned long pc;
1387
1388                usf = (struct sparc_stackf *) ufp;
1389                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1390                        break;
1391
1392                pc = sf.callers_pc;
1393                ufp = (unsigned long)sf.fp + STACK_BIAS;
1394                perf_callchain_store(entry, pc);
1395        } while (entry->nr < PERF_MAX_STACK_DEPTH);
1396}
1397
1398static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1399                                   struct pt_regs *regs)
1400{
1401        unsigned long ufp;
1402
1403        perf_callchain_store(entry, regs->tpc);
1404
1405        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
1406        do {
1407                struct sparc_stackf32 *usf, sf;
1408                unsigned long pc;
1409
1410                usf = (struct sparc_stackf32 *) ufp;
1411                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1412                        break;
1413
1414                pc = sf.callers_pc;
1415                ufp = (unsigned long)sf.fp;
1416                perf_callchain_store(entry, pc);
1417        } while (entry->nr < PERF_MAX_STACK_DEPTH);
1418}
1419
1420void
1421perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1422{
1423        flushw_user();
1424        if (test_thread_flag(TIF_32BIT))
1425                perf_callchain_user_32(entry, regs);
1426        else
1427                perf_callchain_user_64(entry, regs);
1428}
1429