linux/arch/powerpc/perf/core-book3s.c
   1/*
   2 * Performance event support - powerpc architecture code
   3 *
   4 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/perf_event.h>
  14#include <linux/percpu.h>
  15#include <linux/hardirq.h>
  16#include <asm/reg.h>
  17#include <asm/pmc.h>
  18#include <asm/machdep.h>
  19#include <asm/firmware.h>
  20#include <asm/ptrace.h>
  21
  22struct cpu_hw_events {
  23        int n_events;
  24        int n_percpu;
  25        int disabled;
  26        int n_added;
  27        int n_limited;
  28        u8  pmcs_enabled;
  29        struct perf_event *event[MAX_HWEVENTS];
  30        u64 events[MAX_HWEVENTS];
  31        unsigned int flags[MAX_HWEVENTS];
  32        unsigned long mmcr[3];
  33        struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
  34        u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
  35        u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  36        unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  37        unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  38
  39        unsigned int group_flag;
  40        int n_txn_start;
  41};
  42DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
  43
  44struct power_pmu *ppmu;
  45
  46/*
  47 * Normally, to ignore kernel events we set the FCS (freeze counters
  48 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  49 * hypervisor bit set in the MSR, or if we are running on a processor
  50 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
  51 * then we need to use the FCHV bit to ignore kernel events.
  52 */
  53static unsigned int freeze_events_kernel = MMCR0_FCS;
  54
  55/*
  56 * 32-bit doesn't have MMCRA but does have an MMCR2,
  57 * and a few other names are different.
  58 */
  59#ifdef CONFIG_PPC32
  60
  61#define MMCR0_FCHV              0
  62#define MMCR0_PMCjCE            MMCR0_PMCnCE
  63
  64#define SPRN_MMCRA              SPRN_MMCR2
  65#define MMCRA_SAMPLE_ENABLE     0
  66
  67static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  68{
  69        return 0;
  70}
  71static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
  72static inline u32 perf_get_misc_flags(struct pt_regs *regs)
  73{
  74        return 0;
  75}
  76static inline void perf_read_regs(struct pt_regs *regs)
  77{
  78        regs->result = 0;
  79}
  80static inline int perf_intr_is_nmi(struct pt_regs *regs)
  81{
  82        return 0;
  83}
  84
  85static inline int siar_valid(struct pt_regs *regs)
  86{
  87        return 1;
  88}
  89
  90#endif /* CONFIG_PPC32 */
  91
  92/*
  93 * Things that are specific to 64-bit implementations.
  94 */
  95#ifdef CONFIG_PPC64
  96
  97static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  98{
  99        unsigned long mmcra = regs->dsisr;
 100
 101        if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
 102                unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
 103                if (slot > 1)
 104                        return 4 * (slot - 1);
 105        }
 106        return 0;
 107}
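/*
 * Worked example (illustrative only): with instruction sampling enabled and
 * no PPMU_ALT_SIPR, a slot field of 3 in MMCRA means the sampled instruction
 * is taken to lie two 4-byte instructions past the address in SIAR, so
 * perf_instruction_pointer() below reports SIAR + 4 * (3 - 1) = SIAR + 8.
 */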
 108
 109/*
 110 * The user wants a data address recorded.
 111 * If we're not doing instruction sampling, give them the SDAR
 112 * (sampled data address).  If we are doing instruction sampling, then
 113 * only give them the SDAR if it corresponds to the instruction
 114 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 115 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 116 */
 117static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 118{
 119        unsigned long mmcra = regs->dsisr;
 120        unsigned long sdsync;
 121
 122        if (ppmu->flags & PPMU_SIAR_VALID)
 123                sdsync = POWER7P_MMCRA_SDAR_VALID;
 124        else if (ppmu->flags & PPMU_ALT_SIPR)
 125                sdsync = POWER6_MMCRA_SDSYNC;
 126        else
 127                sdsync = MMCRA_SDSYNC;
 128
 129        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 130                *addrp = mfspr(SPRN_SDAR);
 131}
 132
 133static bool mmcra_sihv(unsigned long mmcra)
 134{
 135        unsigned long sihv = MMCRA_SIHV;
 136
 137        if (ppmu->flags & PPMU_ALT_SIPR)
 138                sihv = POWER6_MMCRA_SIHV;
 139
 140        return !!(mmcra & sihv);
 141}
 142
 143static bool mmcra_sipr(unsigned long mmcra)
 144{
 145        unsigned long sipr = MMCRA_SIPR;
 146
 147        if (ppmu->flags & PPMU_ALT_SIPR)
 148                sipr = POWER6_MMCRA_SIPR;
 149
 150        return !!(mmcra & sipr);
 151}
 152
 153static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 154{
 155        if (regs->msr & MSR_PR)
 156                return PERF_RECORD_MISC_USER;
 157        if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
 158                return PERF_RECORD_MISC_HYPERVISOR;
 159        return PERF_RECORD_MISC_KERNEL;
 160}
 161
 162static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 163{
 164        unsigned long mmcra = regs->dsisr;
 165        unsigned long use_siar = regs->result;
 166
 167        if (!use_siar)
 168                return perf_flags_from_msr(regs);
 169
 170        /*
 171         * If we don't have flags in MMCRA, rather than using
 172         * the MSR, we intuit the flags from the address in
 173         * SIAR which should give slightly more reliable
 174         * results
 175         */
 176        if (ppmu->flags & PPMU_NO_SIPR) {
 177                unsigned long siar = mfspr(SPRN_SIAR);
 178                if (siar >= PAGE_OFFSET)
 179                        return PERF_RECORD_MISC_KERNEL;
 180                return PERF_RECORD_MISC_USER;
 181        }
 182
 183        /* PR has priority over HV, so order below is important */
 184        if (mmcra_sipr(mmcra))
 185                return PERF_RECORD_MISC_USER;
 186        if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
 187                return PERF_RECORD_MISC_HYPERVISOR;
 188        return PERF_RECORD_MISC_KERNEL;
 189}
 190
 191/*
 192 * Overload regs->dsisr to store MMCRA so we only need to read it once
 193 * on each interrupt.
 194 * Overload regs->result to specify whether we should use the MSR (result
 195 * is zero) or the SIAR (result is non zero).
 196 */
 197static inline void perf_read_regs(struct pt_regs *regs)
 198{
 199        unsigned long mmcra = mfspr(SPRN_MMCRA);
 200        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 201        int use_siar;
 202
 203        /*
  204         * If this isn't a PMU exception (e.g. a software event) the SIAR is
 205         * not valid. Use pt_regs.
 206         *
 207         * If it is a marked event use the SIAR.
 208         *
 209         * If the PMU doesn't update the SIAR for non marked events use
 210         * pt_regs.
 211         *
 212         * If the PMU has HV/PR flags then check to see if they
 213         * place the exception in userspace. If so, use pt_regs. In
 214         * continuous sampling mode the SIAR and the PMU exception are
 215         * not synchronised, so they may be many instructions apart.
 216         * This can result in confusing backtraces. We still want
 217         * hypervisor samples as well as samples in the kernel with
 218         * interrupts off hence the userspace check.
 219         */
 220        if (TRAP(regs) != 0xf00)
 221                use_siar = 0;
 222        else if (marked)
 223                use_siar = 1;
 224        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
 225                use_siar = 0;
 226        else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
 227                use_siar = 0;
 228        else
 229                use_siar = 1;
 230
 231        regs->dsisr = mmcra;
 232        regs->result = use_siar;
 233}
 234
 235/*
 236 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 237 * it as an NMI.
 238 */
 239static inline int perf_intr_is_nmi(struct pt_regs *regs)
 240{
 241        return !regs->softe;
 242}
 243
 244/*
 245 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 246 * must be sampled only if the SIAR-valid bit is set.
 247 *
 248 * For unmarked instructions and for processors that don't have the SIAR-Valid
 249 * bit, assume that SIAR is valid.
 250 */
 251static inline int siar_valid(struct pt_regs *regs)
 252{
 253        unsigned long mmcra = regs->dsisr;
 254        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 255
 256        if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
 257                return mmcra & POWER7P_MMCRA_SIAR_VALID;
 258
 259        return 1;
 260}
 261
 262#endif /* CONFIG_PPC64 */
 263
 264static void perf_event_interrupt(struct pt_regs *regs);
 265
 266void perf_event_print_debug(void)
 267{
 268}
 269
 270/*
 271 * Read one performance monitor counter (PMC).
 272 */
 273static unsigned long read_pmc(int idx)
 274{
 275        unsigned long val;
 276
 277        switch (idx) {
 278        case 1:
 279                val = mfspr(SPRN_PMC1);
 280                break;
 281        case 2:
 282                val = mfspr(SPRN_PMC2);
 283                break;
 284        case 3:
 285                val = mfspr(SPRN_PMC3);
 286                break;
 287        case 4:
 288                val = mfspr(SPRN_PMC4);
 289                break;
 290        case 5:
 291                val = mfspr(SPRN_PMC5);
 292                break;
 293        case 6:
 294                val = mfspr(SPRN_PMC6);
 295                break;
 296#ifdef CONFIG_PPC64
 297        case 7:
 298                val = mfspr(SPRN_PMC7);
 299                break;
 300        case 8:
 301                val = mfspr(SPRN_PMC8);
 302                break;
 303#endif /* CONFIG_PPC64 */
 304        default:
 305                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 306                val = 0;
 307        }
 308        return val;
 309}
 310
 311/*
 312 * Write one PMC.
 313 */
 314static void write_pmc(int idx, unsigned long val)
 315{
 316        switch (idx) {
 317        case 1:
 318                mtspr(SPRN_PMC1, val);
 319                break;
 320        case 2:
 321                mtspr(SPRN_PMC2, val);
 322                break;
 323        case 3:
 324                mtspr(SPRN_PMC3, val);
 325                break;
 326        case 4:
 327                mtspr(SPRN_PMC4, val);
 328                break;
 329        case 5:
 330                mtspr(SPRN_PMC5, val);
 331                break;
 332        case 6:
 333                mtspr(SPRN_PMC6, val);
 334                break;
 335#ifdef CONFIG_PPC64
 336        case 7:
 337                mtspr(SPRN_PMC7, val);
 338                break;
 339        case 8:
 340                mtspr(SPRN_PMC8, val);
 341                break;
 342#endif /* CONFIG_PPC64 */
 343        default:
 344                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 345        }
 346}
 347
 348/*
 349 * Check if a set of events can all go on the PMU at once.
 350 * If they can't, this will look at alternative codes for the events
 351 * and see if any combination of alternative codes is feasible.
 352 * The feasible set is returned in event_id[].
 353 */
 354static int power_check_constraints(struct cpu_hw_events *cpuhw,
 355                                   u64 event_id[], unsigned int cflags[],
 356                                   int n_ev)
 357{
 358        unsigned long mask, value, nv;
 359        unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
 360        int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 361        int i, j;
 362        unsigned long addf = ppmu->add_fields;
 363        unsigned long tadd = ppmu->test_adder;
 364
 365        if (n_ev > ppmu->n_counter)
 366                return -1;
 367
 368        /* First see if the events will go on as-is */
 369        for (i = 0; i < n_ev; ++i) {
 370                if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
 371                    && !ppmu->limited_pmc_event(event_id[i])) {
 372                        ppmu->get_alternatives(event_id[i], cflags[i],
 373                                               cpuhw->alternatives[i]);
 374                        event_id[i] = cpuhw->alternatives[i][0];
 375                }
 376                if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 377                                         &cpuhw->avalues[i][0]))
 378                        return -1;
 379        }
 380        value = mask = 0;
 381        for (i = 0; i < n_ev; ++i) {
 382                nv = (value | cpuhw->avalues[i][0]) +
 383                        (value & cpuhw->avalues[i][0] & addf);
 384                if ((((nv + tadd) ^ value) & mask) != 0 ||
 385                    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
 386                     cpuhw->amasks[i][0]) != 0)
 387                        break;
 388                value = nv;
 389                mask |= cpuhw->amasks[i][0];
 390        }
 391        if (i == n_ev)
 392                return 0;       /* all OK */
 393
 394        /* doesn't work, gather alternatives... */
 395        if (!ppmu->get_alternatives)
 396                return -1;
 397        for (i = 0; i < n_ev; ++i) {
 398                choice[i] = 0;
 399                n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 400                                                  cpuhw->alternatives[i]);
 401                for (j = 1; j < n_alt[i]; ++j)
 402                        ppmu->get_constraint(cpuhw->alternatives[i][j],
 403                                             &cpuhw->amasks[i][j],
 404                                             &cpuhw->avalues[i][j]);
 405        }
 406
 407        /* enumerate all possibilities and see if any will work */
 408        i = 0;
 409        j = -1;
 410        value = mask = nv = 0;
 411        while (i < n_ev) {
 412                if (j >= 0) {
 413                        /* we're backtracking, restore context */
 414                        value = svalues[i];
 415                        mask = smasks[i];
 416                        j = choice[i];
 417                }
 418                /*
 419                 * See if any alternative k for event_id i,
 420                 * where k > j, will satisfy the constraints.
 421                 */
 422                while (++j < n_alt[i]) {
 423                        nv = (value | cpuhw->avalues[i][j]) +
 424                                (value & cpuhw->avalues[i][j] & addf);
 425                        if ((((nv + tadd) ^ value) & mask) == 0 &&
 426                            (((nv + tadd) ^ cpuhw->avalues[i][j])
 427                             & cpuhw->amasks[i][j]) == 0)
 428                                break;
 429                }
 430                if (j >= n_alt[i]) {
 431                        /*
 432                         * No feasible alternative, backtrack
 433                         * to event_id i-1 and continue enumerating its
 434                         * alternatives from where we got up to.
 435                         */
 436                        if (--i < 0)
 437                                return -1;
 438                } else {
 439                        /*
 440                         * Found a feasible alternative for event_id i,
 441                         * remember where we got up to with this event_id,
 442                         * go on to the next event_id, and start with
 443                         * the first alternative for it.
 444                         */
 445                        choice[i] = j;
 446                        svalues[i] = value;
 447                        smasks[i] = mask;
 448                        value = nv;
 449                        mask |= cpuhw->amasks[i][j];
 450                        ++i;
 451                        j = -1;
 452                }
 453        }
 454
 455        /* OK, we have a feasible combination, tell the caller the solution */
 456        for (i = 0; i < n_ev; ++i)
 457                event_id[i] = cpuhw->alternatives[i][choice[i]];
 458        return 0;
 459}
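/*
 * Rough sketch of the constraint encoding this solver relies on (the real
 * field layouts live in each PMU back end, so the details here are
 * illustrative only): the constraint word is divided into fields.  A "select"
 * field must hold the same value for every event that sets its mask bits, so
 * any disagreement shows up under the accumulated mask in the XOR tests
 * above.  A "counting" field has the low bit of the field set in
 * ppmu->add_fields, which turns the OR-plus-carry expression for nv into
 * per-field addition; an event's mask typically also covers a bit just above
 * such a field with a required value of 0, and ppmu->test_adder biases the
 * sum so that a carry reaches that bit as soon as more events are packed in
 * than the underlying hardware resource can accommodate.
 */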
 460
 461/*
 462 * Check if newly-added events have consistent settings for
 463 * exclude_{user,kernel,hv} with each other and any previously
 464 * added events.
 465 */
 466static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 467                          int n_prev, int n_new)
 468{
 469        int eu = 0, ek = 0, eh = 0;
 470        int i, n, first;
 471        struct perf_event *event;
 472
 473        n = n_prev + n_new;
 474        if (n <= 1)
 475                return 0;
 476
 477        first = 1;
 478        for (i = 0; i < n; ++i) {
 479                if (cflags[i] & PPMU_LIMITED_PMC_OK) {
 480                        cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 481                        continue;
 482                }
 483                event = ctrs[i];
 484                if (first) {
 485                        eu = event->attr.exclude_user;
 486                        ek = event->attr.exclude_kernel;
 487                        eh = event->attr.exclude_hv;
 488                        first = 0;
 489                } else if (event->attr.exclude_user != eu ||
 490                           event->attr.exclude_kernel != ek ||
 491                           event->attr.exclude_hv != eh) {
 492                        return -EAGAIN;
 493                }
 494        }
 495
 496        if (eu || ek || eh)
 497                for (i = 0; i < n; ++i)
 498                        if (cflags[i] & PPMU_LIMITED_PMC_OK)
 499                                cflags[i] |= PPMU_LIMITED_PMC_REQD;
 500
 501        return 0;
 502}
 503
 504static u64 check_and_compute_delta(u64 prev, u64 val)
 505{
 506        u64 delta = (val - prev) & 0xfffffffful;
 507
 508        /*
  509         * POWER7 can roll back counter values; if the new value is smaller
  510         * than the previous value it will cause the delta and the counter to
  511         * have bogus values, unless we rolled a counter over.  If a counter is
  512         * rolled back, it will be smaller, but within 256, which is the maximum
  513         * number of events to roll back at once.  If we detect a rollback,
  514         * return 0.  This can lead to a small loss of precision in the
  515         * counters.
 516         */
 517        if (prev > val && (prev - val) < 256)
 518                delta = 0;
 519
 520        return delta;
 521}
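/*
 * Worked examples (for illustration):
 *   prev = 0xfffffff0, val = 0x00000010: the 32-bit counter wrapped, and
 *     (val - prev) & 0xffffffff = 0x20, i.e. 32 events, which is correct.
 *   prev = 0x00001000, val = 0x00000ff0: val is smaller by 16 (< 256), so
 *     this is treated as a POWER7 rollback and a delta of 0 is returned
 *     instead of the bogus 0xfffffff0.
 */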
 522
 523static void power_pmu_read(struct perf_event *event)
 524{
 525        s64 val, delta, prev;
 526
 527        if (event->hw.state & PERF_HES_STOPPED)
 528                return;
 529
 530        if (!event->hw.idx)
 531                return;
 532        /*
 533         * Performance monitor interrupts come even when interrupts
 534         * are soft-disabled, as long as interrupts are hard-enabled.
 535         * Therefore we treat them like NMIs.
 536         */
 537        do {
 538                prev = local64_read(&event->hw.prev_count);
 539                barrier();
 540                val = read_pmc(event->hw.idx);
 541                delta = check_and_compute_delta(prev, val);
 542                if (!delta)
 543                        return;
 544        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 545
 546        local64_add(delta, &event->count);
 547        local64_sub(delta, &event->hw.period_left);
 548}
 549
 550/*
 551 * On some machines, PMC5 and PMC6 can't be written, don't respect
 552 * the freeze conditions, and don't generate interrupts.  This tells
 553 * us if `event' is using such a PMC.
 554 */
 555static int is_limited_pmc(int pmcnum)
 556{
 557        return (ppmu->flags & PPMU_LIMITED_PMC5_6)
 558                && (pmcnum == 5 || pmcnum == 6);
 559}
 560
 561static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 562                                    unsigned long pmc5, unsigned long pmc6)
 563{
 564        struct perf_event *event;
 565        u64 val, prev, delta;
 566        int i;
 567
 568        for (i = 0; i < cpuhw->n_limited; ++i) {
 569                event = cpuhw->limited_counter[i];
 570                if (!event->hw.idx)
 571                        continue;
 572                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 573                prev = local64_read(&event->hw.prev_count);
 574                event->hw.idx = 0;
 575                delta = check_and_compute_delta(prev, val);
 576                if (delta)
 577                        local64_add(delta, &event->count);
 578        }
 579}
 580
 581static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 582                                  unsigned long pmc5, unsigned long pmc6)
 583{
 584        struct perf_event *event;
 585        u64 val, prev;
 586        int i;
 587
 588        for (i = 0; i < cpuhw->n_limited; ++i) {
 589                event = cpuhw->limited_counter[i];
 590                event->hw.idx = cpuhw->limited_hwidx[i];
 591                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 592                prev = local64_read(&event->hw.prev_count);
 593                if (check_and_compute_delta(prev, val))
 594                        local64_set(&event->hw.prev_count, val);
 595                perf_event_update_userpage(event);
 596        }
 597}
 598
 599/*
 600 * Since limited events don't respect the freeze conditions, we
 601 * have to read them immediately after freezing or unfreezing the
 602 * other events.  We try to keep the values from the limited
 603 * events as consistent as possible by keeping the delay (in
 604 * cycles and instructions) between freezing/unfreezing and reading
 605 * the limited events as small and consistent as possible.
 606 * Therefore, if any limited events are in use, we read them
 607 * both, and always in the same order, to minimize variability,
 608 * and do it inside the same asm that writes MMCR0.
 609 */
 610static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 611{
 612        unsigned long pmc5, pmc6;
 613
 614        if (!cpuhw->n_limited) {
 615                mtspr(SPRN_MMCR0, mmcr0);
 616                return;
 617        }
 618
 619        /*
 620         * Write MMCR0, then read PMC5 and PMC6 immediately.
 621         * To ensure we don't get a performance monitor interrupt
 622         * between writing MMCR0 and freezing/thawing the limited
 623         * events, we first write MMCR0 with the event overflow
 624         * interrupt enable bits turned off.
 625         */
 626        asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
 627                     : "=&r" (pmc5), "=&r" (pmc6)
 628                     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
 629                       "i" (SPRN_MMCR0),
 630                       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 631
 632        if (mmcr0 & MMCR0_FC)
 633                freeze_limited_counters(cpuhw, pmc5, pmc6);
 634        else
 635                thaw_limited_counters(cpuhw, pmc5, pmc6);
 636
 637        /*
 638         * Write the full MMCR0 including the event overflow interrupt
 639         * enable bits, if necessary.
 640         */
 641        if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
 642                mtspr(SPRN_MMCR0, mmcr0);
 643}
 644
 645/*
 646 * Disable all events to prevent PMU interrupts and to allow
 647 * events to be added or removed.
 648 */
 649static void power_pmu_disable(struct pmu *pmu)
 650{
 651        struct cpu_hw_events *cpuhw;
 652        unsigned long flags;
 653
 654        if (!ppmu)
 655                return;
 656        local_irq_save(flags);
 657        cpuhw = &__get_cpu_var(cpu_hw_events);
 658
 659        if (!cpuhw->disabled) {
 660                cpuhw->disabled = 1;
 661                cpuhw->n_added = 0;
 662
 663                /*
 664                 * Check if we ever enabled the PMU on this cpu.
 665                 */
 666                if (!cpuhw->pmcs_enabled) {
 667                        ppc_enable_pmcs();
 668                        cpuhw->pmcs_enabled = 1;
 669                }
 670
 671                /*
 672                 * Disable instruction sampling if it was enabled
 673                 */
 674                if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
 675                        mtspr(SPRN_MMCRA,
 676                              cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 677                        mb();
 678                }
 679
 680                /*
 681                 * Set the 'freeze counters' bit.
 682                 * The barrier is to make sure the mtspr has been
 683                 * executed and the PMU has frozen the events
 684                 * before we return.
 685                 */
 686                write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
 687                mb();
 688        }
 689        local_irq_restore(flags);
 690}
 691
 692/*
  693 * Re-enable all events.
 694 * If we were previously disabled and events were added, then
 695 * put the new config on the PMU.
 696 */
 697static void power_pmu_enable(struct pmu *pmu)
 698{
 699        struct perf_event *event;
 700        struct cpu_hw_events *cpuhw;
 701        unsigned long flags;
 702        long i;
 703        unsigned long val;
 704        s64 left;
 705        unsigned int hwc_index[MAX_HWEVENTS];
 706        int n_lim;
 707        int idx;
 708
 709        if (!ppmu)
 710                return;
 711        local_irq_save(flags);
 712        cpuhw = &__get_cpu_var(cpu_hw_events);
 713        if (!cpuhw->disabled) {
 714                local_irq_restore(flags);
 715                return;
 716        }
 717        cpuhw->disabled = 0;
 718
 719        /*
 720         * If we didn't change anything, or only removed events,
 721         * no need to recalculate MMCR* settings and reset the PMCs.
 722         * Just reenable the PMU with the current MMCR* settings
 723         * (possibly updated for removal of events).
 724         */
 725        if (!cpuhw->n_added) {
 726                mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 727                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 728                if (cpuhw->n_events == 0)
 729                        ppc_set_pmu_inuse(0);
 730                goto out_enable;
 731        }
 732
 733        /*
 734         * Compute MMCR* values for the new set of events
 735         */
 736        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 737                               cpuhw->mmcr)) {
 738                /* shouldn't ever get here */
 739                printk(KERN_ERR "oops compute_mmcr failed\n");
 740                goto out;
 741        }
 742
 743        /*
 744         * Add in MMCR0 freeze bits corresponding to the
 745         * attr.exclude_* bits for the first event.
 746         * We have already checked that all events have the
 747         * same values for these bits as the first event.
 748         */
 749        event = cpuhw->event[0];
 750        if (event->attr.exclude_user)
 751                cpuhw->mmcr[0] |= MMCR0_FCP;
 752        if (event->attr.exclude_kernel)
 753                cpuhw->mmcr[0] |= freeze_events_kernel;
 754        if (event->attr.exclude_hv)
 755                cpuhw->mmcr[0] |= MMCR0_FCHV;
 756
 757        /*
 758         * Write the new configuration to MMCR* with the freeze
 759         * bit set and set the hardware events to their initial values.
 760         * Then unfreeze the events.
 761         */
 762        ppc_set_pmu_inuse(1);
 763        mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 764        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 765        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 766                                | MMCR0_FC);
 767
 768        /*
 769         * Read off any pre-existing events that need to move
 770         * to another PMC.
 771         */
 772        for (i = 0; i < cpuhw->n_events; ++i) {
 773                event = cpuhw->event[i];
 774                if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
 775                        power_pmu_read(event);
 776                        write_pmc(event->hw.idx, 0);
 777                        event->hw.idx = 0;
 778                }
 779        }
 780
 781        /*
 782         * Initialize the PMCs for all the new and moved events.
 783         */
 784        cpuhw->n_limited = n_lim = 0;
 785        for (i = 0; i < cpuhw->n_events; ++i) {
 786                event = cpuhw->event[i];
 787                if (event->hw.idx)
 788                        continue;
 789                idx = hwc_index[i] + 1;
 790                if (is_limited_pmc(idx)) {
 791                        cpuhw->limited_counter[n_lim] = event;
 792                        cpuhw->limited_hwidx[n_lim] = idx;
 793                        ++n_lim;
 794                        continue;
 795                }
 796                val = 0;
 797                if (event->hw.sample_period) {
 798                        left = local64_read(&event->hw.period_left);
 799                        if (left < 0x80000000L)
 800                                val = 0x80000000L - left;
 801                }
 802                local64_set(&event->hw.prev_count, val);
 803                event->hw.idx = idx;
 804                if (event->hw.state & PERF_HES_STOPPED)
 805                        val = 0;
 806                write_pmc(idx, val);
 807                perf_event_update_userpage(event);
 808        }
 809        cpuhw->n_limited = n_lim;
 810        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 811
 812 out_enable:
 813        mb();
 814        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
 815
 816        /*
 817         * Enable instruction sampling if necessary
 818         */
 819        if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
 820                mb();
 821                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 822        }
 823
 824 out:
 825        local_irq_restore(flags);
 826}
 827
 828static int collect_events(struct perf_event *group, int max_count,
 829                          struct perf_event *ctrs[], u64 *events,
 830                          unsigned int *flags)
 831{
 832        int n = 0;
 833        struct perf_event *event;
 834
 835        if (!is_software_event(group)) {
 836                if (n >= max_count)
 837                        return -1;
 838                ctrs[n] = group;
 839                flags[n] = group->hw.event_base;
 840                events[n++] = group->hw.config;
 841        }
 842        list_for_each_entry(event, &group->sibling_list, group_entry) {
 843                if (!is_software_event(event) &&
 844                    event->state != PERF_EVENT_STATE_OFF) {
 845                        if (n >= max_count)
 846                                return -1;
 847                        ctrs[n] = event;
 848                        flags[n] = event->hw.event_base;
 849                        events[n++] = event->hw.config;
 850                }
 851        }
 852        return n;
 853}
 854
 855/*
  856 * Add an event to the PMU.
  857 * If all events are not already frozen, then we disable and
  858 * re-enable the PMU in order to get power_pmu_enable() to do the
  859 * actual work of reconfiguring the PMU.
 860 */
 861static int power_pmu_add(struct perf_event *event, int ef_flags)
 862{
 863        struct cpu_hw_events *cpuhw;
 864        unsigned long flags;
 865        int n0;
 866        int ret = -EAGAIN;
 867
 868        local_irq_save(flags);
 869        perf_pmu_disable(event->pmu);
 870
 871        /*
 872         * Add the event to the list (if there is room)
 873         * and check whether the total set is still feasible.
 874         */
 875        cpuhw = &__get_cpu_var(cpu_hw_events);
 876        n0 = cpuhw->n_events;
 877        if (n0 >= ppmu->n_counter)
 878                goto out;
 879        cpuhw->event[n0] = event;
 880        cpuhw->events[n0] = event->hw.config;
 881        cpuhw->flags[n0] = event->hw.event_base;
 882
 883        /*
 884         * This event may have been disabled/stopped in record_and_restart()
 885         * because we exceeded the ->event_limit. If re-starting the event,
 886         * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
 887         * notification is re-enabled.
 888         */
 889        if (!(ef_flags & PERF_EF_START))
 890                event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 891        else
 892                event->hw.state = 0;
 893
 894        /*
  895         * If a group event scheduling transaction was started,
  896         * skip the schedulability test here; it will be performed
  897         * at commit time (->commit_txn) as a whole.
 898         */
 899        if (cpuhw->group_flag & PERF_EVENT_TXN)
 900                goto nocheck;
 901
 902        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
 903                goto out;
 904        if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
 905                goto out;
 906        event->hw.config = cpuhw->events[n0];
 907
 908nocheck:
 909        ++cpuhw->n_events;
 910        ++cpuhw->n_added;
 911
 912        ret = 0;
 913 out:
 914        perf_pmu_enable(event->pmu);
 915        local_irq_restore(flags);
 916        return ret;
 917}
 918
 919/*
  920 * Remove an event from the PMU.
 921 */
 922static void power_pmu_del(struct perf_event *event, int ef_flags)
 923{
 924        struct cpu_hw_events *cpuhw;
 925        long i;
 926        unsigned long flags;
 927
 928        local_irq_save(flags);
 929        perf_pmu_disable(event->pmu);
 930
 931        power_pmu_read(event);
 932
 933        cpuhw = &__get_cpu_var(cpu_hw_events);
 934        for (i = 0; i < cpuhw->n_events; ++i) {
 935                if (event == cpuhw->event[i]) {
 936                        while (++i < cpuhw->n_events) {
 937                                cpuhw->event[i-1] = cpuhw->event[i];
 938                                cpuhw->events[i-1] = cpuhw->events[i];
 939                                cpuhw->flags[i-1] = cpuhw->flags[i];
 940                        }
 941                        --cpuhw->n_events;
 942                        ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
 943                        if (event->hw.idx) {
 944                                write_pmc(event->hw.idx, 0);
 945                                event->hw.idx = 0;
 946                        }
 947                        perf_event_update_userpage(event);
 948                        break;
 949                }
 950        }
 951        for (i = 0; i < cpuhw->n_limited; ++i)
 952                if (event == cpuhw->limited_counter[i])
 953                        break;
 954        if (i < cpuhw->n_limited) {
 955                while (++i < cpuhw->n_limited) {
 956                        cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
 957                        cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 958                }
 959                --cpuhw->n_limited;
 960        }
 961        if (cpuhw->n_events == 0) {
 962                /* disable exceptions if no events are running */
 963                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 964        }
 965
 966        perf_pmu_enable(event->pmu);
 967        local_irq_restore(flags);
 968}
 969
 970/*
  971 * The POWER PMU does not support disabling individual counters; instead,
  972 * program the counter to its maximum value and ignore its interrupts.
 973 */
 974
 975static void power_pmu_start(struct perf_event *event, int ef_flags)
 976{
 977        unsigned long flags;
 978        s64 left;
 979        unsigned long val;
 980
 981        if (!event->hw.idx || !event->hw.sample_period)
 982                return;
 983
 984        if (!(event->hw.state & PERF_HES_STOPPED))
 985                return;
 986
 987        if (ef_flags & PERF_EF_RELOAD)
 988                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 989
 990        local_irq_save(flags);
 991        perf_pmu_disable(event->pmu);
 992
 993        event->hw.state = 0;
 994        left = local64_read(&event->hw.period_left);
 995
 996        val = 0;
 997        if (left < 0x80000000L)
 998                val = 0x80000000L - left;
 999
1000        write_pmc(event->hw.idx, val);
1001
1002        perf_event_update_userpage(event);
1003        perf_pmu_enable(event->pmu);
1004        local_irq_restore(flags);
1005}
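/*
 * Illustration of the reload value used above: a PMC raises its exception
 * when bit 0x80000000 becomes set, so writing 0x80000000 - left makes the
 * counter overflow after exactly `left' more events.  For example, a
 * remaining period of 1000 is programmed as 0x7ffffc18.
 */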
1006
1007static void power_pmu_stop(struct perf_event *event, int ef_flags)
1008{
1009        unsigned long flags;
1010
1011        if (!event->hw.idx || !event->hw.sample_period)
1012                return;
1013
1014        if (event->hw.state & PERF_HES_STOPPED)
1015                return;
1016
1017        local_irq_save(flags);
1018        perf_pmu_disable(event->pmu);
1019
1020        power_pmu_read(event);
1021        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1022        write_pmc(event->hw.idx, 0);
1023
1024        perf_event_update_userpage(event);
1025        perf_pmu_enable(event->pmu);
1026        local_irq_restore(flags);
1027}
1028
1029/*
 1030 * Start a group event scheduling transaction.
 1031 * Set the flag to make pmu::enable() not perform the
 1032 * schedulability test; it will be performed at commit time.
1033 */
1034void power_pmu_start_txn(struct pmu *pmu)
1035{
1036        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1037
1038        perf_pmu_disable(pmu);
1039        cpuhw->group_flag |= PERF_EVENT_TXN;
1040        cpuhw->n_txn_start = cpuhw->n_events;
1041}
1042
1043/*
 1044 * Stop a group event scheduling transaction.
 1045 * Clear the flag, and pmu::enable() will perform the
 1046 * schedulability test.
1047 */
1048void power_pmu_cancel_txn(struct pmu *pmu)
1049{
1050        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1051
1052        cpuhw->group_flag &= ~PERF_EVENT_TXN;
1053        perf_pmu_enable(pmu);
1054}
1055
1056/*
 1057 * Commit a group event scheduling transaction.
 1058 * Perform the group schedulability test as a whole.
 1059 * Return 0 on success.
1060 */
1061int power_pmu_commit_txn(struct pmu *pmu)
1062{
1063        struct cpu_hw_events *cpuhw;
1064        long i, n;
1065
1066        if (!ppmu)
1067                return -EAGAIN;
1068        cpuhw = &__get_cpu_var(cpu_hw_events);
1069        n = cpuhw->n_events;
1070        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
1071                return -EAGAIN;
1072        i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
1073        if (i < 0)
1074                return -EAGAIN;
1075
1076        for (i = cpuhw->n_txn_start; i < n; ++i)
1077                cpuhw->event[i]->hw.config = cpuhw->events[i];
1078
1079        cpuhw->group_flag &= ~PERF_EVENT_TXN;
1080        perf_pmu_enable(pmu);
1081        return 0;
1082}
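/*
 * Hedged sketch of how the generic perf core is expected to drive this
 * transaction interface when scheduling an event group (pseudo-code, not
 * code from this file):
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START))	<- per-event check skipped
 *			goto cancel;
 *	if (pmu->commit_txn(pmu))			<- whole-group check happens here
 *		goto cancel;
 *	...
 * cancel:
 *	pmu->cancel_txn(pmu);
 */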
1083
1084/*
 1085 * Return 1 if we might be able to put the event on a limited PMC,
 1086 * or 0 if not.
 1087 * An event can only go on a limited PMC if it counts something
1088 * that a limited PMC can count, doesn't require interrupts, and
1089 * doesn't exclude any processor mode.
1090 */
1091static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
1092                                 unsigned int flags)
1093{
1094        int n;
1095        u64 alt[MAX_EVENT_ALTERNATIVES];
1096
1097        if (event->attr.exclude_user
1098            || event->attr.exclude_kernel
1099            || event->attr.exclude_hv
1100            || event->attr.sample_period)
1101                return 0;
1102
1103        if (ppmu->limited_pmc_event(ev))
1104                return 1;
1105
1106        /*
1107         * The requested event_id isn't on a limited PMC already;
1108         * see if any alternative code goes on a limited PMC.
1109         */
1110        if (!ppmu->get_alternatives)
1111                return 0;
1112
1113        flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
1114        n = ppmu->get_alternatives(ev, flags, alt);
1115
1116        return n > 0;
1117}
1118
1119/*
1120 * Find an alternative event_id that goes on a normal PMC, if possible,
1121 * and return the event_id code, or 0 if there is no such alternative.
1122 * (Note: event_id code 0 is "don't count" on all machines.)
1123 */
1124static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
1125{
1126        u64 alt[MAX_EVENT_ALTERNATIVES];
1127        int n;
1128
1129        flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
1130        n = ppmu->get_alternatives(ev, flags, alt);
1131        if (!n)
1132                return 0;
1133        return alt[0];
1134}
1135
1136/* Number of perf_events counting hardware events */
1137static atomic_t num_events;
1138/* Used to avoid races in calling reserve/release_pmc_hardware */
1139static DEFINE_MUTEX(pmc_reserve_mutex);
1140
1141/*
1142 * Release the PMU if this is the last perf_event.
1143 */
1144static void hw_perf_event_destroy(struct perf_event *event)
1145{
1146        if (!atomic_add_unless(&num_events, -1, 1)) {
1147                mutex_lock(&pmc_reserve_mutex);
1148                if (atomic_dec_return(&num_events) == 0)
1149                        release_pmc_hardware();
1150                mutex_unlock(&pmc_reserve_mutex);
1151        }
1152}
1153
1154/*
1155 * Translate a generic cache event_id config to a raw event_id code.
1156 */
1157static int hw_perf_cache_event(u64 config, u64 *eventp)
1158{
1159        unsigned long type, op, result;
1160        int ev;
1161
1162        if (!ppmu->cache_events)
1163                return -EINVAL;
1164
1165        /* unpack config */
1166        type = config & 0xff;
1167        op = (config >> 8) & 0xff;
1168        result = (config >> 16) & 0xff;
1169
1170        if (type >= PERF_COUNT_HW_CACHE_MAX ||
1171            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
1172            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1173                return -EINVAL;
1174
1175        ev = (*ppmu->cache_events)[type][op][result];
1176        if (ev == 0)
1177                return -EOPNOTSUPP;
1178        if (ev == -1)
1179                return -EINVAL;
1180        *eventp = ev;
1181        return 0;
1182}
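/*
 * Worked example: an L1-data read-miss cache event arrives as
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) = 0x10000,
 * so the unpacking above yields type 0, op 0, result 1 and the raw code is
 * taken from (*ppmu->cache_events)[0][0][1].
 */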
1183
1184static int power_pmu_event_init(struct perf_event *event)
1185{
1186        u64 ev;
1187        unsigned long flags;
1188        struct perf_event *ctrs[MAX_HWEVENTS];
1189        u64 events[MAX_HWEVENTS];
1190        unsigned int cflags[MAX_HWEVENTS];
1191        int n;
1192        int err;
1193        struct cpu_hw_events *cpuhw;
1194
1195        if (!ppmu)
1196                return -ENOENT;
1197
1198        /* does not support taken branch sampling */
1199        if (has_branch_stack(event))
1200                return -EOPNOTSUPP;
1201
1202        switch (event->attr.type) {
1203        case PERF_TYPE_HARDWARE:
1204                ev = event->attr.config;
1205                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
1206                        return -EOPNOTSUPP;
1207                ev = ppmu->generic_events[ev];
1208                break;
1209        case PERF_TYPE_HW_CACHE:
1210                err = hw_perf_cache_event(event->attr.config, &ev);
1211                if (err)
1212                        return err;
1213                break;
1214        case PERF_TYPE_RAW:
1215                ev = event->attr.config;
1216                break;
1217        default:
1218                return -ENOENT;
1219        }
1220
1221        event->hw.config_base = ev;
1222        event->hw.idx = 0;
1223
1224        /*
1225         * If we are not running on a hypervisor, force the
1226         * exclude_hv bit to 0 so that we don't care what
1227         * the user set it to.
1228         */
1229        if (!firmware_has_feature(FW_FEATURE_LPAR))
1230                event->attr.exclude_hv = 0;
1231
1232        /*
1233         * If this is a per-task event, then we can use
1234         * PM_RUN_* events interchangeably with their non RUN_*
1235         * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1236         * XXX we should check if the task is an idle task.
1237         */
1238        flags = 0;
1239        if (event->attach_state & PERF_ATTACH_TASK)
1240                flags |= PPMU_ONLY_COUNT_RUN;
1241
1242        /*
1243         * If this machine has limited events, check whether this
1244         * event_id could go on a limited event.
1245         */
1246        if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
1247                if (can_go_on_limited_pmc(event, ev, flags)) {
1248                        flags |= PPMU_LIMITED_PMC_OK;
1249                } else if (ppmu->limited_pmc_event(ev)) {
1250                        /*
1251                         * The requested event_id is on a limited PMC,
1252                         * but we can't use a limited PMC; see if any
1253                         * alternative goes on a normal PMC.
1254                         */
1255                        ev = normal_pmc_alternative(ev, flags);
1256                        if (!ev)
1257                                return -EINVAL;
1258                }
1259        }
1260
1261        /*
1262         * If this is in a group, check if it can go on with all the
1263         * other hardware events in the group.  We assume the event
1264         * hasn't been linked into its leader's sibling list at this point.
1265         */
1266        n = 0;
1267        if (event->group_leader != event) {
1268                n = collect_events(event->group_leader, ppmu->n_counter - 1,
1269                                   ctrs, events, cflags);
1270                if (n < 0)
1271                        return -EINVAL;
1272        }
1273        events[n] = ev;
1274        ctrs[n] = event;
1275        cflags[n] = flags;
1276        if (check_excludes(ctrs, cflags, n, 1))
1277                return -EINVAL;
1278
1279        cpuhw = &get_cpu_var(cpu_hw_events);
1280        err = power_check_constraints(cpuhw, events, cflags, n + 1);
1281        put_cpu_var(cpu_hw_events);
1282        if (err)
1283                return -EINVAL;
1284
1285        event->hw.config = events[n];
1286        event->hw.event_base = cflags[n];
1287        event->hw.last_period = event->hw.sample_period;
1288        local64_set(&event->hw.period_left, event->hw.last_period);
1289
1290        /*
1291         * See if we need to reserve the PMU.
1292         * If no events are currently in use, then we have to take a
1293         * mutex to ensure that we don't race with another task doing
1294         * reserve_pmc_hardware or release_pmc_hardware.
1295         */
1296        err = 0;
1297        if (!atomic_inc_not_zero(&num_events)) {
1298                mutex_lock(&pmc_reserve_mutex);
1299                if (atomic_read(&num_events) == 0 &&
1300                    reserve_pmc_hardware(perf_event_interrupt))
1301                        err = -EBUSY;
1302                else
1303                        atomic_inc(&num_events);
1304                mutex_unlock(&pmc_reserve_mutex);
1305        }
1306        event->destroy = hw_perf_event_destroy;
1307
1308        return err;
1309}
1310
1311static int power_pmu_event_idx(struct perf_event *event)
1312{
1313        return event->hw.idx;
1314}
1315
1316ssize_t power_events_sysfs_show(struct device *dev,
1317                                struct device_attribute *attr, char *page)
1318{
1319        struct perf_pmu_events_attr *pmu_attr;
1320
1321        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
1322
1323        return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
1324}
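/*
 * For illustration: this routine backs the per-event sysfs files (typically
 * /sys/bus/event_source/devices/cpu/events/<name>), each of which prints a
 * single "event=0x.." line that the perf tool parses back into attr.config.
 */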
1325
1326struct pmu power_pmu = {
1327        .pmu_enable     = power_pmu_enable,
1328        .pmu_disable    = power_pmu_disable,
1329        .event_init     = power_pmu_event_init,
1330        .add            = power_pmu_add,
1331        .del            = power_pmu_del,
1332        .start          = power_pmu_start,
1333        .stop           = power_pmu_stop,
1334        .read           = power_pmu_read,
1335        .start_txn      = power_pmu_start_txn,
1336        .cancel_txn     = power_pmu_cancel_txn,
1337        .commit_txn     = power_pmu_commit_txn,
1338        .event_idx      = power_pmu_event_idx,
1339};
1340
1341
1342/*
1343 * A counter has overflowed; update its count and record
1344 * things if requested.  Note that interrupts are hard-disabled
1345 * here so there is no possibility of being interrupted.
1346 */
1347static void record_and_restart(struct perf_event *event, unsigned long val,
1348                               struct pt_regs *regs)
1349{
1350        u64 period = event->hw.sample_period;
1351        s64 prev, delta, left;
1352        int record = 0;
1353
1354        if (event->hw.state & PERF_HES_STOPPED) {
1355                write_pmc(event->hw.idx, 0);
1356                return;
1357        }
1358
1359        /* we don't have to worry about interrupts here */
1360        prev = local64_read(&event->hw.prev_count);
1361        delta = check_and_compute_delta(prev, val);
1362        local64_add(delta, &event->count);
1363
1364        /*
1365         * See if the total period for this event has expired,
1366         * and update for the next period.
1367         */
1368        val = 0;
1369        left = local64_read(&event->hw.period_left) - delta;
1370        if (delta == 0)
1371                left++;
1372        if (period) {
1373                if (left <= 0) {
1374                        left += period;
1375                        if (left <= 0)
1376                                left = period;
1377                        record = siar_valid(regs);
1378                        event->hw.last_period = event->hw.sample_period;
1379                }
1380                if (left < 0x80000000LL)
1381                        val = 0x80000000LL - left;
1382        }
1383
1384        write_pmc(event->hw.idx, val);
1385        local64_set(&event->hw.prev_count, val);
1386        local64_set(&event->hw.period_left, left);
1387        perf_event_update_userpage(event);
1388
1389        /*
1390         * Finally record data if requested.
1391         */
1392        if (record) {
1393                struct perf_sample_data data;
1394
1395                perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
1396
1397                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1398                        perf_get_data_addr(regs, &data.addr);
1399
1400                if (perf_event_overflow(event, &data, regs))
1401                        power_pmu_stop(event, 0);
1402        }
1403}
1404
1405/*
1406 * Called from generic code to get the misc flags (i.e. processor mode)
1407 * for an event_id.
1408 */
1409unsigned long perf_misc_flags(struct pt_regs *regs)
1410{
1411        u32 flags = perf_get_misc_flags(regs);
1412
1413        if (flags)
1414                return flags;
1415        return user_mode(regs) ? PERF_RECORD_MISC_USER :
1416                PERF_RECORD_MISC_KERNEL;
1417}
1418
1419/*
1420 * Called from generic code to get the instruction pointer
1421 * for an event_id.
1422 */
1423unsigned long perf_instruction_pointer(struct pt_regs *regs)
1424{
1425        unsigned long use_siar = regs->result;
1426
1427        if (use_siar && siar_valid(regs))
1428                return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1429        else if (use_siar)
 1430                return 0;               /* no valid instruction pointer */
1431        else
1432                return regs->nip;
1433}
1434
1435static bool pmc_overflow_power7(unsigned long val)
1436{
1437        /*
1438         * Events on POWER7 can roll back if a speculative event doesn't
1439         * eventually complete. Unfortunately in some rare cases they will
1440         * raise a performance monitor exception. We need to catch this to
1441         * ensure we reset the PMC. In all cases the PMC will be 256 or less
1442         * cycles from overflow.
1443         *
1444         * We only do this if the first pass fails to find any overflowing
1445         * PMCs because a user might set a period of less than 256 and we
1446         * don't want to mistakenly reset them.
1447         */
1448        if ((0x80000000 - val) <= 256)
1449                return true;
1450
1451        return false;
1452}
1453
1454static bool pmc_overflow(unsigned long val)
1455{
1456        if ((int)val < 0)
1457                return true;
1458
1459        return false;
1460}
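/*
 * (int)val < 0 is a sign-bit test: any PMC value of 0x80000000 or above has
 * overflowed.  For example, a counter reloaded to 0x7ffffc18 that has since
 * counted 1000 events reads 0x80000000 and is reported here.
 */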
1461
1462/*
1463 * Performance monitor interrupt stuff
1464 */
1465static void perf_event_interrupt(struct pt_regs *regs)
1466{
1467        int i, j;
1468        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1469        struct perf_event *event;
1470        unsigned long val[8];
1471        int found, active;
1472        int nmi;
1473
1474        if (cpuhw->n_limited)
1475                freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
1476                                        mfspr(SPRN_PMC6));
1477
1478        perf_read_regs(regs);
1479
1480        nmi = perf_intr_is_nmi(regs);
1481        if (nmi)
1482                nmi_enter();
1483        else
1484                irq_enter();
1485
1486        /* Read all the PMCs since we'll need them a bunch of times */
1487        for (i = 0; i < ppmu->n_counter; ++i)
1488                val[i] = read_pmc(i + 1);
1489
1490        /* Try to find what caused the IRQ */
1491        found = 0;
1492        for (i = 0; i < ppmu->n_counter; ++i) {
1493                if (!pmc_overflow(val[i]))
1494                        continue;
1495                if (is_limited_pmc(i + 1))
1496                        continue; /* these won't generate IRQs */
1497                /*
1498                 * We've found one that's overflowed.  For active
1499                 * counters we need to log this.  For inactive
 1500                 * counters, we need to reset them anyway.
1501                 */
1502                found = 1;
1503                active = 0;
1504                for (j = 0; j < cpuhw->n_events; ++j) {
1505                        event = cpuhw->event[j];
1506                        if (event->hw.idx == (i + 1)) {
1507                                active = 1;
1508                                record_and_restart(event, val[i], regs);
1509                                break;
1510                        }
1511                }
1512                if (!active)
1513                        /* reset non active counters that have overflowed */
1514                        write_pmc(i + 1, 0);
1515        }
1516        if (!found && pvr_version_is(PVR_POWER7)) {
1517                /* check active counters for special buggy p7 overflow */
1518                for (i = 0; i < cpuhw->n_events; ++i) {
1519                        event = cpuhw->event[i];
1520                        if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1521                                continue;
1522                        if (pmc_overflow_power7(val[event->hw.idx - 1])) {
 1523                                /* event has overflowed in a buggy way */
1524                                found = 1;
1525                                record_and_restart(event,
1526                                                   val[event->hw.idx - 1],
1527                                                   regs);
1528                        }
1529                }
1530        }
1531        if ((!found) && printk_ratelimit())
1532                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
1533
1534        /*
1535         * Reset MMCR0 to its normal value.  This will set PMXE and
1536         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
1537         * and thus allow interrupts to occur again.
1538         * XXX might want to use MSR.PM to keep the events frozen until
1539         * we get back out of this interrupt.
1540         */
1541        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
1542
1543        if (nmi)
1544                nmi_exit();
1545        else
1546                irq_exit();
1547}
1548
1549static void power_pmu_setup(int cpu)
1550{
1551        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1552
1553        if (!ppmu)
1554                return;
1555        memset(cpuhw, 0, sizeof(*cpuhw));
1556        cpuhw->mmcr[0] = MMCR0_FC;
1557}
1558
1559static int __cpuinit
1560power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1561{
1562        unsigned int cpu = (long)hcpu;
1563
1564        switch (action & ~CPU_TASKS_FROZEN) {
1565        case CPU_UP_PREPARE:
1566                power_pmu_setup(cpu);
1567                break;
1568
1569        default:
1570                break;
1571        }
1572
1573        return NOTIFY_OK;
1574}
1575
1576int __cpuinit register_power_pmu(struct power_pmu *pmu)
1577{
1578        if (ppmu)
1579                return -EBUSY;          /* something's already registered */
1580
1581        ppmu = pmu;
1582        pr_info("%s performance monitor hardware support registered\n",
1583                pmu->name);
1584
1585        power_pmu.attr_groups = ppmu->attr_groups;
1586
1587#ifdef MSR_HV
1588        /*
1589         * Use FCHV to ignore kernel events if MSR.HV is set.
1590         */
1591        if (mfmsr() & MSR_HV)
1592                freeze_events_kernel = MMCR0_FCHV;
 1593#endif /* MSR_HV */
1594
1595        perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
1596        perf_cpu_notifier(power_pmu_notifier);
1597
1598        return 0;
1599}
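/*
 * Hedged usage sketch of how a CPU-specific back end registers itself; the
 * field values are illustrative, and a real driver (e.g. power7-pmu.c) fills
 * in many more callbacks after checking the PVR:
 *
 *	static struct power_pmu power7_pmu = {
 *		.name		= "POWER7",
 *		.n_counter	= 6,
 *		...
 *	};
 *
 *	static int __init init_power7_pmu(void)
 *	{
 *		return register_power_pmu(&power7_pmu);
 *	}
 */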
1600