linux/arch/powerpc/kernel/perf_event.c
   1/*
   2 * Performance event support - powerpc architecture code
   3 *
   4 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/perf_event.h>
  14#include <linux/percpu.h>
  15#include <linux/hardirq.h>
  16#include <asm/reg.h>
  17#include <asm/pmc.h>
  18#include <asm/machdep.h>
  19#include <asm/firmware.h>
  20#include <asm/ptrace.h>
  21
  22struct cpu_hw_events {
  23        int n_events;
  24        int n_percpu;
  25        int disabled;
  26        int n_added;
  27        int n_limited;
  28        u8  pmcs_enabled;
  29        struct perf_event *event[MAX_HWEVENTS];
  30        u64 events[MAX_HWEVENTS];
  31        unsigned int flags[MAX_HWEVENTS];
  32        unsigned long mmcr[3];
  33        struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
  34        u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
  35        u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  36        unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  37        unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  38
  39        unsigned int group_flag;
  40        int n_txn_start;
  41};
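    /*
     * Note on cpuhw->mmcr[] (as used by power_pmu_enable() and
     * power_pmu_disable() below): index 0 holds the value destined for
     * MMCR0, index 1 for MMCR1, and index 2 for MMCRA.
     */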
  42DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
  43
  44struct power_pmu *ppmu;
  45
  46/*
  47 * Normally, to ignore kernel events we set the FCS (freeze counters
  48 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  49 * hypervisor bit set in the MSR, or if we are running on a processor
  50 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
  51 * then we need to use the FCHV bit to ignore kernel events.
  52 */
  53static unsigned int freeze_events_kernel = MMCR0_FCS;
  54
  55/*
  56 * 32-bit doesn't have MMCRA but does have an MMCR2,
  57 * and a few other names are different.
  58 */
  59#ifdef CONFIG_PPC32
  60
  61#define MMCR0_FCHV              0
  62#define MMCR0_PMCjCE            MMCR0_PMCnCE
  63
  64#define SPRN_MMCRA              SPRN_MMCR2
  65#define MMCRA_SAMPLE_ENABLE     0
  66
  67static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  68{
  69        return 0;
  70}
  71static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
  72static inline u32 perf_get_misc_flags(struct pt_regs *regs)
  73{
  74        return 0;
  75}
  76static inline void perf_read_regs(struct pt_regs *regs) { }
  77static inline int perf_intr_is_nmi(struct pt_regs *regs)
  78{
  79        return 0;
  80}
  81
  82#endif /* CONFIG_PPC32 */
  83
  84/*
  85 * Things that are specific to 64-bit implementations.
  86 */
  87#ifdef CONFIG_PPC64
  88
  89static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  90{
  91        unsigned long mmcra = regs->dsisr;
  92
  93        if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
  94                unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
  95                if (slot > 1)
  96                        return 4 * (slot - 1);
  97        }
  98        return 0;
  99}
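    /*
     * For example: with instruction sampling enabled and MMCRA[SLOT] = 3,
     * the adjustment above is 4 * (3 - 1) = 8, so the reported sample
     * address becomes SIAR + 8 (PowerPC instructions are 4 bytes each);
     * the SLOT field says which instruction of the sampled group SIAR
     * refers to.
     */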
 100
 101/*
 102 * The user wants a data address recorded.
 103 * If we're not doing instruction sampling, give them the SDAR
 104 * (sampled data address).  If we are doing instruction sampling, then
 105 * only give them the SDAR if it corresponds to the instruction
 106 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
 107 * bit in MMCRA.
 108 */
 109static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 110{
 111        unsigned long mmcra = regs->dsisr;
 112        unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
 113                POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
 114
 115        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 116                *addrp = mfspr(SPRN_SDAR);
 117}
 118
 119static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 120{
 121        unsigned long mmcra = regs->dsisr;
 122        unsigned long sihv = MMCRA_SIHV;
 123        unsigned long sipr = MMCRA_SIPR;
 124
 125        if (TRAP(regs) != 0xf00)
 126                return 0;       /* not a PMU interrupt */
 127
 128        if (ppmu->flags & PPMU_ALT_SIPR) {
 129                sihv = POWER6_MMCRA_SIHV;
 130                sipr = POWER6_MMCRA_SIPR;
 131        }
 132
 133        /* PR has priority over HV, so order below is important */
 134        if (mmcra & sipr)
 135                return PERF_RECORD_MISC_USER;
 136        if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
 137                return PERF_RECORD_MISC_HYPERVISOR;
 138        return PERF_RECORD_MISC_KERNEL;
 139}
 140
 141/*
 142 * Overload regs->dsisr to store MMCRA so we only need to read it once
 143 * on each interrupt.
 144 */
 145static inline void perf_read_regs(struct pt_regs *regs)
 146{
 147        regs->dsisr = mfspr(SPRN_MMCRA);
 148}
 149
 150/*
 151 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 152 * it as an NMI.
 153 */
 154static inline int perf_intr_is_nmi(struct pt_regs *regs)
 155{
 156        return !regs->softe;
 157}
 158
 159#endif /* CONFIG_PPC64 */
 160
 161static void perf_event_interrupt(struct pt_regs *regs);
 162
 163void perf_event_print_debug(void)
 164{
 165}
 166
 167/*
 168 * Read one performance monitor counter (PMC).
 169 */
 170static unsigned long read_pmc(int idx)
 171{
 172        unsigned long val;
 173
 174        switch (idx) {
 175        case 1:
 176                val = mfspr(SPRN_PMC1);
 177                break;
 178        case 2:
 179                val = mfspr(SPRN_PMC2);
 180                break;
 181        case 3:
 182                val = mfspr(SPRN_PMC3);
 183                break;
 184        case 4:
 185                val = mfspr(SPRN_PMC4);
 186                break;
 187        case 5:
 188                val = mfspr(SPRN_PMC5);
 189                break;
 190        case 6:
 191                val = mfspr(SPRN_PMC6);
 192                break;
 193#ifdef CONFIG_PPC64
 194        case 7:
 195                val = mfspr(SPRN_PMC7);
 196                break;
 197        case 8:
 198                val = mfspr(SPRN_PMC8);
 199                break;
 200#endif /* CONFIG_PPC64 */
 201        default:
 202                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 203                val = 0;
 204        }
 205        return val;
 206}
 207
 208/*
 209 * Write one PMC.
 210 */
 211static void write_pmc(int idx, unsigned long val)
 212{
 213        switch (idx) {
 214        case 1:
 215                mtspr(SPRN_PMC1, val);
 216                break;
 217        case 2:
 218                mtspr(SPRN_PMC2, val);
 219                break;
 220        case 3:
 221                mtspr(SPRN_PMC3, val);
 222                break;
 223        case 4:
 224                mtspr(SPRN_PMC4, val);
 225                break;
 226        case 5:
 227                mtspr(SPRN_PMC5, val);
 228                break;
 229        case 6:
 230                mtspr(SPRN_PMC6, val);
 231                break;
 232#ifdef CONFIG_PPC64
 233        case 7:
 234                mtspr(SPRN_PMC7, val);
 235                break;
 236        case 8:
 237                mtspr(SPRN_PMC8, val);
 238                break;
 239#endif /* CONFIG_PPC64 */
 240        default:
 241                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 242        }
 243}
 244
 245/*
 246 * Check if a set of events can all go on the PMU at once.
 247 * If they can't, this will look at alternative codes for the events
 248 * and see if any combination of alternative codes is feasible.
 249 * The feasible set is returned in event_id[].
 250 */
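    /*
     * In outline (the exact bit-field layout is defined by each
     * CPU-specific ppmu, e.g. by its get_constraint() callback, so the
     * field widths are only illustrative): every shared resource (a
     * particular PMC, a unit or bus select field, etc.) gets a bit-field
     * in the constraint word.  "value" accumulates what has been claimed
     * so far, ppmu->add_fields adds 1 in the low bit of each counting
     * field, and ppmu->test_adder is chosen so that adding it disturbs
     * bits under "mask" exactly when some field is over-committed or
     * conflicts with a value chosen earlier; that is what the two tests
     * below check.
     */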
 251static int power_check_constraints(struct cpu_hw_events *cpuhw,
 252                                   u64 event_id[], unsigned int cflags[],
 253                                   int n_ev)
 254{
 255        unsigned long mask, value, nv;
 256        unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
 257        int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 258        int i, j;
 259        unsigned long addf = ppmu->add_fields;
 260        unsigned long tadd = ppmu->test_adder;
 261
 262        if (n_ev > ppmu->n_counter)
 263                return -1;
 264
 265        /* First see if the events will go on as-is */
 266        for (i = 0; i < n_ev; ++i) {
 267                if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
 268                    && !ppmu->limited_pmc_event(event_id[i])) {
 269                        ppmu->get_alternatives(event_id[i], cflags[i],
 270                                               cpuhw->alternatives[i]);
 271                        event_id[i] = cpuhw->alternatives[i][0];
 272                }
 273                if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 274                                         &cpuhw->avalues[i][0]))
 275                        return -1;
 276        }
 277        value = mask = 0;
 278        for (i = 0; i < n_ev; ++i) {
 279                nv = (value | cpuhw->avalues[i][0]) +
 280                        (value & cpuhw->avalues[i][0] & addf);
 281                if ((((nv + tadd) ^ value) & mask) != 0 ||
 282                    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
 283                     cpuhw->amasks[i][0]) != 0)
 284                        break;
 285                value = nv;
 286                mask |= cpuhw->amasks[i][0];
 287        }
 288        if (i == n_ev)
 289                return 0;       /* all OK */
 290
 291        /* doesn't work, gather alternatives... */
 292        if (!ppmu->get_alternatives)
 293                return -1;
 294        for (i = 0; i < n_ev; ++i) {
 295                choice[i] = 0;
 296                n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 297                                                  cpuhw->alternatives[i]);
 298                for (j = 1; j < n_alt[i]; ++j)
 299                        ppmu->get_constraint(cpuhw->alternatives[i][j],
 300                                             &cpuhw->amasks[i][j],
 301                                             &cpuhw->avalues[i][j]);
 302        }
 303
 304        /* enumerate all possibilities and see if any will work */
 305        i = 0;
 306        j = -1;
 307        value = mask = nv = 0;
 308        while (i < n_ev) {
 309                if (j >= 0) {
 310                        /* we're backtracking, restore context */
 311                        value = svalues[i];
 312                        mask = smasks[i];
 313                        j = choice[i];
 314                }
 315                /*
 316                 * See if any alternative k for event_id i,
 317                 * where k > j, will satisfy the constraints.
 318                 */
 319                while (++j < n_alt[i]) {
 320                        nv = (value | cpuhw->avalues[i][j]) +
 321                                (value & cpuhw->avalues[i][j] & addf);
 322                        if ((((nv + tadd) ^ value) & mask) == 0 &&
 323                            (((nv + tadd) ^ cpuhw->avalues[i][j])
 324                             & cpuhw->amasks[i][j]) == 0)
 325                                break;
 326                }
 327                if (j >= n_alt[i]) {
 328                        /*
 329                         * No feasible alternative, backtrack
 330                         * to event_id i-1 and continue enumerating its
 331                         * alternatives from where we got up to.
 332                         */
 333                        if (--i < 0)
 334                                return -1;
 335                } else {
 336                        /*
 337                         * Found a feasible alternative for event_id i,
 338                         * remember where we got up to with this event_id,
 339                         * go on to the next event_id, and start with
 340                         * the first alternative for it.
 341                         */
 342                        choice[i] = j;
 343                        svalues[i] = value;
 344                        smasks[i] = mask;
 345                        value = nv;
 346                        mask |= cpuhw->amasks[i][j];
 347                        ++i;
 348                        j = -1;
 349                }
 350        }
 351
 352        /* OK, we have a feasible combination, tell the caller the solution */
 353        for (i = 0; i < n_ev; ++i)
 354                event_id[i] = cpuhw->alternatives[i][choice[i]];
 355        return 0;
 356}
 357
 358/*
 359 * Check if newly-added events have consistent settings for
 360 * exclude_{user,kernel,hv} with each other and any previously
 361 * added events.
 362 */
 363static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 364                          int n_prev, int n_new)
 365{
 366        int eu = 0, ek = 0, eh = 0;
 367        int i, n, first;
 368        struct perf_event *event;
 369
 370        n = n_prev + n_new;
 371        if (n <= 1)
 372                return 0;
 373
 374        first = 1;
 375        for (i = 0; i < n; ++i) {
 376                if (cflags[i] & PPMU_LIMITED_PMC_OK) {
 377                        cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 378                        continue;
 379                }
 380                event = ctrs[i];
 381                if (first) {
 382                        eu = event->attr.exclude_user;
 383                        ek = event->attr.exclude_kernel;
 384                        eh = event->attr.exclude_hv;
 385                        first = 0;
 386                } else if (event->attr.exclude_user != eu ||
 387                           event->attr.exclude_kernel != ek ||
 388                           event->attr.exclude_hv != eh) {
 389                        return -EAGAIN;
 390                }
 391        }
 392
 393        if (eu || ek || eh)
 394                for (i = 0; i < n; ++i)
 395                        if (cflags[i] & PPMU_LIMITED_PMC_OK)
 396                                cflags[i] |= PPMU_LIMITED_PMC_REQD;
 397
 398        return 0;
 399}
 400
 401static u64 check_and_compute_delta(u64 prev, u64 val)
 402{
 403        u64 delta = (val - prev) & 0xfffffffful;
 404
 405        /*
 406         * POWER7 can roll back counter values; if the new value is smaller
 407         * than the previous value, the delta and the counter will end up
 408         * with bogus values unless the counter actually rolled over.  If a
 409         * counter is rolled back, it will be smaller, but within 256, which
 410         * is the maximum number of events to roll back at once.  If we
 411         * detect a rollback, return 0.  This can lead to a small loss of
 412         * precision in the counters.
 413         */
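            /*
             * For example: prev = 0x1000, val = 0x0ff8 looks like a rollback
             * (prev > val and prev - val = 8 < 256), so 0 is returned rather
             * than the bogus wrapped delta of 0xfffffff8.  A genuine 32-bit
             * wrap such as prev = 0xfffffff0, val = 0x10 still gives
             * delta = 0x20.
             */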
 414        if (prev > val && (prev - val) < 256)
 415                delta = 0;
 416
 417        return delta;
 418}
 419
 420static void power_pmu_read(struct perf_event *event)
 421{
 422        s64 val, delta, prev;
 423
 424        if (event->hw.state & PERF_HES_STOPPED)
 425                return;
 426
 427        if (!event->hw.idx)
 428                return;
 429        /*
 430         * Performance monitor interrupts come even when interrupts
 431         * are soft-disabled, as long as interrupts are hard-enabled.
 432         * Therefore we treat them like NMIs.
 433         */
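            /*
             * Hence the cmpxchg loop below: if a PMU interrupt updates
             * prev_count between our read of prev_count and our read of
             * the PMC, the cmpxchg fails and we simply retry.
             */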
 434        do {
 435                prev = local64_read(&event->hw.prev_count);
 436                barrier();
 437                val = read_pmc(event->hw.idx);
 438                delta = check_and_compute_delta(prev, val);
 439                if (!delta)
 440                        return;
 441        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 442
 443        local64_add(delta, &event->count);
 444        local64_sub(delta, &event->hw.period_left);
 445}
 446
 447/*
 448 * On some machines, PMC5 and PMC6 can't be written, don't respect
 449 * the freeze conditions, and don't generate interrupts.  This tells
 450 * us if `event' is using such a PMC.
 451 */
 452static int is_limited_pmc(int pmcnum)
 453{
 454        return (ppmu->flags & PPMU_LIMITED_PMC5_6)
 455                && (pmcnum == 5 || pmcnum == 6);
 456}
 457
 458static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 459                                    unsigned long pmc5, unsigned long pmc6)
 460{
 461        struct perf_event *event;
 462        u64 val, prev, delta;
 463        int i;
 464
 465        for (i = 0; i < cpuhw->n_limited; ++i) {
 466                event = cpuhw->limited_counter[i];
 467                if (!event->hw.idx)
 468                        continue;
 469                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 470                prev = local64_read(&event->hw.prev_count);
 471                event->hw.idx = 0;
 472                delta = check_and_compute_delta(prev, val);
 473                if (delta)
 474                        local64_add(delta, &event->count);
 475        }
 476}
 477
 478static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 479                                  unsigned long pmc5, unsigned long pmc6)
 480{
 481        struct perf_event *event;
 482        u64 val, prev;
 483        int i;
 484
 485        for (i = 0; i < cpuhw->n_limited; ++i) {
 486                event = cpuhw->limited_counter[i];
 487                event->hw.idx = cpuhw->limited_hwidx[i];
 488                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 489                prev = local64_read(&event->hw.prev_count);
 490                if (check_and_compute_delta(prev, val))
 491                        local64_set(&event->hw.prev_count, val);
 492                perf_event_update_userpage(event);
 493        }
 494}
 495
 496/*
 497 * Since limited events don't respect the freeze conditions, we
 498 * have to read them immediately after freezing or unfreezing the
 499 * other events.  We try to keep the values from the limited
 500 * events as consistent as possible by keeping the delay (in
 501 * cycles and instructions) between freezing/unfreezing and reading
 502 * the limited events as small and consistent as possible.
 503 * Therefore, if any limited events are in use, we read both PMC5
 504 * and PMC6, always in the same order, to minimize variability,
 505 * and do it inside the same asm that writes MMCR0.
 506 */
 507static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 508{
 509        unsigned long pmc5, pmc6;
 510
 511        if (!cpuhw->n_limited) {
 512                mtspr(SPRN_MMCR0, mmcr0);
 513                return;
 514        }
 515
 516        /*
 517         * Write MMCR0, then read PMC5 and PMC6 immediately.
 518         * To ensure we don't get a performance monitor interrupt
 519         * between writing MMCR0 and freezing/thawing the limited
 520         * events, we first write MMCR0 with the event overflow
 521         * interrupt enable bits turned off.
 522         */
 523        asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
 524                     : "=&r" (pmc5), "=&r" (pmc6)
 525                     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
 526                       "i" (SPRN_MMCR0),
 527                       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 528
 529        if (mmcr0 & MMCR0_FC)
 530                freeze_limited_counters(cpuhw, pmc5, pmc6);
 531        else
 532                thaw_limited_counters(cpuhw, pmc5, pmc6);
 533
 534        /*
 535         * Write the full MMCR0 including the event overflow interrupt
 536         * enable bits, if necessary.
 537         */
 538        if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
 539                mtspr(SPRN_MMCR0, mmcr0);
 540}
 541
 542/*
 543 * Disable all events to prevent PMU interrupts and to allow
 544 * events to be added or removed.
 545 */
 546static void power_pmu_disable(struct pmu *pmu)
 547{
 548        struct cpu_hw_events *cpuhw;
 549        unsigned long flags;
 550
 551        if (!ppmu)
 552                return;
 553        local_irq_save(flags);
 554        cpuhw = &__get_cpu_var(cpu_hw_events);
 555
 556        if (!cpuhw->disabled) {
 557                cpuhw->disabled = 1;
 558                cpuhw->n_added = 0;
 559
 560                /*
 561                 * Check if we ever enabled the PMU on this cpu.
 562                 */
 563                if (!cpuhw->pmcs_enabled) {
 564                        ppc_enable_pmcs();
 565                        cpuhw->pmcs_enabled = 1;
 566                }
 567
 568                /*
 569                 * Disable instruction sampling if it was enabled
 570                 */
 571                if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
 572                        mtspr(SPRN_MMCRA,
 573                              cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 574                        mb();
 575                }
 576
 577                /*
 578                 * Set the 'freeze counters' bit.
 579                 * The barrier is to make sure the mtspr has been
 580                 * executed and the PMU has frozen the events
 581                 * before we return.
 582                 */
 583                write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
 584                mb();
 585        }
 586        local_irq_restore(flags);
 587}
 588
 589/*
 590 * Re-enable all events.
 591 * If we were previously disabled and events were added, then
 592 * put the new config on the PMU.
 593 */
 594static void power_pmu_enable(struct pmu *pmu)
 595{
 596        struct perf_event *event;
 597        struct cpu_hw_events *cpuhw;
 598        unsigned long flags;
 599        long i;
 600        unsigned long val;
 601        s64 left;
 602        unsigned int hwc_index[MAX_HWEVENTS];
 603        int n_lim;
 604        int idx;
 605
 606        if (!ppmu)
 607                return;
 608        local_irq_save(flags);
 609        cpuhw = &__get_cpu_var(cpu_hw_events);
 610        if (!cpuhw->disabled) {
 611                local_irq_restore(flags);
 612                return;
 613        }
 614        cpuhw->disabled = 0;
 615
 616        /*
 617         * If we didn't change anything, or only removed events,
 618         * no need to recalculate MMCR* settings and reset the PMCs.
 619         * Just reenable the PMU with the current MMCR* settings
 620         * (possibly updated for removal of events).
 621         */
 622        if (!cpuhw->n_added) {
 623                mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 624                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 625                if (cpuhw->n_events == 0)
 626                        ppc_set_pmu_inuse(0);
 627                goto out_enable;
 628        }
 629
 630        /*
 631         * Compute MMCR* values for the new set of events
 632         */
 633        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 634                               cpuhw->mmcr)) {
 635                /* shouldn't ever get here */
 636                printk(KERN_ERR "oops compute_mmcr failed\n");
 637                goto out;
 638        }
 639
 640        /*
 641         * Add in MMCR0 freeze bits corresponding to the
 642         * attr.exclude_* bits for the first event.
 643         * We have already checked that all events have the
 644         * same values for these bits as the first event.
 645         */
 646        event = cpuhw->event[0];
 647        if (event->attr.exclude_user)
 648                cpuhw->mmcr[0] |= MMCR0_FCP;
 649        if (event->attr.exclude_kernel)
 650                cpuhw->mmcr[0] |= freeze_events_kernel;
 651        if (event->attr.exclude_hv)
 652                cpuhw->mmcr[0] |= MMCR0_FCHV;
 653
 654        /*
 655         * Write the new configuration to MMCR* with the freeze
 656         * bit set and set the hardware events to their initial values.
 657         * Then unfreeze the events.
 658         */
 659        ppc_set_pmu_inuse(1);
 660        mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 661        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 662        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 663                                | MMCR0_FC);
 664
 665        /*
 666         * Read off any pre-existing events that need to move
 667         * to another PMC.
 668         */
 669        for (i = 0; i < cpuhw->n_events; ++i) {
 670                event = cpuhw->event[i];
 671                if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
 672                        power_pmu_read(event);
 673                        write_pmc(event->hw.idx, 0);
 674                        event->hw.idx = 0;
 675                }
 676        }
 677
 678        /*
 679         * Initialize the PMCs for all the new and moved events.
 680         */
 681        cpuhw->n_limited = n_lim = 0;
 682        for (i = 0; i < cpuhw->n_events; ++i) {
 683                event = cpuhw->event[i];
 684                if (event->hw.idx)
 685                        continue;
 686                idx = hwc_index[i] + 1;
 687                if (is_limited_pmc(idx)) {
 688                        cpuhw->limited_counter[n_lim] = event;
 689                        cpuhw->limited_hwidx[n_lim] = idx;
 690                        ++n_lim;
 691                        continue;
 692                }
 693                val = 0;
 694                if (event->hw.sample_period) {
 695                        left = local64_read(&event->hw.period_left);
 696                        if (left < 0x80000000L)
 697                                val = 0x80000000L - left;
 698                }
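                    /*
                     * The PMCs count upwards and signal an overflow when the
                     * 32-bit value goes negative (bit 31 set), so seeding a
                     * PMC with 0x80000000 - left makes it overflow after
                     * about "left" more events; that is how the sample
                     * period is enforced (record_and_restart() does the
                     * same on each overflow).
                     */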
 699                local64_set(&event->hw.prev_count, val);
 700                event->hw.idx = idx;
 701                if (event->hw.state & PERF_HES_STOPPED)
 702                        val = 0;
 703                write_pmc(idx, val);
 704                perf_event_update_userpage(event);
 705        }
 706        cpuhw->n_limited = n_lim;
 707        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 708
 709 out_enable:
 710        mb();
 711        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
 712
 713        /*
 714         * Enable instruction sampling if necessary
 715         */
 716        if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
 717                mb();
 718                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
 719        }
 720
 721 out:
 722        local_irq_restore(flags);
 723}
 724
 725static int collect_events(struct perf_event *group, int max_count,
 726                          struct perf_event *ctrs[], u64 *events,
 727                          unsigned int *flags)
 728{
 729        int n = 0;
 730        struct perf_event *event;
 731
 732        if (!is_software_event(group)) {
 733                if (n >= max_count)
 734                        return -1;
 735                ctrs[n] = group;
 736                flags[n] = group->hw.event_base;
 737                events[n++] = group->hw.config;
 738        }
 739        list_for_each_entry(event, &group->sibling_list, group_entry) {
 740                if (!is_software_event(event) &&
 741                    event->state != PERF_EVENT_STATE_OFF) {
 742                        if (n >= max_count)
 743                                return -1;
 744                        ctrs[n] = event;
 745                        flags[n] = event->hw.event_base;
 746                        events[n++] = event->hw.config;
 747                }
 748        }
 749        return n;
 750}
 751
 752/*
 753 * Add an event to the PMU.
 754 * If all events are not already frozen, then we disable and
 755 * re-enable the PMU in order to get power_pmu_enable() to do the
 756 * actual work of reconfiguring the PMU.
 757 */
 758static int power_pmu_add(struct perf_event *event, int ef_flags)
 759{
 760        struct cpu_hw_events *cpuhw;
 761        unsigned long flags;
 762        int n0;
 763        int ret = -EAGAIN;
 764
 765        local_irq_save(flags);
 766        perf_pmu_disable(event->pmu);
 767
 768        /*
 769         * Add the event to the list (if there is room)
 770         * and check whether the total set is still feasible.
 771         */
 772        cpuhw = &__get_cpu_var(cpu_hw_events);
 773        n0 = cpuhw->n_events;
 774        if (n0 >= ppmu->n_counter)
 775                goto out;
 776        cpuhw->event[n0] = event;
 777        cpuhw->events[n0] = event->hw.config;
 778        cpuhw->flags[n0] = event->hw.event_base;
 779
 780        if (!(ef_flags & PERF_EF_START))
 781                event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 782
 783        /*
 784         * If a group events scheduling transaction was started,
 785         * skip the schedulability test here; it will be performed
 786         * at commit time (->commit_txn) as a whole.
 787         */
 788        if (cpuhw->group_flag & PERF_EVENT_TXN)
 789                goto nocheck;
 790
 791        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
 792                goto out;
 793        if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
 794                goto out;
 795        event->hw.config = cpuhw->events[n0];
 796
 797nocheck:
 798        ++cpuhw->n_events;
 799        ++cpuhw->n_added;
 800
 801        ret = 0;
 802 out:
 803        perf_pmu_enable(event->pmu);
 804        local_irq_restore(flags);
 805        return ret;
 806}
 807
 808/*
 809 * Remove an event from the PMU.
 810 */
 811static void power_pmu_del(struct perf_event *event, int ef_flags)
 812{
 813        struct cpu_hw_events *cpuhw;
 814        long i;
 815        unsigned long flags;
 816
 817        local_irq_save(flags);
 818        perf_pmu_disable(event->pmu);
 819
 820        power_pmu_read(event);
 821
 822        cpuhw = &__get_cpu_var(cpu_hw_events);
 823        for (i = 0; i < cpuhw->n_events; ++i) {
 824                if (event == cpuhw->event[i]) {
 825                        while (++i < cpuhw->n_events) {
 826                                cpuhw->event[i-1] = cpuhw->event[i];
 827                                cpuhw->events[i-1] = cpuhw->events[i];
 828                                cpuhw->flags[i-1] = cpuhw->flags[i];
 829                        }
 830                        --cpuhw->n_events;
 831                        ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
 832                        if (event->hw.idx) {
 833                                write_pmc(event->hw.idx, 0);
 834                                event->hw.idx = 0;
 835                        }
 836                        perf_event_update_userpage(event);
 837                        break;
 838                }
 839        }
 840        for (i = 0; i < cpuhw->n_limited; ++i)
 841                if (event == cpuhw->limited_counter[i])
 842                        break;
 843        if (i < cpuhw->n_limited) {
 844                while (++i < cpuhw->n_limited) {
 845                        cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
 846                        cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 847                }
 848                --cpuhw->n_limited;
 849        }
 850        if (cpuhw->n_events == 0) {
 851                /* disable exceptions if no events are running */
 852                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
 853        }
 854
 855        perf_pmu_enable(event->pmu);
 856        local_irq_restore(flags);
 857}
 858
 859/*
 860 * The POWER PMU does not support disabling individual counters; hence
 861 * we program such counters to their max value and ignore the interrupts.
 862 */
 863
 864static void power_pmu_start(struct perf_event *event, int ef_flags)
 865{
 866        unsigned long flags;
 867        s64 left;
 868
 869        if (!event->hw.idx || !event->hw.sample_period)
 870                return;
 871
 872        if (!(event->hw.state & PERF_HES_STOPPED))
 873                return;
 874
 875        if (ef_flags & PERF_EF_RELOAD)
 876                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 877
 878        local_irq_save(flags);
 879        perf_pmu_disable(event->pmu);
 880
 881        event->hw.state = 0;
 882        left = local64_read(&event->hw.period_left);
 883        write_pmc(event->hw.idx, left);
 884
 885        perf_event_update_userpage(event);
 886        perf_pmu_enable(event->pmu);
 887        local_irq_restore(flags);
 888}
 889
 890static void power_pmu_stop(struct perf_event *event, int ef_flags)
 891{
 892        unsigned long flags;
 893
 894        if (!event->hw.idx || !event->hw.sample_period)
 895                return;
 896
 897        if (event->hw.state & PERF_HES_STOPPED)
 898                return;
 899
 900        local_irq_save(flags);
 901        perf_pmu_disable(event->pmu);
 902
 903        power_pmu_read(event);
 904        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 905        write_pmc(event->hw.idx, 0);
 906
 907        perf_event_update_userpage(event);
 908        perf_pmu_enable(event->pmu);
 909        local_irq_restore(flags);
 910}
 911
 912/*
 913 * Start group events scheduling transaction.
 914 * Set the flag to make pmu::enable() not perform the
 915 * schedulability test; it will be performed at commit time.
 916 */
 917void power_pmu_start_txn(struct pmu *pmu)
 918{
 919        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 920
 921        perf_pmu_disable(pmu);
 922        cpuhw->group_flag |= PERF_EVENT_TXN;
 923        cpuhw->n_txn_start = cpuhw->n_events;
 924}
 925
 926/*
 927 * Stop group events scheduling transaction.
 928 * Clear the flag and pmu::enable() will perform the
 929 * schedulability test.
 930 */
 931void power_pmu_cancel_txn(struct pmu *pmu)
 932{
 933        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 934
 935        cpuhw->group_flag &= ~PERF_EVENT_TXN;
 936        perf_pmu_enable(pmu);
 937}
 938
 939/*
 940 * Commit group events scheduling transaction.
 941 * Perform the group schedulability test as a whole;
 942 * return 0 on success.
 943 */
 944int power_pmu_commit_txn(struct pmu *pmu)
 945{
 946        struct cpu_hw_events *cpuhw;
 947        long i, n;
 948
 949        if (!ppmu)
 950                return -EAGAIN;
 951        cpuhw = &__get_cpu_var(cpu_hw_events);
 952        n = cpuhw->n_events;
 953        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
 954                return -EAGAIN;
 955        i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
 956        if (i < 0)
 957                return -EAGAIN;
 958
 959        for (i = cpuhw->n_txn_start; i < n; ++i)
 960                cpuhw->event[i]->hw.config = cpuhw->events[i];
 961
 962        cpuhw->group_flag &= ~PERF_EVENT_TXN;
 963        perf_pmu_enable(pmu);
 964        return 0;
 965}
 966
 967/*
 968 * Return 1 if we might be able to put the event on a limited PMC,
 969 * or 0 if not.
 970 * An event can only go on a limited PMC if it counts something
 971 * that a limited PMC can count, doesn't require interrupts, and
 972 * doesn't exclude any processor mode.
 973 */
 974static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
 975                                 unsigned int flags)
 976{
 977        int n;
 978        u64 alt[MAX_EVENT_ALTERNATIVES];
 979
 980        if (event->attr.exclude_user
 981            || event->attr.exclude_kernel
 982            || event->attr.exclude_hv
 983            || event->attr.sample_period)
 984                return 0;
 985
 986        if (ppmu->limited_pmc_event(ev))
 987                return 1;
 988
 989        /*
 990         * The requested event_id isn't on a limited PMC already;
 991         * see if any alternative code goes on a limited PMC.
 992         */
 993        if (!ppmu->get_alternatives)
 994                return 0;
 995
 996        flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
 997        n = ppmu->get_alternatives(ev, flags, alt);
 998
 999        return n > 0;
1000}
1001
1002/*
1003 * Find an alternative event_id that goes on a normal PMC, if possible,
1004 * and return the event_id code, or 0 if there is no such alternative.
1005 * (Note: event_id code 0 is "don't count" on all machines.)
1006 */
1007static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
1008{
1009        u64 alt[MAX_EVENT_ALTERNATIVES];
1010        int n;
1011
1012        flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
1013        n = ppmu->get_alternatives(ev, flags, alt);
1014        if (!n)
1015                return 0;
1016        return alt[0];
1017}
1018
1019/* Number of perf_events counting hardware events */
1020static atomic_t num_events;
1021/* Used to avoid races in calling reserve/release_pmc_hardware */
1022static DEFINE_MUTEX(pmc_reserve_mutex);
1023
1024/*
1025 * Release the PMU if this is the last perf_event.
1026 */
1027static void hw_perf_event_destroy(struct perf_event *event)
1028{
1029        if (!atomic_add_unless(&num_events, -1, 1)) {
1030                mutex_lock(&pmc_reserve_mutex);
1031                if (atomic_dec_return(&num_events) == 0)
1032                        release_pmc_hardware();
1033                mutex_unlock(&pmc_reserve_mutex);
1034        }
1035}
1036
1037/*
1038 * Translate a generic cache event_id config to a raw event_id code.
1039 */
1040static int hw_perf_cache_event(u64 config, u64 *eventp)
1041{
1042        unsigned long type, op, result;
1043        int ev;
1044
1045        if (!ppmu->cache_events)
1046                return -EINVAL;
1047
1048        /* unpack config */
1049        type = config & 0xff;
1050        op = (config >> 8) & 0xff;
1051        result = (config >> 16) & 0xff;
1052
1053        if (type >= PERF_COUNT_HW_CACHE_MAX ||
1054            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
1055            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1056                return -EINVAL;
1057
1058        ev = (*ppmu->cache_events)[type][op][result];
1059        if (ev == 0)
1060                return -EOPNOTSUPP;
1061        if (ev == -1)
1062                return -EINVAL;
1063        *eventp = ev;
1064        return 0;
1065}
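    /*
     * For example, a PERF_TYPE_HW_CACHE config of
     * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
     * (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D
     * requests L1 data-cache read misses; the table lookup above maps it
     * to the CPU-specific raw code (0 means the combination is not
     * supported, -1 that it is invalid).
     */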
1066
1067static int power_pmu_event_init(struct perf_event *event)
1068{
1069        u64 ev;
1070        unsigned long flags;
1071        struct perf_event *ctrs[MAX_HWEVENTS];
1072        u64 events[MAX_HWEVENTS];
1073        unsigned int cflags[MAX_HWEVENTS];
1074        int n;
1075        int err;
1076        struct cpu_hw_events *cpuhw;
1077
1078        if (!ppmu)
1079                return -ENOENT;
1080
1081        switch (event->attr.type) {
1082        case PERF_TYPE_HARDWARE:
1083                ev = event->attr.config;
1084                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
1085                        return -EOPNOTSUPP;
1086                ev = ppmu->generic_events[ev];
1087                break;
1088        case PERF_TYPE_HW_CACHE:
1089                err = hw_perf_cache_event(event->attr.config, &ev);
1090                if (err)
1091                        return err;
1092                break;
1093        case PERF_TYPE_RAW:
1094                ev = event->attr.config;
1095                break;
1096        default:
1097                return -ENOENT;
1098        }
1099
1100        event->hw.config_base = ev;
1101        event->hw.idx = 0;
1102
1103        /*
1104         * If we are not running on a hypervisor, force the
1105         * exclude_hv bit to 0 so that we don't care what
1106         * the user set it to.
1107         */
1108        if (!firmware_has_feature(FW_FEATURE_LPAR))
1109                event->attr.exclude_hv = 0;
1110
1111        /*
1112         * If this is a per-task event, then we can use
1113         * PM_RUN_* events interchangeably with their non RUN_*
1114         * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1115         * XXX we should check if the task is an idle task.
1116         */
1117        flags = 0;
1118        if (event->attach_state & PERF_ATTACH_TASK)
1119                flags |= PPMU_ONLY_COUNT_RUN;
1120
1121        /*
1122         * If this machine has limited events, check whether this
1123         * event_id could go on a limited PMC.
1124         */
1125        if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
1126                if (can_go_on_limited_pmc(event, ev, flags)) {
1127                        flags |= PPMU_LIMITED_PMC_OK;
1128                } else if (ppmu->limited_pmc_event(ev)) {
1129                        /*
1130                         * The requested event_id is on a limited PMC,
1131                         * but we can't use a limited PMC; see if any
1132                         * alternative goes on a normal PMC.
1133                         */
1134                        ev = normal_pmc_alternative(ev, flags);
1135                        if (!ev)
1136                                return -EINVAL;
1137                }
1138        }
1139
1140        /*
1141         * If this is in a group, check if it can go on with all the
1142         * other hardware events in the group.  We assume the event
1143         * hasn't been linked into its leader's sibling list at this point.
1144         */
1145        n = 0;
1146        if (event->group_leader != event) {
1147                n = collect_events(event->group_leader, ppmu->n_counter - 1,
1148                                   ctrs, events, cflags);
1149                if (n < 0)
1150                        return -EINVAL;
1151        }
1152        events[n] = ev;
1153        ctrs[n] = event;
1154        cflags[n] = flags;
1155        if (check_excludes(ctrs, cflags, n, 1))
1156                return -EINVAL;
1157
1158        cpuhw = &get_cpu_var(cpu_hw_events);
1159        err = power_check_constraints(cpuhw, events, cflags, n + 1);
1160        put_cpu_var(cpu_hw_events);
1161        if (err)
1162                return -EINVAL;
1163
1164        event->hw.config = events[n];
1165        event->hw.event_base = cflags[n];
1166        event->hw.last_period = event->hw.sample_period;
1167        local64_set(&event->hw.period_left, event->hw.last_period);
1168
1169        /*
1170         * See if we need to reserve the PMU.
1171         * If no events are currently in use, then we have to take a
1172         * mutex to ensure that we don't race with another task doing
1173         * reserve_pmc_hardware or release_pmc_hardware.
1174         */
1175        err = 0;
1176        if (!atomic_inc_not_zero(&num_events)) {
1177                mutex_lock(&pmc_reserve_mutex);
1178                if (atomic_read(&num_events) == 0 &&
1179                    reserve_pmc_hardware(perf_event_interrupt))
1180                        err = -EBUSY;
1181                else
1182                        atomic_inc(&num_events);
1183                mutex_unlock(&pmc_reserve_mutex);
1184        }
1185        event->destroy = hw_perf_event_destroy;
1186
1187        return err;
1188}
1189
1190struct pmu power_pmu = {
1191        .pmu_enable     = power_pmu_enable,
1192        .pmu_disable    = power_pmu_disable,
1193        .event_init     = power_pmu_event_init,
1194        .add            = power_pmu_add,
1195        .del            = power_pmu_del,
1196        .start          = power_pmu_start,
1197        .stop           = power_pmu_stop,
1198        .read           = power_pmu_read,
1199        .start_txn      = power_pmu_start_txn,
1200        .cancel_txn     = power_pmu_cancel_txn,
1201        .commit_txn     = power_pmu_commit_txn,
1202};
1203
1204/*
1205 * A counter has overflowed; update its count and record
1206 * things if requested.  Note that interrupts are hard-disabled
1207 * here so there is no possibility of being interrupted.
1208 */
1209static void record_and_restart(struct perf_event *event, unsigned long val,
1210                               struct pt_regs *regs, int nmi)
1211{
1212        u64 period = event->hw.sample_period;
1213        s64 prev, delta, left;
1214        int record = 0;
1215
1216        if (event->hw.state & PERF_HES_STOPPED) {
1217                write_pmc(event->hw.idx, 0);
1218                return;
1219        }
1220
1221        /* we don't have to worry about interrupts here */
1222        prev = local64_read(&event->hw.prev_count);
1223        delta = check_and_compute_delta(prev, val);
1224        local64_add(delta, &event->count);
1225
1226        /*
1227         * See if the total period for this event has expired,
1228         * and update for the next period.
1229         */
1230        val = 0;
1231        left = local64_read(&event->hw.period_left) - delta;
1232        if (period) {
1233                if (left <= 0) {
1234                        left += period;
1235                        if (left <= 0)
1236                                left = period;
1237                        record = 1;
1238                        event->hw.last_period = event->hw.sample_period;
1239                }
1240                if (left < 0x80000000LL)
1241                        val = 0x80000000LL - left;
1242        }
1243
1244        write_pmc(event->hw.idx, val);
1245        local64_set(&event->hw.prev_count, val);
1246        local64_set(&event->hw.period_left, left);
1247        perf_event_update_userpage(event);
1248
1249        /*
1250         * Finally record data if requested.
1251         */
1252        if (record) {
1253                struct perf_sample_data data;
1254
1255                perf_sample_data_init(&data, ~0ULL);
1256                data.period = event->hw.last_period;
1257
1258                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1259                        perf_get_data_addr(regs, &data.addr);
1260
1261                if (perf_event_overflow(event, nmi, &data, regs))
1262                        power_pmu_stop(event, 0);
1263        }
1264}
1265
1266/*
1267 * Called from generic code to get the misc flags (i.e. processor mode)
1268 * for an event_id.
1269 */
1270unsigned long perf_misc_flags(struct pt_regs *regs)
1271{
1272        u32 flags = perf_get_misc_flags(regs);
1273
1274        if (flags)
1275                return flags;
1276        return user_mode(regs) ? PERF_RECORD_MISC_USER :
1277                PERF_RECORD_MISC_KERNEL;
1278}
1279
1280/*
1281 * Called from generic code to get the instruction pointer
1282 * for an event_id.
1283 */
1284unsigned long perf_instruction_pointer(struct pt_regs *regs)
1285{
1286        unsigned long ip;
1287
1288        if (TRAP(regs) != 0xf00)
1289                return regs->nip;       /* not a PMU interrupt */
1290
1291        ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1292        return ip;
1293}
1294
1295static bool pmc_overflow(unsigned long val)
1296{
1297        if ((int)val < 0)
1298                return true;
1299
1300        /*
1301         * Events on POWER7 can roll back if a speculative event doesn't
1302         * eventually complete. Unfortunately in some rare cases they will
1303         * raise a performance monitor exception. We need to catch this to
1304         * ensure we reset the PMC. In all cases the PMC will be 256 or less
1305         * cycles from overflow.
1306         *
1307         * We only do this if the first pass fails to find any overflowing
1308         * PMCs because a user might set a period of less than 256 and we
1309         * don't want to mistakenly reset them.
1310         */
1311        if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
1312                return true;
1313
1314        return false;
1315}
1316
1317/*
1318 * Performance monitor interrupt stuff
1319 */
1320static void perf_event_interrupt(struct pt_regs *regs)
1321{
1322        int i;
1323        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1324        struct perf_event *event;
1325        unsigned long val;
1326        int found = 0;
1327        int nmi;
1328
1329        if (cpuhw->n_limited)
1330                freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
1331                                        mfspr(SPRN_PMC6));
1332
1333        perf_read_regs(regs);
1334
1335        nmi = perf_intr_is_nmi(regs);
1336        if (nmi)
1337                nmi_enter();
1338        else
1339                irq_enter();
1340
1341        for (i = 0; i < cpuhw->n_events; ++i) {
1342                event = cpuhw->event[i];
1343                if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1344                        continue;
1345                val = read_pmc(event->hw.idx);
1346                if ((int)val < 0) {
1347                        /* event has overflowed */
1348                        found = 1;
1349                        record_and_restart(event, val, regs, nmi);
1350                }
1351        }
1352
1353        /*
1354         * In case we didn't find and reset the event that caused
1355         * the interrupt, scan all events and reset any that are
1356         * negative, to avoid getting continual interrupts.
1357         * Any that we processed in the previous loop will not be negative.
1358         */
1359        if (!found) {
1360                for (i = 0; i < ppmu->n_counter; ++i) {
1361                        if (is_limited_pmc(i + 1))
1362                                continue;
1363                        val = read_pmc(i + 1);
1364                        if (pmc_overflow(val))
1365                                write_pmc(i + 1, 0);
1366                }
1367        }
1368
1369        /*
1370         * Reset MMCR0 to its normal value.  This will set PMXE and
1371         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
1372         * and thus allow interrupts to occur again.
1373         * XXX might want to use MSR.PM to keep the events frozen until
1374         * we get back out of this interrupt.
1375         */
1376        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
1377
1378        if (nmi)
1379                nmi_exit();
1380        else
1381                irq_exit();
1382}
1383
1384static void power_pmu_setup(int cpu)
1385{
1386        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1387
1388        if (!ppmu)
1389                return;
1390        memset(cpuhw, 0, sizeof(*cpuhw));
1391        cpuhw->mmcr[0] = MMCR0_FC;
1392}
1393
1394static int __cpuinit
1395power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1396{
1397        unsigned int cpu = (long)hcpu;
1398
1399        switch (action & ~CPU_TASKS_FROZEN) {
1400        case CPU_UP_PREPARE:
1401                power_pmu_setup(cpu);
1402                break;
1403
1404        default:
1405                break;
1406        }
1407
1408        return NOTIFY_OK;
1409}
1410
1411int register_power_pmu(struct power_pmu *pmu)
1412{
1413        if (ppmu)
1414                return -EBUSY;          /* something's already registered */
1415
1416        ppmu = pmu;
1417        pr_info("%s performance monitor hardware support registered\n",
1418                pmu->name);
1419
1420#ifdef MSR_HV
1421        /*
1422         * Use FCHV to ignore kernel events if MSR.HV is set.
1423         */
1424        if (mfmsr() & MSR_HV)
1425                freeze_events_kernel = MMCR0_FCHV;
1426#endif /* MSR_HV */
1427
1428        perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
1429        perf_cpu_notifier(power_pmu_notifier);
1430
1431        return 0;
1432}
1433