linux/arch/powerpc/perf/core-book3s.c
   1/*
   2 * Performance event support - powerpc architecture code
   3 *
   4 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version
   9 * 2 of the License, or (at your option) any later version.
  10 */
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/perf_event.h>
  14#include <linux/percpu.h>
  15#include <linux/hardirq.h>
  16#include <linux/uaccess.h>
  17#include <asm/reg.h>
  18#include <asm/pmc.h>
  19#include <asm/machdep.h>
  20#include <asm/firmware.h>
  21#include <asm/ptrace.h>
  22#include <asm/code-patching.h>
  23
  24#define BHRB_MAX_ENTRIES        32
  25#define BHRB_TARGET             0x0000000000000002
  26#define BHRB_PREDICTION         0x0000000000000001
  27#define BHRB_EA                 0xFFFFFFFFFFFFFFFC
  28
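/*
 * Illustrative sketch (not part of the original file): how a raw BHRB
 * entry word maps onto the masks above. "val" stands for a value as
 * returned by the read_bhrb() helper used further down in this file.
 */
#if 0
static void bhrb_decode_example(u64 val)
{
	u64 ea   = val & BHRB_EA;		/* effective address bits */
	int tgt  = !!(val & BHRB_TARGET);	/* entry is a branch target */
	int pred = !!(val & BHRB_PREDICTION);	/* prediction bit */

	pr_info("BHRB entry: ea=0x%llx target=%d prediction_bit=%d\n",
		(unsigned long long)ea, tgt, pred);
}
#endif
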
  29struct cpu_hw_events {
  30        int n_events;
  31        int n_percpu;
  32        int disabled;
  33        int n_added;
  34        int n_limited;
  35        u8  pmcs_enabled;
  36        struct perf_event *event[MAX_HWEVENTS];
  37        u64 events[MAX_HWEVENTS];
  38        unsigned int flags[MAX_HWEVENTS];
  39        unsigned long mmcr[3];
  40        struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
  41        u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
  42        u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  43        unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  44        unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
  45
  46        unsigned int group_flag;
  47        int n_txn_start;
  48
  49        /* BHRB bits */
  50        u64                             bhrb_filter;    /* BHRB HW branch filter */
  51        int                             bhrb_users;
  52        void                            *bhrb_context;
  53        struct  perf_branch_stack       bhrb_stack;
  54        struct  perf_branch_entry       bhrb_entries[BHRB_MAX_ENTRIES];
  55};
  56
  57DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
  58
  59struct power_pmu *ppmu;
  60
  61/*
  62 * Normally, to ignore kernel events we set the FCS (freeze counters
  63 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
  64 * hypervisor bit set in the MSR, or if we are running on a processor
  65 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
  66 * then we need to use the FCHV bit to ignore kernel events.
  67 */
  68static unsigned int freeze_events_kernel = MMCR0_FCS;
  69
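/*
 * Illustrative sketch, not the authoritative init code: the switch to
 * FCHV described above is made along these lines in the PMU
 * registration path later in this file.
 */
#if 0
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif
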
  70/*
  71 * 32-bit doesn't have MMCRA but does have an MMCR2,
  72 * and a few other names are different.
  73 */
  74#ifdef CONFIG_PPC32
  75
  76#define MMCR0_FCHV              0
  77#define MMCR0_PMCjCE            MMCR0_PMCnCE
  78
  79#define SPRN_MMCRA              SPRN_MMCR2
  80#define MMCRA_SAMPLE_ENABLE     0
  81
  82static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  83{
  84        return 0;
  85}
  86static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
  87static inline u32 perf_get_misc_flags(struct pt_regs *regs)
  88{
  89        return 0;
  90}
  91static inline void perf_read_regs(struct pt_regs *regs)
  92{
  93        regs->result = 0;
  94}
  95static inline int perf_intr_is_nmi(struct pt_regs *regs)
  96{
  97        return 0;
  98}
  99
 100static inline int siar_valid(struct pt_regs *regs)
 101{
 102        return 1;
 103}
 104
 105static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 106static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 107void power_pmu_flush_branch_stack(void) {}
 108static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
 109#endif /* CONFIG_PPC32 */
 110
 111static bool regs_use_siar(struct pt_regs *regs)
 112{
 113        return !!regs->result;
 114}
 115
 116/*
 117 * Things that are specific to 64-bit implementations.
 118 */
 119#ifdef CONFIG_PPC64
 120
 121static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 122{
 123        unsigned long mmcra = regs->dsisr;
 124
 125        if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
 126                unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
 127                if (slot > 1)
 128                        return 4 * (slot - 1);
 129        }
 130
 131        return 0;
 132}
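
/*
 * Worked example (illustrative numbers): with instruction sampling
 * enabled and MMCRA[SLOT] = 3, the sampled instruction sits two
 * instructions (8 bytes) past the address in SIAR, so the adjustment
 * returned above is 4 * (3 - 1) = 8.
 */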
 133
 134/*
 135 * The user wants a data address recorded.
 136 * If we're not doing instruction sampling, give them the SDAR
 137 * (sampled data address).  If we are doing instruction sampling, then
 138 * only give them the SDAR if it corresponds to the instruction
 139 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 140 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 141 */
 142static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 143{
 144        unsigned long mmcra = regs->dsisr;
 145        bool sdar_valid;
 146
 147        if (ppmu->flags & PPMU_HAS_SIER)
 148                sdar_valid = regs->dar & SIER_SDAR_VALID;
 149        else {
 150                unsigned long sdsync;
 151
 152                if (ppmu->flags & PPMU_SIAR_VALID)
 153                        sdsync = POWER7P_MMCRA_SDAR_VALID;
 154                else if (ppmu->flags & PPMU_ALT_SIPR)
 155                        sdsync = POWER6_MMCRA_SDSYNC;
 156                else
 157                        sdsync = MMCRA_SDSYNC;
 158
 159                sdar_valid = mmcra & sdsync;
 160        }
 161
 162        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 163                *addrp = mfspr(SPRN_SDAR);
 164}
 165
 166static bool regs_sihv(struct pt_regs *regs)
 167{
 168        unsigned long sihv = MMCRA_SIHV;
 169
 170        if (ppmu->flags & PPMU_HAS_SIER)
 171                return !!(regs->dar & SIER_SIHV);
 172
 173        if (ppmu->flags & PPMU_ALT_SIPR)
 174                sihv = POWER6_MMCRA_SIHV;
 175
 176        return !!(regs->dsisr & sihv);
 177}
 178
 179static bool regs_sipr(struct pt_regs *regs)
 180{
 181        unsigned long sipr = MMCRA_SIPR;
 182
 183        if (ppmu->flags & PPMU_HAS_SIER)
 184                return !!(regs->dar & SIER_SIPR);
 185
 186        if (ppmu->flags & PPMU_ALT_SIPR)
 187                sipr = POWER6_MMCRA_SIPR;
 188
 189        return !!(regs->dsisr & sipr);
 190}
 191
 192static inline u32 perf_flags_from_msr(struct pt_regs *regs)
 193{
 194        if (regs->msr & MSR_PR)
 195                return PERF_RECORD_MISC_USER;
 196        if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
 197                return PERF_RECORD_MISC_HYPERVISOR;
 198        return PERF_RECORD_MISC_KERNEL;
 199}
 200
 201static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 202{
 203        bool use_siar = regs_use_siar(regs);
 204
 205        if (!use_siar)
 206                return perf_flags_from_msr(regs);
 207
 208        /*
 209         * If we don't have flags in MMCRA, rather than using
 210         * the MSR, we intuit the flags from the address in
 211         * SIAR which should give slightly more reliable
 212         * results
 213         */
 214        if (ppmu->flags & PPMU_NO_SIPR) {
 215                unsigned long siar = mfspr(SPRN_SIAR);
 216                if (siar >= PAGE_OFFSET)
 217                        return PERF_RECORD_MISC_KERNEL;
 218                return PERF_RECORD_MISC_USER;
 219        }
 220
 221        /* PR has priority over HV, so order below is important */
 222        if (regs_sipr(regs))
 223                return PERF_RECORD_MISC_USER;
 224
 225        if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
 226                return PERF_RECORD_MISC_HYPERVISOR;
 227
 228        return PERF_RECORD_MISC_KERNEL;
 229}
 230
 231/*
 232 * Overload regs->dsisr to store MMCRA so we only need to read it once
 233 * on each interrupt.
 234 * Overload regs->dar to store SIER if we have it.
 235 * Overload regs->result to specify whether we should use the MSR (result
 236 * is zero) or the SIAR (result is non zero).
 237 */
 238static inline void perf_read_regs(struct pt_regs *regs)
 239{
 240        unsigned long mmcra = mfspr(SPRN_MMCRA);
 241        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 242        int use_siar;
 243
 244        regs->dsisr = mmcra;
 245
 246        if (ppmu->flags & PPMU_HAS_SIER)
 247                regs->dar = mfspr(SPRN_SIER);
 248
 249        /*
 250         * If this isn't a PMU exception (eg a software event) the SIAR is
 251         * not valid. Use pt_regs.
 252         *
 253         * If it is a marked event use the SIAR.
 254         *
 255         * If the PMU doesn't update the SIAR for non marked events use
 256         * pt_regs.
 257         *
 258         * If the PMU has HV/PR flags then check to see if they
 259         * place the exception in userspace. If so, use pt_regs. In
 260         * continuous sampling mode the SIAR and the PMU exception are
 261         * not synchronised, so they may be many instructions apart.
 262         * This can result in confusing backtraces. We still want
 263         * hypervisor samples as well as samples in the kernel with
 264         * interrupts off hence the userspace check.
 265         */
 266        if (TRAP(regs) != 0xf00)
 267                use_siar = 0;
 268        else if (marked)
 269                use_siar = 1;
 270        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
 271                use_siar = 0;
 272        else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
 273                use_siar = 0;
 274        else
 275                use_siar = 1;
 276
 277        regs->result = use_siar;
 278}
 279
 280/*
 281 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 282 * it as an NMI.
 283 */
 284static inline int perf_intr_is_nmi(struct pt_regs *regs)
 285{
 286        return !regs->softe;
 287}
 288
 289/*
 290 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 291 * must be sampled only if the SIAR-valid bit is set.
 292 *
 293 * For unmarked instructions and for processors that don't have the SIAR-Valid
 294 * bit, assume that SIAR is valid.
 295 */
 296static inline int siar_valid(struct pt_regs *regs)
 297{
 298        unsigned long mmcra = regs->dsisr;
 299        int marked = mmcra & MMCRA_SAMPLE_ENABLE;
 300
 301        if (marked) {
 302                if (ppmu->flags & PPMU_HAS_SIER)
 303                        return regs->dar & SIER_SIAR_VALID;
 304
 305                if (ppmu->flags & PPMU_SIAR_VALID)
 306                        return mmcra & POWER7P_MMCRA_SIAR_VALID;
 307        }
 308
 309        return 1;
 310}
 311
 312
 313/* Reset all possible BHRB entries */
 314static void power_pmu_bhrb_reset(void)
 315{
 316        asm volatile(PPC_CLRBHRB);
 317}
 318
 319static void power_pmu_bhrb_enable(struct perf_event *event)
 320{
 321        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 322
 323        if (!ppmu->bhrb_nr)
 324                return;
 325
 326        /* Clear BHRB if we changed task context to avoid data leaks */
 327        if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
 328                power_pmu_bhrb_reset();
 329                cpuhw->bhrb_context = event->ctx;
 330        }
 331        cpuhw->bhrb_users++;
 332}
 333
 334static void power_pmu_bhrb_disable(struct perf_event *event)
 335{
 336        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 337
 338        if (!ppmu->bhrb_nr)
 339                return;
 340
 341        cpuhw->bhrb_users--;
 342        WARN_ON_ONCE(cpuhw->bhrb_users < 0);
 343
 344        if (!cpuhw->disabled && !cpuhw->bhrb_users) {
 345                /* BHRB cannot be turned off when other
 346                 * events are active on the PMU.
 347                 */
 348
 349                /* avoid stale pointer */
 350                cpuhw->bhrb_context = NULL;
 351        }
 352}
 353
  354/* Called from ctxsw to prevent one process's branch entries from
  355 * mingling with another process's entries during a context switch.
  356 */
 357void power_pmu_flush_branch_stack(void)
 358{
 359        if (ppmu->bhrb_nr)
 360                power_pmu_bhrb_reset();
 361}
 362/* Calculate the to address for a branch */
 363static __u64 power_pmu_bhrb_to(u64 addr)
 364{
 365        unsigned int instr;
 366        int ret;
 367        __u64 target;
 368
 369        if (is_kernel_addr(addr))
 370                return branch_target((unsigned int *)addr);
 371
  372        /* Userspace: need to copy the instruction here, then translate it */
 373        pagefault_disable();
 374        ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
 375        if (ret) {
 376                pagefault_enable();
 377                return 0;
 378        }
 379        pagefault_enable();
 380
 381        target = branch_target(&instr);
 382        if ((!target) || (instr & BRANCH_ABSOLUTE))
 383                return target;
 384
 385        /* Translate relative branch target from kernel to user address */
 386        return target - (unsigned long)&instr + addr;
 387}
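
/*
 * Worked example (illustrative addresses): for a user-space relative
 * branch, e.g. instruction 0x48000010 ("b .+0x10") fetched from
 * addr = 0x10000000, branch_target(&instr) yields &instr + 0x10, and the
 * translation above returns 0x10000000 + 0x10 = 0x10000010.
 */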
 388
 389/* Processing BHRB entries */
 390void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
 391{
 392        u64 val;
 393        u64 addr;
 394        int r_index, u_index, pred;
 395
 396        r_index = 0;
 397        u_index = 0;
 398        while (r_index < ppmu->bhrb_nr) {
 399                /* Assembly read function */
 400                val = read_bhrb(r_index++);
 401                if (!val)
 402                        /* Terminal marker: End of valid BHRB entries */
 403                        break;
 404                else {
 405                        addr = val & BHRB_EA;
 406                        pred = val & BHRB_PREDICTION;
 407
 408                        if (!addr)
 409                                /* invalid entry */
 410                                continue;
 411
 412                        /* Branches are read most recent first (ie. mfbhrb 0 is
 413                         * the most recent branch).
 414                         * There are two types of valid entries:
 415                         * 1) a target entry which is the to address of a
 416                         *    computed goto like a blr,bctr,btar.  The next
  417                         *    entry read from the bhrb will be the branch
 418                         *    corresponding to this target (ie. the actual
 419                         *    blr/bctr/btar instruction).
 420                         * 2) a from address which is an actual branch.  If a
  421                         *    target entry precedes this, then this is the
 422                         *    matching branch for that target.  If this is not
 423                         *    following a target entry, then this is a branch
 424                         *    where the target is given as an immediate field
 425                         *    in the instruction (ie. an i or b form branch).
 426                         *    In this case we need to read the instruction from
 427                         *    memory to determine the target/to address.
 428                         */
 429
 430                        if (val & BHRB_TARGET) {
 431                                /* Target branches use two entries
 432                                 * (ie. computed gotos/XL form)
 433                                 */
 434                                cpuhw->bhrb_entries[u_index].to = addr;
 435                                cpuhw->bhrb_entries[u_index].mispred = pred;
 436                                cpuhw->bhrb_entries[u_index].predicted = ~pred;
 437
 438                                /* Get from address in next entry */
 439                                val = read_bhrb(r_index++);
 440                                addr = val & BHRB_EA;
 441                                if (val & BHRB_TARGET) {
 442                                        /* Shouldn't have two targets in a
  443                                           row. Reset the index and try again. */
 444                                        r_index--;
 445                                        addr = 0;
 446                                }
 447                                cpuhw->bhrb_entries[u_index].from = addr;
 448                        } else {
  449                                /* Branches to immediate field
  450                                   (ie. I or B form) */
 451                                cpuhw->bhrb_entries[u_index].from = addr;
 452                                cpuhw->bhrb_entries[u_index].to =
 453                                        power_pmu_bhrb_to(addr);
 454                                cpuhw->bhrb_entries[u_index].mispred = pred;
 455                                cpuhw->bhrb_entries[u_index].predicted = ~pred;
 456                        }
 457                        u_index++;
 458
 459                }
 460        }
 461        cpuhw->bhrb_stack.nr = u_index;
 462        return;
 463}
 464
 465#endif /* CONFIG_PPC64 */
 466
 467static void perf_event_interrupt(struct pt_regs *regs);
 468
 469void perf_event_print_debug(void)
 470{
 471}
 472
 473/*
 474 * Read one performance monitor counter (PMC).
 475 */
 476static unsigned long read_pmc(int idx)
 477{
 478        unsigned long val;
 479
 480        switch (idx) {
 481        case 1:
 482                val = mfspr(SPRN_PMC1);
 483                break;
 484        case 2:
 485                val = mfspr(SPRN_PMC2);
 486                break;
 487        case 3:
 488                val = mfspr(SPRN_PMC3);
 489                break;
 490        case 4:
 491                val = mfspr(SPRN_PMC4);
 492                break;
 493        case 5:
 494                val = mfspr(SPRN_PMC5);
 495                break;
 496        case 6:
 497                val = mfspr(SPRN_PMC6);
 498                break;
 499#ifdef CONFIG_PPC64
 500        case 7:
 501                val = mfspr(SPRN_PMC7);
 502                break;
 503        case 8:
 504                val = mfspr(SPRN_PMC8);
 505                break;
 506#endif /* CONFIG_PPC64 */
 507        default:
 508                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 509                val = 0;
 510        }
 511        return val;
 512}
 513
 514/*
 515 * Write one PMC.
 516 */
 517static void write_pmc(int idx, unsigned long val)
 518{
 519        switch (idx) {
 520        case 1:
 521                mtspr(SPRN_PMC1, val);
 522                break;
 523        case 2:
 524                mtspr(SPRN_PMC2, val);
 525                break;
 526        case 3:
 527                mtspr(SPRN_PMC3, val);
 528                break;
 529        case 4:
 530                mtspr(SPRN_PMC4, val);
 531                break;
 532        case 5:
 533                mtspr(SPRN_PMC5, val);
 534                break;
 535        case 6:
 536                mtspr(SPRN_PMC6, val);
 537                break;
 538#ifdef CONFIG_PPC64
 539        case 7:
 540                mtspr(SPRN_PMC7, val);
 541                break;
 542        case 8:
 543                mtspr(SPRN_PMC8, val);
 544                break;
 545#endif /* CONFIG_PPC64 */
 546        default:
 547                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
 548        }
 549}
 550
 551/*
 552 * Check if a set of events can all go on the PMU at once.
 553 * If they can't, this will look at alternative codes for the events
 554 * and see if any combination of alternative codes is feasible.
 555 * The feasible set is returned in event_id[].
 556 */
 557static int power_check_constraints(struct cpu_hw_events *cpuhw,
 558                                   u64 event_id[], unsigned int cflags[],
 559                                   int n_ev)
 560{
 561        unsigned long mask, value, nv;
 562        unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
 563        int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
 564        int i, j;
 565        unsigned long addf = ppmu->add_fields;
 566        unsigned long tadd = ppmu->test_adder;
 567
 568        if (n_ev > ppmu->n_counter)
 569                return -1;
 570
 571        /* First see if the events will go on as-is */
 572        for (i = 0; i < n_ev; ++i) {
 573                if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
 574                    && !ppmu->limited_pmc_event(event_id[i])) {
 575                        ppmu->get_alternatives(event_id[i], cflags[i],
 576                                               cpuhw->alternatives[i]);
 577                        event_id[i] = cpuhw->alternatives[i][0];
 578                }
 579                if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
 580                                         &cpuhw->avalues[i][0]))
 581                        return -1;
 582        }
 583        value = mask = 0;
 584        for (i = 0; i < n_ev; ++i) {
 585                nv = (value | cpuhw->avalues[i][0]) +
 586                        (value & cpuhw->avalues[i][0] & addf);
 587                if ((((nv + tadd) ^ value) & mask) != 0 ||
 588                    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
 589                     cpuhw->amasks[i][0]) != 0)
 590                        break;
 591                value = nv;
 592                mask |= cpuhw->amasks[i][0];
 593        }
 594        if (i == n_ev)
 595                return 0;       /* all OK */
 596
 597        /* doesn't work, gather alternatives... */
 598        if (!ppmu->get_alternatives)
 599                return -1;
 600        for (i = 0; i < n_ev; ++i) {
 601                choice[i] = 0;
 602                n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
 603                                                  cpuhw->alternatives[i]);
 604                for (j = 1; j < n_alt[i]; ++j)
 605                        ppmu->get_constraint(cpuhw->alternatives[i][j],
 606                                             &cpuhw->amasks[i][j],
 607                                             &cpuhw->avalues[i][j]);
 608        }
 609
 610        /* enumerate all possibilities and see if any will work */
 611        i = 0;
 612        j = -1;
 613        value = mask = nv = 0;
 614        while (i < n_ev) {
 615                if (j >= 0) {
 616                        /* we're backtracking, restore context */
 617                        value = svalues[i];
 618                        mask = smasks[i];
 619                        j = choice[i];
 620                }
 621                /*
 622                 * See if any alternative k for event_id i,
 623                 * where k > j, will satisfy the constraints.
 624                 */
 625                while (++j < n_alt[i]) {
 626                        nv = (value | cpuhw->avalues[i][j]) +
 627                                (value & cpuhw->avalues[i][j] & addf);
 628                        if ((((nv + tadd) ^ value) & mask) == 0 &&
 629                            (((nv + tadd) ^ cpuhw->avalues[i][j])
 630                             & cpuhw->amasks[i][j]) == 0)
 631                                break;
 632                }
 633                if (j >= n_alt[i]) {
 634                        /*
 635                         * No feasible alternative, backtrack
 636                         * to event_id i-1 and continue enumerating its
 637                         * alternatives from where we got up to.
 638                         */
 639                        if (--i < 0)
 640                                return -1;
 641                } else {
 642                        /*
 643                         * Found a feasible alternative for event_id i,
 644                         * remember where we got up to with this event_id,
 645                         * go on to the next event_id, and start with
 646                         * the first alternative for it.
 647                         */
 648                        choice[i] = j;
 649                        svalues[i] = value;
 650                        smasks[i] = mask;
 651                        value = nv;
 652                        mask |= cpuhw->amasks[i][j];
 653                        ++i;
 654                        j = -1;
 655                }
 656        }
 657
 658        /* OK, we have a feasible combination, tell the caller the solution */
 659        for (i = 0; i < n_ev; ++i)
 660                event_id[i] = cpuhw->alternatives[i][choice[i]];
 661        return 0;
 662}
 663
 664/*
 665 * Check if newly-added events have consistent settings for
 666 * exclude_{user,kernel,hv} with each other and any previously
 667 * added events.
 668 */
 669static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 670                          int n_prev, int n_new)
 671{
 672        int eu = 0, ek = 0, eh = 0;
 673        int i, n, first;
 674        struct perf_event *event;
 675
 676        n = n_prev + n_new;
 677        if (n <= 1)
 678                return 0;
 679
 680        first = 1;
 681        for (i = 0; i < n; ++i) {
 682                if (cflags[i] & PPMU_LIMITED_PMC_OK) {
 683                        cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
 684                        continue;
 685                }
 686                event = ctrs[i];
 687                if (first) {
 688                        eu = event->attr.exclude_user;
 689                        ek = event->attr.exclude_kernel;
 690                        eh = event->attr.exclude_hv;
 691                        first = 0;
 692                } else if (event->attr.exclude_user != eu ||
 693                           event->attr.exclude_kernel != ek ||
 694                           event->attr.exclude_hv != eh) {
 695                        return -EAGAIN;
 696                }
 697        }
 698
 699        if (eu || ek || eh)
 700                for (i = 0; i < n; ++i)
 701                        if (cflags[i] & PPMU_LIMITED_PMC_OK)
 702                                cflags[i] |= PPMU_LIMITED_PMC_REQD;
 703
 704        return 0;
 705}
 706
 707static u64 check_and_compute_delta(u64 prev, u64 val)
 708{
 709        u64 delta = (val - prev) & 0xfffffffful;
 710
 711        /*
  712         * POWER7 can roll back counter values; if the new value is smaller
  713         * than the previous value, it will cause the delta and the counter to
  714         * have bogus values unless we rolled a counter over.  If a counter is
  715         * rolled back, it will be smaller, but within 256, which is the maximum
  716         * number of events to roll back at once.  If we detect a rollback,
  717         * return 0.  This can lead to a small lack of precision in the
 718         * counters.
 719         */
 720        if (prev > val && (prev - val) < 256)
 721                delta = 0;
 722
 723        return delta;
 724}
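
/*
 * Illustrative sketch (not part of the original file) of the two cases
 * handled above, assuming 32-bit PMC values:
 */
#if 0
static void delta_examples(void)
{
	/* Normal 32-bit wrap: prev = 0xfffffff0, val = 0x10 -> delta 0x20 */
	WARN_ON(check_and_compute_delta(0xfffffff0, 0x10) != 0x20);

	/* POWER7 rollback: val slightly below prev -> delta forced to 0 */
	WARN_ON(check_and_compute_delta(0x1000, 0x0f80) != 0);
}
#endif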
 725
 726static void power_pmu_read(struct perf_event *event)
 727{
 728        s64 val, delta, prev;
 729
 730        if (event->hw.state & PERF_HES_STOPPED)
 731                return;
 732
 733        if (!event->hw.idx)
 734                return;
 735        /*
 736         * Performance monitor interrupts come even when interrupts
 737         * are soft-disabled, as long as interrupts are hard-enabled.
 738         * Therefore we treat them like NMIs.
 739         */
 740        do {
 741                prev = local64_read(&event->hw.prev_count);
 742                barrier();
 743                val = read_pmc(event->hw.idx);
 744                delta = check_and_compute_delta(prev, val);
 745                if (!delta)
 746                        return;
 747        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 748
 749        local64_add(delta, &event->count);
 750        local64_sub(delta, &event->hw.period_left);
 751}
 752
 753/*
 754 * On some machines, PMC5 and PMC6 can't be written, don't respect
 755 * the freeze conditions, and don't generate interrupts.  This tells
 756 * us if `event' is using such a PMC.
 757 */
 758static int is_limited_pmc(int pmcnum)
 759{
 760        return (ppmu->flags & PPMU_LIMITED_PMC5_6)
 761                && (pmcnum == 5 || pmcnum == 6);
 762}
 763
 764static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 765                                    unsigned long pmc5, unsigned long pmc6)
 766{
 767        struct perf_event *event;
 768        u64 val, prev, delta;
 769        int i;
 770
 771        for (i = 0; i < cpuhw->n_limited; ++i) {
 772                event = cpuhw->limited_counter[i];
 773                if (!event->hw.idx)
 774                        continue;
 775                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 776                prev = local64_read(&event->hw.prev_count);
 777                event->hw.idx = 0;
 778                delta = check_and_compute_delta(prev, val);
 779                if (delta)
 780                        local64_add(delta, &event->count);
 781        }
 782}
 783
 784static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 785                                  unsigned long pmc5, unsigned long pmc6)
 786{
 787        struct perf_event *event;
 788        u64 val, prev;
 789        int i;
 790
 791        for (i = 0; i < cpuhw->n_limited; ++i) {
 792                event = cpuhw->limited_counter[i];
 793                event->hw.idx = cpuhw->limited_hwidx[i];
 794                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 795                prev = local64_read(&event->hw.prev_count);
 796                if (check_and_compute_delta(prev, val))
 797                        local64_set(&event->hw.prev_count, val);
 798                perf_event_update_userpage(event);
 799        }
 800}
 801
 802/*
 803 * Since limited events don't respect the freeze conditions, we
 804 * have to read them immediately after freezing or unfreezing the
 805 * other events.  We try to keep the values from the limited
 806 * events as consistent as possible by keeping the delay (in
 807 * cycles and instructions) between freezing/unfreezing and reading
 808 * the limited events as small and consistent as possible.
  809 * Therefore, if any limited events are in use, we read both PMC5
  810 * and PMC6, always in the same order, to minimize variability,
 811 * and do it inside the same asm that writes MMCR0.
 812 */
 813static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 814{
 815        unsigned long pmc5, pmc6;
 816
 817        if (!cpuhw->n_limited) {
 818                mtspr(SPRN_MMCR0, mmcr0);
 819                return;
 820        }
 821
 822        /*
 823         * Write MMCR0, then read PMC5 and PMC6 immediately.
 824         * To ensure we don't get a performance monitor interrupt
 825         * between writing MMCR0 and freezing/thawing the limited
 826         * events, we first write MMCR0 with the event overflow
 827         * interrupt enable bits turned off.
 828         */
 829        asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
 830                     : "=&r" (pmc5), "=&r" (pmc6)
 831                     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
 832                       "i" (SPRN_MMCR0),
 833                       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 834
 835        if (mmcr0 & MMCR0_FC)
 836                freeze_limited_counters(cpuhw, pmc5, pmc6);
 837        else
 838                thaw_limited_counters(cpuhw, pmc5, pmc6);
 839
 840        /*
 841         * Write the full MMCR0 including the event overflow interrupt
 842         * enable bits, if necessary.
 843         */
 844        if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
 845                mtspr(SPRN_MMCR0, mmcr0);
 846}
 847
 848/*
 849 * Disable all events to prevent PMU interrupts and to allow
 850 * events to be added or removed.
 851 */
 852static void power_pmu_disable(struct pmu *pmu)
 853{
 854        struct cpu_hw_events *cpuhw;
 855        unsigned long flags;
 856
 857        if (!ppmu)
 858                return;
 859        local_irq_save(flags);
 860        cpuhw = &__get_cpu_var(cpu_hw_events);
 861
 862        if (!cpuhw->disabled) {
 863                cpuhw->disabled = 1;
 864                cpuhw->n_added = 0;
 865
 866                /*
 867                 * Check if we ever enabled the PMU on this cpu.
 868                 */
 869                if (!cpuhw->pmcs_enabled) {
 870                        ppc_enable_pmcs();
 871                        cpuhw->pmcs_enabled = 1;
 872                }
 873
 874                /*
 875                 * Disable instruction sampling if it was enabled
 876                 */
 877                if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
 878                        mtspr(SPRN_MMCRA,
 879                              cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 880                        mb();
 881                }
 882
 883                /*
 884                 * Set the 'freeze counters' bit.
 885                 * The barrier is to make sure the mtspr has been
 886                 * executed and the PMU has frozen the events
 887                 * before we return.
 888                 */
 889                write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
 890                mb();
 891        }
 892        local_irq_restore(flags);
 893}
 894
 895/*
  896 * Re-enable all events.
 897 * If we were previously disabled and events were added, then
 898 * put the new config on the PMU.
 899 */
 900static void power_pmu_enable(struct pmu *pmu)
 901{
 902        struct perf_event *event;
 903        struct cpu_hw_events *cpuhw;
 904        unsigned long flags;
 905        long i;
 906        unsigned long val;
 907        s64 left;
 908        unsigned int hwc_index[MAX_HWEVENTS];
 909        int n_lim;
 910        int idx;
 911
 912        if (!ppmu)
 913                return;
 914        local_irq_save(flags);
 915        cpuhw = &__get_cpu_var(cpu_hw_events);
 916        if (!cpuhw->disabled) {
 917                local_irq_restore(flags);
 918                return;
 919        }
 920        cpuhw->disabled = 0;
 921
 922        /*
 923         * If we didn't change anything, or only removed events,
 924         * no need to recalculate MMCR* settings and reset the PMCs.
 925         * Just reenable the PMU with the current MMCR* settings
 926         * (possibly updated for removal of events).
 927         */
 928        if (!cpuhw->n_added) {
 929                mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 930                mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 931                if (cpuhw->n_events == 0)
 932                        ppc_set_pmu_inuse(0);
 933                goto out_enable;
 934        }
 935
 936        /*
 937         * Compute MMCR* values for the new set of events
 938         */
 939        if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 940                               cpuhw->mmcr)) {
 941                /* shouldn't ever get here */
 942                printk(KERN_ERR "oops compute_mmcr failed\n");
 943                goto out;
 944        }
 945
 946        /*
 947         * Add in MMCR0 freeze bits corresponding to the
 948         * attr.exclude_* bits for the first event.
 949         * We have already checked that all events have the
 950         * same values for these bits as the first event.
 951         */
 952        event = cpuhw->event[0];
 953        if (event->attr.exclude_user)
 954                cpuhw->mmcr[0] |= MMCR0_FCP;
 955        if (event->attr.exclude_kernel)
 956                cpuhw->mmcr[0] |= freeze_events_kernel;
 957        if (event->attr.exclude_hv)
 958                cpuhw->mmcr[0] |= MMCR0_FCHV;
 959
 960        /*
 961         * Write the new configuration to MMCR* with the freeze
 962         * bit set and set the hardware events to their initial values.
 963         * Then unfreeze the events.
 964         */
 965        ppc_set_pmu_inuse(1);
 966        mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 967        mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 968        mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 969                                | MMCR0_FC);
 970
 971        /*
 972         * Read off any pre-existing events that need to move
 973         * to another PMC.
 974         */
 975        for (i = 0; i < cpuhw->n_events; ++i) {
 976                event = cpuhw->event[i];
 977                if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
 978                        power_pmu_read(event);
 979                        write_pmc(event->hw.idx, 0);
 980                        event->hw.idx = 0;
 981                }
 982        }
 983
 984        /*
 985         * Initialize the PMCs for all the new and moved events.
 986         */
 987        cpuhw->n_limited = n_lim = 0;
 988        for (i = 0; i < cpuhw->n_events; ++i) {
 989                event = cpuhw->event[i];
 990                if (event->hw.idx)
 991                        continue;
 992                idx = hwc_index[i] + 1;
 993                if (is_limited_pmc(idx)) {
 994                        cpuhw->limited_counter[n_lim] = event;
 995                        cpuhw->limited_hwidx[n_lim] = idx;
 996                        ++n_lim;
 997                        continue;
 998                }
 999                val = 0;
1000                if (event->hw.sample_period) {
1001                        left = local64_read(&event->hw.period_left);
1002                        if (left < 0x80000000L)
1003                                val = 0x80000000L - left;
1004                }
1005                local64_set(&event->hw.prev_count, val);
1006                event->hw.idx = idx;
1007                if (event->hw.state & PERF_HES_STOPPED)
1008                        val = 0;
1009                write_pmc(idx, val);
1010                perf_event_update_userpage(event);
1011        }
1012        cpuhw->n_limited = n_lim;
1013        cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
1014
1015 out_enable:
1016        mb();
1017        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
1018
1019        /*
1020         * Enable instruction sampling if necessary
1021         */
1022        if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
1023                mb();
1024                mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
1025        }
1026
1027 out:
1028        if (cpuhw->bhrb_users)
1029                ppmu->config_bhrb(cpuhw->bhrb_filter);
1030
1031        local_irq_restore(flags);
1032}
1033
1034static int collect_events(struct perf_event *group, int max_count,
1035                          struct perf_event *ctrs[], u64 *events,
1036                          unsigned int *flags)
1037{
1038        int n = 0;
1039        struct perf_event *event;
1040
1041        if (!is_software_event(group)) {
1042                if (n >= max_count)
1043                        return -1;
1044                ctrs[n] = group;
1045                flags[n] = group->hw.event_base;
1046                events[n++] = group->hw.config;
1047        }
1048        list_for_each_entry(event, &group->sibling_list, group_entry) {
1049                if (!is_software_event(event) &&
1050                    event->state != PERF_EVENT_STATE_OFF) {
1051                        if (n >= max_count)
1052                                return -1;
1053                        ctrs[n] = event;
1054                        flags[n] = event->hw.event_base;
1055                        events[n++] = event->hw.config;
1056                }
1057        }
1058        return n;
1059}
1060
1061/*
 1062 * Add an event to the PMU.
1063 * If all events are not already frozen, then we disable and
1064 * re-enable the PMU in order to get hw_perf_enable to do the
1065 * actual work of reconfiguring the PMU.
1066 */
1067static int power_pmu_add(struct perf_event *event, int ef_flags)
1068{
1069        struct cpu_hw_events *cpuhw;
1070        unsigned long flags;
1071        int n0;
1072        int ret = -EAGAIN;
1073
1074        local_irq_save(flags);
1075        perf_pmu_disable(event->pmu);
1076
1077        /*
1078         * Add the event to the list (if there is room)
1079         * and check whether the total set is still feasible.
1080         */
1081        cpuhw = &__get_cpu_var(cpu_hw_events);
1082        n0 = cpuhw->n_events;
1083        if (n0 >= ppmu->n_counter)
1084                goto out;
1085        cpuhw->event[n0] = event;
1086        cpuhw->events[n0] = event->hw.config;
1087        cpuhw->flags[n0] = event->hw.event_base;
1088
1089        /*
1090         * This event may have been disabled/stopped in record_and_restart()
1091         * because we exceeded the ->event_limit. If re-starting the event,
1092         * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
1093         * notification is re-enabled.
1094         */
1095        if (!(ef_flags & PERF_EF_START))
1096                event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
1097        else
1098                event->hw.state = 0;
1099
1100        /*
1101         * If group events scheduling transaction was started,
1102         * skip the schedulability test here, it will be performed
1103         * at commit time(->commit_txn) as a whole
1104         */
1105        if (cpuhw->group_flag & PERF_EVENT_TXN)
1106                goto nocheck;
1107
1108        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
1109                goto out;
1110        if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
1111                goto out;
1112        event->hw.config = cpuhw->events[n0];
1113
1114nocheck:
1115        ++cpuhw->n_events;
1116        ++cpuhw->n_added;
1117
1118        ret = 0;
1119 out:
1120        if (has_branch_stack(event))
1121                power_pmu_bhrb_enable(event);
1122
1123        perf_pmu_enable(event->pmu);
1124        local_irq_restore(flags);
1125        return ret;
1126}
1127
1128/*
 1129 * Remove an event from the PMU.
1130 */
1131static void power_pmu_del(struct perf_event *event, int ef_flags)
1132{
1133        struct cpu_hw_events *cpuhw;
1134        long i;
1135        unsigned long flags;
1136
1137        local_irq_save(flags);
1138        perf_pmu_disable(event->pmu);
1139
1140        power_pmu_read(event);
1141
1142        cpuhw = &__get_cpu_var(cpu_hw_events);
1143        for (i = 0; i < cpuhw->n_events; ++i) {
1144                if (event == cpuhw->event[i]) {
1145                        while (++i < cpuhw->n_events) {
1146                                cpuhw->event[i-1] = cpuhw->event[i];
1147                                cpuhw->events[i-1] = cpuhw->events[i];
1148                                cpuhw->flags[i-1] = cpuhw->flags[i];
1149                        }
1150                        --cpuhw->n_events;
1151                        ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
1152                        if (event->hw.idx) {
1153                                write_pmc(event->hw.idx, 0);
1154                                event->hw.idx = 0;
1155                        }
1156                        perf_event_update_userpage(event);
1157                        break;
1158                }
1159        }
1160        for (i = 0; i < cpuhw->n_limited; ++i)
1161                if (event == cpuhw->limited_counter[i])
1162                        break;
1163        if (i < cpuhw->n_limited) {
1164                while (++i < cpuhw->n_limited) {
1165                        cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
1166                        cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
1167                }
1168                --cpuhw->n_limited;
1169        }
1170        if (cpuhw->n_events == 0) {
1171                /* disable exceptions if no events are running */
1172                cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
1173        }
1174
1175        if (has_branch_stack(event))
1176                power_pmu_bhrb_disable(event);
1177
1178        perf_pmu_enable(event->pmu);
1179        local_irq_restore(flags);
1180}
1181
1182/*
 1183 * The POWER PMU does not support disabling individual counters; hence
 1184 * we program the counter to its max value and ignore the interrupts.
1185 */
1186
1187static void power_pmu_start(struct perf_event *event, int ef_flags)
1188{
1189        unsigned long flags;
1190        s64 left;
1191        unsigned long val;
1192
1193        if (!event->hw.idx || !event->hw.sample_period)
1194                return;
1195
1196        if (!(event->hw.state & PERF_HES_STOPPED))
1197                return;
1198
1199        if (ef_flags & PERF_EF_RELOAD)
1200                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1201
1202        local_irq_save(flags);
1203        perf_pmu_disable(event->pmu);
1204
1205        event->hw.state = 0;
1206        left = local64_read(&event->hw.period_left);
1207
1208        val = 0;
1209        if (left < 0x80000000L)
1210                val = 0x80000000L - left;
1211
1212        write_pmc(event->hw.idx, val);
1213
1214        perf_event_update_userpage(event);
1215        perf_pmu_enable(event->pmu);
1216        local_irq_restore(flags);
1217}
1218
1219static void power_pmu_stop(struct perf_event *event, int ef_flags)
1220{
1221        unsigned long flags;
1222
1223        if (!event->hw.idx || !event->hw.sample_period)
1224                return;
1225
1226        if (event->hw.state & PERF_HES_STOPPED)
1227                return;
1228
1229        local_irq_save(flags);
1230        perf_pmu_disable(event->pmu);
1231
1232        power_pmu_read(event);
1233        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
1234        write_pmc(event->hw.idx, 0);
1235
1236        perf_event_update_userpage(event);
1237        perf_pmu_enable(event->pmu);
1238        local_irq_restore(flags);
1239}
1240
1241/*
1242 * Start group events scheduling transaction
1243 * Set the flag to make pmu::enable() not perform the
 1244 * schedulability test; it will be performed at commit time
1245 */
1246void power_pmu_start_txn(struct pmu *pmu)
1247{
1248        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1249
1250        perf_pmu_disable(pmu);
1251        cpuhw->group_flag |= PERF_EVENT_TXN;
1252        cpuhw->n_txn_start = cpuhw->n_events;
1253}
1254
1255/*
1256 * Stop group events scheduling transaction
1257 * Clear the flag and pmu::enable() will perform the
1258 * schedulability test.
1259 */
1260void power_pmu_cancel_txn(struct pmu *pmu)
1261{
1262        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1263
1264        cpuhw->group_flag &= ~PERF_EVENT_TXN;
1265        perf_pmu_enable(pmu);
1266}
1267
1268/*
1269 * Commit group events scheduling transaction
1270 * Perform the group schedulability test as a whole
 1271 * Return 0 on success.
1272 */
1273int power_pmu_commit_txn(struct pmu *pmu)
1274{
1275        struct cpu_hw_events *cpuhw;
1276        long i, n;
1277
1278        if (!ppmu)
1279                return -EAGAIN;
1280        cpuhw = &__get_cpu_var(cpu_hw_events);
1281        n = cpuhw->n_events;
1282        if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
1283                return -EAGAIN;
1284        i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
1285        if (i < 0)
1286                return -EAGAIN;
1287
1288        for (i = cpuhw->n_txn_start; i < n; ++i)
1289                cpuhw->event[i]->hw.config = cpuhw->events[i];
1290
1291        cpuhw->group_flag &= ~PERF_EVENT_TXN;
1292        perf_pmu_enable(pmu);
1293        return 0;
1294}
1295
1296/*
 1297 * Return 1 if we might be able to put the event on a limited PMC,
 1298 * or 0 if not.
 1299 * An event can only go on a limited PMC if it counts something
1300 * that a limited PMC can count, doesn't require interrupts, and
1301 * doesn't exclude any processor mode.
1302 */
1303static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
1304                                 unsigned int flags)
1305{
1306        int n;
1307        u64 alt[MAX_EVENT_ALTERNATIVES];
1308
1309        if (event->attr.exclude_user
1310            || event->attr.exclude_kernel
1311            || event->attr.exclude_hv
1312            || event->attr.sample_period)
1313                return 0;
1314
1315        if (ppmu->limited_pmc_event(ev))
1316                return 1;
1317
1318        /*
1319         * The requested event_id isn't on a limited PMC already;
1320         * see if any alternative code goes on a limited PMC.
1321         */
1322        if (!ppmu->get_alternatives)
1323                return 0;
1324
1325        flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
1326        n = ppmu->get_alternatives(ev, flags, alt);
1327
1328        return n > 0;
1329}
1330
1331/*
1332 * Find an alternative event_id that goes on a normal PMC, if possible,
1333 * and return the event_id code, or 0 if there is no such alternative.
1334 * (Note: event_id code 0 is "don't count" on all machines.)
1335 */
1336static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
1337{
1338        u64 alt[MAX_EVENT_ALTERNATIVES];
1339        int n;
1340
1341        flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
1342        n = ppmu->get_alternatives(ev, flags, alt);
1343        if (!n)
1344                return 0;
1345        return alt[0];
1346}
1347
1348/* Number of perf_events counting hardware events */
1349static atomic_t num_events;
1350/* Used to avoid races in calling reserve/release_pmc_hardware */
1351static DEFINE_MUTEX(pmc_reserve_mutex);
1352
1353/*
1354 * Release the PMU if this is the last perf_event.
1355 */
1356static void hw_perf_event_destroy(struct perf_event *event)
1357{
1358        if (!atomic_add_unless(&num_events, -1, 1)) {
1359                mutex_lock(&pmc_reserve_mutex);
1360                if (atomic_dec_return(&num_events) == 0)
1361                        release_pmc_hardware();
1362                mutex_unlock(&pmc_reserve_mutex);
1363        }
1364}
1365
1366/*
1367 * Translate a generic cache event_id config to a raw event_id code.
1368 */
1369static int hw_perf_cache_event(u64 config, u64 *eventp)
1370{
1371        unsigned long type, op, result;
1372        int ev;
1373
1374        if (!ppmu->cache_events)
1375                return -EINVAL;
1376
1377        /* unpack config */
1378        type = config & 0xff;
1379        op = (config >> 8) & 0xff;
1380        result = (config >> 16) & 0xff;
1381
1382        if (type >= PERF_COUNT_HW_CACHE_MAX ||
1383            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
1384            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1385                return -EINVAL;
1386
1387        ev = (*ppmu->cache_events)[type][op][result];
1388        if (ev == 0)
1389                return -EOPNOTSUPP;
1390        if (ev == -1)
1391                return -EINVAL;
1392        *eventp = ev;
1393        return 0;
1394}
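
/*
 * Worked example (illustrative, using the generic perf ABI encoding that
 * the unpacking above expects): an L1-data-cache read-miss event is
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), i.e. config = 0x10000.
 */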
1395
1396static int power_pmu_event_init(struct perf_event *event)
1397{
1398        u64 ev;
1399        unsigned long flags;
1400        struct perf_event *ctrs[MAX_HWEVENTS];
1401        u64 events[MAX_HWEVENTS];
1402        unsigned int cflags[MAX_HWEVENTS];
1403        int n;
1404        int err;
1405        struct cpu_hw_events *cpuhw;
1406
1407        if (!ppmu)
1408                return -ENOENT;
1409
1410        if (has_branch_stack(event)) {
 1411                /* The PMU must support BHRB for branch stack sampling */
1412                if (!(ppmu->flags & PPMU_BHRB))
1413                        return -EOPNOTSUPP;
1414        }
1415
1416        switch (event->attr.type) {
1417        case PERF_TYPE_HARDWARE:
1418                ev = event->attr.config;
1419                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
1420                        return -EOPNOTSUPP;
1421                ev = ppmu->generic_events[ev];
1422                break;
1423        case PERF_TYPE_HW_CACHE:
1424                err = hw_perf_cache_event(event->attr.config, &ev);
1425                if (err)
1426                        return err;
1427                break;
1428        case PERF_TYPE_RAW:
1429                ev = event->attr.config;
1430                break;
1431        default:
1432                return -ENOENT;
1433        }
1434
1435        event->hw.config_base = ev;
1436        event->hw.idx = 0;
1437
1438        /*
1439         * If we are not running on a hypervisor, force the
1440         * exclude_hv bit to 0 so that we don't care what
1441         * the user set it to.
1442         */
1443        if (!firmware_has_feature(FW_FEATURE_LPAR))
1444                event->attr.exclude_hv = 0;
1445
1446        /*
1447         * If this is a per-task event, then we can use
1448         * PM_RUN_* events interchangeably with their non RUN_*
1449         * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1450         * XXX we should check if the task is an idle task.
1451         */
1452        flags = 0;
1453        if (event->attach_state & PERF_ATTACH_TASK)
1454                flags |= PPMU_ONLY_COUNT_RUN;
1455
1456        /*
1457         * If this machine has limited events, check whether this
1458         * event_id could go on a limited event.
1459         */
1460        if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
1461                if (can_go_on_limited_pmc(event, ev, flags)) {
1462                        flags |= PPMU_LIMITED_PMC_OK;
1463                } else if (ppmu->limited_pmc_event(ev)) {
1464                        /*
1465                         * The requested event_id is on a limited PMC,
1466                         * but we can't use a limited PMC; see if any
1467                         * alternative goes on a normal PMC.
1468                         */
1469                        ev = normal_pmc_alternative(ev, flags);
1470                        if (!ev)
1471                                return -EINVAL;
1472                }
1473        }
1474
1475        /*
1476         * If this is in a group, check if it can go on with all the
1477         * other hardware events in the group.  We assume the event
1478         * hasn't been linked into its leader's sibling list at this point.
1479         */
1480        n = 0;
1481        if (event->group_leader != event) {
1482                n = collect_events(event->group_leader, ppmu->n_counter - 1,
1483                                   ctrs, events, cflags);
1484                if (n < 0)
1485                        return -EINVAL;
1486        }
1487        events[n] = ev;
1488        ctrs[n] = event;
1489        cflags[n] = flags;
1490        if (check_excludes(ctrs, cflags, n, 1))
1491                return -EINVAL;
1492
1493        cpuhw = &get_cpu_var(cpu_hw_events);
1494        err = power_check_constraints(cpuhw, events, cflags, n + 1);
1495
1496        if (has_branch_stack(event)) {
1497                cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1498                                        event->attr.branch_sample_type);
1499
 1500                if (cpuhw->bhrb_filter == -1) {
                             /* don't leave preemption disabled on the error path */
                             put_cpu_var(cpu_hw_events);
 1501                        return -EOPNOTSUPP;
                     }
1502        }
1503
1504        put_cpu_var(cpu_hw_events);
1505        if (err)
1506                return -EINVAL;
1507
1508        event->hw.config = events[n];
1509        event->hw.event_base = cflags[n];
1510        event->hw.last_period = event->hw.sample_period;
1511        local64_set(&event->hw.period_left, event->hw.last_period);
1512
1513        /*
1514         * See if we need to reserve the PMU.
1515         * If no events are currently in use, then we have to take a
1516         * mutex to ensure that we don't race with another task doing
1517         * reserve_pmc_hardware or release_pmc_hardware.
1518         */
1519        err = 0;
1520        if (!atomic_inc_not_zero(&num_events)) {
1521                mutex_lock(&pmc_reserve_mutex);
1522                if (atomic_read(&num_events) == 0 &&
1523                    reserve_pmc_hardware(perf_event_interrupt))
1524                        err = -EBUSY;
1525                else
1526                        atomic_inc(&num_events);
1527                mutex_unlock(&pmc_reserve_mutex);
1528        }
1529        event->destroy = hw_perf_event_destroy;
1530
1531        return err;
1532}
1533
1534static int power_pmu_event_idx(struct perf_event *event)
1535{
1536        return event->hw.idx;
1537}
1538
1539ssize_t power_events_sysfs_show(struct device *dev,
1540                                struct device_attribute *attr, char *page)
1541{
1542        struct perf_pmu_events_attr *pmu_attr;
1543
1544        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
1545
1546        return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
1547}
1548
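    /*
     * Callbacks through which the generic perf core drives this PMU;
     * the structure is handed to perf_pmu_register() in
     * register_power_pmu() below.
     */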
1549struct pmu power_pmu = {
1550        .pmu_enable     = power_pmu_enable,
1551        .pmu_disable    = power_pmu_disable,
1552        .event_init     = power_pmu_event_init,
1553        .add            = power_pmu_add,
1554        .del            = power_pmu_del,
1555        .start          = power_pmu_start,
1556        .stop           = power_pmu_stop,
1557        .read           = power_pmu_read,
1558        .start_txn      = power_pmu_start_txn,
1559        .cancel_txn     = power_pmu_cancel_txn,
1560        .commit_txn     = power_pmu_commit_txn,
1561        .event_idx      = power_pmu_event_idx,
1562        .flush_branch_stack = power_pmu_flush_branch_stack,
1563};
1564
1565/*
1566 * A counter has overflowed; update its count and record
1567 * things if requested.  Note that interrupts are hard-disabled
1568 * here so there is no possibility of being interrupted.
1569 */
1570static void record_and_restart(struct perf_event *event, unsigned long val,
1571                               struct pt_regs *regs)
1572{
1573        u64 period = event->hw.sample_period;
1574        s64 prev, delta, left;
1575        int record = 0;
1576
1577        if (event->hw.state & PERF_HES_STOPPED) {
1578                write_pmc(event->hw.idx, 0);
1579                return;
1580        }
1581
1582        /* we don't have to worry about interrupts here */
1583        prev = local64_read(&event->hw.prev_count);
1584        delta = check_and_compute_delta(prev, val);
1585        local64_add(delta, &event->count);
1586
            /*
             * See if the total period for this event has expired,
             * and update for the next period.  The PMC counts up and
             * raises an exception when it goes negative (bit 31 set),
             * so to get the next interrupt after "left" more counts
             * the counter is reprogrammed to 0x80000000 - left below.
             */
1591        val = 0;
1592        left = local64_read(&event->hw.period_left) - delta;
1593        if (delta == 0)
1594                left++;
1595        if (period) {
1596                if (left <= 0) {
1597                        left += period;
1598                        if (left <= 0)
1599                                left = period;
1600                        record = siar_valid(regs);
1601                        event->hw.last_period = event->hw.sample_period;
1602                }
1603                if (left < 0x80000000LL)
1604                        val = 0x80000000LL - left;
1605        }
1606
1607        write_pmc(event->hw.idx, val);
1608        local64_set(&event->hw.prev_count, val);
1609        local64_set(&event->hw.period_left, left);
1610        perf_event_update_userpage(event);
1611
1612        /*
1613         * Finally record data if requested.
1614         */
1615        if (record) {
1616                struct perf_sample_data data;
1617
1618                perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
1619
1620                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1621                        perf_get_data_addr(regs, &data.addr);
1622
1623                if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
1624                        struct cpu_hw_events *cpuhw;
1625                        cpuhw = &__get_cpu_var(cpu_hw_events);
1626                        power_pmu_bhrb_read(cpuhw);
1627                        data.br_stack = &cpuhw->bhrb_stack;
1628                }
1629
1630                if (perf_event_overflow(event, &data, regs))
1631                        power_pmu_stop(event, 0);
1632        }
1633}
1634
1635/*
1636 * Called from generic code to get the misc flags (i.e. processor mode)
1637 * for an event_id.
1638 */
1639unsigned long perf_misc_flags(struct pt_regs *regs)
1640{
1641        u32 flags = perf_get_misc_flags(regs);
1642
1643        if (flags)
1644                return flags;
1645        return user_mode(regs) ? PERF_RECORD_MISC_USER :
1646                PERF_RECORD_MISC_KERNEL;
1647}
1648
1649/*
1650 * Called from generic code to get the instruction pointer
1651 * for an event_id.
1652 */
1653unsigned long perf_instruction_pointer(struct pt_regs *regs)
1654{
1655        bool use_siar = regs_use_siar(regs);
1656
1657        if (use_siar && siar_valid(regs))
1658                return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1659        else if (use_siar)
1660                return 0;               /* no valid instruction pointer */
1661        else
1662                return regs->nip;
1663}
1664
1665static bool pmc_overflow_power7(unsigned long val)
1666{
1667        /*
1668         * Events on POWER7 can roll back if a speculative event doesn't
1669         * eventually complete. Unfortunately in some rare cases they will
1670         * raise a performance monitor exception. We need to catch this to
1671         * ensure we reset the PMC. In all cases the PMC will be 256 or less
1672         * cycles from overflow.
1673         *
1674         * We only do this if the first pass fails to find any overflowing
1675         * PMCs because a user might set a period of less than 256 and we
1676         * don't want to mistakenly reset them.
1677         */
1678        if ((0x80000000 - val) <= 256)
1679                return true;
1680
1681        return false;
1682}
1683
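    /*
     * The PMCs count up and flag an overflow once they go negative, so
     * any value with the sign bit set indicates an overflowed counter.
     */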
1684static bool pmc_overflow(unsigned long val)
1685{
1686        if ((int)val < 0)
1687                return true;
1688
1689        return false;
1690}
1691
1692/*
1693 * Performance monitor interrupt handler
1694 */
1695static void perf_event_interrupt(struct pt_regs *regs)
1696{
1697        int i, j;
1698        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1699        struct perf_event *event;
1700        unsigned long val[8];
1701        int found, active;
1702        int nmi;
1703
1704        if (cpuhw->n_limited)
1705                freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
1706                                        mfspr(SPRN_PMC6));
1707
1708        perf_read_regs(regs);
1709
1710        nmi = perf_intr_is_nmi(regs);
1711        if (nmi)
1712                nmi_enter();
1713        else
1714                irq_enter();
1715
1716        /* Read all the PMCs since we'll need them a bunch of times */
1717        for (i = 0; i < ppmu->n_counter; ++i)
1718                val[i] = read_pmc(i + 1);
1719
1720        /* Try to find what caused the IRQ */
1721        found = 0;
1722        for (i = 0; i < ppmu->n_counter; ++i) {
1723                if (!pmc_overflow(val[i]))
1724                        continue;
1725                if (is_limited_pmc(i + 1))
1726                        continue; /* these won't generate IRQs */
1727                /*
1728                 * We've found one that's overflowed.  For active
1729                 * counters we need to log this.  For inactive
1730                 * counters, we need to reset them anyway.
1731                 */
1732                found = 1;
1733                active = 0;
1734                for (j = 0; j < cpuhw->n_events; ++j) {
1735                        event = cpuhw->event[j];
1736                        if (event->hw.idx == (i + 1)) {
1737                                active = 1;
1738                                record_and_restart(event, val[i], regs);
1739                                break;
1740                        }
1741                }
1742                if (!active)
1743                        /* reset inactive counters that have overflowed */
1744                        write_pmc(i + 1, 0);
1745        }
1746        if (!found && pvr_version_is(PVR_POWER7)) {
1747                /* check active counters for special buggy p7 overflow */
1748                for (i = 0; i < cpuhw->n_events; ++i) {
1749                        event = cpuhw->event[i];
1750                        if (!event->hw.idx || is_limited_pmc(event->hw.idx))
1751                                continue;
1752                        if (pmc_overflow_power7(val[event->hw.idx - 1])) {
1753                                /* event has overflowed in a buggy way */
1754                                found = 1;
1755                                record_and_restart(event,
1756                                                   val[event->hw.idx - 1],
1757                                                   regs);
1758                        }
1759                }
1760        }
1761        if (!found && !nmi && printk_ratelimit())
1762                printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
1763
1764        /*
1765         * Reset MMCR0 to its normal value.  This will set PMXE and
1766         * clear FC (freeze counters) and PMAO (perf mon alert occurred)
1767         * and thus allow interrupts to occur again.
1768         * XXX might want to use MSR.PM to keep the events frozen until
1769         * we get back out of this interrupt.
1770         */
1771        write_mmcr0(cpuhw, cpuhw->mmcr[0]);
1772
1773        if (nmi)
1774                nmi_exit();
1775        else
1776                irq_exit();
1777}
1778
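    /*
     * Initialise the per-cpu PMU state for a CPU that is being brought
     * up: clear the bookkeeping and leave the counters frozen (MMCR0_FC)
     * until events are scheduled onto them.
     */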
1779static void power_pmu_setup(int cpu)
1780{
1781        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1782
1783        if (!ppmu)
1784                return;
1785        memset(cpuhw, 0, sizeof(*cpuhw));
1786        cpuhw->mmcr[0] = MMCR0_FC;
1787}
1788
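    /*
     * CPU hotplug notifier: reset the per-cpu PMU state before a CPU is
     * brought online.
     */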
1789static int __cpuinit
1790power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1791{
1792        unsigned int cpu = (long)hcpu;
1793
1794        switch (action & ~CPU_TASKS_FROZEN) {
1795        case CPU_UP_PREPARE:
1796                power_pmu_setup(cpu);
1797                break;
1798
1799        default:
1800                break;
1801        }
1802
1803        return NOTIFY_OK;
1804}
1805
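    /*
     * Called at boot by the CPU-specific backend (power7-pmu.c and
     * friends) to hook its struct power_pmu description into the generic
     * perf core.  Only one backend can register; later calls get -EBUSY.
     */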
1806int __cpuinit register_power_pmu(struct power_pmu *pmu)
1807{
1808        if (ppmu)
1809                return -EBUSY;          /* something's already registered */
1810
1811        ppmu = pmu;
1812        pr_info("%s performance monitor hardware support registered\n",
1813                pmu->name);
1814
1815        power_pmu.attr_groups = ppmu->attr_groups;
1816
1817#ifdef MSR_HV
1818        /*
1819         * Use FCHV to ignore kernel events if MSR.HV is set.
1820         */
1821        if (mfmsr() & MSR_HV)
1822                freeze_events_kernel = MMCR0_FCHV;
1823#endif /* MSR_HV */
1824
1825        perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
1826        perf_cpu_notifier(power_pmu_notifier);
1827
1828        return 0;
1829}
1830
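    /*
     * Illustrative sketch (not part of this file): roughly how a
     * CPU-specific backend might describe itself and register with the
     * code above.  Only fields this file actually dereferences are
     * shown; the "example_*" names are made up, and a real backend also
     * supplies its event tables, compute_mmcr() and other callbacks.
     *
     *      static struct power_pmu example_pmu = {
     *              .name                   = "EXAMPLE",
     *              .n_counter              = 6,
     *              .flags                  = PPMU_LIMITED_PMC5_6,
     *              .limited_pmc_event      = example_limited_pmc_event,
     *              .bhrb_filter_map        = example_bhrb_filter_map,
     *              .attr_groups            = example_attr_groups,
     *      };
     *
     *      static int __init init_example_pmu(void)
     *      {
     *              return register_power_pmu(&example_pmu);
     *      }
     *      early_initcall(init_example_pmu);
     */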