linux/arch/s390/kernel/perf_cpum_cf.c
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 *  Copyright IBM Corp. 2012
 *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT  "cpum_cf"
#define pr_fmt(fmt)     KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>

/* The CPU-measurement counter facility supports these CPU counter sets:
 *    Basic counter set:             0-31
 *    Problem-state counter set:    32-63
 *    Crypto-activity counter set:  64-127
 *    Extended counter set:        128-159
 */
enum cpumf_ctr_set {
        /* CPU counter sets */
        CPUMF_CTR_SET_BASIC   = 0,
        CPUMF_CTR_SET_USER    = 1,
        CPUMF_CTR_SET_CRYPTO  = 2,
        CPUMF_CTR_SET_EXT     = 3,

        /* Maximum number of counter sets */
        CPUMF_CTR_SET_MAX,
};

#define CPUMF_LCCTL_ENABLE_SHIFT    16
#define CPUMF_LCCTL_ACTCTL_SHIFT     0
static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
        [CPUMF_CTR_SET_BASIC]   = 0x02,
        [CPUMF_CTR_SET_USER]    = 0x04,
        [CPUMF_CTR_SET_CRYPTO]  = 0x08,
        [CPUMF_CTR_SET_EXT]     = 0x01,
};

static void ctr_set_enable(u64 *state, int ctr_set)
{
        *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}
static void ctr_set_disable(u64 *state, int ctr_set)
{
        *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}
static void ctr_set_start(u64 *state, int ctr_set)
{
        *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}
static void ctr_set_stop(u64 *state, int ctr_set)
{
        *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}
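
/*
 * Illustration (derived from the shifts above, not from architecture
 * documentation): enabling and starting the basic counter set ORs its
 * control bit, 0x02, into both fields of the state word:
 *
 *      (0x02 << CPUMF_LCCTL_ENABLE_SHIFT) | (0x02 << CPUMF_LCCTL_ACTCTL_SHIFT)
 *              == 0x20000 | 0x2 == 0x20002
 *
 * This state word is later passed unchanged to lcctl().
 */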

/* Local CPUMF event structure */
struct cpu_hw_events {
        struct cpumf_ctr_info   info;
        atomic_t                ctr_set[CPUMF_CTR_SET_MAX];
        u64                     state, tx_state;
        unsigned int            flags;
        unsigned int            txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .ctr_set = {
                [CPUMF_CTR_SET_BASIC]  = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_USER]   = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_EXT]    = ATOMIC_INIT(0),
        },
        .state = 0,
        .flags = 0,
        .txn_flags = 0,
};

static int get_counter_set(u64 event)
{
        int set = -1;

        if (event < 32)
                set = CPUMF_CTR_SET_BASIC;
        else if (event < 64)
                set = CPUMF_CTR_SET_USER;
        else if (event < 128)
                set = CPUMF_CTR_SET_CRYPTO;
        else if (event < 256)
                set = CPUMF_CTR_SET_EXT;

        return set;
}
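
/*
 * Example: counter number 33 falls into the problem-state set
 * (CPUMF_CTR_SET_USER).  Numbers 160-255 still map to the extended set
 * here; the model-dependent upper bound of that set is enforced
 * separately in validate_ctr_version().
 */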

static int validate_event(const struct hw_perf_event *hwc)
{
        switch (hwc->config_base) {
        case CPUMF_CTR_SET_BASIC:
        case CPUMF_CTR_SET_USER:
        case CPUMF_CTR_SET_CRYPTO:
        case CPUMF_CTR_SET_EXT:
                /* check for reserved counters */
                if ((hwc->config >=  6 && hwc->config <=  31) ||
                    (hwc->config >= 38 && hwc->config <=  63) ||
                    (hwc->config >= 80 && hwc->config <= 127))
                        return -EOPNOTSUPP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_ctr_version(const struct hw_perf_event *hwc)
{
        struct cpu_hw_events *cpuhw;
        int err = 0;

        cpuhw = &get_cpu_var(cpu_hw_events);

        /* check required version for counter sets */
        switch (hwc->config_base) {
        case CPUMF_CTR_SET_BASIC:
        case CPUMF_CTR_SET_USER:
                if (cpuhw->info.cfvn < 1)
                        err = -EOPNOTSUPP;
                break;
        case CPUMF_CTR_SET_CRYPTO:
        case CPUMF_CTR_SET_EXT:
                if (cpuhw->info.csvn < 1)
                        err = -EOPNOTSUPP;
                if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
                    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
                    (cpuhw->info.csvn  > 2 && hwc->config > 255))
                        err = -EOPNOTSUPP;
                break;
        }

        put_cpu_var(cpu_hw_events);
        return err;
}
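
/*
 * Example: on a machine reporting counter-set version number (csvn) 1,
 * the extended set ends at counter 159, so a request for counter 200 is
 * rejected with -EOPNOTSUPP even though get_counter_set() mapped it to
 * CPUMF_CTR_SET_EXT.
 */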

static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
        struct cpu_hw_events *cpuhw;
        u64 ctrs_state;
        int err = 0;

        cpuhw = &get_cpu_var(cpu_hw_events);

        /* Check authorization for cpu counter sets.
         * If the particular CPU counter set is not authorized,
         * return with -ENOENT in order to fall back to other
         * PMUs that might be able to service the event request.
         */
        ctrs_state = cpumf_state_ctl[hwc->config_base];
        if (!(ctrs_state & cpuhw->info.auth_ctl))
                err = -ENOENT;

        put_cpu_var(cpu_hw_events);
        return err;
}
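
/*
 * Illustration: if only the basic and problem-state sets are authorized,
 * auth_ctl contains 0x02 | 0x04 == 0x06.  A crypto-set event, whose
 * control bit is 0x08, then fails the check above and is handed back to
 * the core with -ENOENT so another PMU may pick it up.
 */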

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        int err;

        if (cpuhw->flags & PMU_F_ENABLED)
                return;

        err = lcctl(cpuhw->state);
        if (err) {
                pr_err("Enabling the performance measuring unit "
                       "failed with rc=%x\n", err);
                return;
        }

        cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * The counter sets stay enabled, but their activation control is
 * cleared (inactive state) according to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        int err;
        u64 inactive;

        if (!(cpuhw->flags & PMU_F_ENABLED))
                return;

        inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
        err = lcctl(inactive);
        if (err) {
                pr_err("Disabling the performance measuring unit "
                       "failed with rc=%x\n", err);
                return;
        }

        cpuhw->flags &= ~PMU_F_ENABLED;
}
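
/*
 * Worked example for the mask above: ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1)
 * is ~0xffff, which keeps the set-enable bits and clears the activation
 * bits.  A state of 0x20002 thus becomes 0x20000: the basic set remains
 * enabled but stops counting.
 */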

/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
                                    unsigned int alert, unsigned long unused)
{
        struct cpu_hw_events *cpuhw;

        if (!(alert & CPU_MF_INT_CF_MASK))
                return;

        inc_irq_stat(IRQEXT_CMC);
        cpuhw = this_cpu_ptr(&cpu_hw_events);

        /* Measurement alerts are shared and might happen when the PMU
         * is not reserved.  Ignore these alerts in this case. */
        if (!(cpuhw->flags & PMU_F_RESERVED))
                return;

        /* counter authorization change alert */
        if (alert & CPU_MF_INT_CF_CACA)
                qctri(&cpuhw->info);

        /* loss of counter data alert */
        if (alert & CPU_MF_INT_CF_LCDA)
                pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
}

#define PMC_INIT      0
#define PMC_RELEASE   1
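
/*
 * setup_pmc_cpu() always runs on the CPU it configures: it is invoked
 * through on_each_cpu() from the reserve/release paths below and through
 * smp_call_function_single() from the hotplug notifier, so this_cpu_ptr()
 * refers to the target CPU.
 */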
static void setup_pmc_cpu(void *flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        switch (*((int *) flags)) {
        case PMC_INIT:
                memset(&cpuhw->info, 0, sizeof(cpuhw->info));
                qctri(&cpuhw->info);
                cpuhw->flags |= PMU_F_RESERVED;
                break;

        case PMC_RELEASE:
                cpuhw->flags &= ~PMU_F_RESERVED;
                break;
        }

        /* Disable CPU counter sets */
        lcctl(0);
}

/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
        int flags = PMC_INIT;

        on_each_cpu(setup_pmc_cpu, &flags, 1);
        irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

        return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
        int flags = PMC_RELEASE;

        on_each_cpu(setup_pmc_cpu, &flags, 1);
        irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (!atomic_add_unless(&num_events, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}
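
/*
 * Note on the pattern above: atomic_add_unless() decrements num_events
 * without taking the mutex as long as the count is greater than one.
 * Only a potentially last event falls through and takes pmc_reserve_mutex,
 * mirroring the reservation path in __hw_perf_event_init().
 */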

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = 0,
        [PERF_COUNT_HW_INSTRUCTIONS]        = 1,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
        [PERF_COUNT_HW_CACHE_MISSES]        = -1,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
        [PERF_COUNT_HW_BRANCH_MISSES]       = -1,
        [PERF_COUNT_HW_BUS_CYCLES]          = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = 32,
        [PERF_COUNT_HW_INSTRUCTIONS]        = 33,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
        [PERF_COUNT_HW_CACHE_MISSES]        = -1,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
        [PERF_COUNT_HW_BRANCH_MISSES]       = -1,
        [PERF_COUNT_HW_BUS_CYCLES]          = -1,
};

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        int err;
        u64 ev;

        switch (attr->type) {
        case PERF_TYPE_RAW:
                /* Raw events are used to access counters directly,
                 * hence do not permit excludes */
                if (attr->exclude_kernel || attr->exclude_user ||
                    attr->exclude_hv)
                        return -EOPNOTSUPP;
                ev = attr->config;
                break;

        case PERF_TYPE_HARDWARE:
                ev = attr->config;
                /* Count user space (problem-state) only */
                if (!attr->exclude_user && attr->exclude_kernel) {
                        if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
                                return -EOPNOTSUPP;
                        ev = cpumf_generic_events_user[ev];

                /* No support for kernel-space-only counters */
                } else if (!attr->exclude_kernel && attr->exclude_user) {
                        return -EOPNOTSUPP;

                /* Count user and kernel space */
                } else {
                        if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
                                return -EOPNOTSUPP;
                        ev = cpumf_generic_events_basic[ev];
                }
                break;

        default:
                return -ENOENT;
        }

        if (ev == -1)
                return -ENOENT;

        if (ev >= PERF_CPUM_CF_MAX_CTR)
                return -EINVAL;

        /* Use the hardware perf event structure to store the counter number
         * in the 'config' member and the counter set to which the counter
         * belongs in the 'config_base' member.  The counter set (config_base)
         * is then used to enable/disable the counters.
         */
        hwc->config = ev;
        hwc->config_base = get_counter_set(ev);

        /* Validate the counter that is assigned to this event.
         * Because the counter facility can use numerous counters at the
         * same time without constraints, it is not necessary to explicitly
         * validate event groups (event->group_leader != event).
         */
        err = validate_event(hwc);
        if (err)
                return err;

        /* Initialize for using the CPU-measurement counter facility */
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
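        /* Set the destroy callback before the remaining validation steps so
         * that cpumf_pmu_event_init() releases the hardware again via
         * hw_perf_event_destroy() if they fail. */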
        event->destroy = hw_perf_event_destroy;

        /* Finally, validate version and authorization of the counter set */
        err = validate_ctr_auth(hwc);
        if (!err)
                err = validate_ctr_version(hwc);

        return err;
}

static int cpumf_pmu_event_init(struct perf_event *event)
{
        int err;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        case PERF_TYPE_RAW:
                err = __hw_perf_event_init(event);
                break;
        default:
                return -ENOENT;
        }

        if (unlikely(err) && event->destroy)
                event->destroy(event);

        return err;
}
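
/*
 * Note: PERF_TYPE_HW_CACHE is accepted by the switch above but is not
 * handled in __hw_perf_event_init(), whose default case returns -ENOENT;
 * cache events therefore fall through to other PMUs.
 */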

static int hw_perf_event_reset(struct perf_event *event)
{
        u64 prev, new;
        int err;

        do {
                prev = local64_read(&event->hw.prev_count);
                err = ecctr(event->hw.config, &new);
                if (err) {
                        if (err != 3)
                                break;
                        /* The counter is not (yet) available. This
                         * might happen if the counter set to which
                         * this counter belongs is in the disabled
                         * state.
                         */
                        new = 0;
                }
        } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

        return err;
}
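
/*
 * Both hw_perf_event_reset() and hw_perf_event_update() use the usual
 * lock-free pattern: re-read the counter until prev_count can be swapped
 * in without a concurrent update having changed it in between.
 */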

static int hw_perf_event_update(struct perf_event *event)
{
        u64 prev, new, delta;
        int err;

        do {
                prev = local64_read(&event->hw.prev_count);
                err = ecctr(event->hw.config, &new);
                if (err)
                        goto out;
        } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

        delta = (prev <= new) ? new - prev
                              : (-1ULL - prev) + new + 1;        /* overflow */
        local64_add(delta, &event->count);
out:
        return err;
}
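
/*
 * Worked example for the overflow branch above: with prev == 0xfffffffffffffffe
 * and new == 1, delta = (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3 counter
 * increments across the 64-bit wrap-around.
 */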

static void cpumf_pmu_read(struct perf_event *event)
{
        if (event->hw.state & PERF_HES_STOPPED)
                return;

        hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(hwc->config == -1))
                return;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        /* (Re-)enable and activate the counter set */
        ctr_set_enable(&cpuhw->state, hwc->config_base);
        ctr_set_start(&cpuhw->state, hwc->config_base);

        /* The counter set to which this counter belongs may already be
         * active.  Because all counters in a set are active, the
         * event->hw.prev_count needs to be synchronized.  At this point,
         * the counter set can be in the inactive or disabled state.
         */
        hw_perf_event_reset(event);

        /* increment refcount for this counter set */
        atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}
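
/*
 * The per-set refcount taken in cpumf_pmu_start() pairs with the
 * atomic_dec_return() in cpumf_pmu_stop() below: only when the last
 * event of a counter set stops is its activation control cleared again.
 */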

static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                /* Decrement reference count for this counter set and if this
                 * is the last used counter in the set, clear activation
                 * control and set the counter set state to inactive.
                 */
                if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
                        ctr_set_stop(&cpuhw->state, hwc->config_base);
                event->hw.state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                hw_perf_event_update(event);
                event->hw.state |= PERF_HES_UPTODATE;
        }
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        /* Check authorization for the counter set to which this
         * counter belongs.
         * For group event transactions, the authorization check is
         * done in cpumf_pmu_commit_txn().
         */
        if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
                if (validate_ctr_auth(&event->hw))
                        return -ENOENT;

        ctr_set_enable(&cpuhw->state, event->hw.config_base);
        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                cpumf_pmu_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);

        return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        cpumf_pmu_stop(event, PERF_EF_UPDATE);

        /* Check if any counter in the counter set is still used.  If not,
         * change the counter set to the disabled state.  This also clears
         * the content of all counters in the set.
         *
         * When a new perf event has been added but not yet started, this can
         * clear the enable control and reset all counters in the set.
         * Therefore, cpumf_pmu_start() always has to re-enable the counter
         * set.
         */
        if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
                ctr_set_disable(&cpuhw->state, event->hw.config_base);

        perf_event_update_userpage(event);
}

/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        WARN_ON_ONCE(cpuhw->txn_flags);         /* txn already in flight */

        cpuhw->txn_flags = txn_flags;
        if (txn_flags & ~PERF_PMU_TXN_ADD)
                return;

        perf_pmu_disable(pmu);
        cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each event that was successfully
 * added with cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
        unsigned int txn_flags;
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */

        txn_flags = cpuhw->txn_flags;
        cpuhw->txn_flags = 0;
        if (txn_flags & ~PERF_PMU_TXN_ADD)
                return;

        WARN_ON(cpuhw->tx_state != cpuhw->state);

        perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        u64 state;

        WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */

        if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
                cpuhw->txn_flags = 0;
                return 0;
        }

        /* check if the updated state can be scheduled */
        state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
        state >>= CPUMF_LCCTL_ENABLE_SHIFT;
        if ((state & cpuhw->info.auth_ctl) != state)
                return -ENOENT;

        cpuhw->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
}
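
/*
 * Illustration for the commit check: if the transaction enabled the basic
 * and crypto sets, state >> CPUMF_LCCTL_ENABLE_SHIFT == 0x02 | 0x08 == 0x0a.
 * With auth_ctl == 0x06 (basic and problem-state authorized only),
 * 0x0a & 0x06 == 0x02 != 0x0a, so the whole group is rejected with -ENOENT.
 */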

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
        .pmu_enable   = cpumf_pmu_enable,
        .pmu_disable  = cpumf_pmu_disable,
        .event_init   = cpumf_pmu_event_init,
        .add          = cpumf_pmu_add,
        .del          = cpumf_pmu_del,
        .start        = cpumf_pmu_start,
        .stop         = cpumf_pmu_stop,
        .read         = cpumf_pmu_read,
        .start_txn    = cpumf_pmu_start_txn,
        .commit_txn   = cpumf_pmu_commit_txn,
        .cancel_txn   = cpumf_pmu_cancel_txn,
};

static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (long) hcpu;
        int flags;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                flags = PMC_INIT;
                smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
                break;
        case CPU_DOWN_PREPARE:
                flags = PMC_RELEASE;
                smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static int __init cpumf_pmu_init(void)
{
        int rc;

        if (!cpum_cf_avail())
                return -ENODEV;

        /* Clear bit 15 of cr0 (bit position 48 in the kernel's LSB-0
         * numbering, hence ctl_clear_bit(0, 48)) to revoke problem-state
         * authority to extract measurement counters */
        ctl_clear_bit(0, 48);

        /* register handler for measurement-alert interruptions */
        rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
                                   cpumf_measurement_alert);
        if (rc) {
                pr_err("Registering for CPU-measurement alerts "
                       "failed with rc=%i\n", rc);
                goto out;
        }

        /* The CPU measurement counter facility does not have overflow
         * interrupts to do sampling.  Sampling must be provided by
         * external means, for example, by timers.
         */
        cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
                pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
                unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
                                        cpumf_measurement_alert);
                goto out;
        }
        perf_cpu_notifier(cpumf_pmu_notifier);
out:
        return rc;
}
early_initcall(cpumf_pmu_init);