linux/arch/x86/events/core.c
   1/*
   2 * Performance events x86 architecture code
   3 *
   4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
   6 *  Copyright (C) 2009 Jaswinder Singh Rajput
   7 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
   8 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
   9 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  10 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
  11 *
  12 *  For licencing details see kernel-base/COPYING
  13 */
  14
  15#include <linux/perf_event.h>
  16#include <linux/capability.h>
  17#include <linux/notifier.h>
  18#include <linux/hardirq.h>
  19#include <linux/kprobes.h>
  20#include <linux/export.h>
  21#include <linux/init.h>
  22#include <linux/kdebug.h>
  23#include <linux/sched/mm.h>
  24#include <linux/sched/clock.h>
  25#include <linux/uaccess.h>
  26#include <linux/slab.h>
  27#include <linux/cpu.h>
  28#include <linux/bitops.h>
  29#include <linux/device.h>
  30#include <linux/nospec.h>
  31
  32#include <asm/apic.h>
  33#include <asm/stacktrace.h>
  34#include <asm/nmi.h>
  35#include <asm/smp.h>
  36#include <asm/alternative.h>
  37#include <asm/mmu_context.h>
  38#include <asm/tlbflush.h>
  39#include <asm/timer.h>
  40#include <asm/desc.h>
  41#include <asm/ldt.h>
  42#include <asm/unwind.h>
  43
  44#include "perf_event.h"
  45
  46struct x86_pmu x86_pmu __read_mostly;
  47
  48DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
  49        .enabled = 1,
  50};
  51
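     /* set when the sysfs rdpmc control allows userspace RDPMC unconditionally */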
  52DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
  53
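     /*
      * Tables mapping the generic perf cache events ([cache][op][result]) to
      * raw hardware event codes and extra-register values; they are filled in
      * by the vendor-specific PMU setup code.
      */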
  54u64 __read_mostly hw_cache_event_ids
  55                                [PERF_COUNT_HW_CACHE_MAX]
  56                                [PERF_COUNT_HW_CACHE_OP_MAX]
  57                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
  58u64 __read_mostly hw_cache_extra_regs
  59                                [PERF_COUNT_HW_CACHE_MAX]
  60                                [PERF_COUNT_HW_CACHE_OP_MAX]
  61                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
  62
  63/*
  64 * Propagate event elapsed time into the generic event.
  65 * Can only be executed on the CPU where the event is active.
   66 * Returns the new raw count.
  67 */
  68u64 x86_perf_event_update(struct perf_event *event)
  69{
  70        struct hw_perf_event *hwc = &event->hw;
  71        int shift = 64 - x86_pmu.cntval_bits;
  72        u64 prev_raw_count, new_raw_count;
  73        int idx = hwc->idx;
  74        u64 delta;
  75
  76        if (idx == INTEL_PMC_IDX_FIXED_BTS)
  77                return 0;
  78
  79        /*
  80         * Careful: an NMI might modify the previous event value.
  81         *
  82         * Our tactic to handle this is to first atomically read and
  83         * exchange a new raw count - then add that new-prev delta
  84         * count to the generic event atomically:
  85         */
  86again:
  87        prev_raw_count = local64_read(&hwc->prev_count);
  88        rdpmcl(hwc->event_base_rdpmc, new_raw_count);
  89
  90        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
  91                                        new_raw_count) != prev_raw_count)
  92                goto again;
  93
  94        /*
  95         * Now we have the new raw value and have updated the prev
  96         * timestamp already. We can now calculate the elapsed delta
  97         * (event-)time and add that to the generic event.
  98         *
  99         * Careful, not all hw sign-extends above the physical width
 100         * of the count.
 101         */
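             /*
              * Shifting both counts into the top bits makes the subtraction
              * below wrap correctly across a counter overflow; the logical
              * right shift then recovers the (non-negative) delta.
              */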
 102        delta = (new_raw_count << shift) - (prev_raw_count << shift);
 103        delta >>= shift;
 104
 105        local64_add(delta, &event->count);
 106        local64_sub(delta, &hwc->period_left);
 107
 108        return new_raw_count;
 109}
 110
 111/*
 112 * Find and validate any extra registers to set up.
 113 */
 114static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
 115{
 116        struct hw_perf_event_extra *reg;
 117        struct extra_reg *er;
 118
 119        reg = &event->hw.extra_reg;
 120
 121        if (!x86_pmu.extra_regs)
 122                return 0;
 123
 124        for (er = x86_pmu.extra_regs; er->msr; er++) {
 125                if (er->event != (config & er->config_mask))
 126                        continue;
 127                if (event->attr.config1 & ~er->valid_mask)
 128                        return -EINVAL;
  129                /* Check if the extra MSRs can be safely accessed */
 130                if (!er->extra_msr_access)
 131                        return -ENXIO;
 132
 133                reg->idx = er->idx;
 134                reg->config = event->attr.config1;
 135                reg->reg = er->msr;
 136                break;
 137        }
 138        return 0;
 139}
 140
 141static atomic_t active_events;
 142static atomic_t pmc_refcount;
 143static DEFINE_MUTEX(pmc_reserve_mutex);
 144
 145#ifdef CONFIG_X86_LOCAL_APIC
 146
 147static bool reserve_pmc_hardware(void)
 148{
 149        int i;
 150
 151        for (i = 0; i < x86_pmu.num_counters; i++) {
 152                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 153                        goto perfctr_fail;
 154        }
 155
 156        for (i = 0; i < x86_pmu.num_counters; i++) {
 157                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 158                        goto eventsel_fail;
 159        }
 160
 161        return true;
 162
 163eventsel_fail:
 164        for (i--; i >= 0; i--)
 165                release_evntsel_nmi(x86_pmu_config_addr(i));
 166
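             /* all perfctrs were reserved above, so release every one of them */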
 167        i = x86_pmu.num_counters;
 168
 169perfctr_fail:
 170        for (i--; i >= 0; i--)
 171                release_perfctr_nmi(x86_pmu_event_addr(i));
 172
 173        return false;
 174}
 175
 176static void release_pmc_hardware(void)
 177{
 178        int i;
 179
 180        for (i = 0; i < x86_pmu.num_counters; i++) {
 181                release_perfctr_nmi(x86_pmu_event_addr(i));
 182                release_evntsel_nmi(x86_pmu_config_addr(i));
 183        }
 184}
 185
 186#else
 187
 188static bool reserve_pmc_hardware(void) { return true; }
 189static void release_pmc_hardware(void) {}
 190
 191#endif
 192
 193static bool check_hw_exists(void)
 194{
  195        u64 val, val_fail = -1, val_new = ~0;
 196        int i, reg, reg_fail = -1, ret = 0;
 197        int bios_fail = 0;
 198        int reg_safe = -1;
 199
 200        /*
  201         * Check to see if the BIOS enabled any of the counters; if so,
  202         * complain and bail.
 203         */
 204        for (i = 0; i < x86_pmu.num_counters; i++) {
 205                reg = x86_pmu_config_addr(i);
 206                ret = rdmsrl_safe(reg, &val);
 207                if (ret)
 208                        goto msr_fail;
 209                if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
 210                        bios_fail = 1;
 211                        val_fail = val;
 212                        reg_fail = reg;
 213                } else {
 214                        reg_safe = i;
 215                }
 216        }
 217
 218        if (x86_pmu.num_counters_fixed) {
 219                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 220                ret = rdmsrl_safe(reg, &val);
 221                if (ret)
 222                        goto msr_fail;
 223                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
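                             /*
                              * Each fixed counter has a 4-bit control field in
                              * FIXED_CTR_CTRL; bits 0-1 enable counting at OS
                              * and/or USR privilege level.
                              */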
 224                        if (val & (0x03 << i*4)) {
 225                                bios_fail = 1;
 226                                val_fail = val;
 227                                reg_fail = reg;
 228                        }
 229                }
 230        }
 231
 232        /*
 233         * If all the counters are enabled, the below test will always
 234         * fail.  The tools will also become useless in this scenario.
 235         * Just fail and disable the hardware counters.
 236         */
 237
 238        if (reg_safe == -1) {
 239                reg = reg_safe;
 240                goto msr_fail;
 241        }
 242
 243        /*
 244         * Read the current value, change it and read it back to see if it
 245         * matches, this is needed to detect certain hardware emulators
 246         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
 247         */
 248        reg = x86_pmu_event_addr(reg_safe);
 249        if (rdmsrl_safe(reg, &val))
 250                goto msr_fail;
 251        val ^= 0xffffUL;
 252        ret = wrmsrl_safe(reg, val);
 253        ret |= rdmsrl_safe(reg, &val_new);
 254        if (ret || val != val_new)
 255                goto msr_fail;
 256
 257        /*
 258         * We still allow the PMU driver to operate:
 259         */
 260        if (bios_fail) {
 261                pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
 262                pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
 263                              reg_fail, val_fail);
 264        }
 265
 266        return true;
 267
 268msr_fail:
 269        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 270                pr_cont("PMU not available due to virtualization, using software events only.\n");
 271        } else {
 272                pr_cont("Broken PMU hardware detected, using software events only.\n");
 273                pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
 274                       reg, val_new);
 275        }
 276
 277        return false;
 278}
 279
 280static void hw_perf_event_destroy(struct perf_event *event)
 281{
 282        x86_release_hardware();
 283        atomic_dec(&active_events);
 284}
 285
 286void hw_perf_lbr_event_destroy(struct perf_event *event)
 287{
 288        hw_perf_event_destroy(event);
 289
 290        /* undo the lbr/bts event accounting */
 291        x86_del_exclusive(x86_lbr_exclusive_lbr);
 292}
 293
 294static inline int x86_pmu_initialized(void)
 295{
 296        return x86_pmu.handle_irq != NULL;
 297}
 298
 299static inline int
 300set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 301{
 302        struct perf_event_attr *attr = &event->attr;
 303        unsigned int cache_type, cache_op, cache_result;
 304        u64 config, val;
 305
 306        config = attr->config;
 307
 308        cache_type = (config >> 0) & 0xff;
 309        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 310                return -EINVAL;
 311        cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
 312
 313        cache_op = (config >>  8) & 0xff;
 314        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 315                return -EINVAL;
 316        cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
 317
 318        cache_result = (config >> 16) & 0xff;
 319        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 320                return -EINVAL;
 321        cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 322
 323        val = hw_cache_event_ids[cache_type][cache_op][cache_result];
 324
 325        if (val == 0)
 326                return -ENOENT;
 327
 328        if (val == -1)
 329                return -EINVAL;
 330
 331        hwc->config |= val;
 332        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
 333        return x86_pmu_extra_regs(val, event);
 334}
 335
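     /*
      * Reference count the PMC hardware: the first caller does the actual
      * reservation under pmc_reserve_mutex, later callers just take another
      * reference via the lock-free atomic_inc_not_zero() fast path.
      */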
 336int x86_reserve_hardware(void)
 337{
 338        int err = 0;
 339
 340        if (!atomic_inc_not_zero(&pmc_refcount)) {
 341                mutex_lock(&pmc_reserve_mutex);
 342                if (atomic_read(&pmc_refcount) == 0) {
 343                        if (!reserve_pmc_hardware())
 344                                err = -EBUSY;
 345                        else
 346                                reserve_ds_buffers();
 347                }
 348                if (!err)
 349                        atomic_inc(&pmc_refcount);
 350                mutex_unlock(&pmc_reserve_mutex);
 351        }
 352
 353        return err;
 354}
 355
 356void x86_release_hardware(void)
 357{
 358        if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
 359                release_pmc_hardware();
 360                release_ds_buffers();
 361                mutex_unlock(&pmc_reserve_mutex);
 362        }
 363}
 364
 365/*
  366 * Check if we can create an event of a certain type (i.e. that no conflicting
  367 * events are present).
 368 */
 369int x86_add_exclusive(unsigned int what)
 370{
 371        int i;
 372
 373        /*
  374         * When lbr_pt_coexist is set, we allow PT to coexist with either LBR or BTS.
 375         * LBR and BTS are still mutually exclusive.
 376         */
 377        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
 378                return 0;
 379
 380        if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 381                mutex_lock(&pmc_reserve_mutex);
 382                for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
 383                        if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
 384                                goto fail_unlock;
 385                }
 386                atomic_inc(&x86_pmu.lbr_exclusive[what]);
 387                mutex_unlock(&pmc_reserve_mutex);
 388        }
 389
 390        atomic_inc(&active_events);
 391        return 0;
 392
 393fail_unlock:
 394        mutex_unlock(&pmc_reserve_mutex);
 395        return -EBUSY;
 396}
 397
 398void x86_del_exclusive(unsigned int what)
 399{
 400        if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
 401                return;
 402
 403        atomic_dec(&x86_pmu.lbr_exclusive[what]);
 404        atomic_dec(&active_events);
 405}
 406
 407int x86_setup_perfctr(struct perf_event *event)
 408{
 409        struct perf_event_attr *attr = &event->attr;
 410        struct hw_perf_event *hwc = &event->hw;
 411        u64 config;
 412
 413        if (!is_sampling_event(event)) {
 414                hwc->sample_period = x86_pmu.max_period;
 415                hwc->last_period = hwc->sample_period;
 416                local64_set(&hwc->period_left, hwc->sample_period);
 417        }
 418
 419        if (attr->type == PERF_TYPE_RAW)
 420                return x86_pmu_extra_regs(event->attr.config, event);
 421
 422        if (attr->type == PERF_TYPE_HW_CACHE)
 423                return set_ext_hw_attr(hwc, event);
 424
 425        if (attr->config >= x86_pmu.max_events)
 426                return -EINVAL;
 427
 428        attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
 429
 430        /*
 431         * The generic map:
 432         */
 433        config = x86_pmu.event_map(attr->config);
 434
 435        if (config == 0)
 436                return -ENOENT;
 437
 438        if (config == -1LL)
 439                return -EINVAL;
 440
 441        hwc->config |= config;
 442
 443        return 0;
 444}
 445
 446/*
  447 * Check that branch_sample_type is compatible with the
  448 * settings needed for precise_ip > 1, which implies
  449 * using the LBR to capture ALL taken branches at the
  450 * privilege levels of the measurement.
 451 */
 452static inline int precise_br_compat(struct perf_event *event)
 453{
 454        u64 m = event->attr.branch_sample_type;
 455        u64 b = 0;
 456
 457        /* must capture all branches */
 458        if (!(m & PERF_SAMPLE_BRANCH_ANY))
 459                return 0;
 460
 461        m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
 462
 463        if (!event->attr.exclude_user)
 464                b |= PERF_SAMPLE_BRANCH_USER;
 465
 466        if (!event->attr.exclude_kernel)
 467                b |= PERF_SAMPLE_BRANCH_KERNEL;
 468
 469        /*
 470         * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
 471         */
 472
 473        return m == b;
 474}
 475
 476int x86_pmu_max_precise(void)
 477{
 478        int precise = 0;
 479
 480        /* Support for constant skid */
 481        if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
 482                precise++;
 483
 484                /* Support for IP fixup */
 485                if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
 486                        precise++;
 487
 488                if (x86_pmu.pebs_prec_dist)
 489                        precise++;
 490        }
 491        return precise;
 492}
 493
 494int x86_pmu_hw_config(struct perf_event *event)
 495{
 496        if (event->attr.precise_ip) {
 497                int precise = x86_pmu_max_precise();
 498
 499                if (event->attr.precise_ip > precise)
 500                        return -EOPNOTSUPP;
 501
  502                /* There's no sense in having PEBS for non-sampling events: */
 503                if (!is_sampling_event(event))
 504                        return -EINVAL;
 505        }
 506        /*
 507         * check that PEBS LBR correction does not conflict with
 508         * whatever the user is asking with attr->branch_sample_type
 509         */
 510        if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
 511                u64 *br_type = &event->attr.branch_sample_type;
 512
 513                if (has_branch_stack(event)) {
 514                        if (!precise_br_compat(event))
 515                                return -EOPNOTSUPP;
 516
 517                        /* branch_sample_type is compatible */
 518
 519                } else {
 520                        /*
  521                         * user did not specify branch_sample_type
 522                         *
 523                         * For PEBS fixups, we capture all
 524                         * the branches at the priv level of the
 525                         * event.
 526                         */
 527                        *br_type = PERF_SAMPLE_BRANCH_ANY;
 528
 529                        if (!event->attr.exclude_user)
 530                                *br_type |= PERF_SAMPLE_BRANCH_USER;
 531
 532                        if (!event->attr.exclude_kernel)
 533                                *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
 534                }
 535        }
 536
 537        if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
 538                event->attach_state |= PERF_ATTACH_TASK_DATA;
 539
 540        /*
 541         * Generate PMC IRQs:
 542         * (keep 'enabled' bit clear for now)
 543         */
 544        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
 545
 546        /*
 547         * Count user and OS events unless requested not to
 548         */
 549        if (!event->attr.exclude_user)
 550                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
 551        if (!event->attr.exclude_kernel)
 552                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 553
 554        if (event->attr.type == PERF_TYPE_RAW)
 555                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 556
 557        if (event->attr.sample_period && x86_pmu.limit_period) {
 558                if (x86_pmu.limit_period(event, event->attr.sample_period) >
 559                                event->attr.sample_period)
 560                        return -EINVAL;
 561        }
 562
 563        return x86_setup_perfctr(event);
 564}
 565
 566/*
 567 * Setup the hardware configuration for a given attr_type
 568 */
 569static int __x86_pmu_event_init(struct perf_event *event)
 570{
 571        int err;
 572
 573        if (!x86_pmu_initialized())
 574                return -ENODEV;
 575
 576        err = x86_reserve_hardware();
 577        if (err)
 578                return err;
 579
 580        atomic_inc(&active_events);
 581        event->destroy = hw_perf_event_destroy;
 582
 583        event->hw.idx = -1;
 584        event->hw.last_cpu = -1;
 585        event->hw.last_tag = ~0ULL;
 586
 587        /* mark unused */
 588        event->hw.extra_reg.idx = EXTRA_REG_NONE;
 589        event->hw.branch_reg.idx = EXTRA_REG_NONE;
 590
 591        return x86_pmu.hw_config(event);
 592}
 593
 594void x86_pmu_disable_all(void)
 595{
 596        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 597        int idx;
 598
 599        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 600                u64 val;
 601
 602                if (!test_bit(idx, cpuc->active_mask))
 603                        continue;
 604                rdmsrl(x86_pmu_config_addr(idx), val);
 605                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 606                        continue;
 607                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 608                wrmsrl(x86_pmu_config_addr(idx), val);
 609        }
 610}
 611
 612/*
  613 * There may be a PMI landing after enabled=0. The PMI could hit before or
  614 * after disable_all.
 615 *
 616 * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
 617 * It will not be re-enabled in the NMI handler again, because enabled=0. After
 618 * handling the NMI, disable_all will be called, which will not change the
 619 * state either. If PMI hits after disable_all, the PMU is already disabled
 620 * before entering NMI handler. The NMI handler will not change the state
 621 * either.
 622 *
 623 * So either situation is harmless.
 624 */
 625static void x86_pmu_disable(struct pmu *pmu)
 626{
 627        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 628
 629        if (!x86_pmu_initialized())
 630                return;
 631
 632        if (!cpuc->enabled)
 633                return;
 634
 635        cpuc->n_added = 0;
 636        cpuc->enabled = 0;
 637        barrier();
 638
 639        x86_pmu.disable_all();
 640}
 641
 642void x86_pmu_enable_all(int added)
 643{
 644        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 645        int idx;
 646
 647        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 648                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 649
 650                if (!test_bit(idx, cpuc->active_mask))
 651                        continue;
 652
 653                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 654        }
 655}
 656
 657static struct pmu pmu;
 658
 659static inline int is_x86_event(struct perf_event *event)
 660{
 661        return event->pmu == &pmu;
 662}
 663
 664/*
 665 * Event scheduler state:
 666 *
 667 * Assign events iterating over all events and counters, beginning
  668 * with the events with the least weights. Keep the current iterator
 669 * state in struct sched_state.
 670 */
 671struct sched_state {
 672        int     weight;
 673        int     event;          /* event index */
 674        int     counter;        /* counter index */
 675        int     unassigned;     /* number of events to be assigned left */
 676        int     nr_gp;          /* number of GP counters used */
 677        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 678};
 679
 680/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
 681#define SCHED_STATES_MAX        2
 682
 683struct perf_sched {
 684        int                     max_weight;
 685        int                     max_events;
 686        int                     max_gp;
 687        int                     saved_states;
 688        struct event_constraint **constraints;
 689        struct sched_state      state;
 690        struct sched_state      saved[SCHED_STATES_MAX];
 691};
 692
 693/*
  694 * Initialize the iterator that runs through all events and counters.
 695 */
 696static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
 697                            int num, int wmin, int wmax, int gpmax)
 698{
 699        int idx;
 700
 701        memset(sched, 0, sizeof(*sched));
 702        sched->max_events       = num;
 703        sched->max_weight       = wmax;
 704        sched->max_gp           = gpmax;
 705        sched->constraints      = constraints;
 706
 707        for (idx = 0; idx < num; idx++) {
 708                if (constraints[idx]->weight == wmin)
 709                        break;
 710        }
 711
 712        sched->state.event      = idx;          /* start with min weight */
 713        sched->state.weight     = wmin;
 714        sched->state.unassigned = num;
 715}
 716
 717static void perf_sched_save_state(struct perf_sched *sched)
 718{
 719        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
 720                return;
 721
 722        sched->saved[sched->saved_states] = sched->state;
 723        sched->saved_states++;
 724}
 725
 726static bool perf_sched_restore_state(struct perf_sched *sched)
 727{
 728        if (!sched->saved_states)
 729                return false;
 730
 731        sched->saved_states--;
 732        sched->state = sched->saved[sched->saved_states];
 733
 734        /* continue with next counter: */
 735        clear_bit(sched->state.counter++, sched->state.used);
 736
 737        return true;
 738}
 739
 740/*
 741 * Select a counter for the current event to schedule. Return true on
 742 * success.
 743 */
 744static bool __perf_sched_find_counter(struct perf_sched *sched)
 745{
 746        struct event_constraint *c;
 747        int idx;
 748
 749        if (!sched->state.unassigned)
 750                return false;
 751
 752        if (sched->state.event >= sched->max_events)
 753                return false;
 754
 755        c = sched->constraints[sched->state.event];
 756        /* Prefer fixed purpose counters */
 757        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
 758                idx = INTEL_PMC_IDX_FIXED;
 759                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
 760                        if (!__test_and_set_bit(idx, sched->state.used))
 761                                goto done;
 762                }
 763        }
 764
 765        /* Grab the first unused counter starting with idx */
 766        idx = sched->state.counter;
 767        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
 768                if (!__test_and_set_bit(idx, sched->state.used)) {
 769                        if (sched->state.nr_gp++ >= sched->max_gp)
 770                                return false;
 771
 772                        goto done;
 773                }
 774        }
 775
 776        return false;
 777
 778done:
 779        sched->state.counter = idx;
 780
 781        if (c->overlap)
 782                perf_sched_save_state(sched);
 783
 784        return true;
 785}
 786
 787static bool perf_sched_find_counter(struct perf_sched *sched)
 788{
 789        while (!__perf_sched_find_counter(sched)) {
 790                if (!perf_sched_restore_state(sched))
 791                        return false;
 792        }
 793
 794        return true;
 795}
 796
 797/*
 798 * Go through all unassigned events and find the next one to schedule.
 799 * Take events with the least weight first. Return true on success.
 800 */
 801static bool perf_sched_next_event(struct perf_sched *sched)
 802{
 803        struct event_constraint *c;
 804
 805        if (!sched->state.unassigned || !--sched->state.unassigned)
 806                return false;
 807
 808        do {
 809                /* next event */
 810                sched->state.event++;
 811                if (sched->state.event >= sched->max_events) {
 812                        /* next weight */
 813                        sched->state.event = 0;
 814                        sched->state.weight++;
 815                        if (sched->state.weight > sched->max_weight)
 816                                return false;
 817                }
 818                c = sched->constraints[sched->state.event];
 819        } while (c->weight != sched->state.weight);
 820
 821        sched->state.counter = 0;       /* start with first counter */
 822
 823        return true;
 824}
 825
 826/*
 827 * Assign a counter for each event.
 828 */
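     /*
      * Events are assigned greedily in order of increasing constraint weight.
      * When an event with an overlapping constraint is placed, the scheduler
      * state is saved so that a later failure can backtrack and try the next
      * counter (see perf_sched_save_state()/perf_sched_restore_state()).
      */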
 829int perf_assign_events(struct event_constraint **constraints, int n,
 830                        int wmin, int wmax, int gpmax, int *assign)
 831{
 832        struct perf_sched sched;
 833
 834        perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
 835
 836        do {
 837                if (!perf_sched_find_counter(&sched))
 838                        break;  /* failed */
 839                if (assign)
 840                        assign[sched.state.event] = sched.state.counter;
 841        } while (perf_sched_next_event(&sched));
 842
 843        return sched.state.unassigned;
 844}
 845EXPORT_SYMBOL_GPL(perf_assign_events);
 846
 847int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 848{
 849        struct event_constraint *c;
 850        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 851        struct perf_event *e;
 852        int i, wmin, wmax, unsched = 0;
 853        struct hw_perf_event *hwc;
 854
 855        bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 856
 857        if (x86_pmu.start_scheduling)
 858                x86_pmu.start_scheduling(cpuc);
 859
 860        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
 861                cpuc->event_constraint[i] = NULL;
 862                c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
 863                cpuc->event_constraint[i] = c;
 864
 865                wmin = min(wmin, c->weight);
 866                wmax = max(wmax, c->weight);
 867        }
 868
 869        /*
  870         * fast path: try to reuse the previous register
 871         */
 872        for (i = 0; i < n; i++) {
 873                hwc = &cpuc->event_list[i]->hw;
 874                c = cpuc->event_constraint[i];
 875
 876                /* never assigned */
 877                if (hwc->idx == -1)
 878                        break;
 879
 880                /* constraint still honored */
 881                if (!test_bit(hwc->idx, c->idxmsk))
 882                        break;
 883
 884                /* not already used */
 885                if (test_bit(hwc->idx, used_mask))
 886                        break;
 887
 888                __set_bit(hwc->idx, used_mask);
 889                if (assign)
 890                        assign[i] = hwc->idx;
 891        }
 892
 893        /* slow path */
 894        if (i != n) {
 895                int gpmax = x86_pmu.num_counters;
 896
 897                /*
 898                 * Do not allow scheduling of more than half the available
 899                 * generic counters.
 900                 *
  901                 * This helps avoid counter starvation of the sibling thread by
  902                 * ensuring that at most half the counters can be in exclusive
  903                 * mode. There are no designated counters for the limit; any
  904                 * N/2 counters can be used. This helps with events with
 905                 * specific counter constraints.
 906                 */
 907                if (is_ht_workaround_enabled() && !cpuc->is_fake &&
 908                    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
 909                        gpmax /= 2;
 910
 911                unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
 912                                             wmax, gpmax, assign);
 913        }
 914
 915        /*
 916         * In case of success (unsched = 0), mark events as committed,
 917         * so we do not put_constraint() in case new events are added
 918         * and fail to be scheduled
 919         *
 920         * We invoke the lower level commit callback to lock the resource
 921         *
 922         * We do not need to do all of this in case we are called to
 923         * validate an event group (assign == NULL)
 924         */
 925        if (!unsched && assign) {
 926                for (i = 0; i < n; i++) {
 927                        e = cpuc->event_list[i];
 928                        e->hw.flags |= PERF_X86_EVENT_COMMITTED;
 929                        if (x86_pmu.commit_scheduling)
 930                                x86_pmu.commit_scheduling(cpuc, i, assign[i]);
 931                }
 932        } else {
 933                for (i = 0; i < n; i++) {
 934                        e = cpuc->event_list[i];
 935                        /*
  936                         * do not put_constraint() on committed events,
 937                         * because they are good to go
 938                         */
 939                        if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
 940                                continue;
 941
 942                        /*
 943                         * release events that failed scheduling
 944                         */
 945                        if (x86_pmu.put_event_constraints)
 946                                x86_pmu.put_event_constraints(cpuc, e);
 947                }
 948        }
 949
 950        if (x86_pmu.stop_scheduling)
 951                x86_pmu.stop_scheduling(cpuc);
 952
 953        return unsched ? -EINVAL : 0;
 954}
 955
 956/*
  957 * dogrp: true if we must collect sibling events (group)
  958 * returns the total number of events, or a negative error code
 959 */
 960static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
 961{
 962        struct perf_event *event;
 963        int n, max_count;
 964
 965        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
 966
 967        /* current number of events already accepted */
 968        n = cpuc->n_events;
 969
 970        if (is_x86_event(leader)) {
 971                if (n >= max_count)
 972                        return -EINVAL;
 973                cpuc->event_list[n] = leader;
 974                n++;
 975        }
 976        if (!dogrp)
 977                return n;
 978
 979        for_each_sibling_event(event, leader) {
 980                if (!is_x86_event(event) ||
 981                    event->state <= PERF_EVENT_STATE_OFF)
 982                        continue;
 983
 984                if (n >= max_count)
 985                        return -EINVAL;
 986
 987                cpuc->event_list[n] = event;
 988                n++;
 989        }
 990        return n;
 991}
 992
 993static inline void x86_assign_hw_event(struct perf_event *event,
 994                                struct cpu_hw_events *cpuc, int i)
 995{
 996        struct hw_perf_event *hwc = &event->hw;
 997
 998        hwc->idx = cpuc->assign[i];
 999        hwc->last_cpu = smp_processor_id();
1000        hwc->last_tag = ++cpuc->tags[i];
1001
1002        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
1003                hwc->config_base = 0;
1004                hwc->event_base = 0;
1005        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
1006                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1007                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
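                     /* bit 30 of the RDPMC index selects the fixed-function counters */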
1008                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
1009        } else {
1010                hwc->config_base = x86_pmu_config_addr(hwc->idx);
1011                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
1012                hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
1013        }
1014}
1015
1016/**
1017 * x86_perf_rdpmc_index - Return PMC counter used for event
1018 * @event: the perf_event to which the PMC counter was assigned
1019 *
1020 * The counter assigned to this performance event may change if interrupts
1021 * are enabled. This counter should thus never be used while interrupts are
1022 * enabled. Before this function is used to obtain the assigned counter the
1023 * event should be checked for validity using, for example,
1024 * perf_event_read_local(), within the same interrupt disabled section in
1025 * which this counter is planned to be used.
1026 *
1027 * Return: The index of the performance monitoring counter assigned to
 1028 * @event.
1029 */
1030int x86_perf_rdpmc_index(struct perf_event *event)
1031{
1032        lockdep_assert_irqs_disabled();
1033
1034        return event->hw.event_base_rdpmc;
1035}
1036
1037static inline int match_prev_assignment(struct hw_perf_event *hwc,
1038                                        struct cpu_hw_events *cpuc,
1039                                        int i)
1040{
1041        return hwc->idx == cpuc->assign[i] &&
1042                hwc->last_cpu == smp_processor_id() &&
1043                hwc->last_tag == cpuc->tags[i];
1044}
1045
1046static void x86_pmu_start(struct perf_event *event, int flags);
1047
1048static void x86_pmu_enable(struct pmu *pmu)
1049{
1050        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1051        struct perf_event *event;
1052        struct hw_perf_event *hwc;
1053        int i, added = cpuc->n_added;
1054
1055        if (!x86_pmu_initialized())
1056                return;
1057
1058        if (cpuc->enabled)
1059                return;
1060
1061        if (cpuc->n_added) {
1062                int n_running = cpuc->n_events - cpuc->n_added;
1063                /*
1064                 * apply assignment obtained either from
1065                 * hw_perf_group_sched_in() or x86_pmu_enable()
1066                 *
1067                 * step1: save events moving to new counters
1068                 */
1069                for (i = 0; i < n_running; i++) {
1070                        event = cpuc->event_list[i];
1071                        hwc = &event->hw;
1072
1073                        /*
1074                         * we can avoid reprogramming counter if:
1075                         * - assigned same counter as last time
1076                         * - running on same CPU as last time
1077                         * - no other event has used the counter since
1078                         */
1079                        if (hwc->idx == -1 ||
1080                            match_prev_assignment(hwc, cpuc, i))
1081                                continue;
1082
1083                        /*
1084                         * Ensure we don't accidentally enable a stopped
1085                         * counter simply because we rescheduled.
1086                         */
1087                        if (hwc->state & PERF_HES_STOPPED)
1088                                hwc->state |= PERF_HES_ARCH;
1089
1090                        x86_pmu_stop(event, PERF_EF_UPDATE);
1091                }
1092
1093                /*
1094                 * step2: reprogram moved events into new counters
1095                 */
1096                for (i = 0; i < cpuc->n_events; i++) {
1097                        event = cpuc->event_list[i];
1098                        hwc = &event->hw;
1099
1100                        if (!match_prev_assignment(hwc, cpuc, i))
1101                                x86_assign_hw_event(event, cpuc, i);
1102                        else if (i < n_running)
1103                                continue;
1104
1105                        if (hwc->state & PERF_HES_ARCH)
1106                                continue;
1107
1108                        x86_pmu_start(event, PERF_EF_RELOAD);
1109                }
1110                cpuc->n_added = 0;
1111                perf_events_lapic_init();
1112        }
1113
1114        cpuc->enabled = 1;
1115        barrier();
1116
1117        x86_pmu.enable_all(added);
1118}
1119
1120static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1121
1122/*
1123 * Set the next IRQ period, based on the hwc->period_left value.
1124 * To be called with the event disabled in hw:
1125 */
1126int x86_perf_event_set_period(struct perf_event *event)
1127{
1128        struct hw_perf_event *hwc = &event->hw;
1129        s64 left = local64_read(&hwc->period_left);
1130        s64 period = hwc->sample_period;
1131        int ret = 0, idx = hwc->idx;
1132
1133        if (idx == INTEL_PMC_IDX_FIXED_BTS)
1134                return 0;
1135
1136        /*
1137         * If we are way outside a reasonable range then just skip forward:
1138         */
1139        if (unlikely(left <= -period)) {
1140                left = period;
1141                local64_set(&hwc->period_left, left);
1142                hwc->last_period = period;
1143                ret = 1;
1144        }
1145
1146        if (unlikely(left <= 0)) {
1147                left += period;
1148                local64_set(&hwc->period_left, left);
1149                hwc->last_period = period;
1150                ret = 1;
1151        }
1152        /*
 1153         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1154         */
1155        if (unlikely(left < 2))
1156                left = 2;
1157
1158        if (left > x86_pmu.max_period)
1159                left = x86_pmu.max_period;
1160
1161        if (x86_pmu.limit_period)
1162                left = x86_pmu.limit_period(event, left);
1163
1164        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1165
1166        /*
1167         * The hw event starts counting from this event offset,
 1168         * mark it to be able to extract future deltas:
1169         */
1170        local64_set(&hwc->prev_count, (u64)-left);
1171
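             /*
              * Program the counter to -left so that it overflows after 'left'
              * increments, masked to the width of the counter.
              */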
1172        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
1173
1174        /*
 1175         * Due to an erratum on certain CPUs we need
1176         * a second write to be sure the register
1177         * is updated properly
1178         */
1179        if (x86_pmu.perfctr_second_write) {
1180                wrmsrl(hwc->event_base,
1181                        (u64)(-left) & x86_pmu.cntval_mask);
1182        }
1183
1184        perf_event_update_userpage(event);
1185
1186        return ret;
1187}
1188
1189void x86_pmu_enable_event(struct perf_event *event)
1190{
1191        if (__this_cpu_read(cpu_hw_events.enabled))
1192                __x86_pmu_enable_event(&event->hw,
1193                                       ARCH_PERFMON_EVENTSEL_ENABLE);
1194}
1195
1196/*
1197 * Add a single event to the PMU.
1198 *
1199 * The event is added to the group of enabled events
 1200 * but only if it can be scheduled with existing events.
1201 */
1202static int x86_pmu_add(struct perf_event *event, int flags)
1203{
1204        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1205        struct hw_perf_event *hwc;
1206        int assign[X86_PMC_IDX_MAX];
1207        int n, n0, ret;
1208
1209        hwc = &event->hw;
1210
1211        n0 = cpuc->n_events;
1212        ret = n = collect_events(cpuc, event, false);
1213        if (ret < 0)
1214                goto out;
1215
1216        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1217        if (!(flags & PERF_EF_START))
1218                hwc->state |= PERF_HES_ARCH;
1219
1220        /*
 1221         * If a group event scheduling transaction was started,
 1222         * skip the schedulability test here; it will be performed
1223         * at commit time (->commit_txn) as a whole.
1224         *
1225         * If commit fails, we'll call ->del() on all events
1226         * for which ->add() was called.
1227         */
1228        if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1229                goto done_collect;
1230
1231        ret = x86_pmu.schedule_events(cpuc, n, assign);
1232        if (ret)
1233                goto out;
1234        /*
 1235         * copy the new assignment now that we know it is possible;
 1236         * it will be used by hw_perf_enable()
1237         */
1238        memcpy(cpuc->assign, assign, n*sizeof(int));
1239
1240done_collect:
1241        /*
1242         * Commit the collect_events() state. See x86_pmu_del() and
1243         * x86_pmu_*_txn().
1244         */
1245        cpuc->n_events = n;
1246        cpuc->n_added += n - n0;
1247        cpuc->n_txn += n - n0;
1248
1249        if (x86_pmu.add) {
1250                /*
1251                 * This is before x86_pmu_enable() will call x86_pmu_start(),
 1252                 * so we enable LBRs before an event needs them, etc.
1253                 */
1254                x86_pmu.add(event);
1255        }
1256
1257        ret = 0;
1258out:
1259        return ret;
1260}
1261
1262static void x86_pmu_start(struct perf_event *event, int flags)
1263{
1264        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1265        int idx = event->hw.idx;
1266
1267        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1268                return;
1269
1270        if (WARN_ON_ONCE(idx == -1))
1271                return;
1272
1273        if (flags & PERF_EF_RELOAD) {
1274                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1275                x86_perf_event_set_period(event);
1276        }
1277
1278        event->hw.state = 0;
1279
1280        cpuc->events[idx] = event;
1281        __set_bit(idx, cpuc->active_mask);
1282        __set_bit(idx, cpuc->running);
1283        x86_pmu.enable(event);
1284        perf_event_update_userpage(event);
1285}
1286
1287void perf_event_print_debug(void)
1288{
1289        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1290        u64 pebs, debugctl;
1291        struct cpu_hw_events *cpuc;
1292        unsigned long flags;
1293        int cpu, idx;
1294
1295        if (!x86_pmu.num_counters)
1296                return;
1297
1298        local_irq_save(flags);
1299
1300        cpu = smp_processor_id();
1301        cpuc = &per_cpu(cpu_hw_events, cpu);
1302
1303        if (x86_pmu.version >= 2) {
1304                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1305                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1306                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1307                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1308
1309                pr_info("\n");
1310                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1311                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1312                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1313                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1314                if (x86_pmu.pebs_constraints) {
1315                        rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1316                        pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
1317                }
1318                if (x86_pmu.lbr_nr) {
1319                        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1320                        pr_info("CPU#%d: debugctl:   %016llx\n", cpu, debugctl);
1321                }
1322        }
1323        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1324
1325        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1326                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1327                rdmsrl(x86_pmu_event_addr(idx), pmc_count);
1328
1329                prev_left = per_cpu(pmc_prev_left[idx], cpu);
1330
1331                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1332                        cpu, idx, pmc_ctrl);
1333                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1334                        cpu, idx, pmc_count);
1335                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1336                        cpu, idx, prev_left);
1337        }
1338        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1339                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1340
1341                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1342                        cpu, idx, pmc_count);
1343        }
1344        local_irq_restore(flags);
1345}
1346
1347void x86_pmu_stop(struct perf_event *event, int flags)
1348{
1349        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1350        struct hw_perf_event *hwc = &event->hw;
1351
1352        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1353                x86_pmu.disable(event);
1354                cpuc->events[hwc->idx] = NULL;
1355                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1356                hwc->state |= PERF_HES_STOPPED;
1357        }
1358
1359        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1360                /*
 1361                 * Drain the remaining delta count out of an event
1362                 * that we are disabling:
1363                 */
1364                x86_perf_event_update(event);
1365                hwc->state |= PERF_HES_UPTODATE;
1366        }
1367}
1368
1369static void x86_pmu_del(struct perf_event *event, int flags)
1370{
1371        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1372        int i;
1373
1374        /*
1375         * event is descheduled
1376         */
1377        event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1378
1379        /*
1380         * If we're called during a txn, we only need to undo x86_pmu.add.
1381         * The events never got scheduled and ->cancel_txn will truncate
1382         * the event_list.
1383         *
1384         * XXX assumes any ->del() called during a TXN will only be on
1385         * an event added during that same TXN.
1386         */
1387        if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
1388                goto do_del;
1389
1390        /*
1391         * Not a TXN, therefore cleanup properly.
1392         */
1393        x86_pmu_stop(event, PERF_EF_UPDATE);
1394
1395        for (i = 0; i < cpuc->n_events; i++) {
1396                if (event == cpuc->event_list[i])
1397                        break;
1398        }
1399
1400        if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1401                return;
1402
1403        /* If we have a newly added event; make sure to decrease n_added. */
1404        if (i >= cpuc->n_events - cpuc->n_added)
1405                --cpuc->n_added;
1406
1407        if (x86_pmu.put_event_constraints)
1408                x86_pmu.put_event_constraints(cpuc, event);
1409
1410        /* Delete the array entry. */
1411        while (++i < cpuc->n_events) {
1412                cpuc->event_list[i-1] = cpuc->event_list[i];
1413                cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1414        }
1415        --cpuc->n_events;
1416
1417        perf_event_update_userpage(event);
1418
1419do_del:
1420        if (x86_pmu.del) {
1421                /*
 1422                 * This is after x86_pmu_stop(); so we disable LBRs after any
 1423                 * event that could need them, etc.
1424                 */
1425                x86_pmu.del(event);
1426        }
1427}
1428
1429int x86_pmu_handle_irq(struct pt_regs *regs)
1430{
1431        struct perf_sample_data data;
1432        struct cpu_hw_events *cpuc;
1433        struct perf_event *event;
1434        int idx, handled = 0;
1435        u64 val;
1436
1437        cpuc = this_cpu_ptr(&cpu_hw_events);
1438
1439        /*
1440         * Some chipsets need to unmask the LVTPC in a particular spot
1441         * inside the nmi handler.  As a result, the unmasking was pushed
1442         * into all the nmi handlers.
1443         *
1444         * This generic handler doesn't seem to have any issues where the
1445         * unmasking occurs so it was left at the top.
1446         */
1447        apic_write(APIC_LVTPC, APIC_DM_NMI);
1448
1449        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1450                if (!test_bit(idx, cpuc->active_mask)) {
1451                        /*
 1452                         * Though we deactivated the counter, some CPUs
 1453                         * might still deliver spurious interrupts that were
 1454                         * already in flight. Catch them:
1455                         */
1456                        if (__test_and_clear_bit(idx, cpuc->running))
1457                                handled++;
1458                        continue;
1459                }
1460
1461                event = cpuc->events[idx];
1462
1463                val = x86_perf_event_update(event);
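                     /*
                      * Counters are programmed with a negative value and count
                      * up; if the sign bit of the counter value is still set,
                      * the counter has not overflowed and did not raise this PMI.
                      */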
1464                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
1465                        continue;
1466
1467                /*
1468                 * event overflow
1469                 */
1470                handled++;
1471                perf_sample_data_init(&data, 0, event->hw.last_period);
1472
1473                if (!x86_perf_event_set_period(event))
1474                        continue;
1475
1476                if (perf_event_overflow(event, &data, regs))
1477                        x86_pmu_stop(event, 0);
1478        }
1479
1480        if (handled)
1481                inc_irq_stat(apic_perf_irqs);
1482
1483        return handled;
1484}
1485
1486void perf_events_lapic_init(void)
1487{
1488        if (!x86_pmu.apic || !x86_pmu_initialized())
1489                return;
1490
1491        /*
1492         * Always use NMI for PMU
1493         */
1494        apic_write(APIC_LVTPC, APIC_DM_NMI);
1495}
1496
1497static int
1498perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1499{
1500        u64 start_clock;
1501        u64 finish_clock;
1502        int ret;
1503
1504        /*
1505         * All PMUs/events that share this PMI handler should make sure to
1506         * increment active_events for their events.
1507         */
1508        if (!atomic_read(&active_events))
1509                return NMI_DONE;
1510
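             /*
              * Time the handler so the perf core can throttle the maximum
              * sample rate if NMI handling takes too long.
              */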
1511        start_clock = sched_clock();
1512        ret = x86_pmu.handle_irq(regs);
1513        finish_clock = sched_clock();
1514
1515        perf_sample_event_took(finish_clock - start_clock);
1516
1517        return ret;
1518}
1519NOKPROBE_SYMBOL(perf_event_nmi_handler);
1520
1521struct event_constraint emptyconstraint;
1522struct event_constraint unconstrained;
1523
1524static int x86_pmu_prepare_cpu(unsigned int cpu)
1525{
1526        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1527        int i;
1528
1529        for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
1530                cpuc->kfree_on_online[i] = NULL;
1531        if (x86_pmu.cpu_prepare)
1532                return x86_pmu.cpu_prepare(cpu);
1533        return 0;
1534}
1535
1536static int x86_pmu_dead_cpu(unsigned int cpu)
1537{
1538        if (x86_pmu.cpu_dead)
1539                x86_pmu.cpu_dead(cpu);
1540        return 0;
1541}
1542
1543static int x86_pmu_online_cpu(unsigned int cpu)
1544{
1545        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1546        int i;
1547
1548        for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
1549                kfree(cpuc->kfree_on_online[i]);
1550                cpuc->kfree_on_online[i] = NULL;
1551        }
1552        return 0;
1553}
1554
1555static int x86_pmu_starting_cpu(unsigned int cpu)
1556{
1557        if (x86_pmu.cpu_starting)
1558                x86_pmu.cpu_starting(cpu);
1559        return 0;
1560}
1561
1562static int x86_pmu_dying_cpu(unsigned int cpu)
1563{
1564        if (x86_pmu.cpu_dying)
1565                x86_pmu.cpu_dying(cpu);
1566        return 0;
1567}
1568
1569static void __init pmu_check_apic(void)
1570{
1571        if (boot_cpu_has(X86_FEATURE_APIC))
1572                return;
1573
1574        x86_pmu.apic = 0;
1575        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1576        pr_info("no hardware sampling interrupt available.\n");
1577
1578        /*
1579         * If we have a PMU initialized but no APIC
1580         * interrupts, we cannot sample hardware
1581         * events (user-space has to fall back and
 1582         * sample via an hrtimer-based software event):
1583         */
1584        pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1585
1586}
1587
1588static struct attribute_group x86_pmu_format_group __ro_after_init = {
1589        .name = "format",
1590        .attrs = NULL,
1591};
1592
1593/*
1594 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 1595 * from the events_attr attributes.
1596 */
1597static void __init filter_events(struct attribute **attrs)
1598{
1599        struct device_attribute *d;
1600        struct perf_pmu_events_attr *pmu_attr;
1601        int offset = 0;
1602        int i, j;
1603
1604        for (i = 0; attrs[i]; i++) {
1605                d = (struct device_attribute *)attrs[i];
1606                pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1607                /* str trumps id */
1608                if (pmu_attr->event_str)
1609                        continue;
1610                if (x86_pmu.event_map(i + offset))
1611                        continue;
1612
1613                for (j = i; attrs[j]; j++)
1614                        attrs[j] = attrs[j + 1];
1615
1616                /* Check the shifted attr. */
1617                i--;
1618
1619                /*
 1620                 * event_map() is index based; the attrs array is organized
 1621                 * by increasing event index. If we shift the events, then
 1622                 * we need to compensate the index passed to event_map(),
 1623                 * otherwise we are looking up the wrong event in the map.
1624                 */
1625                offset++;
1626        }
1627}
1628
1629/* Merge two pointer arrays */
1630__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
1631{
1632        struct attribute **new;
1633        int j, i;
1634
1635        for (j = 0; a && a[j]; j++)
1636                ;
1637        for (i = 0; b && b[i]; i++)
1638                j++;
1639        j++;
1640
1641        new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
1642        if (!new)
1643                return NULL;
1644
1645        j = 0;
1646        for (i = 0; a && a[i]; i++)
1647                new[j++] = a[i];
1648        for (i = 0; b && b[i]; i++)
1649                new[j++] = b[i];
1650        new[j] = NULL;
1651
1652        return new;
1653}
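
    /*
     * For illustration: both arguments are NULL-terminated arrays of
     * attribute pointers, so merging {a0, a1, NULL} with {b0, NULL}
     * yields a freshly kmalloc'ed {a0, a1, b0, NULL}; a NULL argument
     * is treated as an empty array.
     */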
1654
1655ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
1656{
1657        struct perf_pmu_events_attr *pmu_attr = \
1658                container_of(attr, struct perf_pmu_events_attr, attr);
1659        u64 config = x86_pmu.event_map(pmu_attr->id);
1660
1661        /* string trumps id */
1662        if (pmu_attr->event_str)
1663                return sprintf(page, "%s", pmu_attr->event_str);
1664
1665        return x86_pmu.events_sysfs_show(page, config);
1666}
1667EXPORT_SYMBOL_GPL(events_sysfs_show);
1668
1669ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1670                          char *page)
1671{
1672        struct perf_pmu_events_ht_attr *pmu_attr =
1673                container_of(attr, struct perf_pmu_events_ht_attr, attr);
1674
1675        /*
1676         * Report conditional events depending on Hyper-Threading.
1677         *
1678         * This is overly conservative as usually the HT special
1679         * handling is not needed if the other CPU thread is idle.
1680         *
1681         * Note this does not (and cannot) handle the case when thread
1682         * siblings are invisible, for example with virtualization
1683         * if they are owned by some other guest.  The user tool
1684         * has to re-read when a thread sibling gets onlined later.
1685         */
1686        return sprintf(page, "%s",
1687                        topology_max_smt_threads() > 1 ?
1688                        pmu_attr->event_str_ht :
1689                        pmu_attr->event_str_noht);
1690}
1691
1692EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
1693EVENT_ATTR(instructions,                INSTRUCTIONS            );
1694EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
1695EVENT_ATTR(cache-misses,                CACHE_MISSES            );
1696EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
1697EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
1698EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
1699EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
1700EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
1701EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );
1702
1703static struct attribute *empty_attrs;
1704
1705static struct attribute *events_attr[] = {
1706        EVENT_PTR(CPU_CYCLES),
1707        EVENT_PTR(INSTRUCTIONS),
1708        EVENT_PTR(CACHE_REFERENCES),
1709        EVENT_PTR(CACHE_MISSES),
1710        EVENT_PTR(BRANCH_INSTRUCTIONS),
1711        EVENT_PTR(BRANCH_MISSES),
1712        EVENT_PTR(BUS_CYCLES),
1713        EVENT_PTR(STALLED_CYCLES_FRONTEND),
1714        EVENT_PTR(STALLED_CYCLES_BACKEND),
1715        EVENT_PTR(REF_CPU_CYCLES),
1716        NULL,
1717};
1718
1719static struct attribute_group x86_pmu_events_group __ro_after_init = {
1720        .name = "events",
1721        .attrs = events_attr,
1722};
1723
1724ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
1725{
1726        u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1727        u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1728        bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1729        bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1730        bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
1731        bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
1732        ssize_t ret;
1733
1734        /*
1735         * We have a whole page to spend and only a little data to
1736         * write, so we can safely use sprintf.
1737         */
1738        ret = sprintf(page, "event=0x%02llx", event);
1739
1740        if (umask)
1741                ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1742
1743        if (edge)
1744                ret += sprintf(page + ret, ",edge");
1745
1746        if (pc)
1747                ret += sprintf(page + ret, ",pc");
1748
1749        if (any)
1750                ret += sprintf(page + ret, ",any");
1751
1752        if (inv)
1753                ret += sprintf(page + ret, ",inv");
1754
1755        if (cmask)
1756                ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1757
1758        ret += sprintf(page + ret, "\n");
1759
1760        return ret;
1761}
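
    /*
     * For illustration: with config = 0x013c and event = 0x3c, only the
     * umask field (0x01) is set besides the event code, so the function
     * above would write "event=0x3c,umask=0x01\n" into the page and
     * return the length of that string.
     */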
1762
1763static struct attribute_group x86_pmu_attr_group;
1764static struct attribute_group x86_pmu_caps_group;
1765
1766static int __init init_hw_perf_events(void)
1767{
1768        struct x86_pmu_quirk *quirk;
1769        int err;
1770
1771        pr_info("Performance Events: ");
1772
1773        switch (boot_cpu_data.x86_vendor) {
1774        case X86_VENDOR_INTEL:
1775                err = intel_pmu_init();
1776                break;
1777        case X86_VENDOR_AMD:
1778                err = amd_pmu_init();
1779                break;
1780        case X86_VENDOR_HYGON:
1781                err = amd_pmu_init();
1782                x86_pmu.name = "HYGON";
1783                break;
1784        default:
1785                err = -ENOTSUPP;
1786        }
1787        if (err != 0) {
1788                pr_cont("no PMU driver, software events only.\n");
1789                return 0;
1790        }
1791
1792        pmu_check_apic();
1793
1794        /* sanity check that the hardware exists or is emulated */
1795        if (!check_hw_exists())
1796                return 0;
1797
1798        pr_cont("%s PMU driver.\n", x86_pmu.name);
1799
1800        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1801
1802        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1803                quirk->func();
1804
1805        if (!x86_pmu.intel_ctrl)
1806                x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
1807
1808        perf_events_lapic_init();
1809        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
1810
1811        unconstrained = (struct event_constraint)
1812                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1813                                   0, x86_pmu.num_counters, 0, 0);
1814
1815        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1816
1817        if (x86_pmu.caps_attrs) {
1818                struct attribute **tmp;
1819
1820                tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs);
1821                if (!WARN_ON(!tmp))
1822                        x86_pmu_caps_group.attrs = tmp;
1823        }
1824
1825        if (x86_pmu.event_attrs)
1826                x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1827
1828        if (!x86_pmu.events_sysfs_show)
1829                x86_pmu_events_group.attrs = &empty_attrs;
1830        else
1831                filter_events(x86_pmu_events_group.attrs);
1832
1833        if (x86_pmu.cpu_events) {
1834                struct attribute **tmp;
1835
1836                tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1837                if (!WARN_ON(!tmp))
1838                        x86_pmu_events_group.attrs = tmp;
1839        }
1840
1841        if (x86_pmu.attrs) {
1842                struct attribute **tmp;
1843
1844                tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
1845                if (!WARN_ON(!tmp))
1846                        x86_pmu_attr_group.attrs = tmp;
1847        }
1848
1849        pr_info("... version:                %d\n",     x86_pmu.version);
1850        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
1851        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
1852        pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
1853        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1854        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
1855        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
1856
1857        /*
1858         * Install callbacks. Core will call them for each online
1859         * cpu.
1860         */
1861        err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
1862                                x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
1863        if (err)
1864                return err;
1865
1866        err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
1867                                "perf/x86:starting", x86_pmu_starting_cpu,
1868                                x86_pmu_dying_cpu);
1869        if (err)
1870                goto out;
1871
1872        err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
1873                                x86_pmu_online_cpu, NULL);
1874        if (err)
1875                goto out1;
1876
1877        err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1878        if (err)
1879                goto out2;
1880
1881        return 0;
1882
1883out2:
1884        cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
1885out1:
1886        cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
1887out:
1888        cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
1889        return err;
1890}
1891early_initcall(init_hw_perf_events);
1892
1893static inline void x86_pmu_read(struct perf_event *event)
1894{
1895        if (x86_pmu.read)
1896                return x86_pmu.read(event);
1897        x86_perf_event_update(event);
1898}
1899
1900/*
1901 * Start group events scheduling transaction
1902 * Set the flag to make pmu::enable() not perform the
1903 * schedulability test; it will be performed at commit time.
1904 *
1905 * We only support PERF_PMU_TXN_ADD transactions. Save the
1906 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1907 * transactions.
1908 */
1909static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
1910{
1911        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1912
1913        WARN_ON_ONCE(cpuc->txn_flags);          /* txn already in flight */
1914
1915        cpuc->txn_flags = txn_flags;
1916        if (txn_flags & ~PERF_PMU_TXN_ADD)
1917                return;
1918
1919        perf_pmu_disable(pmu);
1920        __this_cpu_write(cpu_hw_events.n_txn, 0);
1921}
1922
1923/*
1924 * Stop group events scheduling transaction
1925 * Clear the flag and pmu::enable() will perform the
1926 * schedulability test.
1927 */
1928static void x86_pmu_cancel_txn(struct pmu *pmu)
1929{
1930        unsigned int txn_flags;
1931        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1932
1933        WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1934
1935        txn_flags = cpuc->txn_flags;
1936        cpuc->txn_flags = 0;
1937        if (txn_flags & ~PERF_PMU_TXN_ADD)
1938                return;
1939
1940        /*
1941         * Truncate collected array by the number of events added in this
1942         * transaction. See x86_pmu_add() and x86_pmu_*_txn().
1943         */
1944        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1945        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
1946        perf_pmu_enable(pmu);
1947}
1948
1949/*
1950 * Commit group events scheduling transaction
1951 * Perform the group schedulability test as a whole
1952 * Return 0 if success
1953 *
1954 * Does not cancel the transaction on failure; expects the caller to do this.
1955 */
1956static int x86_pmu_commit_txn(struct pmu *pmu)
1957{
1958        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1959        int assign[X86_PMC_IDX_MAX];
1960        int n, ret;
1961
1962        WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1963
1964        if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1965                cpuc->txn_flags = 0;
1966                return 0;
1967        }
1968
1969        n = cpuc->n_events;
1970
1971        if (!x86_pmu_initialized())
1972                return -EAGAIN;
1973
1974        ret = x86_pmu.schedule_events(cpuc, n, assign);
1975        if (ret)
1976                return ret;
1977
1978        /*
1979         * Copy the new assignment now that we know it is possible;
1980         * it will be used by hw_perf_enable().
1981         */
1982        memcpy(cpuc->assign, assign, n*sizeof(int));
1983
1984        cpuc->txn_flags = 0;
1985        perf_pmu_enable(pmu);
1986        return 0;
1987}
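
    /*
     * Taken together, the three callbacks above implement group
     * scheduling as a transaction: the perf core calls ->start_txn(),
     * adds each group member via ->add(), and then calls ->commit_txn()
     * to run the schedulability test over the whole group, or
     * ->cancel_txn() when an ->add() or the commit fails.
     */
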
1988/*
1989 * a fake_cpuc is used to validate event groups. Due to
1990 * the extra reg logic, we need to also allocate a fake
1991 * per_core and per_cpu structure. Otherwise, group events
1992 * using extra reg may conflict without the kernel being
1993 * able to catch this when the last event gets added to
1994 * the group.
1995 */
1996static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1997{
1998        kfree(cpuc->shared_regs);
1999        kfree(cpuc);
2000}
2001
2002static struct cpu_hw_events *allocate_fake_cpuc(void)
2003{
2004        struct cpu_hw_events *cpuc;
2005        int cpu = raw_smp_processor_id();
2006
2007        cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
2008        if (!cpuc)
2009                return ERR_PTR(-ENOMEM);
2010
2011        /* only needed if we have extra_regs */
2012        if (x86_pmu.extra_regs) {
2013                cpuc->shared_regs = allocate_shared_regs(cpu);
2014                if (!cpuc->shared_regs)
2015                        goto error;
2016        }
2017        cpuc->is_fake = 1;
2018        return cpuc;
2019error:
2020        free_fake_cpuc(cpuc);
2021        return ERR_PTR(-ENOMEM);
2022}
2023
2024/*
2025 * validate that we can schedule this event
2026 */
2027static int validate_event(struct perf_event *event)
2028{
2029        struct cpu_hw_events *fake_cpuc;
2030        struct event_constraint *c;
2031        int ret = 0;
2032
2033        fake_cpuc = allocate_fake_cpuc();
2034        if (IS_ERR(fake_cpuc))
2035                return PTR_ERR(fake_cpuc);
2036
2037        c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
2038
2039        if (!c || !c->weight)
2040                ret = -EINVAL;
2041
2042        if (x86_pmu.put_event_constraints)
2043                x86_pmu.put_event_constraints(fake_cpuc, event);
2044
2045        free_fake_cpuc(fake_cpuc);
2046
2047        return ret;
2048}
2049
2050/*
2051 * validate a single event group
2052 *
2053 * validation includes:
2054 *      - check events are compatible with each other
2055 *      - events do not compete for the same counter
2056 *      - number of events <= number of counters
2057 *
2058 * validation ensures the group can be loaded onto the
2059 * PMU if it was the only group available.
2060 */
2061static int validate_group(struct perf_event *event)
2062{
2063        struct perf_event *leader = event->group_leader;
2064        struct cpu_hw_events *fake_cpuc;
2065        int ret = -EINVAL, n;
2066
2067        fake_cpuc = allocate_fake_cpuc();
2068        if (IS_ERR(fake_cpuc))
2069                return PTR_ERR(fake_cpuc);
2070        /*
2071         * The event is not yet connected with its siblings,
2072         * therefore we must first collect the existing siblings,
2073         * then add the new event before we can simulate the
2074         * scheduling.
2075         */
2076        n = collect_events(fake_cpuc, leader, true);
2077        if (n < 0)
2078                goto out;
2079
2080        fake_cpuc->n_events = n;
2081        n = collect_events(fake_cpuc, event, false);
2082        if (n < 0)
2083                goto out;
2084
2085        fake_cpuc->n_events = n;
2086
2087        ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
2088
2089out:
2090        free_fake_cpuc(fake_cpuc);
2091        return ret;
2092}
2093
2094static int x86_pmu_event_init(struct perf_event *event)
2095{
2096        struct pmu *tmp;
2097        int err;
2098
2099        switch (event->attr.type) {
2100        case PERF_TYPE_RAW:
2101        case PERF_TYPE_HARDWARE:
2102        case PERF_TYPE_HW_CACHE:
2103                break;
2104
2105        default:
2106                return -ENOENT;
2107        }
2108
2109        err = __x86_pmu_event_init(event);
2110        if (!err) {
2111                /*
2112                 * we temporarily connect event to its pmu
2113                 * such that validate_group() can classify
2114                 * it as an x86 event using is_x86_event()
2115                 */
2116                tmp = event->pmu;
2117                event->pmu = &pmu;
2118
2119                if (event->group_leader != event)
2120                        err = validate_group(event);
2121                else
2122                        err = validate_event(event);
2123
2124                event->pmu = tmp;
2125        }
2126        if (err) {
2127                if (event->destroy)
2128                        event->destroy(event);
2129        }
2130
2131        if (READ_ONCE(x86_pmu.attr_rdpmc) &&
2132            !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
2133                event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2134
2135        return err;
2136}
2137
2138static void refresh_pce(void *ignored)
2139{
2140        load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
2141}
2142
2143static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
2144{
2145        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2146                return;
2147
2148        /*
2149         * This function relies on not being called concurrently in two
2150         * tasks in the same mm.  Otherwise one task could observe
2151         * perf_rdpmc_allowed > 1 and return all the way back to
2152         * userspace with CR4.PCE clear while another task is still
2153         * doing on_each_cpu_mask() to propagate CR4.PCE.
2154         *
2155         * For now, this can't happen because all callers hold mmap_sem
2156         * for write.  If this changes, we'll need a different solution.
2157         */
2158        lockdep_assert_held_exclusive(&mm->mmap_sem);
2159
2160        if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
2161                on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
2162}
2163
2164static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
2165{
2166
2167        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2168                return;
2169
2170        if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
2171                on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
2172}
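
    /*
     * The mapped/unmapped hooks above keep a per-mm count of
     * RDPMC-capable mappings; refresh_pce() then rewrites CR4 on every
     * CPU that may be running this mm, so CR4.PCE is set while the
     * count is non-zero (or while rdpmc is in always-available mode)
     * and cleared otherwise.
     */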
2173
2174static int x86_pmu_event_idx(struct perf_event *event)
2175{
2176        int idx = event->hw.idx;
2177
2178        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2179                return 0;
2180
2181        if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2182                idx -= INTEL_PMC_IDX_FIXED;
2183                idx |= 1 << 30;
2184        }
2185
2186        return idx + 1;
2187}
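
    /*
     * The value computed above is exported to userspace as
     * perf_event_mmap_page::index. Zero means RDPMC may not be used for
     * this event; otherwise the mmap ABI expects userspace to execute
     * rdpmc(index - 1). Fixed counters are reported with bit 30 set,
     * which matches the RDPMC instruction's ECX encoding for the
     * fixed-function counters.
     */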
2188
2189static ssize_t get_attr_rdpmc(struct device *cdev,
2190                              struct device_attribute *attr,
2191                              char *buf)
2192{
2193        return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2194}
2195
2196static ssize_t set_attr_rdpmc(struct device *cdev,
2197                              struct device_attribute *attr,
2198                              const char *buf, size_t count)
2199{
2200        unsigned long val;
2201        ssize_t ret;
2202
2203        ret = kstrtoul(buf, 0, &val);
2204        if (ret)
2205                return ret;
2206
2207        if (val > 2)
2208                return -EINVAL;
2209
2210        if (x86_pmu.attr_rdpmc_broken)
2211                return -ENOTSUPP;
2212
2213        if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2214                /*
2215                 * Changing into or out of always available, aka
2216                 * perf-event-bypassing mode.  This path is extremely slow,
2217                 * but only root can trigger it, so it's okay.
2218                 */
2219                if (val == 2)
2220                        static_branch_inc(&rdpmc_always_available_key);
2221                else
2222                        static_branch_dec(&rdpmc_always_available_key);
2223                on_each_cpu(refresh_pce, NULL, 1);
2224        }
2225
2226        x86_pmu.attr_rdpmc = val;
2227
2228        return count;
2229}
2230
2231static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
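
    /*
     * The rdpmc attribute defined above takes three values: 0 disables
     * userspace RDPMC, 1 (the default set in init_hw_perf_events())
     * allows it only for tasks that have an RDPMC-capable event mapped,
     * and 2 keeps CR4.PCE set everywhere so RDPMC is always available.
     * For example, as root (path shown for a typical sysfs layout):
     *
     *   echo 2 > /sys/bus/event_source/devices/cpu/rdpmc
     */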
2232
2233static struct attribute *x86_pmu_attrs[] = {
2234        &dev_attr_rdpmc.attr,
2235        NULL,
2236};
2237
2238static struct attribute_group x86_pmu_attr_group __ro_after_init = {
2239        .attrs = x86_pmu_attrs,
2240};
2241
2242static ssize_t max_precise_show(struct device *cdev,
2243                                  struct device_attribute *attr,
2244                                  char *buf)
2245{
2246        return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
2247}
2248
2249static DEVICE_ATTR_RO(max_precise);
2250
2251static struct attribute *x86_pmu_caps_attrs[] = {
2252        &dev_attr_max_precise.attr,
2253        NULL
2254};
2255
2256static struct attribute_group x86_pmu_caps_group __ro_after_init = {
2257        .name = "caps",
2258        .attrs = x86_pmu_caps_attrs,
2259};
2260
2261static const struct attribute_group *x86_pmu_attr_groups[] = {
2262        &x86_pmu_attr_group,
2263        &x86_pmu_format_group,
2264        &x86_pmu_events_group,
2265        &x86_pmu_caps_group,
2266        NULL,
2267};
2268
2269static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
2270{
2271        if (x86_pmu.sched_task)
2272                x86_pmu.sched_task(ctx, sched_in);
2273}
2274
2275void perf_check_microcode(void)
2276{
2277        if (x86_pmu.check_microcode)
2278                x86_pmu.check_microcode();
2279}
2280
2281static struct pmu pmu = {
2282        .pmu_enable             = x86_pmu_enable,
2283        .pmu_disable            = x86_pmu_disable,
2284
2285        .attr_groups            = x86_pmu_attr_groups,
2286
2287        .event_init             = x86_pmu_event_init,
2288
2289        .event_mapped           = x86_pmu_event_mapped,
2290        .event_unmapped         = x86_pmu_event_unmapped,
2291
2292        .add                    = x86_pmu_add,
2293        .del                    = x86_pmu_del,
2294        .start                  = x86_pmu_start,
2295        .stop                   = x86_pmu_stop,
2296        .read                   = x86_pmu_read,
2297
2298        .start_txn              = x86_pmu_start_txn,
2299        .cancel_txn             = x86_pmu_cancel_txn,
2300        .commit_txn             = x86_pmu_commit_txn,
2301
2302        .event_idx              = x86_pmu_event_idx,
2303        .sched_task             = x86_pmu_sched_task,
2304        .task_ctx_size          = sizeof(struct x86_perf_task_context),
2305};
2306
2307void arch_perf_update_userpage(struct perf_event *event,
2308                               struct perf_event_mmap_page *userpg, u64 now)
2309{
2310        struct cyc2ns_data data;
2311        u64 offset;
2312
2313        userpg->cap_user_time = 0;
2314        userpg->cap_user_time_zero = 0;
2315        userpg->cap_user_rdpmc =
2316                !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
2317        userpg->pmc_width = x86_pmu.cntval_bits;
2318
2319        if (!using_native_sched_clock() || !sched_clock_stable())
2320                return;
2321
2322        cyc2ns_read_begin(&data);
2323
2324        offset = data.cyc2ns_offset + __sched_clock_offset;
2325
2326        /*
2327         * Internal timekeeping for enabled/running/stopped times
2328         * is always in the local_clock domain.
2329         */
2330        userpg->cap_user_time = 1;
2331        userpg->time_mult = data.cyc2ns_mul;
2332        userpg->time_shift = data.cyc2ns_shift;
2333        userpg->time_offset = offset - now;
2334
2335        /*
2336         * cap_user_time_zero doesn't make sense when we're using a different
2337         * time base for the records.
2338         */
2339        if (!event->attr.use_clockid) {
2340                userpg->cap_user_time_zero = 1;
2341                userpg->time_zero = offset;
2342        }
2343
2344        cyc2ns_read_end();
2345}
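
    /*
     * A sketch of the userspace side, following the perf mmap ABI: when
     * cap_user_time is set, a raw TSC value can be converted to the
     * local_clock() domain roughly as below (the helper name is made up;
     * the fields come from struct perf_event_mmap_page, and a real
     * reader must also retry around the pc->lock sequence counter):
     *
     *   static inline u64 mmap_page_tsc_to_ns(struct perf_event_mmap_page *pc,
     *                                         u64 cyc)
     *   {
     *           u64 quot = cyc >> pc->time_shift;
     *           u64 rem  = cyc & (((u64)1 << pc->time_shift) - 1);
     *
     *           return pc->time_offset + quot * pc->time_mult +
     *                  ((rem * pc->time_mult) >> pc->time_shift);
     *   }
     *
     * Splitting the multiplication into quotient and remainder keeps the
     * intermediate products from overflowing 64 bits for large cycle
     * counts.
     */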
2346
2347void
2348perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2349{
2350        struct unwind_state state;
2351        unsigned long addr;
2352
2353        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2354                /* TODO: We don't support guest OS callchains yet */
2355                return;
2356        }
2357
2358        if (perf_callchain_store(entry, regs->ip))
2359                return;
2360
2361        for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
2362             unwind_next_frame(&state)) {
2363                addr = unwind_get_return_address(&state);
2364                if (!addr || perf_callchain_store(entry, addr))
2365                        return;
2366        }
2367}
2368
2369static inline int
2370valid_user_frame(const void __user *fp, unsigned long size)
2371{
2372        return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2373}
2374
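    /*
     * A segment selector is 16 bits wide: bits 1:0 hold the requested
     * privilege level, bit 2 (SEGMENT_TI_MASK) selects the LDT instead
     * of the GDT, and bits 15:3 index into the chosen descriptor table,
     * hence the "segment >> 3" below.
     */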
2375static unsigned long get_segment_base(unsigned int segment)
2376{
2377        struct desc_struct *desc;
2378        unsigned int idx = segment >> 3;
2379
2380        if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2381#ifdef CONFIG_MODIFY_LDT_SYSCALL
2382                struct ldt_struct *ldt;
2383
2384                /* IRQs are off, so this synchronizes with smp_store_release */
2385                ldt = READ_ONCE(current->active_mm->context.ldt);
2386                if (!ldt || idx >= ldt->nr_entries)
2387                        return 0;
2388
2389                desc = &ldt->entries[idx];
2390#else
2391                return 0;
2392#endif
2393        } else {
2394                if (idx >= GDT_ENTRIES)
2395                        return 0;
2396
2397                desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2398        }
2399
2400        return get_desc_base(desc);
2401}
2402
2403#ifdef CONFIG_IA32_EMULATION
2404
2405#include <linux/compat.h>
2406
2407static inline int
2408perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2409{
2410        /* 32-bit process in 64-bit kernel. */
2411        unsigned long ss_base, cs_base;
2412        struct stack_frame_ia32 frame;
2413        const void __user *fp;
2414
2415        if (!test_thread_flag(TIF_IA32))
2416                return 0;
2417
2418        cs_base = get_segment_base(regs->cs);
2419        ss_base = get_segment_base(regs->ss);
2420
2421        fp = compat_ptr(ss_base + regs->bp);
2422        pagefault_disable();
2423        while (entry->nr < entry->max_stack) {
2424                unsigned long bytes;
2425                frame.next_frame     = 0;
2426                frame.return_address = 0;
2427
2428                if (!valid_user_frame(fp, sizeof(frame)))
2429                        break;
2430
2431                bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2432                if (bytes != 0)
2433                        break;
2434                bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
2435                if (bytes != 0)
2436                        break;
2437
2438                perf_callchain_store(entry, cs_base + frame.return_address);
2439                fp = compat_ptr(ss_base + frame.next_frame);
2440        }
2441        pagefault_enable();
2442        return 1;
2443}
2444#else
2445static inline int
2446perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2447{
2448        return 0;
2449}
2450#endif
2451
2452void
2453perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2454{
2455        struct stack_frame frame;
2456        const unsigned long __user *fp;
2457
2458        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2459                /* TODO: We don't support guest OS callchains yet */
2460                return;
2461        }
2462
2463        /*
2464         * We don't know what to do with VM86 stacks; ignore them for now.
2465         */
2466        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2467                return;
2468
2469        fp = (unsigned long __user *)regs->bp;
2470
2471        perf_callchain_store(entry, regs->ip);
2472
2473        if (!nmi_uaccess_okay())
2474                return;
2475
2476        if (perf_callchain_user32(regs, entry))
2477                return;
2478
2479        pagefault_disable();
2480        while (entry->nr < entry->max_stack) {
2481                unsigned long bytes;
2482
2483                frame.next_frame     = NULL;
2484                frame.return_address = 0;
2485
2486                if (!valid_user_frame(fp, sizeof(frame)))
2487                        break;
2488
2489                bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
2490                if (bytes != 0)
2491                        break;
2492                bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
2493                if (bytes != 0)
2494                        break;
2495
2496                perf_callchain_store(entry, frame.return_address);
2497                fp = (void __user *)frame.next_frame;
2498        }
2499        pagefault_enable();
2500}
2501
2502/*
2503 * Deal with code segment offsets for the various execution modes:
2504 *
2505 *   VM86 - the good olde 16 bit days, where the linear address is
2506 *          20 bits and we use regs->ip + 0x10 * regs->cs.
2507 *
2508 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
2509 *          to figure out what the 32bit base address is.
2510 *
2511 *    X32 - has TIF_X32 set, but is running in x86_64
2512 *
2513 * X86_64 - CS,DS,SS,ES are all zero based.
2514 */
2515static unsigned long code_segment_base(struct pt_regs *regs)
2516{
2517        /*
2518         * For IA32 we look at the GDT/LDT segment base to convert the
2519         * effective IP to a linear address.
2520         */
2521
2522#ifdef CONFIG_X86_32
2523        /*
2524         * If we are in VM86 mode, add the segment offset to convert to a
2525         * linear address.
2526         */
2527        if (regs->flags & X86_VM_MASK)
2528                return 0x10 * regs->cs;
2529
2530        if (user_mode(regs) && regs->cs != __USER_CS)
2531                return get_segment_base(regs->cs);
2532#else
2533        if (user_mode(regs) && !user_64bit_mode(regs) &&
2534            regs->cs != __USER32_CS)
2535                return get_segment_base(regs->cs);
2536#endif
2537        return 0;
2538}
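
    /*
     * For illustration: in VM86 mode with regs->cs = 0x1000 and
     * regs->ip = 0x0100, the reported instruction pointer becomes
     * 0x1000 * 0x10 + 0x0100 = 0x10100, i.e. the 20-bit real-mode
     * linear address.
     */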
2539
2540unsigned long perf_instruction_pointer(struct pt_regs *regs)
2541{
2542        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
2543                return perf_guest_cbs->get_guest_ip();
2544
2545        return regs->ip + code_segment_base(regs);
2546}
2547
2548unsigned long perf_misc_flags(struct pt_regs *regs)
2549{
2550        int misc = 0;
2551
2552        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2553                if (perf_guest_cbs->is_user_mode())
2554                        misc |= PERF_RECORD_MISC_GUEST_USER;
2555                else
2556                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2557        } else {
2558                if (user_mode(regs))
2559                        misc |= PERF_RECORD_MISC_USER;
2560                else
2561                        misc |= PERF_RECORD_MISC_KERNEL;
2562        }
2563
2564        if (regs->flags & PERF_EFLAGS_EXACT)
2565                misc |= PERF_RECORD_MISC_EXACT_IP;
2566
2567        return misc;
2568}
2569
2570void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2571{
2572        cap->version            = x86_pmu.version;
2573        cap->num_counters_gp    = x86_pmu.num_counters;
2574        cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2575        cap->bit_width_gp       = x86_pmu.cntval_bits;
2576        cap->bit_width_fixed    = x86_pmu.cntval_bits;
2577        cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
2578        cap->events_mask_len    = x86_pmu.events_mask_len;
2579}
2580EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
2581