linux/arch/x86/events/intel/lbr.c
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/perf_event.h>
   3#include <linux/types.h>
   4
   5#include <asm/perf_event.h>
   6#include <asm/msr.h>
   7#include <asm/insn.h>
   8
   9#include "../perf_event.h"
  10
  11static const enum {
  12        LBR_EIP_FLAGS           = 1,
  13        LBR_TSX                 = 2,
  14} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
  15        [LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
  16        [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
  17};
  18
  19/*
  20 * Intel LBR_SELECT bits
  21 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
  22 *
  23 * Hardware branch filter (not available on all CPUs)
  24 */
  25#define LBR_KERNEL_BIT          0 /* do not capture at ring0 */
  26#define LBR_USER_BIT            1 /* do not capture at ring > 0 */
  27#define LBR_JCC_BIT             2 /* do not capture conditional branches */
  28#define LBR_REL_CALL_BIT        3 /* do not capture relative calls */
  29#define LBR_IND_CALL_BIT        4 /* do not capture indirect calls */
  30#define LBR_RETURN_BIT          5 /* do not capture near returns */
  31#define LBR_IND_JMP_BIT         6 /* do not capture indirect jumps */
  32#define LBR_REL_JMP_BIT         7 /* do not capture relative jumps */
  33#define LBR_FAR_BIT             8 /* do not capture far branches */
  34#define LBR_CALL_STACK_BIT      9 /* enable call stack */
  35
  36/*
   37 * The following bit only exists in Linux; we mask it out before writing
   38 * it to the actual MSR, but it helps the perf constraint code understand
   39 * that this is a separate configuration.
  40 */
  41#define LBR_NO_INFO_BIT        63 /* don't read LBR_INFO. */
  42
  43#define LBR_KERNEL      (1 << LBR_KERNEL_BIT)
  44#define LBR_USER        (1 << LBR_USER_BIT)
  45#define LBR_JCC         (1 << LBR_JCC_BIT)
  46#define LBR_REL_CALL    (1 << LBR_REL_CALL_BIT)
  47#define LBR_IND_CALL    (1 << LBR_IND_CALL_BIT)
  48#define LBR_RETURN      (1 << LBR_RETURN_BIT)
  49#define LBR_REL_JMP     (1 << LBR_REL_JMP_BIT)
  50#define LBR_IND_JMP     (1 << LBR_IND_JMP_BIT)
  51#define LBR_FAR         (1 << LBR_FAR_BIT)
  52#define LBR_CALL_STACK  (1 << LBR_CALL_STACK_BIT)
  53#define LBR_NO_INFO     (1ULL << LBR_NO_INFO_BIT)
  54
  55#define LBR_PLM (LBR_KERNEL | LBR_USER)
  56
  57#define LBR_SEL_MASK    0x3ff   /* valid bits in LBR_SELECT */
  58#define LBR_NOT_SUPP    -1      /* LBR filter not supported */
  59#define LBR_IGN         0       /* ignored */
  60
  61#define LBR_ANY          \
  62        (LBR_JCC        |\
  63         LBR_REL_CALL   |\
  64         LBR_IND_CALL   |\
  65         LBR_RETURN     |\
  66         LBR_REL_JMP    |\
  67         LBR_IND_JMP    |\
  68         LBR_FAR)
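
/*
 * Editor's illustration, not part of the upstream file: LBR_NO_INFO lives in
 * bit 63 and only exists in Linux, so it must never reach the hardware. A
 * minimal sketch of how a user-visible config is reduced to the bits that
 * MSR_LBR_SELECT actually defines; __intel_pmu_lbr_enable() below does the
 * same thing via x86_pmu.lbr_sel_mask. The helper name is hypothetical.
 */
static inline u64 lbr_select_bits_example(u64 config)
{
        /* e.g. (LBR_USER | LBR_ANY | LBR_NO_INFO) & LBR_SEL_MASK drops bit 63 */
        return config & LBR_SEL_MASK;
}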
  69
  70#define LBR_FROM_FLAG_MISPRED   BIT_ULL(63)
  71#define LBR_FROM_FLAG_IN_TX     BIT_ULL(62)
  72#define LBR_FROM_FLAG_ABORT     BIT_ULL(61)
  73
  74#define LBR_FROM_SIGNEXT_2MSB   (BIT_ULL(60) | BIT_ULL(59))
  75
  76/*
   77 * x86 control flow change classification
   78 * x86 control flow changes include branches, interrupts, traps, faults
  79 */
  80enum {
  81        X86_BR_NONE             = 0,      /* unknown */
  82
  83        X86_BR_USER             = 1 << 0, /* branch target is user */
  84        X86_BR_KERNEL           = 1 << 1, /* branch target is kernel */
  85
  86        X86_BR_CALL             = 1 << 2, /* call */
  87        X86_BR_RET              = 1 << 3, /* return */
  88        X86_BR_SYSCALL          = 1 << 4, /* syscall */
  89        X86_BR_SYSRET           = 1 << 5, /* syscall return */
  90        X86_BR_INT              = 1 << 6, /* sw interrupt */
  91        X86_BR_IRET             = 1 << 7, /* return from interrupt */
  92        X86_BR_JCC              = 1 << 8, /* conditional */
  93        X86_BR_JMP              = 1 << 9, /* jump */
  94        X86_BR_IRQ              = 1 << 10,/* hw interrupt or trap or fault */
  95        X86_BR_IND_CALL         = 1 << 11,/* indirect calls */
  96        X86_BR_ABORT            = 1 << 12,/* transaction abort */
  97        X86_BR_IN_TX            = 1 << 13,/* in transaction */
  98        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
  99        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
 100        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
 101        X86_BR_IND_JMP          = 1 << 17,/* indirect jump */
 102
 103        X86_BR_TYPE_SAVE        = 1 << 18,/* indicate to save branch type */
 104
 105};
 106
 107#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
 108#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)
 109
 110#define X86_BR_ANY       \
 111        (X86_BR_CALL    |\
 112         X86_BR_RET     |\
 113         X86_BR_SYSCALL |\
 114         X86_BR_SYSRET  |\
 115         X86_BR_INT     |\
 116         X86_BR_IRET    |\
 117         X86_BR_JCC     |\
 118         X86_BR_JMP      |\
 119         X86_BR_IRQ      |\
 120         X86_BR_ABORT    |\
 121         X86_BR_IND_CALL |\
 122         X86_BR_IND_JMP  |\
 123         X86_BR_ZERO_CALL)
 124
 125#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
 126
 127#define X86_BR_ANY_CALL          \
 128        (X86_BR_CALL            |\
 129         X86_BR_IND_CALL        |\
 130         X86_BR_ZERO_CALL       |\
 131         X86_BR_SYSCALL         |\
 132         X86_BR_IRQ             |\
 133         X86_BR_INT)
 134
 135/*
 136 * Intel LBR_CTL bits
 137 *
 138 * Hardware branch filter for Arch LBR
 139 */
 140#define ARCH_LBR_KERNEL_BIT             1  /* capture at ring0 */
 141#define ARCH_LBR_USER_BIT               2  /* capture at ring > 0 */
 142#define ARCH_LBR_CALL_STACK_BIT         3  /* enable call stack */
 143#define ARCH_LBR_JCC_BIT                16 /* capture conditional branches */
 144#define ARCH_LBR_REL_JMP_BIT            17 /* capture relative jumps */
 145#define ARCH_LBR_IND_JMP_BIT            18 /* capture indirect jumps */
 146#define ARCH_LBR_REL_CALL_BIT           19 /* capture relative calls */
 147#define ARCH_LBR_IND_CALL_BIT           20 /* capture indirect calls */
 148#define ARCH_LBR_RETURN_BIT             21 /* capture near returns */
 149#define ARCH_LBR_OTHER_BRANCH_BIT       22 /* capture other branches */
 150
 151#define ARCH_LBR_KERNEL                 (1ULL << ARCH_LBR_KERNEL_BIT)
 152#define ARCH_LBR_USER                   (1ULL << ARCH_LBR_USER_BIT)
 153#define ARCH_LBR_CALL_STACK             (1ULL << ARCH_LBR_CALL_STACK_BIT)
 154#define ARCH_LBR_JCC                    (1ULL << ARCH_LBR_JCC_BIT)
 155#define ARCH_LBR_REL_JMP                (1ULL << ARCH_LBR_REL_JMP_BIT)
 156#define ARCH_LBR_IND_JMP                (1ULL << ARCH_LBR_IND_JMP_BIT)
 157#define ARCH_LBR_REL_CALL               (1ULL << ARCH_LBR_REL_CALL_BIT)
 158#define ARCH_LBR_IND_CALL               (1ULL << ARCH_LBR_IND_CALL_BIT)
 159#define ARCH_LBR_RETURN                 (1ULL << ARCH_LBR_RETURN_BIT)
 160#define ARCH_LBR_OTHER_BRANCH           (1ULL << ARCH_LBR_OTHER_BRANCH_BIT)
 161
 162#define ARCH_LBR_ANY                     \
 163        (ARCH_LBR_JCC                   |\
 164         ARCH_LBR_REL_JMP               |\
 165         ARCH_LBR_IND_JMP               |\
 166         ARCH_LBR_REL_CALL              |\
 167         ARCH_LBR_IND_CALL              |\
 168         ARCH_LBR_RETURN                |\
 169         ARCH_LBR_OTHER_BRANCH)
 170
 171#define ARCH_LBR_CTL_MASK                       0x7f000e
 172
 173static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
 174
 175static __always_inline bool is_lbr_call_stack_bit_set(u64 config)
 176{
 177        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
 178                return !!(config & ARCH_LBR_CALL_STACK);
 179
 180        return !!(config & LBR_CALL_STACK);
 181}
 182
 183/*
  184 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI;
  185 * otherwise it becomes nearly impossible to get a reliable stack.
 186 */
 187
 188static void __intel_pmu_lbr_enable(bool pmi)
 189{
 190        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 191        u64 debugctl, lbr_select = 0, orig_debugctl;
 192
 193        /*
 194         * No need to unfreeze manually, as v4 can do that as part
 195         * of the GLOBAL_STATUS ack.
 196         */
 197        if (pmi && x86_pmu.version >= 4)
 198                return;
 199
 200        /*
 201         * No need to reprogram LBR_SELECT in a PMI, as it
 202         * did not change.
 203         */
 204        if (cpuc->lbr_sel)
 205                lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
 206        if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && !pmi && cpuc->lbr_sel)
 207                wrmsrl(MSR_LBR_SELECT, lbr_select);
 208
 209        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 210        orig_debugctl = debugctl;
 211
 212        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
 213                debugctl |= DEBUGCTLMSR_LBR;
 214        /*
 215         * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
 216         * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
 217         * may cause superfluous increase/decrease of LBR_TOS.
 218         */
 219        if (is_lbr_call_stack_bit_set(lbr_select))
 220                debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 221        else
 222                debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 223
 224        if (orig_debugctl != debugctl)
 225                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 226
 227        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
 228                wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN);
 229}
 230
 231static void __intel_pmu_lbr_disable(void)
 232{
 233        u64 debugctl;
 234
 235        if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
 236                wrmsrl(MSR_ARCH_LBR_CTL, 0);
 237                return;
 238        }
 239
 240        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 241        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
 242        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
 243}
 244
 245void intel_pmu_lbr_reset_32(void)
 246{
 247        int i;
 248
 249        for (i = 0; i < x86_pmu.lbr_nr; i++)
 250                wrmsrl(x86_pmu.lbr_from + i, 0);
 251}
 252
 253void intel_pmu_lbr_reset_64(void)
 254{
 255        int i;
 256
 257        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 258                wrmsrl(x86_pmu.lbr_from + i, 0);
 259                wrmsrl(x86_pmu.lbr_to   + i, 0);
 260                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 261                        wrmsrl(x86_pmu.lbr_info + i, 0);
 262        }
 263}
 264
 265static void intel_pmu_arch_lbr_reset(void)
 266{
  267        /* Writing to the ARCH_LBR_DEPTH MSR resets all LBR entries to 0. */
 268        wrmsrl(MSR_ARCH_LBR_DEPTH, x86_pmu.lbr_nr);
 269}
 270
 271void intel_pmu_lbr_reset(void)
 272{
 273        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 274
 275        if (!x86_pmu.lbr_nr)
 276                return;
 277
 278        x86_pmu.lbr_reset();
 279
 280        cpuc->last_task_ctx = NULL;
 281        cpuc->last_log_id = 0;
 282}
 283
 284/*
 285 * TOS = most recently recorded branch
 286 */
 287static inline u64 intel_pmu_lbr_tos(void)
 288{
 289        u64 tos;
 290
 291        rdmsrl(x86_pmu.lbr_tos, tos);
 292        return tos;
 293}
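
/*
 * Editor's sketch, not part of the upstream file: the read/save paths below
 * walk the LBR stack newest-to-oldest with "(tos - i) & mask", relying on
 * x86_pmu.lbr_nr being a power of two so the index wraps around the ring.
 * The helper name is hypothetical.
 */
static inline unsigned int lbr_ring_idx_example(u64 tos, int i)
{
        unsigned int mask = x86_pmu.lbr_nr - 1; /* e.g. 16 entries -> 0xf */

        return (tos - i) & mask;                /* i == 0 is the newest entry */
}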
 294
 295enum {
 296        LBR_NONE,
 297        LBR_VALID,
 298};
 299
 300/*
 301 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 302 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 303 * TSX is not supported they have no consistent behavior:
 304 *
 305 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 306 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 307 *     part of the sign extension.
 308 *
 309 * Therefore, if:
 310 *
 311 *   1) LBR has TSX format
 312 *   2) CPU has no TSX support enabled
 313 *
 314 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
  315 * value from rdmsr() must be converted to have a 61-bit sign extension,
 316 * ignoring the TSX flags.
 317 */
 318static inline bool lbr_from_signext_quirk_needed(void)
 319{
 320        int lbr_format = x86_pmu.intel_cap.lbr_format;
 321        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
 322                           boot_cpu_has(X86_FEATURE_RTM);
 323
 324        return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
 325}
 326
 327static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
 328
 329/* If quirk is enabled, ensure sign extension is 63 bits: */
 330inline u64 lbr_from_signext_quirk_wr(u64 val)
 331{
 332        if (static_branch_unlikely(&lbr_from_quirk_key)) {
 333                /*
 334                 * Sign extend into bits 61:62 while preserving bit 63.
 335                 *
 336                 * Quirk is enabled when TSX is disabled. Therefore TSX bits
 337                 * in val are always OFF and must be changed to be sign
 338                 * extension bits. Since bits 59:60 are guaranteed to be
 339                 * part of the sign extension bits, we can just copy them
 340                 * to 61:62.
 341                 */
 342                val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
 343        }
 344        return val;
 345}
 346
 347/*
 348 * If quirk is needed, ensure sign extension is 61 bits:
 349 */
 350static u64 lbr_from_signext_quirk_rd(u64 val)
 351{
 352        if (static_branch_unlikely(&lbr_from_quirk_key)) {
 353                /*
 354                 * Quirk is on when TSX is not enabled. Therefore TSX
 355                 * flags must be read as OFF.
 356                 */
 357                val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
 358        }
 359        return val;
 360}
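
/*
 * Editor's worked example, not part of the upstream file, assuming a no-TSX
 * part with the quirk enabled:
 *
 *   value captured by hardware:   0x9fff880123456789
 *       (bits 62:61 forced off, bits 60:59 set by the sign extension)
 *   lbr_from_signext_quirk_wr():  copies bits 60:59 into bits 62:61, so
 *       0xffff880123456789 is what actually gets written by wrmsr()
 *   lbr_from_signext_quirk_rd():  clears bits 62:61 (the TSX flags) again,
 *       turning 0xffff880123456789 back into 0x9fff880123456789
 */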
 361
 362static __always_inline void wrlbr_from(unsigned int idx, u64 val)
 363{
 364        val = lbr_from_signext_quirk_wr(val);
 365        wrmsrl(x86_pmu.lbr_from + idx, val);
 366}
 367
 368static __always_inline void wrlbr_to(unsigned int idx, u64 val)
 369{
 370        wrmsrl(x86_pmu.lbr_to + idx, val);
 371}
 372
 373static __always_inline void wrlbr_info(unsigned int idx, u64 val)
 374{
 375        wrmsrl(x86_pmu.lbr_info + idx, val);
 376}
 377
 378static __always_inline u64 rdlbr_from(unsigned int idx, struct lbr_entry *lbr)
 379{
 380        u64 val;
 381
 382        if (lbr)
 383                return lbr->from;
 384
 385        rdmsrl(x86_pmu.lbr_from + idx, val);
 386
 387        return lbr_from_signext_quirk_rd(val);
 388}
 389
 390static __always_inline u64 rdlbr_to(unsigned int idx, struct lbr_entry *lbr)
 391{
 392        u64 val;
 393
 394        if (lbr)
 395                return lbr->to;
 396
 397        rdmsrl(x86_pmu.lbr_to + idx, val);
 398
 399        return val;
 400}
 401
 402static __always_inline u64 rdlbr_info(unsigned int idx, struct lbr_entry *lbr)
 403{
 404        u64 val;
 405
 406        if (lbr)
 407                return lbr->info;
 408
 409        rdmsrl(x86_pmu.lbr_info + idx, val);
 410
 411        return val;
 412}
 413
 414static inline void
 415wrlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
 416{
 417        wrlbr_from(idx, lbr->from);
 418        wrlbr_to(idx, lbr->to);
 419        if (need_info)
 420                wrlbr_info(idx, lbr->info);
 421}
 422
 423static inline bool
 424rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
 425{
 426        u64 from = rdlbr_from(idx, NULL);
 427
 428        /* Don't read invalid entry */
 429        if (!from)
 430                return false;
 431
 432        lbr->from = from;
 433        lbr->to = rdlbr_to(idx, NULL);
 434        if (need_info)
 435                lbr->info = rdlbr_info(idx, NULL);
 436
 437        return true;
 438}
 439
 440void intel_pmu_lbr_restore(void *ctx)
 441{
 442        bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
 443        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 444        struct x86_perf_task_context *task_ctx = ctx;
 445        int i;
 446        unsigned lbr_idx, mask;
 447        u64 tos = task_ctx->tos;
 448
 449        mask = x86_pmu.lbr_nr - 1;
 450        for (i = 0; i < task_ctx->valid_lbrs; i++) {
 451                lbr_idx = (tos - i) & mask;
 452                wrlbr_all(&task_ctx->lbr[i], lbr_idx, need_info);
 453        }
 454
 455        for (; i < x86_pmu.lbr_nr; i++) {
 456                lbr_idx = (tos - i) & mask;
 457                wrlbr_from(lbr_idx, 0);
 458                wrlbr_to(lbr_idx, 0);
 459                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 460                        wrlbr_info(lbr_idx, 0);
 461        }
 462
 463        wrmsrl(x86_pmu.lbr_tos, tos);
 464
 465        if (cpuc->lbr_select)
 466                wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 467}
 468
 469static void intel_pmu_arch_lbr_restore(void *ctx)
 470{
 471        struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
 472        struct lbr_entry *entries = task_ctx->entries;
 473        int i;
 474
 475        /* Fast reset the LBRs before restore if the call stack is not full. */
 476        if (!entries[x86_pmu.lbr_nr - 1].from)
 477                intel_pmu_arch_lbr_reset();
 478
 479        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 480                if (!entries[i].from)
 481                        break;
 482                wrlbr_all(&entries[i], i, true);
 483        }
 484}
 485
 486/*
 487 * Restore the Architecture LBR state from the xsave area in the perf
 488 * context data for the task via the XRSTORS instruction.
 489 */
 490static void intel_pmu_arch_lbr_xrstors(void *ctx)
 491{
 492        struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 493
 494        copy_kernel_to_dynamic_supervisor(&task_ctx->xsave, XFEATURE_MASK_LBR);
 495}
 496
 497static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
 498{
 499        if (static_cpu_has(X86_FEATURE_ARCH_LBR))
 500                return x86_pmu.lbr_deep_c_reset && !rdlbr_from(0, NULL);
 501
 502        return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
 503}
 504
 505static void __intel_pmu_lbr_restore(void *ctx)
 506{
 507        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 508
 509        if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
 510            task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
 511                intel_pmu_lbr_reset();
 512                return;
 513        }
 514
 515        /*
  516         * Do not restore the LBR registers if:
  517         * - no one else touched them, and
  518         * - they were not cleared in C-state.
 519         */
 520        if ((ctx == cpuc->last_task_ctx) &&
 521            (task_context_opt(ctx)->log_id == cpuc->last_log_id) &&
 522            !lbr_is_reset_in_cstate(ctx)) {
 523                task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
 524                return;
 525        }
 526
 527        x86_pmu.lbr_restore(ctx);
 528
 529        task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
 530}
 531
 532void intel_pmu_lbr_save(void *ctx)
 533{
 534        bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
 535        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 536        struct x86_perf_task_context *task_ctx = ctx;
 537        unsigned lbr_idx, mask;
 538        u64 tos;
 539        int i;
 540
 541        mask = x86_pmu.lbr_nr - 1;
 542        tos = intel_pmu_lbr_tos();
 543        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 544                lbr_idx = (tos - i) & mask;
 545                if (!rdlbr_all(&task_ctx->lbr[i], lbr_idx, need_info))
 546                        break;
 547        }
 548        task_ctx->valid_lbrs = i;
 549        task_ctx->tos = tos;
 550
 551        if (cpuc->lbr_select)
 552                rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 553}
 554
 555static void intel_pmu_arch_lbr_save(void *ctx)
 556{
 557        struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
 558        struct lbr_entry *entries = task_ctx->entries;
 559        int i;
 560
 561        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 562                if (!rdlbr_all(&entries[i], i, true))
 563                        break;
 564        }
 565
 566        /* LBR call stack is not full. Reset is required in restore. */
 567        if (i < x86_pmu.lbr_nr)
 568                entries[x86_pmu.lbr_nr - 1].from = 0;
 569}
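
/*
 * Editor's note, not part of the upstream file, illustrating the save/restore
 * handshake above: with, say, 32 architectural LBRs and only 3 valid entries
 * at save time, entries[31].from is zeroed here, so intel_pmu_arch_lbr_restore()
 * sees a non-full stack, does the fast MSR_ARCH_LBR_DEPTH reset and then
 * rewrites just the 3 saved entries.
 */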
 570
 571/*
 572 * Save the Architecture LBR state to the xsave area in the perf
 573 * context data for the task via the XSAVES instruction.
 574 */
 575static void intel_pmu_arch_lbr_xsaves(void *ctx)
 576{
 577        struct x86_perf_task_context_arch_lbr_xsave *task_ctx = ctx;
 578
 579        copy_dynamic_supervisor_to_kernel(&task_ctx->xsave, XFEATURE_MASK_LBR);
 580}
 581
 582static void __intel_pmu_lbr_save(void *ctx)
 583{
 584        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 585
 586        if (task_context_opt(ctx)->lbr_callstack_users == 0) {
 587                task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
 588                return;
 589        }
 590
 591        x86_pmu.lbr_save(ctx);
 592
 593        task_context_opt(ctx)->lbr_stack_state = LBR_VALID;
 594
 595        cpuc->last_task_ctx = ctx;
 596        cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
 597}
 598
 599void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
 600                                 struct perf_event_context *next)
 601{
 602        void *prev_ctx_data, *next_ctx_data;
 603
 604        swap(prev->task_ctx_data, next->task_ctx_data);
 605
 606        /*
  607         * Architecture-specific synchronization only makes sense
  608         * when both the prev->task_ctx_data and next->task_ctx_data
  609         * pointers are allocated.
 610         */
 611
 612        prev_ctx_data = next->task_ctx_data;
 613        next_ctx_data = prev->task_ctx_data;
 614
 615        if (!prev_ctx_data || !next_ctx_data)
 616                return;
 617
 618        swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
 619             task_context_opt(next_ctx_data)->lbr_callstack_users);
 620}
 621
 622void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 623{
 624        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 625        void *task_ctx;
 626
 627        if (!cpuc->lbr_users)
 628                return;
 629
 630        /*
 631         * If LBR callstack feature is enabled and the stack was saved when
 632         * the task was scheduled out, restore the stack. Otherwise flush
 633         * the LBR stack.
 634         */
 635        task_ctx = ctx ? ctx->task_ctx_data : NULL;
 636        if (task_ctx) {
 637                if (sched_in)
 638                        __intel_pmu_lbr_restore(task_ctx);
 639                else
 640                        __intel_pmu_lbr_save(task_ctx);
 641                return;
 642        }
 643
 644        /*
 645         * Since a context switch can flip the address space and LBR entries
 646         * are not tagged with an identifier, we need to wipe the LBR, even for
 647         * per-cpu events. You simply cannot resolve the branches from the old
 648         * address space.
 649         */
 650        if (sched_in)
 651                intel_pmu_lbr_reset();
 652}
 653
 654static inline bool branch_user_callstack(unsigned br_sel)
 655{
 656        return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
 657}
 658
 659void intel_pmu_lbr_add(struct perf_event *event)
 660{
 661        struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
 662        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 663
 664        if (!x86_pmu.lbr_nr)
 665                return;
 666
 667        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
 668                cpuc->lbr_select = 1;
 669
 670        cpuc->br_sel = event->hw.branch_reg.reg;
 671
 672        if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data)
 673                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users++;
 674
 675        /*
 676         * Request pmu::sched_task() callback, which will fire inside the
 677         * regular perf event scheduling, so that call will:
 678         *
 679         *  - restore or wipe; when LBR-callstack,
 680         *  - wipe; otherwise,
 681         *
 682         * when this is from __perf_event_task_sched_in().
 683         *
 684         * However, if this is from perf_install_in_context(), no such callback
 685         * will follow and we'll need to reset the LBR here if this is the
 686         * first LBR event.
 687         *
 688         * The problem is, we cannot tell these cases apart... but we can
 689         * exclude the biggest chunk of cases by looking at
 690         * event->total_time_running. An event that has accrued runtime cannot
 691         * be 'new'. Conversely, a new event can get installed through the
 692         * context switch path for the first time.
 693         */
 694        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 695                cpuc->lbr_pebs_users++;
 696        perf_sched_cb_inc(event->ctx->pmu);
 697        if (!cpuc->lbr_users++ && !event->total_time_running)
 698                intel_pmu_lbr_reset();
 699
 700        if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
 701            kmem_cache && !cpuc->lbr_xsave &&
 702            (cpuc->lbr_users != cpuc->lbr_pebs_users))
 703                cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 704}
 705
 706void release_lbr_buffers(void)
 707{
 708        struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
 709        struct cpu_hw_events *cpuc;
 710        int cpu;
 711
 712        if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
 713                return;
 714
 715        for_each_possible_cpu(cpu) {
 716                cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
 717                if (kmem_cache && cpuc->lbr_xsave) {
 718                        kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
 719                        cpuc->lbr_xsave = NULL;
 720                }
 721        }
 722}
 723
 724void intel_pmu_lbr_del(struct perf_event *event)
 725{
 726        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 727
 728        if (!x86_pmu.lbr_nr)
 729                return;
 730
 731        if (branch_user_callstack(cpuc->br_sel) &&
 732            event->ctx->task_ctx_data)
 733                task_context_opt(event->ctx->task_ctx_data)->lbr_callstack_users--;
 734
 735        if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
 736                cpuc->lbr_select = 0;
 737
 738        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 739                cpuc->lbr_pebs_users--;
 740        cpuc->lbr_users--;
 741        WARN_ON_ONCE(cpuc->lbr_users < 0);
 742        WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
 743        perf_sched_cb_dec(event->ctx->pmu);
 744}
 745
 746static inline bool vlbr_exclude_host(void)
 747{
 748        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 749
 750        return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
 751                (unsigned long *)&cpuc->intel_ctrl_guest_mask);
 752}
 753
 754void intel_pmu_lbr_enable_all(bool pmi)
 755{
 756        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 757
 758        if (cpuc->lbr_users && !vlbr_exclude_host())
 759                __intel_pmu_lbr_enable(pmi);
 760}
 761
 762void intel_pmu_lbr_disable_all(void)
 763{
 764        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 765
 766        if (cpuc->lbr_users && !vlbr_exclude_host())
 767                __intel_pmu_lbr_disable();
 768}
 769
 770void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 771{
 772        unsigned long mask = x86_pmu.lbr_nr - 1;
 773        u64 tos = intel_pmu_lbr_tos();
 774        int i;
 775
 776        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 777                unsigned long lbr_idx = (tos - i) & mask;
 778                union {
 779                        struct {
 780                                u32 from;
 781                                u32 to;
 782                        };
 783                        u64     lbr;
 784                } msr_lastbranch;
 785
 786                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
 787
 788                cpuc->lbr_entries[i].from       = msr_lastbranch.from;
 789                cpuc->lbr_entries[i].to         = msr_lastbranch.to;
 790                cpuc->lbr_entries[i].mispred    = 0;
 791                cpuc->lbr_entries[i].predicted  = 0;
 792                cpuc->lbr_entries[i].in_tx      = 0;
 793                cpuc->lbr_entries[i].abort      = 0;
 794                cpuc->lbr_entries[i].cycles     = 0;
 795                cpuc->lbr_entries[i].type       = 0;
 796                cpuc->lbr_entries[i].reserved   = 0;
 797        }
 798        cpuc->lbr_stack.nr = i;
 799        cpuc->lbr_stack.hw_idx = tos;
 800}
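
/*
 * Editor's sketch, not part of the upstream file: with the 32-bit LBR format a
 * single MSR holds both addresses, 'from' in bits 31:0 and 'to' in bits 63:32,
 * which is what the anonymous union above relies on (x86 is little endian).
 * The helper name is hypothetical.
 */
static inline void lbr_split_32_example(u64 lbr, u32 *from, u32 *to)
{
        *from = (u32)lbr;               /* bits 31:0  */
        *to   = (u32)(lbr >> 32);       /* bits 63:32 */
}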
 801
 802/*
 803 * Due to lack of segmentation in Linux the effective address (offset)
 804 * is the same as the linear address, allowing us to merge the LIP and EIP
 805 * LBR formats.
 806 */
 807void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 808{
 809        bool need_info = false, call_stack = false;
 810        unsigned long mask = x86_pmu.lbr_nr - 1;
 811        int lbr_format = x86_pmu.intel_cap.lbr_format;
 812        u64 tos = intel_pmu_lbr_tos();
 813        int i;
 814        int out = 0;
 815        int num = x86_pmu.lbr_nr;
 816
 817        if (cpuc->lbr_sel) {
 818                need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
 819                if (cpuc->lbr_sel->config & LBR_CALL_STACK)
 820                        call_stack = true;
 821        }
 822
 823        for (i = 0; i < num; i++) {
 824                unsigned long lbr_idx = (tos - i) & mask;
 825                u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
 826                int skip = 0;
 827                u16 cycles = 0;
 828                int lbr_flags = lbr_desc[lbr_format];
 829
 830                from = rdlbr_from(lbr_idx, NULL);
 831                to   = rdlbr_to(lbr_idx, NULL);
 832
 833                /*
 834                 * Read LBR call stack entries
 835                 * until invalid entry (0s) is detected.
 836                 */
 837                if (call_stack && !from)
 838                        break;
 839
 840                if (lbr_format == LBR_FORMAT_INFO && need_info) {
 841                        u64 info;
 842
 843                        info = rdlbr_info(lbr_idx, NULL);
 844                        mis = !!(info & LBR_INFO_MISPRED);
 845                        pred = !mis;
 846                        in_tx = !!(info & LBR_INFO_IN_TX);
 847                        abort = !!(info & LBR_INFO_ABORT);
 848                        cycles = (info & LBR_INFO_CYCLES);
 849                }
 850
 851                if (lbr_format == LBR_FORMAT_TIME) {
 852                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
 853                        pred = !mis;
 854                        skip = 1;
 855                        cycles = ((to >> 48) & LBR_INFO_CYCLES);
 856
 857                        to = (u64)((((s64)to) << 16) >> 16);
 858                }
 859
 860                if (lbr_flags & LBR_EIP_FLAGS) {
 861                        mis = !!(from & LBR_FROM_FLAG_MISPRED);
 862                        pred = !mis;
 863                        skip = 1;
 864                }
 865                if (lbr_flags & LBR_TSX) {
 866                        in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
 867                        abort = !!(from & LBR_FROM_FLAG_ABORT);
 868                        skip = 3;
 869                }
 870                from = (u64)((((s64)from) << skip) >> skip);
 871
 872                /*
 873                 * Some CPUs report duplicated abort records,
 874                 * with the second entry not having an abort bit set.
  875                 * Skip them here. This loop walks the entries from
  876                 * newest to oldest, so we need to undo the previous
  877                 * record. If the abort just happened outside the
  878                 * window, the extra entry cannot be removed.
 879                 */
 880                if (abort && x86_pmu.lbr_double_abort && out > 0)
 881                        out--;
 882
 883                cpuc->lbr_entries[out].from      = from;
 884                cpuc->lbr_entries[out].to        = to;
 885                cpuc->lbr_entries[out].mispred   = mis;
 886                cpuc->lbr_entries[out].predicted = pred;
 887                cpuc->lbr_entries[out].in_tx     = in_tx;
 888                cpuc->lbr_entries[out].abort     = abort;
 889                cpuc->lbr_entries[out].cycles    = cycles;
 890                cpuc->lbr_entries[out].type      = 0;
 891                cpuc->lbr_entries[out].reserved  = 0;
 892                out++;
 893        }
 894        cpuc->lbr_stack.nr = out;
 895        cpuc->lbr_stack.hw_idx = tos;
 896}
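
/*
 * Editor's worked example, not part of the upstream file, for the
 * "(((s64)from) << skip) >> skip" step above with an LBR_TSX format
 * (skip = 3): a raw 'from' of 0x9fff880123456789 carries the flag bits in
 * 63:61 and the sign-extended address in 60:0; shifting left and then
 * arithmetically right by 3 drops the flags and re-extends bit 60, giving
 * the canonical address 0xffff880123456789.
 */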
 897
 898static __always_inline int get_lbr_br_type(u64 info)
 899{
 900        if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
 901                return 0;
 902
 903        return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
 904}
 905
 906static __always_inline bool get_lbr_mispred(u64 info)
 907{
 908        if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
 909                return 0;
 910
 911        return !!(info & LBR_INFO_MISPRED);
 912}
 913
 914static __always_inline bool get_lbr_predicted(u64 info)
 915{
 916        if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
 917                return 0;
 918
 919        return !(info & LBR_INFO_MISPRED);
 920}
 921
  922static __always_inline u16 get_lbr_cycles(u64 info)
 923{
 924        if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
 925            !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
 926                return 0;
 927
 928        return info & LBR_INFO_CYCLES;
 929}
 930
 931static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
 932                                struct lbr_entry *entries)
 933{
 934        struct perf_branch_entry *e;
 935        struct lbr_entry *lbr;
 936        u64 from, to, info;
 937        int i;
 938
 939        for (i = 0; i < x86_pmu.lbr_nr; i++) {
 940                lbr = entries ? &entries[i] : NULL;
 941                e = &cpuc->lbr_entries[i];
 942
 943                from = rdlbr_from(i, lbr);
 944                /*
 945                 * Read LBR entries until invalid entry (0s) is detected.
 946                 */
 947                if (!from)
 948                        break;
 949
 950                to = rdlbr_to(i, lbr);
 951                info = rdlbr_info(i, lbr);
 952
 953                e->from         = from;
 954                e->to           = to;
 955                e->mispred      = get_lbr_mispred(info);
 956                e->predicted    = get_lbr_predicted(info);
 957                e->in_tx        = !!(info & LBR_INFO_IN_TX);
 958                e->abort        = !!(info & LBR_INFO_ABORT);
 959                e->cycles       = get_lbr_cycles(info);
 960                e->type         = get_lbr_br_type(info);
 961                e->reserved     = 0;
 962        }
 963
 964        cpuc->lbr_stack.nr = i;
 965}
 966
 967static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
 968{
 969        intel_pmu_store_lbr(cpuc, NULL);
 970}
 971
 972static void intel_pmu_arch_lbr_read_xsave(struct cpu_hw_events *cpuc)
 973{
 974        struct x86_perf_task_context_arch_lbr_xsave *xsave = cpuc->lbr_xsave;
 975
 976        if (!xsave) {
 977                intel_pmu_store_lbr(cpuc, NULL);
 978                return;
 979        }
 980        copy_dynamic_supervisor_to_kernel(&xsave->xsave, XFEATURE_MASK_LBR);
 981
 982        intel_pmu_store_lbr(cpuc, xsave->lbr.entries);
 983}
 984
 985void intel_pmu_lbr_read(void)
 986{
 987        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 988
 989        /*
  990         * Don't read when all LBR users are using adaptive PEBS.
 991         *
 992         * This could be smarter and actually check the event,
 993         * but this simple approach seems to work for now.
 994         */
 995        if (!cpuc->lbr_users || vlbr_exclude_host() ||
 996            cpuc->lbr_users == cpuc->lbr_pebs_users)
 997                return;
 998
 999        x86_pmu.lbr_read(cpuc);
1000
1001        intel_pmu_lbr_filter(cpuc);
1002}
1003
1004/*
1005 * SW filter is used:
1006 * - in case there is no HW filter
1007 * - in case the HW filter has errata or limitations
1008 */
1009static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
1010{
1011        u64 br_type = event->attr.branch_sample_type;
1012        int mask = 0;
1013
1014        if (br_type & PERF_SAMPLE_BRANCH_USER)
1015                mask |= X86_BR_USER;
1016
1017        if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
1018                mask |= X86_BR_KERNEL;
1019
1020        /* we ignore BRANCH_HV here */
1021
1022        if (br_type & PERF_SAMPLE_BRANCH_ANY)
1023                mask |= X86_BR_ANY;
1024
1025        if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
1026                mask |= X86_BR_ANY_CALL;
1027
1028        if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
1029                mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
1030
1031        if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
1032                mask |= X86_BR_IND_CALL;
1033
1034        if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
1035                mask |= X86_BR_ABORT;
1036
1037        if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
1038                mask |= X86_BR_IN_TX;
1039
1040        if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
1041                mask |= X86_BR_NO_TX;
1042
1043        if (br_type & PERF_SAMPLE_BRANCH_COND)
1044                mask |= X86_BR_JCC;
1045
1046        if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
1047                if (!x86_pmu_has_lbr_callstack())
1048                        return -EOPNOTSUPP;
1049                if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
1050                        return -EINVAL;
1051                mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
1052                        X86_BR_CALL_STACK;
1053        }
1054
1055        if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
1056                mask |= X86_BR_IND_JMP;
1057
1058        if (br_type & PERF_SAMPLE_BRANCH_CALL)
1059                mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
1060
1061        if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
1062                mask |= X86_BR_TYPE_SAVE;
1063
1064        /*
 1065         * Stash the actual user request into reg; it may
 1066         * be used by fixup code for some CPUs.
1067         */
1068        event->hw.branch_reg.reg = mask;
1069        return 0;
1070}
1071
1072/*
 1073 * Set up the HW LBR filter.
 1074 * Used only when available; it may not be enough to disambiguate
 1075 * all branches and may need the help of the SW filter.
1076 */
1077static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
1078{
1079        struct hw_perf_event_extra *reg;
1080        u64 br_type = event->attr.branch_sample_type;
1081        u64 mask = 0, v;
1082        int i;
1083
1084        for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
1085                if (!(br_type & (1ULL << i)))
1086                        continue;
1087
1088                v = x86_pmu.lbr_sel_map[i];
1089                if (v == LBR_NOT_SUPP)
1090                        return -EOPNOTSUPP;
1091
1092                if (v != LBR_IGN)
1093                        mask |= v;
1094        }
1095
1096        reg = &event->hw.branch_reg;
1097        reg->idx = EXTRA_REG_LBR;
1098
1099        if (static_cpu_has(X86_FEATURE_ARCH_LBR)) {
1100                reg->config = mask;
1101                return 0;
1102        }
1103
1104        /*
1105         * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
1106         * in suppress mode. So LBR_SELECT should be set to
1107         * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
1108         * But the 10th bit LBR_CALL_STACK does not operate
1109         * in suppress mode.
1110         */
1111        reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
1112
1113        if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
1114            (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
1115            (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
1116                reg->config |= LBR_NO_INFO;
1117
1118        return 0;
1119}
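
/*
 * Editor's worked example, not part of the upstream file, for the
 * suppress-mode encoding above: with the snb/hsw maps later in this file, a
 * request for user-level calls maps to
 * mask = LBR_USER | LBR_REL_CALL | LBR_IND_CALL | LBR_FAR = 0x11a, and
 * mask ^ (LBR_SEL_MASK & ~LBR_CALL_STACK) = 0x11a ^ 0x1ff = 0x0e5, i.e. the
 * selected branch types and privilege level get their suppress bits cleared
 * while everything else (ring0, JCC, returns, relative/indirect jumps) stays
 * suppressed.
 */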
1120
1121int intel_pmu_setup_lbr_filter(struct perf_event *event)
1122{
1123        int ret = 0;
1124
1125        /*
1126         * no LBR on this PMU
1127         */
1128        if (!x86_pmu.lbr_nr)
1129                return -EOPNOTSUPP;
1130
1131        /*
1132         * setup SW LBR filter
1133         */
1134        ret = intel_pmu_setup_sw_lbr_filter(event);
1135        if (ret)
1136                return ret;
1137
1138        /*
1139         * setup HW LBR filter, if any
1140         */
1141        if (x86_pmu.lbr_sel_map)
1142                ret = intel_pmu_setup_hw_lbr_filter(event);
1143
1144        return ret;
1145}
1146
1147/*
 1148 * Return the type of control flow change at address "from". The
 1149 * instruction is not necessarily a branch (in case of interrupt).
1150 *
1151 * The branch type returned also includes the priv level of the
1152 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
1153 *
1154 * If a branch type is unknown OR the instruction cannot be
1155 * decoded (e.g., text page not present), then X86_BR_NONE is
1156 * returned.
1157 */
1158static int branch_type(unsigned long from, unsigned long to, int abort)
1159{
1160        struct insn insn;
1161        void *addr;
1162        int bytes_read, bytes_left;
1163        int ret = X86_BR_NONE;
1164        int ext, to_plm, from_plm;
1165        u8 buf[MAX_INSN_SIZE];
1166        int is64 = 0;
1167
1168        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1169        from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;
1170
1171        /*
 1172         * The addresses may be zero if the LBR did not fill up after a
 1173         * reset by the time we get a PMU interrupt.
1174         */
1175        if (from == 0 || to == 0)
1176                return X86_BR_NONE;
1177
1178        if (abort)
1179                return X86_BR_ABORT | to_plm;
1180
1181        if (from_plm == X86_BR_USER) {
1182                /*
 1183                 * This can happen when measuring at the user level only
 1184                 * and we interrupt in a kernel thread, e.g., idle.
1185                 */
1186                if (!current->mm)
1187                        return X86_BR_NONE;
1188
1189                /* may fail if text not present */
1190                bytes_left = copy_from_user_nmi(buf, (void __user *)from,
1191                                                MAX_INSN_SIZE);
1192                bytes_read = MAX_INSN_SIZE - bytes_left;
1193                if (!bytes_read)
1194                        return X86_BR_NONE;
1195
1196                addr = buf;
1197        } else {
1198                /*
1199                 * The LBR logs any address in the IP, even if the IP just
1200                 * faulted. This means userspace can control the from address.
 1201                 * Ensure we don't blindly read any address by validating it is
1202                 * a known text address.
1203                 */
1204                if (kernel_text_address(from)) {
1205                        addr = (void *)from;
1206                        /*
1207                         * Assume we can get the maximum possible size
1208                         * when grabbing kernel data.  This is not
1209                         * _strictly_ true since we could possibly be
1210                         * executing up next to a memory hole, but
1211                         * it is very unlikely to be a problem.
1212                         */
1213                        bytes_read = MAX_INSN_SIZE;
1214                } else {
1215                        return X86_BR_NONE;
1216                }
1217        }
1218
1219        /*
 1220         * The decoder needs to know the ABI, especially
 1221         * on 64-bit systems running 32-bit apps.
1222         */
1223#ifdef CONFIG_X86_64
1224        is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
1225#endif
1226        insn_init(&insn, addr, bytes_read, is64);
1227        insn_get_opcode(&insn);
1228        if (!insn.opcode.got)
1229                return X86_BR_ABORT;
1230
1231        switch (insn.opcode.bytes[0]) {
1232        case 0xf:
1233                switch (insn.opcode.bytes[1]) {
1234                case 0x05: /* syscall */
1235                case 0x34: /* sysenter */
1236                        ret = X86_BR_SYSCALL;
1237                        break;
1238                case 0x07: /* sysret */
1239                case 0x35: /* sysexit */
1240                        ret = X86_BR_SYSRET;
1241                        break;
1242                case 0x80 ... 0x8f: /* conditional */
1243                        ret = X86_BR_JCC;
1244                        break;
1245                default:
1246                        ret = X86_BR_NONE;
1247                }
1248                break;
1249        case 0x70 ... 0x7f: /* conditional */
1250                ret = X86_BR_JCC;
1251                break;
1252        case 0xc2: /* near ret */
1253        case 0xc3: /* near ret */
1254        case 0xca: /* far ret */
1255        case 0xcb: /* far ret */
1256                ret = X86_BR_RET;
1257                break;
1258        case 0xcf: /* iret */
1259                ret = X86_BR_IRET;
1260                break;
1261        case 0xcc ... 0xce: /* int */
1262                ret = X86_BR_INT;
1263                break;
1264        case 0xe8: /* call near rel */
1265                insn_get_immediate(&insn);
1266                if (insn.immediate1.value == 0) {
1267                        /* zero length call */
1268                        ret = X86_BR_ZERO_CALL;
1269                        break;
1270                }
1271                fallthrough;
1272        case 0x9a: /* call far absolute */
1273                ret = X86_BR_CALL;
1274                break;
1275        case 0xe0 ... 0xe3: /* loop jmp */
1276                ret = X86_BR_JCC;
1277                break;
1278        case 0xe9 ... 0xeb: /* jmp */
1279                ret = X86_BR_JMP;
1280                break;
1281        case 0xff: /* call near absolute, call far absolute ind */
1282                insn_get_modrm(&insn);
1283                ext = (insn.modrm.bytes[0] >> 3) & 0x7;
1284                switch (ext) {
1285                case 2: /* near ind call */
1286                case 3: /* far ind call */
1287                        ret = X86_BR_IND_CALL;
1288                        break;
1289                case 4:
1290                case 5:
1291                        ret = X86_BR_IND_JMP;
1292                        break;
1293                }
1294                break;
1295        default:
1296                ret = X86_BR_NONE;
1297        }
1298        /*
 1299         * Interrupts, traps and faults (and thus ring transitions) may
 1300         * occur on any instruction. Thus, to classify them correctly,
 1301         * we need to first look at the from and to priv levels. If they
 1302         * are different and to is in the kernel, then it indicates
 1303         * a ring transition. If the from instruction is not a ring
 1304         * transition instruction (syscall, sysenter, int), then it means
 1305         * it was an irq, trap or fault.
 1306         *
 1307         * We have no way of detecting kernel to kernel faults.
1308         */
1309        if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
1310            && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
1311                ret = X86_BR_IRQ;
1312
1313        /*
 1314         * The branch priv level is determined by the target, as
 1315         * is done by HW when LBR_SELECT is implemented.
1316         */
1317        if (ret != X86_BR_NONE)
1318                ret |= to_plm;
1319
1320        return ret;
1321}
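
/*
 * Editor's example, not part of the upstream file: a "call +0" (opcode 0xe8
 * with a zero displacement, commonly used to read the instruction pointer in
 * 32-bit PIC code) decodes above as X86_BR_ZERO_CALL; with a user-space
 * target it is reported as X86_BR_ZERO_CALL | X86_BR_USER.
 */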
1322
1323#define X86_BR_TYPE_MAP_MAX     16
1324
1325static int branch_map[X86_BR_TYPE_MAP_MAX] = {
1326        PERF_BR_CALL,           /* X86_BR_CALL */
1327        PERF_BR_RET,            /* X86_BR_RET */
1328        PERF_BR_SYSCALL,        /* X86_BR_SYSCALL */
1329        PERF_BR_SYSRET,         /* X86_BR_SYSRET */
1330        PERF_BR_UNKNOWN,        /* X86_BR_INT */
1331        PERF_BR_UNKNOWN,        /* X86_BR_IRET */
1332        PERF_BR_COND,           /* X86_BR_JCC */
1333        PERF_BR_UNCOND,         /* X86_BR_JMP */
1334        PERF_BR_UNKNOWN,        /* X86_BR_IRQ */
1335        PERF_BR_IND_CALL,       /* X86_BR_IND_CALL */
1336        PERF_BR_UNKNOWN,        /* X86_BR_ABORT */
1337        PERF_BR_UNKNOWN,        /* X86_BR_IN_TX */
1338        PERF_BR_UNKNOWN,        /* X86_BR_NO_TX */
1339        PERF_BR_CALL,           /* X86_BR_ZERO_CALL */
1340        PERF_BR_UNKNOWN,        /* X86_BR_CALL_STACK */
1341        PERF_BR_IND,            /* X86_BR_IND_JMP */
1342};
1343
1344static int
1345common_branch_type(int type)
1346{
1347        int i;
1348
1349        type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
1350
1351        if (type) {
1352                i = __ffs(type);
1353                if (i < X86_BR_TYPE_MAP_MAX)
1354                        return branch_map[i];
1355        }
1356
1357        return PERF_BR_UNKNOWN;
1358}
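
/*
 * Editor's worked example, not part of the upstream file: for
 * type = X86_BR_JCC | X86_BR_KERNEL, the shift by 2 drops the privilege bits,
 * __ffs() then returns 6 (X86_BR_JCC is bit 8), and branch_map[6] yields
 * PERF_BR_COND.
 */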
1359
1360enum {
1361        ARCH_LBR_BR_TYPE_JCC                    = 0,
1362        ARCH_LBR_BR_TYPE_NEAR_IND_JMP           = 1,
1363        ARCH_LBR_BR_TYPE_NEAR_REL_JMP           = 2,
1364        ARCH_LBR_BR_TYPE_NEAR_IND_CALL          = 3,
1365        ARCH_LBR_BR_TYPE_NEAR_REL_CALL          = 4,
1366        ARCH_LBR_BR_TYPE_NEAR_RET               = 5,
1367        ARCH_LBR_BR_TYPE_KNOWN_MAX              = ARCH_LBR_BR_TYPE_NEAR_RET,
1368
1369        ARCH_LBR_BR_TYPE_MAP_MAX                = 16,
1370};
1371
1372static const int arch_lbr_br_type_map[ARCH_LBR_BR_TYPE_MAP_MAX] = {
1373        [ARCH_LBR_BR_TYPE_JCC]                  = X86_BR_JCC,
1374        [ARCH_LBR_BR_TYPE_NEAR_IND_JMP]         = X86_BR_IND_JMP,
1375        [ARCH_LBR_BR_TYPE_NEAR_REL_JMP]         = X86_BR_JMP,
1376        [ARCH_LBR_BR_TYPE_NEAR_IND_CALL]        = X86_BR_IND_CALL,
1377        [ARCH_LBR_BR_TYPE_NEAR_REL_CALL]        = X86_BR_CALL,
1378        [ARCH_LBR_BR_TYPE_NEAR_RET]             = X86_BR_RET,
1379};
1380
1381/*
 1382 * Implement the actual branch filter based on user demand.
1383 * Hardware may not exactly satisfy that request, thus
1384 * we need to inspect opcodes. Mismatched branches are
1385 * discarded. Therefore, the number of branches returned
 1386 * in a PERF_SAMPLE_BRANCH_STACK sample may vary.
1387 */
1388static void
1389intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
1390{
1391        u64 from, to;
1392        int br_sel = cpuc->br_sel;
1393        int i, j, type, to_plm;
1394        bool compress = false;
1395
1396        /* if sampling all branches, then nothing to filter */
1397        if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
1398            ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
1399                return;
1400
1401        for (i = 0; i < cpuc->lbr_stack.nr; i++) {
1402
1403                from = cpuc->lbr_entries[i].from;
1404                to = cpuc->lbr_entries[i].to;
1405                type = cpuc->lbr_entries[i].type;
1406
1407                /*
 1408                 * Parse the branch type recorded in the LBR_x_INFO MSR.
 1409                 * OTHER_BRANCH decoding is not supported for now; the
 1410                 * OTHER_BRANCH branch type still relies on software decoding.
1411                 */
1412                if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
1413                    type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
1414                        to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
1415                        type = arch_lbr_br_type_map[type] | to_plm;
1416                } else
1417                        type = branch_type(from, to, cpuc->lbr_entries[i].abort);
1418                if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
1419                        if (cpuc->lbr_entries[i].in_tx)
1420                                type |= X86_BR_IN_TX;
1421                        else
1422                                type |= X86_BR_NO_TX;
1423                }
1424
1425                /* if type does not correspond, then discard */
1426                if (type == X86_BR_NONE || (br_sel & type) != type) {
1427                        cpuc->lbr_entries[i].from = 0;
1428                        compress = true;
1429                }
1430
1431                if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
1432                        cpuc->lbr_entries[i].type = common_branch_type(type);
1433        }
1434
1435        if (!compress)
1436                return;
1437
1438        /* remove all entries with from=0 */
1439        for (i = 0; i < cpuc->lbr_stack.nr; ) {
1440                if (!cpuc->lbr_entries[i].from) {
1441                        j = i;
1442                        while (++j < cpuc->lbr_stack.nr)
1443                                cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
1444                        cpuc->lbr_stack.nr--;
1445                        if (!cpuc->lbr_entries[i].from)
1446                                continue;
1447                }
1448                i++;
1449        }
1450}
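
/*
 * Editor's note, not part of the upstream file, on the compress pass above:
 * with entries [A, 0, 0, B] and nr = 4, the inner while shifts the tail left
 * by one and the outer loop re-checks slot i, so two passes over slot 1
 * remove both discarded entries and the result is [A, B] with nr = 2.
 */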
1451
1452void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
1453{
1454        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1455
1456        /* Cannot get TOS for large PEBS and Arch LBR */
1457        if (static_cpu_has(X86_FEATURE_ARCH_LBR) ||
1458            (cpuc->n_pebs == cpuc->n_large_pebs))
1459                cpuc->lbr_stack.hw_idx = -1ULL;
1460        else
1461                cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
1462
1463        intel_pmu_store_lbr(cpuc, lbr);
1464        intel_pmu_lbr_filter(cpuc);
1465}
1466
1467/*
1468 * Map interface branch filters onto LBR filters
1469 */
1470static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1471        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
1472        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
1473        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
1474        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
1475        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_REL_JMP
1476                                                | LBR_IND_JMP | LBR_FAR,
1477        /*
1478         * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
1479         */
1480        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
1481         LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
1482        /*
1483         * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
1484         */
1485        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
1486        [PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
1487        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
1488};
1489
1490static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1491        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
1492        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
1493        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
1494        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
1495        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
1496        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
1497                                                | LBR_FAR,
1498        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
1499        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
1500        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
1501        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
1502};
1503
1504static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1505        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = LBR_ANY,
1506        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = LBR_USER,
1507        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = LBR_KERNEL,
1508        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
1509        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = LBR_RETURN | LBR_FAR,
1510        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = LBR_REL_CALL | LBR_IND_CALL
1511                                                | LBR_FAR,
1512        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
1513        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
1514        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
1515                                                | LBR_RETURN | LBR_CALL_STACK,
1516        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
1517        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = LBR_REL_CALL,
1518};
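/*
 * Illustrative sketch (hypothetical helper): the
 * PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT row above backs LBR call-stack mode,
 * which a profiler requests via perf_event_attr roughly like this before
 * calling perf_event_open().
 */
static void example_request_lbr_call_stack(struct perf_event_attr *attr)
{
	attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
				   PERF_SAMPLE_BRANCH_USER;
}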
1519
1520static int arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
1521        [PERF_SAMPLE_BRANCH_ANY_SHIFT]          = ARCH_LBR_ANY,
1522        [PERF_SAMPLE_BRANCH_USER_SHIFT]         = ARCH_LBR_USER,
1523        [PERF_SAMPLE_BRANCH_KERNEL_SHIFT]       = ARCH_LBR_KERNEL,
1524        [PERF_SAMPLE_BRANCH_HV_SHIFT]           = LBR_IGN,
1525        [PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]   = ARCH_LBR_RETURN |
1526                                                  ARCH_LBR_OTHER_BRANCH,
1527        [PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]     = ARCH_LBR_REL_CALL |
1528                                                  ARCH_LBR_IND_CALL |
1529                                                  ARCH_LBR_OTHER_BRANCH,
1530        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = ARCH_LBR_IND_CALL,
1531        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = ARCH_LBR_JCC,
1532        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = ARCH_LBR_REL_CALL |
1533                                                  ARCH_LBR_IND_CALL |
1534                                                  ARCH_LBR_RETURN |
1535                                                  ARCH_LBR_CALL_STACK,
1536        [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = ARCH_LBR_IND_JMP,
1537        [PERF_SAMPLE_BRANCH_CALL_SHIFT]         = ARCH_LBR_REL_CALL,
1538};
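/*
 * Illustrative sketch (hypothetical helper, condensed from the driver's real
 * filter setup): values taken from the legacy sel_maps are positive logic,
 * while the MSR_LBR_SELECT bits themselves are suppression bits, so they are
 * inverted before being written (call-stack excepted, it is a plain enable
 * bit).  A value built from arch_lbr_ctl_map is used as-is, since the
 * ARCH_LBR_CTL branch-type bits directly enable capture.
 */
static u64 example_lbr_select_from_mask(u64 mask)
{
	return mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
}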
1539
1540/* core */
1541void __init intel_pmu_lbr_init_core(void)
1542{
1543        x86_pmu.lbr_nr     = 4;
1544        x86_pmu.lbr_tos    = MSR_LBR_TOS;
1545        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1546        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1547
1548        /*
1549         * SW branch filter usage:
1550         * - compensate for lack of HW filter
1551         */
1552}
1553
1554/* nehalem/westmere */
1555void __init intel_pmu_lbr_init_nhm(void)
1556{
1557        x86_pmu.lbr_nr     = 16;
1558        x86_pmu.lbr_tos    = MSR_LBR_TOS;
1559        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1560        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1561
1562        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1563        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1564
1565        /*
1566         * SW branch filter usage:
1567         * - workaround LBR_SEL errata (see above)
1568         * - support syscall, sysret capture.
1569         *   That requires LBR_FAR, but that means far
1570         *   jmps need to be filtered out
1571         */
1572}
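/*
 * Illustrative sketch (hypothetical helper): the software filter pass the
 * comments above refer to.  With LBR_FAR enabled so that syscall/sysret are
 * seen, unwanted far transfers are dropped afterwards based on the X86_BR_*
 * class assigned to each record by the decoder in this file.
 */
static inline bool example_keep_lbr_entry(int type, int br_sel)
{
	return type != X86_BR_NONE && (br_sel & type) == type;
}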
1573
1574/* sandy bridge */
1575void __init intel_pmu_lbr_init_snb(void)
1576{
1577        x86_pmu.lbr_nr   = 16;
1578        x86_pmu.lbr_tos  = MSR_LBR_TOS;
1579        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1580        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1581
1582        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1583        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1584
1585        /*
1586         * SW branch filter usage:
1587         * - support syscall, sysret capture.
1588         *   That requires LBR_FAR, but that means far
1589         *   jmps need to be filtered out
1590         */
1591}
1592
1593static inline struct kmem_cache *
1594create_lbr_kmem_cache(size_t size, size_t align)
1595{
1596        return kmem_cache_create("x86_lbr", size, align, 0, NULL);
1597}
1598
1599/* haswell */
1600void intel_pmu_lbr_init_hsw(void)
1601{
1602        size_t size = sizeof(struct x86_perf_task_context);
1603
1604        x86_pmu.lbr_nr   = 16;
1605        x86_pmu.lbr_tos  = MSR_LBR_TOS;
1606        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1607        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1608
1609        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1610        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1611
1612        x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1613
1614        if (lbr_from_signext_quirk_needed())
1615                static_branch_enable(&lbr_from_quirk_key);
1616}
1617
1618/* skylake */
1619__init void intel_pmu_lbr_init_skl(void)
1620{
1621        size_t size = sizeof(struct x86_perf_task_context);
1622
1623        x86_pmu.lbr_nr   = 32;
1624        x86_pmu.lbr_tos  = MSR_LBR_TOS;
1625        x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
1626        x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
1627        x86_pmu.lbr_info = MSR_LBR_INFO_0;
1628
1629        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1630        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
1631
1632        x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1633
1634        /*
1635         * SW branch filter usage:
1636         * - support syscall, sysret capture.
1637         *   That requires LBR_FAR, but that means far
1638         *   jmps need to be filtered out
1639         */
1640}
1641
1642/* atom */
1643void __init intel_pmu_lbr_init_atom(void)
1644{
1645        /*
1646         * only models starting at stepping 10 seem
1647         * to have an operational LBR which can freeze
1648         * on PMU interrupt
1649         */
1650        if (boot_cpu_data.x86_model == 28
1651            && boot_cpu_data.x86_stepping < 10) {
1652                pr_cont("LBR disabled due to erratum");
1653                return;
1654        }
1655
1656        x86_pmu.lbr_nr     = 8;
1657        x86_pmu.lbr_tos    = MSR_LBR_TOS;
1658        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1659        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1660
1661        /*
1662         * SW branch filter usage:
1663         * - compensate for lack of HW filter
1664         */
1665}
1666
1667/* slm */
1668void __init intel_pmu_lbr_init_slm(void)
1669{
1670        x86_pmu.lbr_nr     = 8;
1671        x86_pmu.lbr_tos    = MSR_LBR_TOS;
1672        x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
1673        x86_pmu.lbr_to     = MSR_LBR_CORE_TO;
1674
1675        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1676        x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;
1677
1678        /*
1679         * SW branch filter usage:
1680         * - compensate for lack of HW filter
1681         */
1682        pr_cont("8-deep LBR, ");
1683}
1684
1685/* Knights Landing */
1686void intel_pmu_lbr_init_knl(void)
1687{
1688        x86_pmu.lbr_nr     = 8;
1689        x86_pmu.lbr_tos    = MSR_LBR_TOS;
1690        x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
1691        x86_pmu.lbr_to     = MSR_LBR_NHM_TO;
1692
1693        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1694        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
1695
1696        /* Knights Landing does have the MISPREDICT bit, despite reporting the LIP format */
1697        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1698                x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1699}
1700
1701/*
1702 * LBR state size is variable based on the max number of registers.
1703 * This calculates the expected state size, which should match
1704 * what the hardware enumerates for the size of XFEATURE_LBR.
1705 */
1706static inline unsigned int get_lbr_state_size(void)
1707{
1708        return sizeof(struct arch_lbr_state) +
1709               x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1710}
1711
1712static bool is_arch_lbr_xsave_available(void)
1713{
1714        if (!boot_cpu_has(X86_FEATURE_XSAVES))
1715                return false;
1716
1717        /*
1718         * Check the LBR state with the corresponding software structure.
1719         * Disable LBR XSAVES support if the size doesn't match.
1720         */
1721        if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
1722                return false;
1723
1724        return true;
1725}
1726
1727void __init intel_pmu_arch_lbr_init(void)
1728{
1729        struct pmu *pmu = x86_get_pmu();
1730        union cpuid28_eax eax;
1731        union cpuid28_ebx ebx;
1732        union cpuid28_ecx ecx;
1733        unsigned int unused_edx;
1734        bool arch_lbr_xsave;
1735        size_t size;
1736        u64 lbr_nr;
1737
1738        /* Arch LBR Capabilities */
1739        cpuid(28, &eax.full, &ebx.full, &ecx.full, &unused_edx);
1740
1741        lbr_nr = fls(eax.split.lbr_depth_mask) * 8;
1742        if (!lbr_nr)
1743                goto clear_arch_lbr;
1744
1745        /* Apply the max depth of Arch LBR */
1746        if (wrmsrl_safe(MSR_ARCH_LBR_DEPTH, lbr_nr))
1747                goto clear_arch_lbr;
1748
1749        x86_pmu.lbr_depth_mask = eax.split.lbr_depth_mask;
1750        x86_pmu.lbr_deep_c_reset = eax.split.lbr_deep_c_reset;
1751        x86_pmu.lbr_lip = eax.split.lbr_lip;
1752        x86_pmu.lbr_cpl = ebx.split.lbr_cpl;
1753        x86_pmu.lbr_filter = ebx.split.lbr_filter;
1754        x86_pmu.lbr_call_stack = ebx.split.lbr_call_stack;
1755        x86_pmu.lbr_mispred = ecx.split.lbr_mispred;
1756        x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr;
1757        x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
1758        x86_pmu.lbr_nr = lbr_nr;
1759
1760
1761        arch_lbr_xsave = is_arch_lbr_xsave_available();
1762        if (arch_lbr_xsave) {
1763                size = sizeof(struct x86_perf_task_context_arch_lbr_xsave) +
1764                       get_lbr_state_size();
1765                pmu->task_ctx_cache = create_lbr_kmem_cache(size,
1766                                                            XSAVE_ALIGNMENT);
1767        }
1768
1769        if (!pmu->task_ctx_cache) {
1770                arch_lbr_xsave = false;
1771
1772                size = sizeof(struct x86_perf_task_context_arch_lbr) +
1773                       lbr_nr * sizeof(struct lbr_entry);
1774                pmu->task_ctx_cache = create_lbr_kmem_cache(size, 0);
1775        }
1776
1777        x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;
1778        x86_pmu.lbr_to = MSR_ARCH_LBR_TO_0;
1779        x86_pmu.lbr_info = MSR_ARCH_LBR_INFO_0;
1780
1781        /* LBR callstack requires both CPL and Branch Filtering support */
1782        if (!x86_pmu.lbr_cpl ||
1783            !x86_pmu.lbr_filter ||
1784            !x86_pmu.lbr_call_stack)
1785                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] = LBR_NOT_SUPP;
1786
1787        if (!x86_pmu.lbr_cpl) {
1788                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_USER_SHIFT] = LBR_NOT_SUPP;
1789                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_KERNEL_SHIFT] = LBR_NOT_SUPP;
1790        } else if (!x86_pmu.lbr_filter) {
1791                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_SHIFT] = LBR_NOT_SUPP;
1792                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT] = LBR_NOT_SUPP;
1793                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] = LBR_NOT_SUPP;
1794                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_NOT_SUPP;
1795                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_COND_SHIFT] = LBR_NOT_SUPP;
1796                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_NOT_SUPP;
1797                arch_lbr_ctl_map[PERF_SAMPLE_BRANCH_CALL_SHIFT] = LBR_NOT_SUPP;
1798        }
1799
1800        x86_pmu.lbr_ctl_mask = ARCH_LBR_CTL_MASK;
1801        x86_pmu.lbr_ctl_map  = arch_lbr_ctl_map;
1802
1803        if (!x86_pmu.lbr_cpl && !x86_pmu.lbr_filter)
1804                x86_pmu.lbr_ctl_map = NULL;
1805
1806        x86_pmu.lbr_reset = intel_pmu_arch_lbr_reset;
1807        if (arch_lbr_xsave) {
1808                x86_pmu.lbr_save = intel_pmu_arch_lbr_xsaves;
1809                x86_pmu.lbr_restore = intel_pmu_arch_lbr_xrstors;
1810                x86_pmu.lbr_read = intel_pmu_arch_lbr_read_xsave;
1811                pr_cont("XSAVE ");
1812        } else {
1813                x86_pmu.lbr_save = intel_pmu_arch_lbr_save;
1814                x86_pmu.lbr_restore = intel_pmu_arch_lbr_restore;
1815                x86_pmu.lbr_read = intel_pmu_arch_lbr_read;
1816        }
1817
1818        pr_cont("Architectural LBR, ");
1819
1820        return;
1821
1822clear_arch_lbr:
1823        clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
1824}
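/*
 * Illustrative sketch of the depth computation in intel_pmu_arch_lbr_init()
 * above: CPUID leaf 0x1c reports supported LBR depths as a bitmap in
 * multiples of eight, so the highest set bit gives the maximum depth.  For
 * example, a depth mask of 0x0f (8/16/24/32 supported) yields
 * fls(0x0f) * 8 = 32 entries.  The helper name is hypothetical.
 */
static inline u64 example_arch_lbr_max_depth(unsigned int depth_mask)
{
	return fls(depth_mask) * 8;
}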
1825
1826/**
1827 * x86_perf_get_lbr - get the LBR records information
1828 *
1829 * @lbr: the caller's memory to store the LBR records information
1830 *
1831 * Returns: 0 on success, with the LBR records information filled in @lbr
1832 */
1833int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
1834{
1835        int lbr_fmt = x86_pmu.intel_cap.lbr_format;
1836
1837        lbr->nr = x86_pmu.lbr_nr;
1838        lbr->from = x86_pmu.lbr_from;
1839        lbr->to = x86_pmu.lbr_to;
1840        lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
1841
1842        return 0;
1843}
1844EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
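/*
 * Illustrative sketch (hypothetical caller, e.g. a virtualization backend):
 * the export above lets other kernel code query the LBR geometry, for
 * instance to size guest-visible LBR MSR ranges or record buffers.
 */
static int example_query_lbr_geometry(void)
{
	struct x86_pmu_lbr lbr;

	x86_perf_get_lbr(&lbr);
	if (!lbr.nr)
		return -ENODEV;	/* no usable LBR on this CPU */

	pr_info("LBR: %u entries, FROM MSR base 0x%x\n", lbr.nr, lbr.from);
	return 0;
}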
1845
1846struct event_constraint vlbr_constraint =
1847        __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
1848                          FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
1849
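/*
 * Illustrative sketch (hypothetical, modelled on how a hypervisor-side event
 * might claim the LBR facility): the constraint above pins such an event to
 * the fixed VLBR counter index and tags it with PERF_X86_EVENT_LBR_SELECT.
 * A host-side event created for guest LBR use could be configured roughly
 * along these lines.
 */
static void example_init_vlbr_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_RAW;
	attr->config = INTEL_FIXED_VLBR_EVENT;
	attr->exclude_host = 1;	/* count only while a guest is running */
}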