linux/arch/x86/include/asm/spec_ctrl.h
#ifndef _ASM_X86_SPEC_CTRL_H
#define _ASM_X86_SPEC_CTRL_H

#define SPEC_CTRL_PCP_IBRS_ENTRY        (1<<0)
#define SPEC_CTRL_PCP_IBRS_EXIT         (1<<1)

#define SPEC_CTRL_PCP_IBRS (SPEC_CTRL_PCP_IBRS_ENTRY|SPEC_CTRL_PCP_IBRS_EXIT)

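/*
 * Assembly-side accessors for the per-cpu spec_ctrl_pcp structure.  The
 * KERNEL_IBRS_SPEC_CTRL_* offsets are expected to come from asm-offsets
 * for the fields of struct kernel_ibrs_spec_ctrl (defined below), so that
 * e.g. "movl IBRS_ENTRY_PCP, %eax" loads spec_ctrl_pcp.entry for the
 * current CPU.
 */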
#define IBRS_ENABLED_PCP        PER_CPU_VAR(spec_ctrl_pcp + \
                                KERNEL_IBRS_SPEC_CTRL_enabled)
#define IBRS_ENTRY_PCP          PER_CPU_VAR(spec_ctrl_pcp + \
                                KERNEL_IBRS_SPEC_CTRL_entry)
#define IBRS_EXIT_PCP           PER_CPU_VAR(spec_ctrl_pcp + \
                                KERNEL_IBRS_SPEC_CTRL_exit)
#define IBRS_HI32_PCP           PER_CPU_VAR(spec_ctrl_pcp + \
                                KERNEL_IBRS_SPEC_CTRL_hi32)

#ifdef __ASSEMBLY__

#include <asm/msr-index.h>

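/*
 * Kernel-entry IBRS macros.  __IBRS_ENTRY writes the per-cpu kernel-entry
 * value into MSR_IA32_SPEC_CTRL and clobbers %eax, %ecx and %edx (the
 * wrmsr operands).  IBRS_ENTRY preserves those registers around the write;
 * IBRS_ENTRY_CLOBBER is for entry paths where they are already dead.  The
 * disabled path executes an lfence instead, mirroring the "rmb to prevent
 * wrong speculation" pattern used by the C helpers below.
 */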
.macro __IBRS_ENTRY
        movl IBRS_HI32_PCP, %edx
        movl IBRS_ENTRY_PCP, %eax
        movl $MSR_IA32_SPEC_CTRL, %ecx
        wrmsr
.endm

.macro IBRS_ENTRY
        testl $SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
        jz .Lskip_\@

        pushq %rax
        pushq %rcx
        pushq %rdx
        __IBRS_ENTRY
        popq %rdx
        popq %rcx
        popq %rax
        jmp .Lend_\@

.Lskip_\@:
        lfence
.Lend_\@:
.endm

.macro IBRS_ENTRY_CLOBBER
        testl $SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
        jz .Lskip_\@

        __IBRS_ENTRY
        jmp .Lend_\@

.Lskip_\@:
        lfence
.Lend_\@:
.endm

#define NO_IBRS_RESTORE         (-1)    /* No restore on exit */

/*
 * The save_reg is initialized to NO_IBRS_RESTORE in case IBRS gets
 * enabled in the middle of an exception; this avoids the very remote risk
 * of writing random save_reg content into the SPEC_CTRL MSR in that case.
 */
.macro IBRS_ENTRY_SAVE_AND_CLOBBER save_reg:req
        movl $NO_IBRS_RESTORE, \save_reg
        testl $SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
        jz .Lskip_\@

        movl $MSR_IA32_SPEC_CTRL, %ecx
        rdmsr
        /*
         * If the content of the MSR matches the kernel entry value,
         * we should still rewrite the MSR anyway to enforce the
         * barrier-like semantics of some IBRS implementations.
         * However, we can leave save_reg as NO_IBRS_RESTORE
         * so that we won't do a rewrite on exit.
         *
         * %edx is initialized by the rdmsr above, so it doesn't need
         * to be touched.
         */
        movl IBRS_ENTRY_PCP, %ecx
        cmpl %eax, %ecx
        je   .Lwrmsr_\@

        movl %eax, \save_reg
.Lwrmsr_\@:
        movl $MSR_IA32_SPEC_CTRL, %ecx
        wrmsr
        jmp .Lend_\@

.Lskip_\@:
        lfence
.Lend_\@:
.endm

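/*
 * Kernel-exit IBRS macros, the mirror image of the entry macros above.
 * __IBRS_EXIT writes the per-cpu exit value into MSR_IA32_SPEC_CTRL;
 * IBRS_EXIT preserves %rax/%rcx/%rdx around the write while
 * IBRS_EXIT_CLOBBER does not.  IBRS_EXIT_RESTORE_CLOBBER writes back the
 * value that IBRS_ENTRY_SAVE_AND_CLOBBER captured in \save_reg, unless it
 * was left at NO_IBRS_RESTORE.
 */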
.macro __IBRS_EXIT
        movl IBRS_HI32_PCP, %edx
        movl IBRS_EXIT_PCP, %eax
        movl $MSR_IA32_SPEC_CTRL, %ecx
        wrmsr
.endm

.macro IBRS_EXIT
        testl $SPEC_CTRL_PCP_IBRS_EXIT, IBRS_ENABLED_PCP
        jz .Lskip_\@

        pushq %rax
        pushq %rcx
        pushq %rdx
        __IBRS_EXIT
        popq %rdx
        popq %rcx
        popq %rax

.Lskip_\@:
.endm

.macro IBRS_EXIT_RESTORE_CLOBBER save_reg:req
        testl $SPEC_CTRL_PCP_IBRS, IBRS_ENABLED_PCP
        jz .Lskip_\@

        cmpl $NO_IBRS_RESTORE, \save_reg
        je .Lskip_\@

        movl $MSR_IA32_SPEC_CTRL, %ecx
        movl IBRS_HI32_PCP, %edx
        movl \save_reg, %eax
        wrmsr

.Lskip_\@:
.endm

.macro IBRS_EXIT_CLOBBER
        testl $SPEC_CTRL_PCP_IBRS_EXIT, IBRS_ENABLED_PCP
        jz .Lskip_\@

        __IBRS_EXIT

.Lskip_\@:
.endm

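/*
 * Register-clearing macros for the exit-to-userspace paths.  Zeroing
 * registers that are not part of the return state avoids leaking stale
 * kernel values and reduces the state available to speculative-execution
 * gadgets.
 */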
.macro CLEAR_EXTRA_REGS
        xorq %r15, %r15
        xorq %r14, %r14
        xorq %r13, %r13
        xorq %r12, %r12
        xorq %rbp, %rbp
        xorq %rbx, %rbx
.endm

.macro CLEAR_R8_TO_R15
        xorq %r15, %r15
        xorq %r14, %r14
        xorq %r13, %r13
        xorq %r12, %r12
        xorq %r11, %r11
        xorq %r10, %r10
        xorq %r9, %r9
        xorq %r8, %r8
.endm

.macro CLEAR_R10_TO_R15
        xorq %r15, %r15
        xorq %r14, %r14
        xorq %r13, %r13
        xorq %r12, %r12
        xorq %r11, %r11
        xorq %r10, %r10
.endm

#else /* __ASSEMBLY__ */

#include <linux/ptrace.h>
#include <asm/microcode.h>
#include <asm/nospec-branch.h>

extern struct static_key retp_enabled_key;
extern struct static_key ibrs_present_key;
extern struct static_key ssbd_userset_key;

/*
 * Special SPEC_CTRL MSR value to write the content of the spec_ctrl_pcp.
 */
#define SPEC_CTRL_MSR_REFRESH   ((unsigned)-1)

extern void spec_ctrl_rescan_cpuid(void);
extern void spec_ctrl_init(void);
extern void spec_ctrl_cpu_init(void);
extern void ssb_select_mitigation(void);
extern void ssb_print_mitigation(void);

bool spec_ctrl_force_enable_ibrs(void);
bool spec_ctrl_cond_enable_ibrs(bool full_retpoline);
bool spec_ctrl_enable_ibrs_always(void);
void spec_ctrl_enable_ibrs_enhanced(void);
bool spec_ctrl_force_enable_ibp_disabled(void);
bool spec_ctrl_cond_enable_ibp_disabled(void);
void spec_ctrl_enable_retpoline(void);
bool spec_ctrl_enable_retpoline_ibrs_user(void);
void spec_ctrl_set_ssbd(bool ssbd_on);

enum spectre_v2_mitigation spec_ctrl_get_mitigation(void);

bool unprotected_firmware_begin(void);
void unprotected_firmware_end(bool ibrs_on);

/*
 * Percpu IBRS kernel entry/exit control structure
 */
struct kernel_ibrs_spec_ctrl {
        unsigned int enabled;   /* Entry and exit enabled control bits */
        unsigned int exit;      /* Lower 32-bit of SPEC_CTRL MSR for exit */
        union {
                struct {
                        /*
                         * The lower and upper 32-bit of SPEC_CTRL MSR
                         * when entering kernel.
                         */
                        unsigned int entry;
                        unsigned int hi32;
                };
                u64     entry64;        /* Full 64-bit SPEC_CTRL MSR */
        };
};

DECLARE_PER_CPU_USER_MAPPED(struct kernel_ibrs_spec_ctrl, spec_ctrl_pcp);

extern void x86_amd_ssbd_enable(void);

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
extern u64 x86_spec_ctrl_mask;

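/*
 * Converters between the TIF_SSBD thread flag and the SPEC_CTRL_SSBD MSR
 * bit: the flag bit is shifted by (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT) so it
 * lands on (or comes from) the SSBD bit of MSR_IA32_SPEC_CTRL.  The
 * BUILD_BUG_ON()s catch a bit layout where TIF_SSBD sits below the MSR
 * bit, which would make the shift count negative.
 */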
static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
        BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
        return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
        BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
        return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
        return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

#ifdef CONFIG_SMP
extern void speculative_store_bypass_ht_init(void);
#else
static inline void speculative_store_bypass_ht_init(void) { }
#endif

extern void speculative_store_bypass_update(unsigned long tif);

static inline void speculative_store_bypass_update_current(void)
{
        speculative_store_bypass_update(current_thread_info()->flags);
}

enum {
        IBRS_DISABLED,

        /* in host kernel, disabled in guest and userland */
        IBRS_ENABLED,

        /* in host kernel and host userland, disabled in guest */
        IBRS_ENABLED_ALWAYS,

        /* in host userland, disabled in kernel and guest */
        IBRS_ENABLED_USER,

        /* in both kernel and host userland (enhanced IBRS) */
        IBRS_ENHANCED,

        IBRS_MAX = IBRS_ENHANCED,
};

static __always_inline int cpu_has_spec_ctrl(void)
{
        return static_key_false(&ibrs_present_key);
}

static __always_inline bool ibrs_enabled_kernel(void)
{
        /*
         * We don't need to do the cpu_has_spec_ctrl() check here as
         * the IBRS bit won't be on if no such capability exists.
         */
        unsigned int ibrs = __this_cpu_read(spec_ctrl_pcp.entry);

        return ibrs & SPEC_CTRL_IBRS;
}

static inline bool retp_enabled(void)
{
        return static_key_false(&retp_enabled_key);
}

static inline bool retp_enabled_full(void)
{
        return retp_enabled() && retp_compiler();
}

static inline bool ibpb_enabled(void)
{
        return (boot_cpu_has(X86_FEATURE_IBPB) &&
                (ibrs_enabled_kernel() || retp_enabled()));
}

/*
 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
 * the guest has, while on VMEXIT we restore the kernel view. This
 * would be easier if SPEC_CTRL were architecturally maskable or
 * shadowable for guests but this is not (currently) the case.
 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
 * the guest's version of VIRT_SPEC_CTRL, if emulated.
 */

static __always_inline void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        /*
         * The per-cpu spec_ctrl_pcp.entry64 will be the SPEC_CTRL MSR value
         * to be used in host kernel. This is performance critical code.
         * Preemption is disabled, so we cannot race with sysfs writes.
         */
        u64 msr, guestval, hostval = this_cpu_read(spec_ctrl_pcp.entry64);
        struct thread_info *ti = current_thread_info();
        bool write_msr;

        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values: clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /*
                 * IBRS may have barrier semantics so it must be set
                 * during vmexit (!setguest) if SPEC_CTRL MSR write is
                 * enabled at kernel entry.
                 */
                write_msr = (!setguest &&
                            (this_cpu_read(spec_ctrl_pcp.enabled) &
                                           SPEC_CTRL_PCP_IBRS_ENTRY)) ||
                            (hostval != guestval);

                if (unlikely(write_msr)) {
                        msr = setguest ? guestval : hostval;
                        native_wrmsrl(MSR_IA32_SPEC_CTRL, msr);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                goto ret;

        /*
         * If the SSBD mode is not user settable, grab the SSBD bit
         * from x86_spec_ctrl_base. Otherwise, evaluate current's TIF_SSBD
         * thread flag.
         */
        if (!static_key_false(&ssbd_userset_key))
                hostval = x86_spec_ctrl_base & SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculative_store_bypass_update(tif);
        }

ret:
        /* rmb not needed when entering guest */
        if (!setguest) {
                /*
                 * This is an unconditional jump, no wrong speculation
                 * is possible.
                 */
                if (retp_enabled_full())
                        return;

                /* rmb to prevent wrong speculation for security */
                rmb();
        }
}

/**
 * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
 * @guest_spec_ctrl:            The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:       The guest controlled bits of MSR_VIRT_SPEC_CTRL
 *                              (may get translated to MSR_AMD64_LS_CFG bits)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static __always_inline void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
                                                    u64 guest_virt_spec_ctrl)
{
        x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
}

/**
 * x86_spec_ctrl_restore_host - Restore host speculation control registers
 * @guest_spec_ctrl:            The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:       The guest controlled bits of MSR_VIRT_SPEC_CTRL
 *                              (may get translated to MSR_AMD64_LS_CFG bits)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static __always_inline void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
                                                       u64 guest_virt_spec_ctrl)
{
        x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
}
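
/*
 * Illustrative pairing of the two helpers above around guest entry/exit
 * (the real callers live in the KVM vendor code; the field names here are
 * hypothetical):
 *
 *      x86_spec_ctrl_set_guest(vcpu_spec_ctrl, vcpu_virt_spec_ctrl);
 *      ... VMRUN / VMLAUNCH ...
 *      x86_spec_ctrl_restore_host(vcpu_spec_ctrl, vcpu_virt_spec_ctrl);
 */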

/*
 * spec_ctrl_ibrs_off() is called before a cpu enters an idle state and
 * spec_ctrl_ibrs_on() is called after it exits the idle state.
 *
 * There is no need to turn IBRS off and on when entering and exiting
 * idle if the enhanced IBRS feature is present.
 */
static __always_inline void spec_ctrl_ibrs_on(void)
{
        /*
         * IBRS may have barrier semantics so it must be set even for ALWAYS
         * mode.
         */
        if (static_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                if (ibrs_enabled_kernel())
                        return;
                /* Fall back to retpoline check */
        } else if (ibrs_enabled_kernel()) {
                u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64);

                native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
                return;
        }

        /* This is an unconditional jump, no wrong speculation is possible.  */
        if (retp_enabled_full())
                return;

        /* rmb to prevent wrong speculation for security */
        rmb();
}

static __always_inline void spec_ctrl_ibrs_off(void)
{
        if (!static_cpu_has(X86_FEATURE_IBRS_ENHANCED) &&
            ibrs_enabled_kernel()) {
                u64 spec_ctrl = x86_spec_ctrl_base;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                        spec_ctrl |= ssbd_tif_to_spec_ctrl(
                                        current_thread_info()->flags);

                native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
        }
        /* rmb not needed when disabling IBRS */
}
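
/*
 * Illustrative idle-path usage of the two helpers above (the real call
 * sites are in the idle/mwait code):
 *
 *      spec_ctrl_ibrs_off();
 *      ... hlt/mwait ...
 *      spec_ctrl_ibrs_on();
 *
 * Dropping IBRS across idle avoids leaving the bit set while this CPU has
 * nothing to run, which on some implementations would also slow the
 * sibling hardware thread.
 */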

/*
 * These functions are called before calling into firmware.  Firmware might
 * have indirect branches, so if we're running with retpolines, we need to
 * enable IBRS to protect the kernel from spectre v2.
 *
 * The 'ibrs_on' variable is used to prevent race conditions.  Otherwise, if
 * the admin disabled IBRS while a CPU was running in firmware, IBRS could get
 * stuck on indefinitely.
 *
 * There are still other race conditions possible, but they're generally not a
 * problem because they'll get corrected on the next kernel exit.
 */
static inline bool spec_ctrl_ibrs_on_firmware(void)
{
        bool ibrs_on = false;

        if (cpu_has_spec_ctrl() && retp_enabled() && !ibrs_enabled_kernel()) {
                u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64) |
                                SPEC_CTRL_IBRS;

                native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
                ibrs_on = true;
        } else {
                /* rmb to prevent wrong speculation for security */
                rmb();
        }

        return ibrs_on;
}

static inline void spec_ctrl_ibrs_off_firmware(bool ibrs_on)
{
        if (ibrs_on) {
                u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64);

                native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
        } else {
                /* rmb to prevent wrong speculation for security */
                rmb();
        }
}
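
/*
 * Illustrative pairing around a firmware call (EFI runtime services,
 * APEI, etc.); the wrapper names at the real call sites may differ:
 *
 *      bool ibrs_on = spec_ctrl_ibrs_on_firmware();
 *      ... call into firmware ...
 *      spec_ctrl_ibrs_off_firmware(ibrs_on);
 */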

static inline void __spec_ctrl_ibpb(void)
{
        native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}

static inline void spec_ctrl_ibpb(void)
{
        if (ibpb_enabled())
                __spec_ctrl_ibpb();
}

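/*
 * IBPB on a task switch: gated by ibpb_enabled() and a PTRACE_MODE_IBPB
 * access check against @next so that, as the name suggests, the barrier is
 * only issued when the two tasks have different credentials.  When SMEP is
 * available the RSB is refilled as well.
 */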
static inline void spec_ctrl_ibpb_if_different_creds(struct task_struct *next)
{
        if (ibpb_enabled() &&
            (!next || __ptrace_may_access(next, PTRACE_MODE_IBPB))) {
                __spec_ctrl_ibpb();

                if (static_cpu_has(X86_FEATURE_SMEP))
                        fill_RSB();
        }
}

extern enum ssb_mitigation ssb_mode;

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SPEC_CTRL_H */