linux/arch/x86/kernel/cpu/bugs.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright (C) 1994  Linus Torvalds
   4 *
   5 *  Cyrix stuff, June 1998 by:
   6 *      - Rafael R. Reilova (moved everything from head.S),
   7 *        <rreilova@ececs.uc.edu>
   8 *      - Channing Corn (tests & fixes),
   9 *      - Andrew D. Balsa (code cleanup).
  10 */
  11#include <linux/init.h>
  12#include <linux/utsname.h>
  13#include <linux/cpu.h>
  14#include <linux/module.h>
  15#include <linux/nospec.h>
  16#include <linux/prctl.h>
  17#include <linux/sched/smt.h>
  18#include <linux/pgtable.h>
  19
  20#include <asm/spec-ctrl.h>
  21#include <asm/cmdline.h>
  22#include <asm/bugs.h>
  23#include <asm/processor.h>
  24#include <asm/processor-flags.h>
  25#include <asm/fpu/internal.h>
  26#include <asm/msr.h>
  27#include <asm/vmx.h>
  28#include <asm/paravirt.h>
  29#include <asm/alternative.h>
  30#include <asm/set_memory.h>
  31#include <asm/intel-family.h>
  32#include <asm/e820/api.h>
  33#include <asm/hypervisor.h>
  34#include <asm/tlbflush.h>
  35
  36#include "cpu.h"
  37
  38static void __init spectre_v1_select_mitigation(void);
  39static void __init spectre_v2_select_mitigation(void);
  40static void __init ssb_select_mitigation(void);
  41static void __init l1tf_select_mitigation(void);
  42static void __init mds_select_mitigation(void);
  43static void __init mds_print_mitigation(void);
  44static void __init taa_select_mitigation(void);
  45static void __init srbds_select_mitigation(void);
  46static void __init l1d_flush_select_mitigation(void);
  47
  48/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
  49u64 x86_spec_ctrl_base;
  50EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  51static DEFINE_MUTEX(spec_ctrl_mutex);
  52
  53/*
  54 * The vendor and possibly platform specific bits which can be modified in
  55 * x86_spec_ctrl_base.
  56 */
  57static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
  58
  59/*
  60 * AMD specific MSR info for Speculative Store Bypass control.
  61 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  62 */
  63u64 __ro_after_init x86_amd_ls_cfg_base;
  64u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
  65
  66/* Control conditional STIBP in switch_to() */
  67DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
  68/* Control conditional IBPB in switch_mm() */
  69DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
  70/* Control unconditional IBPB in switch_mm() */
  71DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
  72
  73/* Control MDS CPU buffer clear before returning to user space */
  74DEFINE_STATIC_KEY_FALSE(mds_user_clear);
  75EXPORT_SYMBOL_GPL(mds_user_clear);
  76/* Control MDS CPU buffer clear before idling (halt, mwait) */
  77DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
  78EXPORT_SYMBOL_GPL(mds_idle_clear);
  79
  80/*
   81 * Controls whether L1D flush based mitigations are enabled, based on HW
   82 * features and the admin setting via the "l1d_flush" boot parameter;
   83 * defaults to false.
  84 */
  85DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
  86
  87void __init check_bugs(void)
  88{
  89        identify_boot_cpu();
  90
  91        /*
  92         * identify_boot_cpu() initialized SMT support information, let the
  93         * core code know.
  94         */
  95        cpu_smt_check_topology();
  96
  97        if (!IS_ENABLED(CONFIG_SMP)) {
  98                pr_info("CPU: ");
  99                print_cpu_info(&boot_cpu_data);
 100        }
 101
 102        /*
 103         * Read the SPEC_CTRL MSR to account for reserved bits which may
 104         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
 105         * init code as it is not enumerated and depends on the family.
 106         */
 107        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
 108                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 109
 110        /* Allow STIBP in MSR_SPEC_CTRL if supported */
 111        if (boot_cpu_has(X86_FEATURE_STIBP))
 112                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 113
 114        /* Select the proper CPU mitigations before patching alternatives: */
 115        spectre_v1_select_mitigation();
 116        spectre_v2_select_mitigation();
 117        ssb_select_mitigation();
 118        l1tf_select_mitigation();
 119        mds_select_mitigation();
 120        taa_select_mitigation();
 121        srbds_select_mitigation();
 122        l1d_flush_select_mitigation();
 123
  124        /*
  125         * As MDS and TAA mitigations are inter-related, defer printing the
  126         * MDS mitigation until after TAA mitigation selection is done.
  127         */
 128        mds_print_mitigation();
 129
 130        arch_smt_update();
 131
 132#ifdef CONFIG_X86_32
 133        /*
 134         * Check whether we are able to run this kernel safely on SMP.
 135         *
 136         * - i386 is no longer supported.
 137         * - In order to run on anything without a TSC, we need to be
  138         *   compiled for an i486.
 139         */
 140        if (boot_cpu_data.x86 < 4)
 141                panic("Kernel requires i486+ for 'invlpg' and other features");
 142
 143        init_utsname()->machine[1] =
 144                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 145        alternative_instructions();
 146
 147        fpu__init_check_bugs();
 148#else /* CONFIG_X86_64 */
 149        alternative_instructions();
 150
 151        /*
  152         * Make sure the first 2MB area is not mapped by huge pages.
  153         * There are typically fixed size MTRRs in there and overlapping
  154         * MTRRs into large pages causes slowdowns.
 155         *
  156         * Right now we don't do that with gbpages because there seems to be
  157         * very little benefit in that case.
 158         */
 159        if (!direct_gbpages)
 160                set_memory_4k((unsigned long)__va(0), 1);
 161#endif
 162}
 163
 164void
 165x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 166{
 167        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
 168        struct thread_info *ti = current_thread_info();
 169
 170        /* Is MSR_SPEC_CTRL implemented ? */
 171        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
 172                /*
 173                 * Restrict guest_spec_ctrl to supported values. Clear the
 174                 * modifiable bits in the host base value and or the
 175                 * modifiable bits from the guest value.
 176                 */
 177                guestval = hostval & ~x86_spec_ctrl_mask;
 178                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 179
 180                /* SSBD controlled in MSR_SPEC_CTRL */
 181                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
 182                    static_cpu_has(X86_FEATURE_AMD_SSBD))
 183                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 184
 185                /* Conditional STIBP enabled? */
 186                if (static_branch_unlikely(&switch_to_cond_stibp))
 187                        hostval |= stibp_tif_to_spec_ctrl(ti->flags);
 188
 189                if (hostval != guestval) {
 190                        msrval = setguest ? guestval : hostval;
 191                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 192                }
 193        }
 194
 195        /*
 196         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
  197         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
 198         */
 199        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
 200            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
 201                return;
 202
 203        /*
 204         * If the host has SSBD mitigation enabled, force it in the host's
  205         * virtual MSR value. If it's not permanently enabled, evaluate
 206         * current's TIF_SSBD thread flag.
 207         */
 208        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
 209                hostval = SPEC_CTRL_SSBD;
 210        else
 211                hostval = ssbd_tif_to_spec_ctrl(ti->flags);
 212
 213        /* Sanitize the guest value */
 214        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
 215
 216        if (hostval != guestval) {
 217                unsigned long tif;
 218
 219                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 220                                 ssbd_spec_ctrl_to_tif(hostval);
 221
 222                speculation_ctrl_update(tif);
 223        }
 224}
 225EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
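
/*
 * Editor's illustration (not part of the upstream file): a worked example of
 * the guest/host SPEC_CTRL merge performed above, assuming the architectural
 * bit layout IBRS = bit 0, STIBP = bit 1, SSBD = bit 2.
 *
 *   x86_spec_ctrl_base = SPEC_CTRL_IBRS                   = 0x1
 *   x86_spec_ctrl_mask = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP = 0x3
 *   guest_spec_ctrl    = SPEC_CTRL_IBRS | SPEC_CTRL_SSBD  = 0x5
 *
 *   guestval = (0x1 & ~0x3) | (0x5 & 0x3) = 0x0 | 0x1 = 0x1
 *
 * i.e. the guest may toggle the bits the host declared modifiable (IBRS and
 * STIBP here), while a request for a bit outside the mask (SSBD) is dropped.
 */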
 226
 227static void x86_amd_ssb_disable(void)
 228{
 229        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 230
 231        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
 232                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
 233        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 234                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 235}
 236
 237#undef pr_fmt
 238#define pr_fmt(fmt)     "MDS: " fmt
 239
 240/* Default mitigation for MDS-affected CPUs */
 241static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
 242static bool mds_nosmt __ro_after_init = false;
 243
 244static const char * const mds_strings[] = {
 245        [MDS_MITIGATION_OFF]    = "Vulnerable",
 246        [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
 247        [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
 248};
 249
 250static void __init mds_select_mitigation(void)
 251{
 252        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
 253                mds_mitigation = MDS_MITIGATION_OFF;
 254                return;
 255        }
 256
 257        if (mds_mitigation == MDS_MITIGATION_FULL) {
 258                if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
 259                        mds_mitigation = MDS_MITIGATION_VMWERV;
 260
 261                static_branch_enable(&mds_user_clear);
 262
 263                if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 264                    (mds_nosmt || cpu_mitigations_auto_nosmt()))
 265                        cpu_smt_disable(false);
 266        }
 267}
 268
 269static void __init mds_print_mitigation(void)
 270{
 271        if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
 272                return;
 273
 274        pr_info("%s\n", mds_strings[mds_mitigation]);
 275}
 276
 277static int __init mds_cmdline(char *str)
 278{
 279        if (!boot_cpu_has_bug(X86_BUG_MDS))
 280                return 0;
 281
 282        if (!str)
 283                return -EINVAL;
 284
 285        if (!strcmp(str, "off"))
 286                mds_mitigation = MDS_MITIGATION_OFF;
 287        else if (!strcmp(str, "full"))
 288                mds_mitigation = MDS_MITIGATION_FULL;
 289        else if (!strcmp(str, "full,nosmt")) {
 290                mds_mitigation = MDS_MITIGATION_FULL;
 291                mds_nosmt = true;
 292        }
 293
 294        return 0;
 295}
 296early_param("mds", mds_cmdline);
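
/*
 * Editor's note (illustration, not upstream code): example kernel command
 * line values accepted by the parser above:
 *
 *   mds=off         -> MDS_MITIGATION_OFF
 *   mds=full        -> MDS_MITIGATION_FULL (the default)
 *   mds=full,nosmt  -> MDS_MITIGATION_FULL, and SMT is disabled unless the
 *                      CPU is only affected by MSBDS
 */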
 297
 298#undef pr_fmt
 299#define pr_fmt(fmt)     "TAA: " fmt
 300
 301enum taa_mitigations {
 302        TAA_MITIGATION_OFF,
 303        TAA_MITIGATION_UCODE_NEEDED,
 304        TAA_MITIGATION_VERW,
 305        TAA_MITIGATION_TSX_DISABLED,
 306};
 307
 308/* Default mitigation for TAA-affected CPUs */
 309static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
 310static bool taa_nosmt __ro_after_init;
 311
 312static const char * const taa_strings[] = {
 313        [TAA_MITIGATION_OFF]            = "Vulnerable",
 314        [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
 315        [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
 316        [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
 317};
 318
 319static void __init taa_select_mitigation(void)
 320{
 321        u64 ia32_cap;
 322
 323        if (!boot_cpu_has_bug(X86_BUG_TAA)) {
 324                taa_mitigation = TAA_MITIGATION_OFF;
 325                return;
 326        }
 327
 328        /* TSX previously disabled by tsx=off */
 329        if (!boot_cpu_has(X86_FEATURE_RTM)) {
 330                taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
 331                goto out;
 332        }
 333
 334        if (cpu_mitigations_off()) {
 335                taa_mitigation = TAA_MITIGATION_OFF;
 336                return;
 337        }
 338
 339        /*
 340         * TAA mitigation via VERW is turned off if both
 341         * tsx_async_abort=off and mds=off are specified.
 342         */
 343        if (taa_mitigation == TAA_MITIGATION_OFF &&
 344            mds_mitigation == MDS_MITIGATION_OFF)
 345                goto out;
 346
 347        if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
 348                taa_mitigation = TAA_MITIGATION_VERW;
 349        else
 350                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 351
 352        /*
 353         * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
 354         * A microcode update fixes this behavior to clear CPU buffers. It also
 355         * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
 356         * ARCH_CAP_TSX_CTRL_MSR bit.
 357         *
 358         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
 359         * update is required.
 360         */
 361        ia32_cap = x86_read_arch_cap_msr();
 362        if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
 363            !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
 364                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 365
 366        /*
  367         * TSX is enabled, so select the alternate mitigation for TAA, which
  368         * is the same as MDS. Enable the MDS static branch to clear CPU buffers.
 369         *
 370         * For guests that can't determine whether the correct microcode is
 371         * present on host, enable the mitigation for UCODE_NEEDED as well.
 372         */
 373        static_branch_enable(&mds_user_clear);
 374
 375        if (taa_nosmt || cpu_mitigations_auto_nosmt())
 376                cpu_smt_disable(false);
 377
 378        /*
 379         * Update MDS mitigation, if necessary, as the mds_user_clear is
 380         * now enabled for TAA mitigation.
 381         */
 382        if (mds_mitigation == MDS_MITIGATION_OFF &&
 383            boot_cpu_has_bug(X86_BUG_MDS)) {
 384                mds_mitigation = MDS_MITIGATION_FULL;
 385                mds_select_mitigation();
 386        }
 387out:
 388        pr_info("%s\n", taa_strings[taa_mitigation]);
 389}
 390
 391static int __init tsx_async_abort_parse_cmdline(char *str)
 392{
 393        if (!boot_cpu_has_bug(X86_BUG_TAA))
 394                return 0;
 395
 396        if (!str)
 397                return -EINVAL;
 398
 399        if (!strcmp(str, "off")) {
 400                taa_mitigation = TAA_MITIGATION_OFF;
 401        } else if (!strcmp(str, "full")) {
 402                taa_mitigation = TAA_MITIGATION_VERW;
 403        } else if (!strcmp(str, "full,nosmt")) {
 404                taa_mitigation = TAA_MITIGATION_VERW;
 405                taa_nosmt = true;
 406        }
 407
 408        return 0;
 409}
 410early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
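
/*
 * Editor's note (illustration only): example values for the parameter parsed
 * above:
 *
 *   tsx_async_abort=off         -> TAA_MITIGATION_OFF
 *   tsx_async_abort=full        -> TAA_MITIGATION_VERW (the default)
 *   tsx_async_abort=full,nosmt  -> TAA_MITIGATION_VERW and SMT disabled
 *
 * As noted in taa_select_mitigation(), the VERW based mitigation is only
 * fully turned off when both tsx_async_abort=off and mds=off are given.
 */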
 411
 412#undef pr_fmt
 413#define pr_fmt(fmt)     "SRBDS: " fmt
 414
 415enum srbds_mitigations {
 416        SRBDS_MITIGATION_OFF,
 417        SRBDS_MITIGATION_UCODE_NEEDED,
 418        SRBDS_MITIGATION_FULL,
 419        SRBDS_MITIGATION_TSX_OFF,
 420        SRBDS_MITIGATION_HYPERVISOR,
 421};
 422
 423static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
 424
 425static const char * const srbds_strings[] = {
 426        [SRBDS_MITIGATION_OFF]          = "Vulnerable",
 427        [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
 428        [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
 429        [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
 430        [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
 431};
 432
 433static bool srbds_off;
 434
 435void update_srbds_msr(void)
 436{
 437        u64 mcu_ctrl;
 438
 439        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 440                return;
 441
 442        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 443                return;
 444
 445        if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
 446                return;
 447
 448        rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 449
 450        switch (srbds_mitigation) {
 451        case SRBDS_MITIGATION_OFF:
 452        case SRBDS_MITIGATION_TSX_OFF:
 453                mcu_ctrl |= RNGDS_MITG_DIS;
 454                break;
 455        case SRBDS_MITIGATION_FULL:
 456                mcu_ctrl &= ~RNGDS_MITG_DIS;
 457                break;
 458        default:
 459                break;
 460        }
 461
 462        wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 463}
 464
 465static void __init srbds_select_mitigation(void)
 466{
 467        u64 ia32_cap;
 468
 469        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 470                return;
 471
 472        /*
 473         * Check to see if this is one of the MDS_NO systems supporting
 474         * TSX that are only exposed to SRBDS when TSX is enabled.
 475         */
 476        ia32_cap = x86_read_arch_cap_msr();
 477        if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
 478                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 479        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 480                srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
 481        else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
 482                srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
 483        else if (cpu_mitigations_off() || srbds_off)
 484                srbds_mitigation = SRBDS_MITIGATION_OFF;
 485
 486        update_srbds_msr();
 487        pr_info("%s\n", srbds_strings[srbds_mitigation]);
 488}
 489
 490static int __init srbds_parse_cmdline(char *str)
 491{
 492        if (!str)
 493                return -EINVAL;
 494
 495        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 496                return 0;
 497
 498        srbds_off = !strcmp(str, "off");
 499        return 0;
 500}
 501early_param("srbds", srbds_parse_cmdline);
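
/*
 * Editor's note (illustration only): "srbds=off" is the only value recognised
 * by the parser above. It requests SRBDS_MITIGATION_OFF, which
 * srbds_select_mitigation() applies only if no higher-priority state (TSX
 * disabled on an MDS_NO part, running under a hypervisor, or missing
 * microcode) was selected first.
 */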
 502
 503#undef pr_fmt
 504#define pr_fmt(fmt)     "L1D Flush : " fmt
 505
 506enum l1d_flush_mitigations {
 507        L1D_FLUSH_OFF = 0,
 508        L1D_FLUSH_ON,
 509};
 510
 511static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
 512
 513static void __init l1d_flush_select_mitigation(void)
 514{
 515        if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
 516                return;
 517
 518        static_branch_enable(&switch_mm_cond_l1d_flush);
 519        pr_info("Conditional flush on switch_mm() enabled\n");
 520}
 521
 522static int __init l1d_flush_parse_cmdline(char *str)
 523{
 524        if (!strcmp(str, "on"))
 525                l1d_flush_mitigation = L1D_FLUSH_ON;
 526
 527        return 0;
 528}
 529early_param("l1d_flush", l1d_flush_parse_cmdline);
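
/*
 * Editor's note (illustration only): booting with "l1d_flush=on" enables the
 * switch_mm_cond_l1d_flush static key above (provided the CPU has
 * X86_FEATURE_FLUSH_L1D). Individual tasks must then still opt in via
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0),
 * which is handled by l1d_flush_prctl_set() further down.
 */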
 530
 531#undef pr_fmt
 532#define pr_fmt(fmt)     "Spectre V1 : " fmt
 533
 534enum spectre_v1_mitigation {
 535        SPECTRE_V1_MITIGATION_NONE,
 536        SPECTRE_V1_MITIGATION_AUTO,
 537};
 538
 539static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
 540        SPECTRE_V1_MITIGATION_AUTO;
 541
 542static const char * const spectre_v1_strings[] = {
 543        [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
 544        [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
 545};
 546
 547/*
 548 * Does SMAP provide full mitigation against speculative kernel access to
 549 * userspace?
 550 */
 551static bool smap_works_speculatively(void)
 552{
 553        if (!boot_cpu_has(X86_FEATURE_SMAP))
 554                return false;
 555
 556        /*
 557         * On CPUs which are vulnerable to Meltdown, SMAP does not
 558         * prevent speculative access to user data in the L1 cache.
 559         * Consider SMAP to be non-functional as a mitigation on these
 560         * CPUs.
 561         */
 562        if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
 563                return false;
 564
 565        return true;
 566}
 567
 568static void __init spectre_v1_select_mitigation(void)
 569{
 570        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
 571                spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
 572                return;
 573        }
 574
 575        if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
 576                /*
 577                 * With Spectre v1, a user can speculatively control either
 578                 * path of a conditional swapgs with a user-controlled GS
 579                 * value.  The mitigation is to add lfences to both code paths.
 580                 *
 581                 * If FSGSBASE is enabled, the user can put a kernel address in
 582                 * GS, in which case SMAP provides no protection.
 583                 *
 584                 * If FSGSBASE is disabled, the user can only put a user space
 585                 * address in GS.  That makes an attack harder, but still
 586                 * possible if there's no SMAP protection.
 587                 */
 588                if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
 589                    !smap_works_speculatively()) {
 590                        /*
 591                         * Mitigation can be provided from SWAPGS itself or
 592                         * PTI as the CR3 write in the Meltdown mitigation
 593                         * is serializing.
 594                         *
 595                         * If neither is there, mitigate with an LFENCE to
 596                         * stop speculation through swapgs.
 597                         */
 598                        if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
 599                            !boot_cpu_has(X86_FEATURE_PTI))
 600                                setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
 601
 602                        /*
 603                         * Enable lfences in the kernel entry (non-swapgs)
 604                         * paths, to prevent user entry from speculatively
 605                         * skipping swapgs.
 606                         */
 607                        setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
 608                }
 609        }
 610
 611        pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 612}
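
/*
 * Editor's summary of the selection above (illustration, not upstream text):
 *
 *   FSGSBASE enabled, or SMAP not usable   -> force FENCE_SWAPGS_KERNEL
 *     ...and X86_BUG_SWAPGS without PTI    -> also force FENCE_SWAPGS_USER
 *   otherwise                              -> rely on SMAP/PTI plus the
 *                                             generic __user pointer
 *                                             sanitization
 */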
 613
 614static int __init nospectre_v1_cmdline(char *str)
 615{
 616        spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
 617        return 0;
 618}
 619early_param("nospectre_v1", nospectre_v1_cmdline);
 620
 621#undef pr_fmt
 622#define pr_fmt(fmt)     "Spectre V2 : " fmt
 623
 624static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 625        SPECTRE_V2_NONE;
 626
 627static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 628        SPECTRE_V2_USER_NONE;
 629static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
 630        SPECTRE_V2_USER_NONE;
 631
 632#ifdef CONFIG_RETPOLINE
 633static bool spectre_v2_bad_module;
 634
 635bool retpoline_module_ok(bool has_retpoline)
 636{
 637        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
 638                return true;
 639
 640        pr_err("System may be vulnerable to spectre v2\n");
 641        spectre_v2_bad_module = true;
 642        return false;
 643}
 644
 645static inline const char *spectre_v2_module_string(void)
 646{
 647        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
 648}
 649#else
 650static inline const char *spectre_v2_module_string(void) { return ""; }
 651#endif
 652
 653static inline bool match_option(const char *arg, int arglen, const char *opt)
 654{
 655        int len = strlen(opt);
 656
 657        return len == arglen && !strncmp(arg, opt, len);
 658}
 659
 660/* The kernel command line selection for spectre v2 */
 661enum spectre_v2_mitigation_cmd {
 662        SPECTRE_V2_CMD_NONE,
 663        SPECTRE_V2_CMD_AUTO,
 664        SPECTRE_V2_CMD_FORCE,
 665        SPECTRE_V2_CMD_RETPOLINE,
 666        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
 667        SPECTRE_V2_CMD_RETPOLINE_AMD,
 668};
 669
 670enum spectre_v2_user_cmd {
 671        SPECTRE_V2_USER_CMD_NONE,
 672        SPECTRE_V2_USER_CMD_AUTO,
 673        SPECTRE_V2_USER_CMD_FORCE,
 674        SPECTRE_V2_USER_CMD_PRCTL,
 675        SPECTRE_V2_USER_CMD_PRCTL_IBPB,
 676        SPECTRE_V2_USER_CMD_SECCOMP,
 677        SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
 678};
 679
 680static const char * const spectre_v2_user_strings[] = {
 681        [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
 682        [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
 683        [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
 684        [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
 685        [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
 686};
 687
 688static const struct {
 689        const char                      *option;
 690        enum spectre_v2_user_cmd        cmd;
 691        bool                            secure;
 692} v2_user_options[] __initconst = {
 693        { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
 694        { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
 695        { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
 696        { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
 697        { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
 698        { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
 699        { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
 700};
 701
 702static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 703{
 704        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
 705                pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 706}
 707
 708static enum spectre_v2_user_cmd __init
 709spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 710{
 711        char arg[20];
 712        int ret, i;
 713
 714        switch (v2_cmd) {
 715        case SPECTRE_V2_CMD_NONE:
 716                return SPECTRE_V2_USER_CMD_NONE;
 717        case SPECTRE_V2_CMD_FORCE:
 718                return SPECTRE_V2_USER_CMD_FORCE;
 719        default:
 720                break;
 721        }
 722
 723        ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
 724                                  arg, sizeof(arg));
 725        if (ret < 0)
 726                return SPECTRE_V2_USER_CMD_AUTO;
 727
 728        for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
 729                if (match_option(arg, ret, v2_user_options[i].option)) {
 730                        spec_v2_user_print_cond(v2_user_options[i].option,
 731                                                v2_user_options[i].secure);
 732                        return v2_user_options[i].cmd;
 733                }
 734        }
 735
 736        pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
 737        return SPECTRE_V2_USER_CMD_AUTO;
 738}
 739
 740static void __init
 741spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 742{
 743        enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
 744        bool smt_possible = IS_ENABLED(CONFIG_SMP);
 745        enum spectre_v2_user_cmd cmd;
 746
 747        if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
 748                return;
 749
 750        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
 751            cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 752                smt_possible = false;
 753
 754        cmd = spectre_v2_parse_user_cmdline(v2_cmd);
 755        switch (cmd) {
 756        case SPECTRE_V2_USER_CMD_NONE:
 757                goto set_mode;
 758        case SPECTRE_V2_USER_CMD_FORCE:
 759                mode = SPECTRE_V2_USER_STRICT;
 760                break;
 761        case SPECTRE_V2_USER_CMD_PRCTL:
 762        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
 763                mode = SPECTRE_V2_USER_PRCTL;
 764                break;
 765        case SPECTRE_V2_USER_CMD_AUTO:
 766        case SPECTRE_V2_USER_CMD_SECCOMP:
 767        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
 768                if (IS_ENABLED(CONFIG_SECCOMP))
 769                        mode = SPECTRE_V2_USER_SECCOMP;
 770                else
 771                        mode = SPECTRE_V2_USER_PRCTL;
 772                break;
 773        }
 774
 775        /* Initialize Indirect Branch Prediction Barrier */
 776        if (boot_cpu_has(X86_FEATURE_IBPB)) {
 777                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
 778
 779                spectre_v2_user_ibpb = mode;
 780                switch (cmd) {
 781                case SPECTRE_V2_USER_CMD_FORCE:
 782                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
 783                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
 784                        static_branch_enable(&switch_mm_always_ibpb);
 785                        spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
 786                        break;
 787                case SPECTRE_V2_USER_CMD_PRCTL:
 788                case SPECTRE_V2_USER_CMD_AUTO:
 789                case SPECTRE_V2_USER_CMD_SECCOMP:
 790                        static_branch_enable(&switch_mm_cond_ibpb);
 791                        break;
 792                default:
 793                        break;
 794                }
 795
 796                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
 797                        static_key_enabled(&switch_mm_always_ibpb) ?
 798                        "always-on" : "conditional");
 799        }
 800
 801        /*
  802         * If STIBP is not available, enhanced IBRS is enabled, or SMT is not
  803         * possible, STIBP is not required.
 804         */
 805        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
 806            !smt_possible ||
 807            spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 808                return;
 809
 810        /*
 811         * At this point, an STIBP mode other than "off" has been set.
 812         * If STIBP support is not being forced, check if STIBP always-on
 813         * is preferred.
 814         */
 815        if (mode != SPECTRE_V2_USER_STRICT &&
 816            boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
 817                mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 818
 819        spectre_v2_user_stibp = mode;
 820
 821set_mode:
 822        pr_info("%s\n", spectre_v2_user_strings[mode]);
 823}
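
/*
 * Editor's note (illustration only): example spectre_v2_user= selections and
 * the resulting state, assuming both IBPB and STIBP are available:
 *
 *   spectre_v2_user=on          -> IBPB always-on, STIBP strict
 *   spectre_v2_user=prctl       -> conditional IBPB and STIBP via prctl
 *   spectre_v2_user=prctl,ibpb  -> IBPB always-on, STIBP via prctl
 *   spectre_v2_user=seccomp     -> as prctl, but seccomp tasks get the
 *                                  mitigation applied automatically
 *
 * With enhanced IBRS or without SMT, the STIBP part is skipped entirely.
 */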
 824
 825static const char * const spectre_v2_strings[] = {
 826        [SPECTRE_V2_NONE]                       = "Vulnerable",
 827        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
 828        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
 829        [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
 830};
 831
 832static const struct {
 833        const char *option;
 834        enum spectre_v2_mitigation_cmd cmd;
 835        bool secure;
 836} mitigation_options[] __initconst = {
 837        { "off",                SPECTRE_V2_CMD_NONE,              false },
 838        { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
 839        { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
 840        { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
 841        { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
 842        { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 843};
 844
 845static void __init spec_v2_print_cond(const char *reason, bool secure)
 846{
 847        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
 848                pr_info("%s selected on command line.\n", reason);
 849}
 850
 851static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 852{
 853        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 854        char arg[20];
 855        int ret, i;
 856
 857        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
 858            cpu_mitigations_off())
 859                return SPECTRE_V2_CMD_NONE;
 860
 861        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
 862        if (ret < 0)
 863                return SPECTRE_V2_CMD_AUTO;
 864
 865        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
 866                if (!match_option(arg, ret, mitigation_options[i].option))
 867                        continue;
 868                cmd = mitigation_options[i].cmd;
 869                break;
 870        }
 871
 872        if (i >= ARRAY_SIZE(mitigation_options)) {
 873                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
 874                return SPECTRE_V2_CMD_AUTO;
 875        }
 876
 877        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
 878             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
 879             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
 880            !IS_ENABLED(CONFIG_RETPOLINE)) {
 881                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
 882                return SPECTRE_V2_CMD_AUTO;
 883        }
 884
 885        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
 886            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
 887            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
 888                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
 889                return SPECTRE_V2_CMD_AUTO;
 890        }
 891
 892        spec_v2_print_cond(mitigation_options[i].option,
 893                           mitigation_options[i].secure);
 894        return cmd;
 895}
 896
 897static void __init spectre_v2_select_mitigation(void)
 898{
 899        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
 900        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
 901
 902        /*
 903         * If the CPU is not affected and the command line mode is NONE or AUTO
 904         * then nothing to do.
 905         */
 906        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
 907            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
 908                return;
 909
 910        switch (cmd) {
 911        case SPECTRE_V2_CMD_NONE:
 912                return;
 913
 914        case SPECTRE_V2_CMD_FORCE:
 915        case SPECTRE_V2_CMD_AUTO:
 916                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
 917                        mode = SPECTRE_V2_IBRS_ENHANCED;
 918                        /* Force it so VMEXIT will restore correctly */
 919                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
 920                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 921                        goto specv2_set_mode;
 922                }
 923                if (IS_ENABLED(CONFIG_RETPOLINE))
 924                        goto retpoline_auto;
 925                break;
 926        case SPECTRE_V2_CMD_RETPOLINE_AMD:
 927                if (IS_ENABLED(CONFIG_RETPOLINE))
 928                        goto retpoline_amd;
 929                break;
 930        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
 931                if (IS_ENABLED(CONFIG_RETPOLINE))
 932                        goto retpoline_generic;
 933                break;
 934        case SPECTRE_V2_CMD_RETPOLINE:
 935                if (IS_ENABLED(CONFIG_RETPOLINE))
 936                        goto retpoline_auto;
 937                break;
 938        }
 939        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
 940        return;
 941
 942retpoline_auto:
 943        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 944            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 945        retpoline_amd:
 946                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
 947                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 948                        goto retpoline_generic;
 949                }
 950                mode = SPECTRE_V2_RETPOLINE_AMD;
 951                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
 952                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 953        } else {
 954        retpoline_generic:
 955                mode = SPECTRE_V2_RETPOLINE_GENERIC;
 956                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 957        }
 958
 959specv2_set_mode:
 960        spectre_v2_enabled = mode;
 961        pr_info("%s\n", spectre_v2_strings[mode]);
 962
 963        /*
 964         * If spectre v2 protection has been enabled, unconditionally fill
 965         * RSB during a context switch; this protects against two independent
 966         * issues:
 967         *
 968         *      - RSB underflow (and switch to BTB) on Skylake+
 969         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
 970         */
 971        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 972        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 973
 974        /*
 975         * Retpoline means the kernel is safe because it has no indirect
  976         * branches. Enhanced IBRS protects firmware too, so enable restricted
 977         * speculation around firmware calls only when Enhanced IBRS isn't
 978         * supported.
 979         *
 980         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 981         * the user might select retpoline on the kernel command line and if
  982         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
 983         * enable IBRS around firmware calls.
 984         */
 985        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
 986                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 987                pr_info("Enabling Restricted Speculation for firmware calls\n");
 988        }
 989
 990        /* Set up IBPB and STIBP depending on the general spectre V2 command */
 991        spectre_v2_user_select_mitigation(cmd);
 992}
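
/*
 * Editor's note (illustration only): how some spectre_v2= values map to the
 * modes selected above:
 *
 *   spectre_v2=off                -> SPECTRE_V2_NONE
 *   spectre_v2=auto / =on         -> Enhanced IBRS if enumerated, otherwise
 *                                    a retpoline flavour
 *   spectre_v2=retpoline,amd      -> SPECTRE_V2_RETPOLINE_AMD, falling back
 *                                    to the generic retpoline if LFENCE is
 *                                    not serializing
 *   spectre_v2=retpoline,generic  -> SPECTRE_V2_RETPOLINE_GENERIC
 */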
 993
 994static void update_stibp_msr(void * __unused)
 995{
 996        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 997}
 998
 999/* Update x86_spec_ctrl_base in case SMT state changed. */
1000static void update_stibp_strict(void)
1001{
1002        u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1003
1004        if (sched_smt_active())
1005                mask |= SPEC_CTRL_STIBP;
1006
1007        if (mask == x86_spec_ctrl_base)
1008                return;
1009
1010        pr_info("Update user space SMT mitigation: STIBP %s\n",
1011                mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1012        x86_spec_ctrl_base = mask;
1013        on_each_cpu(update_stibp_msr, NULL, 1);
1014}
1015
1016/* Update the static key controlling the evaluation of TIF_SPEC_IB */
1017static void update_indir_branch_cond(void)
1018{
1019        if (sched_smt_active())
1020                static_branch_enable(&switch_to_cond_stibp);
1021        else
1022                static_branch_disable(&switch_to_cond_stibp);
1023}
1024
1025#undef pr_fmt
1026#define pr_fmt(fmt) fmt
1027
1028/* Update the static key controlling the MDS CPU buffer clear in idle */
1029static void update_mds_branch_idle(void)
1030{
1031        /*
1032         * Enable the idle clearing if SMT is active on CPUs which are
1033         * affected only by MSBDS and not any other MDS variant.
1034         *
1035         * The other variants cannot be mitigated when SMT is enabled, so
1036         * clearing the buffers on idle just to prevent the Store Buffer
1037         * repartitioning leak would be a window dressing exercise.
1038         */
1039        if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1040                return;
1041
1042        if (sched_smt_active())
1043                static_branch_enable(&mds_idle_clear);
1044        else
1045                static_branch_disable(&mds_idle_clear);
1046}
1047
1048#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1049#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1050
1051void cpu_bugs_smt_update(void)
1052{
1053        mutex_lock(&spec_ctrl_mutex);
1054
1055        switch (spectre_v2_user_stibp) {
1056        case SPECTRE_V2_USER_NONE:
1057                break;
1058        case SPECTRE_V2_USER_STRICT:
1059        case SPECTRE_V2_USER_STRICT_PREFERRED:
1060                update_stibp_strict();
1061                break;
1062        case SPECTRE_V2_USER_PRCTL:
1063        case SPECTRE_V2_USER_SECCOMP:
1064                update_indir_branch_cond();
1065                break;
1066        }
1067
1068        switch (mds_mitigation) {
1069        case MDS_MITIGATION_FULL:
1070        case MDS_MITIGATION_VMWERV:
1071                if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1072                        pr_warn_once(MDS_MSG_SMT);
1073                update_mds_branch_idle();
1074                break;
1075        case MDS_MITIGATION_OFF:
1076                break;
1077        }
1078
1079        switch (taa_mitigation) {
1080        case TAA_MITIGATION_VERW:
1081        case TAA_MITIGATION_UCODE_NEEDED:
1082                if (sched_smt_active())
1083                        pr_warn_once(TAA_MSG_SMT);
1084                break;
1085        case TAA_MITIGATION_TSX_DISABLED:
1086        case TAA_MITIGATION_OFF:
1087                break;
1088        }
1089
1090        mutex_unlock(&spec_ctrl_mutex);
1091}
1092
1093#undef pr_fmt
1094#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
1095
1096static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1097
1098/* The kernel command line selection */
1099enum ssb_mitigation_cmd {
1100        SPEC_STORE_BYPASS_CMD_NONE,
1101        SPEC_STORE_BYPASS_CMD_AUTO,
1102        SPEC_STORE_BYPASS_CMD_ON,
1103        SPEC_STORE_BYPASS_CMD_PRCTL,
1104        SPEC_STORE_BYPASS_CMD_SECCOMP,
1105};
1106
1107static const char * const ssb_strings[] = {
1108        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
1109        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
1110        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
1111        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1112};
1113
1114static const struct {
1115        const char *option;
1116        enum ssb_mitigation_cmd cmd;
1117} ssb_mitigation_options[]  __initconst = {
1118        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1119        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1120        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1121        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1122        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1123};
1124
1125static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1126{
1127        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1128        char arg[20];
1129        int ret, i;
1130
1131        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1132            cpu_mitigations_off()) {
1133                return SPEC_STORE_BYPASS_CMD_NONE;
1134        } else {
1135                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1136                                          arg, sizeof(arg));
1137                if (ret < 0)
1138                        return SPEC_STORE_BYPASS_CMD_AUTO;
1139
1140                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1141                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1142                                continue;
1143
1144                        cmd = ssb_mitigation_options[i].cmd;
1145                        break;
1146                }
1147
1148                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1149                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1150                        return SPEC_STORE_BYPASS_CMD_AUTO;
1151                }
1152        }
1153
1154        return cmd;
1155}
1156
1157static enum ssb_mitigation __init __ssb_select_mitigation(void)
1158{
1159        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1160        enum ssb_mitigation_cmd cmd;
1161
1162        if (!boot_cpu_has(X86_FEATURE_SSBD))
1163                return mode;
1164
1165        cmd = ssb_parse_cmdline();
1166        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1167            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1168             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1169                return mode;
1170
1171        switch (cmd) {
1172        case SPEC_STORE_BYPASS_CMD_AUTO:
1173        case SPEC_STORE_BYPASS_CMD_SECCOMP:
1174                /*
1175                 * Choose prctl+seccomp as the default mode if seccomp is
1176                 * enabled.
1177                 */
1178                if (IS_ENABLED(CONFIG_SECCOMP))
1179                        mode = SPEC_STORE_BYPASS_SECCOMP;
1180                else
1181                        mode = SPEC_STORE_BYPASS_PRCTL;
1182                break;
1183        case SPEC_STORE_BYPASS_CMD_ON:
1184                mode = SPEC_STORE_BYPASS_DISABLE;
1185                break;
1186        case SPEC_STORE_BYPASS_CMD_PRCTL:
1187                mode = SPEC_STORE_BYPASS_PRCTL;
1188                break;
1189        case SPEC_STORE_BYPASS_CMD_NONE:
1190                break;
1191        }
1192
1193        /*
1194         * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
1195         * bit in the mask to allow guests to use the mitigation even in the
1196         * case where the host does not enable it.
1197         */
1198        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1199            static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1200                x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1201        }
1202
1203        /*
1204         * We have three CPU feature flags that are in play here:
1205         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1206         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1207         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1208         */
1209        if (mode == SPEC_STORE_BYPASS_DISABLE) {
1210                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1211                /*
1212                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1213                 * use a completely different MSR and bit dependent on family.
1214                 */
1215                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1216                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1217                        x86_amd_ssb_disable();
1218                } else {
1219                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1220                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1221                }
1222        }
1223
1224        return mode;
1225}
1226
1227static void ssb_select_mitigation(void)
1228{
1229        ssb_mode = __ssb_select_mitigation();
1230
1231        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1232                pr_info("%s\n", ssb_strings[ssb_mode]);
1233}
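
/*
 * Editor's note (illustration only): example spec_store_bypass_disable=
 * values handled by the code above:
 *
 *   =off      -> SPEC_STORE_BYPASS_NONE    (don't touch SSB)
 *   =on       -> SPEC_STORE_BYPASS_DISABLE (SSB disabled system wide)
 *   =prctl    -> SPEC_STORE_BYPASS_PRCTL   (per-task opt in)
 *   =seccomp  -> SPEC_STORE_BYPASS_SECCOMP (prctl plus automatic opt in for
 *                                           seccomp tasks; plain prctl when
 *                                           CONFIG_SECCOMP=n)
 *
 * "nospec_store_bypass_disable" and "mitigations=off" also select NONE.
 */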
1234
1235#undef pr_fmt
1236#define pr_fmt(fmt)     "Speculation prctl: " fmt
1237
1238static void task_update_spec_tif(struct task_struct *tsk)
1239{
1240        /* Force the update of the real TIF bits */
1241        set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1242
1243        /*
1244         * Immediately update the speculation control MSRs for the current
1245         * task, but for a non-current task delay setting the CPU
1246         * mitigation until it is scheduled next.
1247         *
1248         * This can only happen for SECCOMP mitigation. For PRCTL it's
1249         * always the current task.
1250         */
1251        if (tsk == current)
1252                speculation_ctrl_update_current();
1253}
1254
1255static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1256{
1257
1258        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1259                return -EPERM;
1260
1261        switch (ctrl) {
1262        case PR_SPEC_ENABLE:
1263                set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1264                return 0;
1265        case PR_SPEC_DISABLE:
1266                clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
1267                return 0;
1268        default:
1269                return -ERANGE;
1270        }
1271}
1272
1273static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1274{
1275        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1276            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1277                return -ENXIO;
1278
1279        switch (ctrl) {
1280        case PR_SPEC_ENABLE:
1281                /* If speculation is force disabled, enable is not allowed */
1282                if (task_spec_ssb_force_disable(task))
1283                        return -EPERM;
1284                task_clear_spec_ssb_disable(task);
1285                task_clear_spec_ssb_noexec(task);
1286                task_update_spec_tif(task);
1287                break;
1288        case PR_SPEC_DISABLE:
1289                task_set_spec_ssb_disable(task);
1290                task_clear_spec_ssb_noexec(task);
1291                task_update_spec_tif(task);
1292                break;
1293        case PR_SPEC_FORCE_DISABLE:
1294                task_set_spec_ssb_disable(task);
1295                task_set_spec_ssb_force_disable(task);
1296                task_clear_spec_ssb_noexec(task);
1297                task_update_spec_tif(task);
1298                break;
1299        case PR_SPEC_DISABLE_NOEXEC:
1300                if (task_spec_ssb_force_disable(task))
1301                        return -EPERM;
1302                task_set_spec_ssb_disable(task);
1303                task_set_spec_ssb_noexec(task);
1304                task_update_spec_tif(task);
1305                break;
1306        default:
1307                return -ERANGE;
1308        }
1309        return 0;
1310}
1311
1312static bool is_spec_ib_user_controlled(void)
1313{
1314        return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1315                spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1316                spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1317                spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
1318}
1319
1320static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1321{
1322        switch (ctrl) {
1323        case PR_SPEC_ENABLE:
1324                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1325                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1326                        return 0;
1327
1328                /*
1329                 * With strict mode for both IBPB and STIBP, the instruction
1330                 * code paths avoid checking this task flag and instead,
1331                 * unconditionally run the instruction. However, STIBP and IBPB
1332                 * are independent and either can be set to conditionally
1333                 * enabled regardless of the mode of the other.
1334                 *
1335                 * If either is set to conditional, allow the task flag to be
1336                 * updated, unless it was force-disabled by a previous prctl
1337                 * call. Currently, this is possible on an AMD CPU which has the
1338                 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
1339                 * kernel is booted with 'spectre_v2_user=seccomp', then
1340                 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
1341                 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
1342                 */
1343                if (!is_spec_ib_user_controlled() ||
1344                    task_spec_ib_force_disable(task))
1345                        return -EPERM;
1346
1347                task_clear_spec_ib_disable(task);
1348                task_update_spec_tif(task);
1349                break;
1350        case PR_SPEC_DISABLE:
1351        case PR_SPEC_FORCE_DISABLE:
1352                /*
1353                 * Indirect branch speculation is always allowed when
1354                 * mitigation is force disabled.
1355                 */
1356                if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1357                    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1358                        return -EPERM;
1359
1360                if (!is_spec_ib_user_controlled())
1361                        return 0;
1362
1363                task_set_spec_ib_disable(task);
1364                if (ctrl == PR_SPEC_FORCE_DISABLE)
1365                        task_set_spec_ib_force_disable(task);
1366                task_update_spec_tif(task);
1367                break;
1368        default:
1369                return -ERANGE;
1370        }
1371        return 0;
1372}
1373
1374int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1375                             unsigned long ctrl)
1376{
1377        switch (which) {
1378        case PR_SPEC_STORE_BYPASS:
1379                return ssb_prctl_set(task, ctrl);
1380        case PR_SPEC_INDIRECT_BRANCH:
1381                return ib_prctl_set(task, ctrl);
1382        case PR_SPEC_L1D_FLUSH:
1383                return l1d_flush_prctl_set(task, ctrl);
1384        default:
1385                return -ENODEV;
1386        }
1387}
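
/*
 * Editor's illustration (userspace code, not part of this file): the
 * PR_SET_SPECULATION_CTRL / PR_GET_SPECULATION_CTRL prctls route into
 * arch_prctl_spec_ctrl_set() above and arch_prctl_spec_ctrl_get() below.
 * A minimal sketch of a task opting out of Speculative Store Bypass:
 *
 *   #include <sys/prctl.h>
 *   #include <linux/prctl.h>
 *
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *         PR_SPEC_DISABLE, 0, 0);
 *   long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *                      0, 0, 0);
 *   // state should now read PR_SPEC_PRCTL | PR_SPEC_DISABLE when the kernel
 *   // was booted with spec_store_bypass_disable=prctl or =seccomp.
 */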
1388
1389#ifdef CONFIG_SECCOMP
1390void arch_seccomp_spec_mitigate(struct task_struct *task)
1391{
1392        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1393                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1394        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1395            spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1396                ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1397}
1398#endif
1399
1400static int l1d_flush_prctl_get(struct task_struct *task)
1401{
1402        if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1403                return PR_SPEC_FORCE_DISABLE;
1404
1405        if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
1406                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1407        else
1408                return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1409}
1410
1411static int ssb_prctl_get(struct task_struct *task)
1412{
1413        switch (ssb_mode) {
1414        case SPEC_STORE_BYPASS_DISABLE:
1415                return PR_SPEC_DISABLE;
1416        case SPEC_STORE_BYPASS_SECCOMP:
1417        case SPEC_STORE_BYPASS_PRCTL:
1418                if (task_spec_ssb_force_disable(task))
1419                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1420                if (task_spec_ssb_noexec(task))
1421                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
1422                if (task_spec_ssb_disable(task))
1423                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1424                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1425        default:
1426                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1427                        return PR_SPEC_ENABLE;
1428                return PR_SPEC_NOT_AFFECTED;
1429        }
1430}
1431
1432static int ib_prctl_get(struct task_struct *task)
1433{
1434        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1435                return PR_SPEC_NOT_AFFECTED;
1436
1437        if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1438            spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1439                return PR_SPEC_ENABLE;
1440        else if (is_spec_ib_user_controlled()) {
1441                if (task_spec_ib_force_disable(task))
1442                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1443                if (task_spec_ib_disable(task))
1444                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1445                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1446        } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1447            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1448            spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1449                return PR_SPEC_DISABLE;
1450        else
1451                return PR_SPEC_NOT_AFFECTED;
1452}
1453
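/*
 * prctl(PR_GET_SPECULATION_CTRL) entry point: dispatch the query to the
 * handler for the selected speculation misfeature.
 */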
1454int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1455{
1456        switch (which) {
1457        case PR_SPEC_STORE_BYPASS:
1458                return ssb_prctl_get(task);
1459        case PR_SPEC_INDIRECT_BRANCH:
1460                return ib_prctl_get(task);
1461        case PR_SPEC_L1D_FLUSH:
1462                return l1d_flush_prctl_get(task);
1463        default:
1464                return -ENODEV;
1465        }
1466}
1467
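/*
 * Apply the boot-time speculation control setup on a secondary CPU:
 * restore the SPEC_CTRL MSR base value and, when speculative store bypass
 * is disabled system-wide, apply the AMD-specific SSB disable as well.
 */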
1468void x86_spec_ctrl_setup_ap(void)
1469{
1470        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1471                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1472
1473        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1474                x86_amd_ssb_disable();
1475}
1476
1477bool itlb_multihit_kvm_mitigation;
1478EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1479
1480#undef pr_fmt
1481#define pr_fmt(fmt)     "L1TF: " fmt
1482
1483/* Default mitigation for L1TF-affected CPUs */
1484enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1485#if IS_ENABLED(CONFIG_KVM_INTEL)
1486EXPORT_SYMBOL_GPL(l1tf_mitigation);
1487#endif
1488enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1489EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1490
1491/*
1492 * These CPUs all support a 44-bit physical address space internally in the
1493 * cache, but CPUID can report a smaller number of physical address bits.
1494 *
1495 * The L1TF mitigation uses the topmost address bit for the inversion of
1496 * non-present PTEs. When the installed memory reaches into the topmost
1497 * address bit due to memory holes, which has been observed on machines
1498 * that report 36 physical address bits and have 32G of RAM installed,
1499 * the mitigation range check in l1tf_select_mitigation() triggers.
1500 * This is a false positive because the mitigation is still possible, since
1501 * the cache uses 44 bits internally. Use the cache bits instead of the
1502 * reported physical bits and adjust them to 44 on the affected machines
1503 * if the reported bits are less than 44.
1504 */
1505static void override_cache_bits(struct cpuinfo_x86 *c)
1506{
1507        if (c->x86 != 6)
1508                return;
1509
1510        switch (c->x86_model) {
1511        case INTEL_FAM6_NEHALEM:
1512        case INTEL_FAM6_WESTMERE:
1513        case INTEL_FAM6_SANDYBRIDGE:
1514        case INTEL_FAM6_IVYBRIDGE:
1515        case INTEL_FAM6_HASWELL:
1516        case INTEL_FAM6_HASWELL_L:
1517        case INTEL_FAM6_HASWELL_G:
1518        case INTEL_FAM6_BROADWELL:
1519        case INTEL_FAM6_BROADWELL_G:
1520        case INTEL_FAM6_SKYLAKE_L:
1521        case INTEL_FAM6_SKYLAKE:
1522        case INTEL_FAM6_KABYLAKE_L:
1523        case INTEL_FAM6_KABYLAKE:
1524                if (c->x86_cache_bits < 44)
1525                        c->x86_cache_bits = 44;
1526                break;
1527        }
1528}
1529
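/*
 * Select the L1TF mitigation: honour the mitigations= overrides, disable
 * SMT where the chosen mode requires it, and enable PTE inversion unless
 * the kernel lacks PAE or installed memory reaches beyond half of the
 * addressable physical space.
 */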
1530static void __init l1tf_select_mitigation(void)
1531{
1532        u64 half_pa;
1533
1534        if (!boot_cpu_has_bug(X86_BUG_L1TF))
1535                return;
1536
1537        if (cpu_mitigations_off())
1538                l1tf_mitigation = L1TF_MITIGATION_OFF;
1539        else if (cpu_mitigations_auto_nosmt())
1540                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1541
1542        override_cache_bits(&boot_cpu_data);
1543
1544        switch (l1tf_mitigation) {
1545        case L1TF_MITIGATION_OFF:
1546        case L1TF_MITIGATION_FLUSH_NOWARN:
1547        case L1TF_MITIGATION_FLUSH:
1548                break;
1549        case L1TF_MITIGATION_FLUSH_NOSMT:
1550        case L1TF_MITIGATION_FULL:
1551                cpu_smt_disable(false);
1552                break;
1553        case L1TF_MITIGATION_FULL_FORCE:
1554                cpu_smt_disable(true);
1555                break;
1556        }
1557
1558#if CONFIG_PGTABLE_LEVELS == 2
1559        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1560        return;
1561#endif
1562
1563        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1564        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1565                        e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1566                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1567                pr_info("You may make it effective by booting the kernel with the mem=%llu parameter.\n",
1568                                half_pa);
1569                pr_info("However, doing so will make a part of your RAM unusable.\n");
1570                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1571                return;
1572        }
1573
1574        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1575}
1576
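/* Parse the l1tf= early command line option. */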
1577static int __init l1tf_cmdline(char *str)
1578{
1579        if (!boot_cpu_has_bug(X86_BUG_L1TF))
1580                return 0;
1581
1582        if (!str)
1583                return -EINVAL;
1584
1585        if (!strcmp(str, "off"))
1586                l1tf_mitigation = L1TF_MITIGATION_OFF;
1587        else if (!strcmp(str, "flush,nowarn"))
1588                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1589        else if (!strcmp(str, "flush"))
1590                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1591        else if (!strcmp(str, "flush,nosmt"))
1592                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1593        else if (!strcmp(str, "full"))
1594                l1tf_mitigation = L1TF_MITIGATION_FULL;
1595        else if (!strcmp(str, "full,force"))
1596                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1597
1598        return 0;
1599}
1600early_param("l1tf", l1tf_cmdline);
1601
1602#undef pr_fmt
1603#define pr_fmt(fmt) fmt
1604
1605#ifdef CONFIG_SYSFS
1606
1607#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1608
1609#if IS_ENABLED(CONFIG_KVM_INTEL)
1610static const char * const l1tf_vmx_states[] = {
1611        [VMENTER_L1D_FLUSH_AUTO]                = "auto",
1612        [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
1613        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
1614        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
1615        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
1616        [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
1617};
1618
1619static ssize_t l1tf_show_state(char *buf)
1620{
1621        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1622                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1623
1624        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
1625            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
1626             sched_smt_active())) {
1627                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
1628                               l1tf_vmx_states[l1tf_vmx_mitigation]);
1629        }
1630
1631        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
1632                       l1tf_vmx_states[l1tf_vmx_mitigation],
1633                       sched_smt_active() ? "vulnerable" : "disabled");
1634}
1635
1636static ssize_t itlb_multihit_show_state(char *buf)
1637{
1638        if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
1639            !boot_cpu_has(X86_FEATURE_VMX))
1640                return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
1641        else if (!(cr4_read_shadow() & X86_CR4_VMXE))
1642                return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
1643        else if (itlb_multihit_kvm_mitigation)
1644                return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
1645        else
1646                return sprintf(buf, "KVM: Vulnerable\n");
1647}
1648#else
1649static ssize_t l1tf_show_state(char *buf)
1650{
1651        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1652}
1653
1654static ssize_t itlb_multihit_show_state(char *buf)
1655{
1656        return sprintf(buf, "Processor vulnerable\n");
1657}
1658#endif
1659
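/* Build the MDS sysfs string, appending the SMT state where it is relevant. */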
1660static ssize_t mds_show_state(char *buf)
1661{
1662        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1663                return sprintf(buf, "%s; SMT Host state unknown\n",
1664                               mds_strings[mds_mitigation]);
1665        }
1666
1667        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
1668                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1669                               (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
1670                                sched_smt_active() ? "mitigated" : "disabled"));
1671        }
1672
1673        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1674                       sched_smt_active() ? "vulnerable" : "disabled");
1675}
1676
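/*
 * Build the TSX Async Abort sysfs string, appending the SMT state unless
 * TSX is disabled or the mitigation is off.
 */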
1677static ssize_t tsx_async_abort_show_state(char *buf)
1678{
1679        if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
1680            (taa_mitigation == TAA_MITIGATION_OFF))
1681                return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
1682
1683        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1684                return sprintf(buf, "%s; SMT Host state unknown\n",
1685                               taa_strings[taa_mitigation]);
1686        }
1687
1688        return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
1689                       sched_smt_active() ? "vulnerable" : "disabled");
1690}
1691
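/* Return the STIBP part of the spectre_v2 sysfs reporting string. */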
1692static char *stibp_state(void)
1693{
1694        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1695                return "";
1696
1697        switch (spectre_v2_user_stibp) {
1698        case SPECTRE_V2_USER_NONE:
1699                return ", STIBP: disabled";
1700        case SPECTRE_V2_USER_STRICT:
1701                return ", STIBP: forced";
1702        case SPECTRE_V2_USER_STRICT_PREFERRED:
1703                return ", STIBP: always-on";
1704        case SPECTRE_V2_USER_PRCTL:
1705        case SPECTRE_V2_USER_SECCOMP:
1706                if (static_key_enabled(&switch_to_cond_stibp))
1707                        return ", STIBP: conditional";
1708        }
1709        return "";
1710}
1711
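/* Return the IBPB part of the spectre_v2 sysfs reporting string. */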
1712static char *ibpb_state(void)
1713{
1714        if (boot_cpu_has(X86_FEATURE_IBPB)) {
1715                if (static_key_enabled(&switch_mm_always_ibpb))
1716                        return ", IBPB: always-on";
1717                if (static_key_enabled(&switch_mm_cond_ibpb))
1718                        return ", IBPB: conditional";
1719                return ", IBPB: disabled";
1720        }
1721        return "";
1722}
1723
1724static ssize_t srbds_show_state(char *buf)
1725{
1726        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
1727}
1728
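/*
 * Common backend for the sysfs vulnerability files: report "Not affected"
 * when the CPU does not have the bug, otherwise the bug-specific mitigation
 * state, falling back to "Vulnerable".
 */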
1729static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1730                               char *buf, unsigned int bug)
1731{
1732        if (!boot_cpu_has_bug(bug))
1733                return sprintf(buf, "Not affected\n");
1734
1735        switch (bug) {
1736        case X86_BUG_CPU_MELTDOWN:
1737                if (boot_cpu_has(X86_FEATURE_PTI))
1738                        return sprintf(buf, "Mitigation: PTI\n");
1739
1740                if (hypervisor_is_type(X86_HYPER_XEN_PV))
1741                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
1742
1743                break;
1744
1745        case X86_BUG_SPECTRE_V1:
1746                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1747
1748        case X86_BUG_SPECTRE_V2:
1749                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1750                               ibpb_state(),
1751                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1752                               stibp_state(),
1753                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1754                               spectre_v2_module_string());
1755
1756        case X86_BUG_SPEC_STORE_BYPASS:
1757                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1758
1759        case X86_BUG_L1TF:
1760                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
1761                        return l1tf_show_state(buf);
1762                break;
1763
1764        case X86_BUG_MDS:
1765                return mds_show_state(buf);
1766
1767        case X86_BUG_TAA:
1768                return tsx_async_abort_show_state(buf);
1769
1770        case X86_BUG_ITLB_MULTIHIT:
1771                return itlb_multihit_show_state(buf);
1772
1773        case X86_BUG_SRBDS:
1774                return srbds_show_state(buf);
1775
1776        default:
1777                break;
1778        }
1779
1780        return sprintf(buf, "Vulnerable\n");
1781}
1782
1783ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1784{
1785        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1786}
1787
1788ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1789{
1790        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1791}
1792
1793ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1794{
1795        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1796}
1797
1798ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1799{
1800        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1801}
1802
1803ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1804{
1805        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1806}
1807
1808ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
1809{
1810        return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
1811}
1812
1813ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
1814{
1815        return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
1816}
1817
1818ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
1819{
1820        return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
1821}
1822
1823ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
1824{
1825        return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
1826}
1827#endif
1828