root/arch/x86/kernel/cpu/bugs.c


DEFINITIONS

This source file includes the following definitions.
  1. check_bugs
  2. x86_virt_spec_ctrl
  3. x86_amd_ssb_disable
  4. mds_select_mitigation
  5. mds_print_mitigation
  6. mds_cmdline
  7. taa_select_mitigation
  8. tsx_async_abort_parse_cmdline
  9. update_srbds_msr
  10. srbds_select_mitigation
  11. srbds_parse_cmdline
  12. smap_works_speculatively
  13. spectre_v1_select_mitigation
  14. nospectre_v1_cmdline
  15. retpoline_module_ok
  16. spectre_v2_module_string
  17. spectre_v2_module_string
  18. match_option
  19. spec_v2_user_print_cond
  20. spectre_v2_parse_user_cmdline
  21. spectre_v2_user_select_mitigation
  22. spec_v2_print_cond
  23. spectre_v2_parse_cmdline
  24. spectre_v2_select_mitigation
  25. update_stibp_msr
  26. update_stibp_strict
  27. update_indir_branch_cond
  28. update_mds_branch_idle
  29. cpu_bugs_smt_update
  30. ssb_parse_cmdline
  31. __ssb_select_mitigation
  32. ssb_select_mitigation
  33. task_update_spec_tif
  34. ssb_prctl_set
  35. ib_prctl_set
  36. arch_prctl_spec_ctrl_set
  37. arch_seccomp_spec_mitigate
  38. ssb_prctl_get
  39. ib_prctl_get
  40. arch_prctl_spec_ctrl_get
  41. x86_spec_ctrl_setup_ap
  42. override_cache_bits
  43. l1tf_select_mitigation
  44. l1tf_cmdline
  45. l1tf_show_state
  46. itlb_multihit_show_state
  47. l1tf_show_state
  48. itlb_multihit_show_state
  49. mds_show_state
  50. tsx_async_abort_show_state
  51. stibp_state
  52. ibpb_state
  53. srbds_show_state
  54. cpu_show_common
  55. cpu_show_meltdown
  56. cpu_show_spectre_v1
  57. cpu_show_spectre_v2
  58. cpu_show_spec_store_bypass
  59. cpu_show_l1tf
  60. cpu_show_mds
  61. cpu_show_tsx_async_abort
  62. cpu_show_itlb_multihit
  63. cpu_show_srbds

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  Copyright (C) 1994  Linus Torvalds
   4  *
   5  *  Cyrix stuff, June 1998 by:
   6  *      - Rafael R. Reilova (moved everything from head.S),
   7  *        <rreilova@ececs.uc.edu>
   8  *      - Channing Corn (tests & fixes),
   9  *      - Andrew D. Balsa (code cleanup).
  10  */
  11 #include <linux/init.h>
  12 #include <linux/utsname.h>
  13 #include <linux/cpu.h>
  14 #include <linux/module.h>
  15 #include <linux/nospec.h>
  16 #include <linux/prctl.h>
  17 #include <linux/sched/smt.h>
  18 
  19 #include <asm/spec-ctrl.h>
  20 #include <asm/cmdline.h>
  21 #include <asm/bugs.h>
  22 #include <asm/processor.h>
  23 #include <asm/processor-flags.h>
  24 #include <asm/fpu/internal.h>
  25 #include <asm/msr.h>
  26 #include <asm/vmx.h>
  27 #include <asm/paravirt.h>
  28 #include <asm/alternative.h>
  29 #include <asm/pgtable.h>
  30 #include <asm/set_memory.h>
  31 #include <asm/intel-family.h>
  32 #include <asm/e820/api.h>
  33 #include <asm/hypervisor.h>
  34 
  35 #include "cpu.h"
  36 
  37 static void __init spectre_v1_select_mitigation(void);
  38 static void __init spectre_v2_select_mitigation(void);
  39 static void __init ssb_select_mitigation(void);
  40 static void __init l1tf_select_mitigation(void);
  41 static void __init mds_select_mitigation(void);
  42 static void __init mds_print_mitigation(void);
  43 static void __init taa_select_mitigation(void);
  44 static void __init srbds_select_mitigation(void);
  45 
  46 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
  47 u64 x86_spec_ctrl_base;
  48 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  49 static DEFINE_MUTEX(spec_ctrl_mutex);
  50 
  51 /*
  52  * The vendor and possibly platform specific bits which can be modified in
  53  * x86_spec_ctrl_base.
  54  */
  55 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
  56 
  57 /*
  58  * AMD specific MSR info for Speculative Store Bypass control.
  59  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  60  */
  61 u64 __ro_after_init x86_amd_ls_cfg_base;
  62 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
  63 
  64 /* Control conditional STIBP in switch_to() */
  65 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
  66 /* Control conditional IBPB in switch_mm() */
  67 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
  68 /* Control unconditional IBPB in switch_mm() */
  69 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
  70 
  71 /* Control MDS CPU buffer clear before returning to user space */
  72 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
  73 EXPORT_SYMBOL_GPL(mds_user_clear);
  74 /* Control MDS CPU buffer clear before idling (halt, mwait) */
  75 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
  76 EXPORT_SYMBOL_GPL(mds_idle_clear);
  77 
  78 void __init check_bugs(void)
  79 {
  80         identify_boot_cpu();
  81 
  82         /*
  83          * identify_boot_cpu() initialized SMT support information, let the
  84          * core code know.
  85          */
  86         cpu_smt_check_topology();
  87 
  88         if (!IS_ENABLED(CONFIG_SMP)) {
  89                 pr_info("CPU: ");
  90                 print_cpu_info(&boot_cpu_data);
  91         }
  92 
  93         /*
  94          * Read the SPEC_CTRL MSR to account for reserved bits which may
  95          * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
  96          * init code as it is not enumerated and depends on the family.
  97          */
  98         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
  99                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 100 
 101         /* Allow STIBP in MSR_SPEC_CTRL if supported */
 102         if (boot_cpu_has(X86_FEATURE_STIBP))
 103                 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 104 
 105         /* Select the proper CPU mitigations before patching alternatives: */
 106         spectre_v1_select_mitigation();
 107         spectre_v2_select_mitigation();
 108         ssb_select_mitigation();
 109         l1tf_select_mitigation();
 110         mds_select_mitigation();
 111         taa_select_mitigation();
 112         srbds_select_mitigation();
 113 
 114         /*
 115          * As MDS and TAA mitigations are inter-related, print MDS
 116          * mitigation until after TAA mitigation selection is done.
 117          */
 118         mds_print_mitigation();
 119 
 120         arch_smt_update();
 121 
 122 #ifdef CONFIG_X86_32
 123         /*
 124          * Check whether we are able to run this kernel safely on SMP.
 125          *
 126          * - i386 is no longer supported.
 127          * - In order to run on anything without a TSC, we need to be
 128          *   compiled for a i486.
 129          */
 130         if (boot_cpu_data.x86 < 4)
 131                 panic("Kernel requires i486+ for 'invlpg' and other features");
 132 
 133         init_utsname()->machine[1] =
 134                 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 135         alternative_instructions();
 136 
 137         fpu__init_check_bugs();
 138 #else /* CONFIG_X86_64 */
 139         alternative_instructions();
 140 
 141         /*
  142          * Make sure the first 2MB area is not mapped by huge pages.
  143          * There are typically fixed-size MTRRs in there and overlapping
  144          * MTRRs into large pages causes slowdowns.
 145          *
 146          * Right now we don't do that with gbpages because there seems
  147          * to be very little benefit for that case.
 148          */
 149         if (!direct_gbpages)
 150                 set_memory_4k((unsigned long)__va(0), 1);
 151 #endif
 152 }
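
      /*
       * Overview (for reference): the mitigation selection above is steered by
       * command line parameters parsed in this file: "mds=", "tsx_async_abort=",
       * "srbds=", "nospectre_v1", "spectre_v2=", "spectre_v2_user=",
       * "nospectre_v2", "spec_store_bypass_disable=",
       * "nospec_store_bypass_disable" and "l1tf=". The global "mitigations="
       * switch is handled by the core code and is consulted here via
       * cpu_mitigations_off() and cpu_mitigations_auto_nosmt().
       */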
 153 
 154 void
 155 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 156 {
 157         u64 msrval, guestval, hostval = x86_spec_ctrl_base;
 158         struct thread_info *ti = current_thread_info();
 159 
 160         /* Is MSR_SPEC_CTRL implemented ? */
 161         if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
 162                 /*
 163                  * Restrict guest_spec_ctrl to supported values. Clear the
  164                  * modifiable bits in the host base value and OR in the
 165                  * modifiable bits from the guest value.
 166                  */
 167                 guestval = hostval & ~x86_spec_ctrl_mask;
 168                 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 169 
 170                 /* SSBD controlled in MSR_SPEC_CTRL */
 171                 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
 172                     static_cpu_has(X86_FEATURE_AMD_SSBD))
 173                         hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 174 
 175                 /* Conditional STIBP enabled? */
 176                 if (static_branch_unlikely(&switch_to_cond_stibp))
 177                         hostval |= stibp_tif_to_spec_ctrl(ti->flags);
 178 
 179                 if (hostval != guestval) {
 180                         msrval = setguest ? guestval : hostval;
 181                         wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
 182                 }
 183         }
 184 
 185         /*
 186          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
  187          * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
 188          */
 189         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
 190             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
 191                 return;
 192 
 193         /*
 194          * If the host has SSBD mitigation enabled, force it in the host's
  195          * virtual MSR value. If it's not permanently enabled, evaluate
 196          * current's TIF_SSBD thread flag.
 197          */
 198         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
 199                 hostval = SPEC_CTRL_SSBD;
 200         else
 201                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
 202 
 203         /* Sanitize the guest value */
 204         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
 205 
 206         if (hostval != guestval) {
 207                 unsigned long tif;
 208 
 209                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 210                                  ssbd_spec_ctrl_to_tif(hostval);
 211 
 212                 speculation_ctrl_update(tif);
 213         }
 214 }
 215 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
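
      /*
       * Worked example (a sketch, assuming x86_spec_ctrl_mask contains IBRS,
       * STIBP and SSBD, and no SSBD/STIBP bits are set from the current
       * task's TIF flags): with x86_spec_ctrl_base == SPEC_CTRL_IBRS and a
       * guest requesting only SPEC_CTRL_STIBP,
       *
       *      guestval = (hostval & ~mask) | (guest & mask) == SPEC_CTRL_STIBP
       *
       * Since hostval != guestval, MSR_IA32_SPEC_CTRL is written with
       * guestval on VM entry (setguest == true) and restored to hostval on
       * VM exit (setguest == false).
       */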
 216 
 217 static void x86_amd_ssb_disable(void)
 218 {
 219         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 220 
 221         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
 222                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
 223         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
 224                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
 225 }
 226 
 227 #undef pr_fmt
 228 #define pr_fmt(fmt)     "MDS: " fmt
 229 
 230 /* Default mitigation for MDS-affected CPUs */
 231 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
 232 static bool mds_nosmt __ro_after_init = false;
 233 
 234 static const char * const mds_strings[] = {
 235         [MDS_MITIGATION_OFF]    = "Vulnerable",
 236         [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
 237         [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
 238 };
 239 
 240 static void __init mds_select_mitigation(void)
 241 {
 242         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
 243                 mds_mitigation = MDS_MITIGATION_OFF;
 244                 return;
 245         }
 246 
 247         if (mds_mitigation == MDS_MITIGATION_FULL) {
 248                 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
 249                         mds_mitigation = MDS_MITIGATION_VMWERV;
 250 
 251                 static_branch_enable(&mds_user_clear);
 252 
 253                 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 254                     (mds_nosmt || cpu_mitigations_auto_nosmt()))
 255                         cpu_smt_disable(false);
 256         }
 257 }
 258 
 259 static void __init mds_print_mitigation(void)
 260 {
 261         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
 262                 return;
 263 
 264         pr_info("%s\n", mds_strings[mds_mitigation]);
 265 }
 266 
 267 static int __init mds_cmdline(char *str)
 268 {
 269         if (!boot_cpu_has_bug(X86_BUG_MDS))
 270                 return 0;
 271 
 272         if (!str)
 273                 return -EINVAL;
 274 
 275         if (!strcmp(str, "off"))
 276                 mds_mitigation = MDS_MITIGATION_OFF;
 277         else if (!strcmp(str, "full"))
 278                 mds_mitigation = MDS_MITIGATION_FULL;
 279         else if (!strcmp(str, "full,nosmt")) {
 280                 mds_mitigation = MDS_MITIGATION_FULL;
 281                 mds_nosmt = true;
 282         }
 283 
 284         return 0;
 285 }
 286 early_param("mds", mds_cmdline);
 287 
 288 #undef pr_fmt
 289 #define pr_fmt(fmt)     "TAA: " fmt
 290 
 291 /* Default mitigation for TAA-affected CPUs */
 292 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
 293 static bool taa_nosmt __ro_after_init;
 294 
 295 static const char * const taa_strings[] = {
 296         [TAA_MITIGATION_OFF]            = "Vulnerable",
 297         [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
 298         [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
 299         [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
 300 };
 301 
 302 static void __init taa_select_mitigation(void)
 303 {
 304         u64 ia32_cap;
 305 
 306         if (!boot_cpu_has_bug(X86_BUG_TAA)) {
 307                 taa_mitigation = TAA_MITIGATION_OFF;
 308                 return;
 309         }
 310 
 311         /* TSX previously disabled by tsx=off */
 312         if (!boot_cpu_has(X86_FEATURE_RTM)) {
 313                 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
 314                 goto out;
 315         }
 316 
 317         if (cpu_mitigations_off()) {
 318                 taa_mitigation = TAA_MITIGATION_OFF;
 319                 return;
 320         }
 321 
 322         /*
 323          * TAA mitigation via VERW is turned off if both
 324          * tsx_async_abort=off and mds=off are specified.
 325          */
 326         if (taa_mitigation == TAA_MITIGATION_OFF &&
 327             mds_mitigation == MDS_MITIGATION_OFF)
 328                 goto out;
 329 
 330         if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
 331                 taa_mitigation = TAA_MITIGATION_VERW;
 332         else
 333                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 334 
 335         /*
 336          * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
 337          * A microcode update fixes this behavior to clear CPU buffers. It also
 338          * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
 339          * ARCH_CAP_TSX_CTRL_MSR bit.
 340          *
  341          * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a
  342          * microcode update is required.
 343          */
 344         ia32_cap = x86_read_arch_cap_msr();
 345         if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
 346             !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
 347                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 348 
 349         /*
 350          * TSX is enabled, select alternate mitigation for TAA which is
 351          * the same as MDS. Enable MDS static branch to clear CPU buffers.
 352          *
 353          * For guests that can't determine whether the correct microcode is
  354          * present on the host, enable the mitigation for UCODE_NEEDED as well.
 355          */
 356         static_branch_enable(&mds_user_clear);
 357 
 358         if (taa_nosmt || cpu_mitigations_auto_nosmt())
 359                 cpu_smt_disable(false);
 360 
 361         /*
 362          * Update MDS mitigation, if necessary, as the mds_user_clear is
 363          * now enabled for TAA mitigation.
 364          */
 365         if (mds_mitigation == MDS_MITIGATION_OFF &&
 366             boot_cpu_has_bug(X86_BUG_MDS)) {
 367                 mds_mitigation = MDS_MITIGATION_FULL;
 368                 mds_select_mitigation();
 369         }
 370 out:
 371         pr_info("%s\n", taa_strings[taa_mitigation]);
 372 }
 373 
 374 static int __init tsx_async_abort_parse_cmdline(char *str)
 375 {
 376         if (!boot_cpu_has_bug(X86_BUG_TAA))
 377                 return 0;
 378 
 379         if (!str)
 380                 return -EINVAL;
 381 
 382         if (!strcmp(str, "off")) {
 383                 taa_mitigation = TAA_MITIGATION_OFF;
 384         } else if (!strcmp(str, "full")) {
 385                 taa_mitigation = TAA_MITIGATION_VERW;
 386         } else if (!strcmp(str, "full,nosmt")) {
 387                 taa_mitigation = TAA_MITIGATION_VERW;
 388                 taa_nosmt = true;
 389         }
 390 
 391         return 0;
 392 }
 393 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
 394 
 395 #undef pr_fmt
 396 #define pr_fmt(fmt)     "SRBDS: " fmt
 397 
 398 enum srbds_mitigations {
 399         SRBDS_MITIGATION_OFF,
 400         SRBDS_MITIGATION_UCODE_NEEDED,
 401         SRBDS_MITIGATION_FULL,
 402         SRBDS_MITIGATION_TSX_OFF,
 403         SRBDS_MITIGATION_HYPERVISOR,
 404 };
 405 
 406 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
 407 
 408 static const char * const srbds_strings[] = {
 409         [SRBDS_MITIGATION_OFF]          = "Vulnerable",
 410         [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
 411         [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
 412         [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
 413         [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
 414 };
 415 
 416 static bool srbds_off;
 417 
 418 void update_srbds_msr(void)
 419 {
 420         u64 mcu_ctrl;
 421 
 422         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 423                 return;
 424 
 425         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 426                 return;
 427 
 428         if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
 429                 return;
 430 
 431         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 432 
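              /*
               * Note the polarity: setting RNGDS_MITG_DIS opts out of the
               * microcode mitigation, so it is set for the OFF and TSX_OFF
               * modes below and cleared for FULL.
               */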
 433         switch (srbds_mitigation) {
 434         case SRBDS_MITIGATION_OFF:
 435         case SRBDS_MITIGATION_TSX_OFF:
 436                 mcu_ctrl |= RNGDS_MITG_DIS;
 437                 break;
 438         case SRBDS_MITIGATION_FULL:
 439                 mcu_ctrl &= ~RNGDS_MITG_DIS;
 440                 break;
 441         default:
 442                 break;
 443         }
 444 
 445         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 446 }
 447 
 448 static void __init srbds_select_mitigation(void)
 449 {
 450         u64 ia32_cap;
 451 
 452         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 453                 return;
 454 
 455         /*
 456          * Check to see if this is one of the MDS_NO systems supporting
 457          * TSX that are only exposed to SRBDS when TSX is enabled.
 458          */
 459         ia32_cap = x86_read_arch_cap_msr();
 460         if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
 461                 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 462         else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 463                 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
 464         else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
 465                 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
 466         else if (cpu_mitigations_off() || srbds_off)
 467                 srbds_mitigation = SRBDS_MITIGATION_OFF;
 468 
 469         update_srbds_msr();
 470         pr_info("%s\n", srbds_strings[srbds_mitigation]);
 471 }
 472 
 473 static int __init srbds_parse_cmdline(char *str)
 474 {
 475         if (!str)
 476                 return -EINVAL;
 477 
 478         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 479                 return 0;
 480 
 481         srbds_off = !strcmp(str, "off");
 482         return 0;
 483 }
 484 early_param("srbds", srbds_parse_cmdline);
 485 
 486 #undef pr_fmt
 487 #define pr_fmt(fmt)     "Spectre V1 : " fmt
 488 
 489 enum spectre_v1_mitigation {
 490         SPECTRE_V1_MITIGATION_NONE,
 491         SPECTRE_V1_MITIGATION_AUTO,
 492 };
 493 
 494 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
 495         SPECTRE_V1_MITIGATION_AUTO;
 496 
 497 static const char * const spectre_v1_strings[] = {
 498         [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
 499         [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
 500 };
 501 
 502 /*
 503  * Does SMAP provide full mitigation against speculative kernel access to
 504  * userspace?
 505  */
 506 static bool smap_works_speculatively(void)
 507 {
 508         if (!boot_cpu_has(X86_FEATURE_SMAP))
 509                 return false;
 510 
 511         /*
 512          * On CPUs which are vulnerable to Meltdown, SMAP does not
 513          * prevent speculative access to user data in the L1 cache.
 514          * Consider SMAP to be non-functional as a mitigation on these
 515          * CPUs.
 516          */
 517         if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
 518                 return false;
 519 
 520         return true;
 521 }
 522 
 523 static void __init spectre_v1_select_mitigation(void)
 524 {
 525         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
 526                 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
 527                 return;
 528         }
 529 
 530         if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
 531                 /*
 532                  * With Spectre v1, a user can speculatively control either
 533                  * path of a conditional swapgs with a user-controlled GS
 534                  * value.  The mitigation is to add lfences to both code paths.
 535                  *
 536                  * If FSGSBASE is enabled, the user can put a kernel address in
 537                  * GS, in which case SMAP provides no protection.
 538                  *
 539                  * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
 540                  *         FSGSBASE enablement patches have been merged. ]
 541                  *
 542                  * If FSGSBASE is disabled, the user can only put a user space
 543                  * address in GS.  That makes an attack harder, but still
 544                  * possible if there's no SMAP protection.
 545                  */
 546                 if (!smap_works_speculatively()) {
 547                         /*
 548                          * Mitigation can be provided from SWAPGS itself or
 549                          * PTI as the CR3 write in the Meltdown mitigation
 550                          * is serializing.
 551                          *
 552                          * If neither is there, mitigate with an LFENCE to
 553                          * stop speculation through swapgs.
 554                          */
 555                         if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
 556                             !boot_cpu_has(X86_FEATURE_PTI))
 557                                 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
 558 
 559                         /*
 560                          * Enable lfences in the kernel entry (non-swapgs)
 561                          * paths, to prevent user entry from speculatively
 562                          * skipping swapgs.
 563                          */
 564                         setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
 565                 }
 566         }
 567 
 568         pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 569 }
 570 
 571 static int __init nospectre_v1_cmdline(char *str)
 572 {
 573         spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
 574         return 0;
 575 }
 576 early_param("nospectre_v1", nospectre_v1_cmdline);
 577 
 578 #undef pr_fmt
 579 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 580 
 581 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 582         SPECTRE_V2_NONE;
 583 
 584 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
 585         SPECTRE_V2_USER_NONE;
 586 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
 587         SPECTRE_V2_USER_NONE;
 588 
 589 #ifdef CONFIG_RETPOLINE
 590 static bool spectre_v2_bad_module;
 591 
 592 bool retpoline_module_ok(bool has_retpoline)
 593 {
 594         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
 595                 return true;
 596 
 597         pr_err("System may be vulnerable to spectre v2\n");
 598         spectre_v2_bad_module = true;
 599         return false;
 600 }
 601 
 602 static inline const char *spectre_v2_module_string(void)
 603 {
 604         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
 605 }
 606 #else
 607 static inline const char *spectre_v2_module_string(void) { return ""; }
 608 #endif
 609 
 610 static inline bool match_option(const char *arg, int arglen, const char *opt)
 611 {
 612         int len = strlen(opt);
 613 
 614         return len == arglen && !strncmp(arg, opt, len);
 615 }
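
      /*
       * Example: for a command line of "spectre_v2=retpoline,amd", arg is
       * "retpoline,amd" and arglen is 13, so the "retpoline" option (len 9)
       * does not match even though the first nine characters compare equal;
       * the length check prevents such prefix matches.
       */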
 616 
 617 /* The kernel command line selection for spectre v2 */
 618 enum spectre_v2_mitigation_cmd {
 619         SPECTRE_V2_CMD_NONE,
 620         SPECTRE_V2_CMD_AUTO,
 621         SPECTRE_V2_CMD_FORCE,
 622         SPECTRE_V2_CMD_RETPOLINE,
 623         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
 624         SPECTRE_V2_CMD_RETPOLINE_AMD,
 625 };
 626 
 627 enum spectre_v2_user_cmd {
 628         SPECTRE_V2_USER_CMD_NONE,
 629         SPECTRE_V2_USER_CMD_AUTO,
 630         SPECTRE_V2_USER_CMD_FORCE,
 631         SPECTRE_V2_USER_CMD_PRCTL,
 632         SPECTRE_V2_USER_CMD_PRCTL_IBPB,
 633         SPECTRE_V2_USER_CMD_SECCOMP,
 634         SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
 635 };
 636 
 637 static const char * const spectre_v2_user_strings[] = {
 638         [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
 639         [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
 640         [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
 641         [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
 642         [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
 643 };
 644 
 645 static const struct {
 646         const char                      *option;
 647         enum spectre_v2_user_cmd        cmd;
 648         bool                            secure;
 649 } v2_user_options[] __initconst = {
 650         { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
 651         { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
 652         { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
 653         { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
 654         { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
 655         { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
 656         { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
 657 };
 658 
 659 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 660 {
 661         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
 662                 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 663 }
 664 
 665 static enum spectre_v2_user_cmd __init
 666 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 667 {
 668         char arg[20];
 669         int ret, i;
 670 
 671         switch (v2_cmd) {
 672         case SPECTRE_V2_CMD_NONE:
 673                 return SPECTRE_V2_USER_CMD_NONE;
 674         case SPECTRE_V2_CMD_FORCE:
 675                 return SPECTRE_V2_USER_CMD_FORCE;
 676         default:
 677                 break;
 678         }
 679 
 680         ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
 681                                   arg, sizeof(arg));
 682         if (ret < 0)
 683                 return SPECTRE_V2_USER_CMD_AUTO;
 684 
 685         for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
 686                 if (match_option(arg, ret, v2_user_options[i].option)) {
 687                         spec_v2_user_print_cond(v2_user_options[i].option,
 688                                                 v2_user_options[i].secure);
 689                         return v2_user_options[i].cmd;
 690                 }
 691         }
 692 
 693         pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
 694         return SPECTRE_V2_USER_CMD_AUTO;
 695 }
 696 
 697 static void __init
 698 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 699 {
 700         enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
 701         bool smt_possible = IS_ENABLED(CONFIG_SMP);
 702         enum spectre_v2_user_cmd cmd;
 703 
 704         if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
 705                 return;
 706 
 707         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
 708             cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 709                 smt_possible = false;
 710 
 711         cmd = spectre_v2_parse_user_cmdline(v2_cmd);
 712         switch (cmd) {
 713         case SPECTRE_V2_USER_CMD_NONE:
 714                 goto set_mode;
 715         case SPECTRE_V2_USER_CMD_FORCE:
 716                 mode = SPECTRE_V2_USER_STRICT;
 717                 break;
 718         case SPECTRE_V2_USER_CMD_PRCTL:
 719         case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
 720                 mode = SPECTRE_V2_USER_PRCTL;
 721                 break;
 722         case SPECTRE_V2_USER_CMD_AUTO:
 723         case SPECTRE_V2_USER_CMD_SECCOMP:
 724         case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
 725                 if (IS_ENABLED(CONFIG_SECCOMP))
 726                         mode = SPECTRE_V2_USER_SECCOMP;
 727                 else
 728                         mode = SPECTRE_V2_USER_PRCTL;
 729                 break;
 730         }
 731 
 732         /* Initialize Indirect Branch Prediction Barrier */
 733         if (boot_cpu_has(X86_FEATURE_IBPB)) {
 734                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
 735 
 736                 switch (cmd) {
 737                 case SPECTRE_V2_USER_CMD_FORCE:
 738                 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
 739                 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
 740                         static_branch_enable(&switch_mm_always_ibpb);
 741                         break;
 742                 case SPECTRE_V2_USER_CMD_PRCTL:
 743                 case SPECTRE_V2_USER_CMD_AUTO:
 744                 case SPECTRE_V2_USER_CMD_SECCOMP:
 745                         static_branch_enable(&switch_mm_cond_ibpb);
 746                         break;
 747                 default:
 748                         break;
 749                 }
 750 
 751                 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
 752                         static_key_enabled(&switch_mm_always_ibpb) ?
 753                         "always-on" : "conditional");
 754 
 755                 spectre_v2_user_ibpb = mode;
 756         }
 757 
 758         /*
  759          * If enhanced IBRS is enabled or SMT is impossible, STIBP is not
 760          * required.
 761          */
 762         if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
 763                 return;
 764 
 765         /*
 766          * At this point, an STIBP mode other than "off" has been set.
 767          * If STIBP support is not being forced, check if STIBP always-on
 768          * is preferred.
 769          */
 770         if (mode != SPECTRE_V2_USER_STRICT &&
 771             boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
 772                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
 773 
 774         /*
 775          * If STIBP is not available, clear the STIBP mode.
 776          */
 777         if (!boot_cpu_has(X86_FEATURE_STIBP))
 778                 mode = SPECTRE_V2_USER_NONE;
 779 
 780         spectre_v2_user_stibp = mode;
 781 
 782 set_mode:
 783         pr_info("%s\n", spectre_v2_user_strings[mode]);
 784 }
 785 
 786 static const char * const spectre_v2_strings[] = {
 787         [SPECTRE_V2_NONE]                       = "Vulnerable",
 788         [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
 789         [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
 790         [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
 791 };
 792 
 793 static const struct {
 794         const char *option;
 795         enum spectre_v2_mitigation_cmd cmd;
 796         bool secure;
 797 } mitigation_options[] __initconst = {
 798         { "off",                SPECTRE_V2_CMD_NONE,              false },
 799         { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
 800         { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
 801         { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
 802         { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
 803         { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 804 };
 805 
 806 static void __init spec_v2_print_cond(const char *reason, bool secure)
 807 {
 808         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
 809                 pr_info("%s selected on command line.\n", reason);
 810 }
 811 
 812 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 813 {
 814         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 815         char arg[20];
 816         int ret, i;
 817 
 818         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
 819             cpu_mitigations_off())
 820                 return SPECTRE_V2_CMD_NONE;
 821 
 822         ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
 823         if (ret < 0)
 824                 return SPECTRE_V2_CMD_AUTO;
 825 
 826         for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
 827                 if (!match_option(arg, ret, mitigation_options[i].option))
 828                         continue;
 829                 cmd = mitigation_options[i].cmd;
 830                 break;
 831         }
 832 
 833         if (i >= ARRAY_SIZE(mitigation_options)) {
 834                 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
 835                 return SPECTRE_V2_CMD_AUTO;
 836         }
 837 
 838         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
 839              cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
 840              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
 841             !IS_ENABLED(CONFIG_RETPOLINE)) {
 842                 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
 843                 return SPECTRE_V2_CMD_AUTO;
 844         }
 845 
 846         if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
 847             boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
 848             boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
 849                 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
 850                 return SPECTRE_V2_CMD_AUTO;
 851         }
 852 
 853         spec_v2_print_cond(mitigation_options[i].option,
 854                            mitigation_options[i].secure);
 855         return cmd;
 856 }
 857 
 858 static void __init spectre_v2_select_mitigation(void)
 859 {
 860         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
 861         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
 862 
 863         /*
 864          * If the CPU is not affected and the command line mode is NONE or AUTO
 865          * then nothing to do.
 866          */
 867         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
 868             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
 869                 return;
 870 
 871         switch (cmd) {
 872         case SPECTRE_V2_CMD_NONE:
 873                 return;
 874 
 875         case SPECTRE_V2_CMD_FORCE:
 876         case SPECTRE_V2_CMD_AUTO:
 877                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
 878                         mode = SPECTRE_V2_IBRS_ENHANCED;
 879                         /* Force it so VMEXIT will restore correctly */
 880                         x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
 881                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 882                         goto specv2_set_mode;
 883                 }
 884                 if (IS_ENABLED(CONFIG_RETPOLINE))
 885                         goto retpoline_auto;
 886                 break;
 887         case SPECTRE_V2_CMD_RETPOLINE_AMD:
 888                 if (IS_ENABLED(CONFIG_RETPOLINE))
 889                         goto retpoline_amd;
 890                 break;
 891         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
 892                 if (IS_ENABLED(CONFIG_RETPOLINE))
 893                         goto retpoline_generic;
 894                 break;
 895         case SPECTRE_V2_CMD_RETPOLINE:
 896                 if (IS_ENABLED(CONFIG_RETPOLINE))
 897                         goto retpoline_auto;
 898                 break;
 899         }
 900         pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
 901         return;
 902 
 903 retpoline_auto:
 904         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 905             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 906         retpoline_amd:
 907                 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
 908                         pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 909                         goto retpoline_generic;
 910                 }
 911                 mode = SPECTRE_V2_RETPOLINE_AMD;
 912                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
 913                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 914         } else {
 915         retpoline_generic:
 916                 mode = SPECTRE_V2_RETPOLINE_GENERIC;
 917                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 918         }
 919 
 920 specv2_set_mode:
 921         spectre_v2_enabled = mode;
 922         pr_info("%s\n", spectre_v2_strings[mode]);
 923 
 924         /*
 925          * If spectre v2 protection has been enabled, unconditionally fill
 926          * RSB during a context switch; this protects against two independent
 927          * issues:
 928          *
 929          *      - RSB underflow (and switch to BTB) on Skylake+
 930          *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
 931          */
 932         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 933         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 934 
 935         /*
 936          * Retpoline means the kernel is safe because it has no indirect
 937          * branches. Enhanced IBRS protects firmware too, so, enable restricted
 938          * speculation around firmware calls only when Enhanced IBRS isn't
 939          * supported.
 940          *
 941          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
 942          * the user might select retpoline on the kernel command line and if
  943          * the CPU supports Enhanced IBRS, the kernel might unintentionally not
 944          * enable IBRS around firmware calls.
 945          */
 946         if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
 947                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 948                 pr_info("Enabling Restricted Speculation for firmware calls\n");
 949         }
 950 
 951         /* Set up IBPB and STIBP depending on the general spectre V2 command */
 952         spectre_v2_user_select_mitigation(cmd);
 953 }
 954 
 955 static void update_stibp_msr(void * __unused)
 956 {
 957         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
 958 }
 959 
 960 /* Update x86_spec_ctrl_base in case SMT state changed. */
 961 static void update_stibp_strict(void)
 962 {
 963         u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
 964 
 965         if (sched_smt_active())
 966                 mask |= SPEC_CTRL_STIBP;
 967 
 968         if (mask == x86_spec_ctrl_base)
 969                 return;
 970 
 971         pr_info("Update user space SMT mitigation: STIBP %s\n",
 972                 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
 973         x86_spec_ctrl_base = mask;
 974         on_each_cpu(update_stibp_msr, NULL, 1);
 975 }
 976 
 977 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
 978 static void update_indir_branch_cond(void)
 979 {
 980         if (sched_smt_active())
 981                 static_branch_enable(&switch_to_cond_stibp);
 982         else
 983                 static_branch_disable(&switch_to_cond_stibp);
 984 }
 985 
 986 #undef pr_fmt
 987 #define pr_fmt(fmt) fmt
 988 
 989 /* Update the static key controlling the MDS CPU buffer clear in idle */
 990 static void update_mds_branch_idle(void)
 991 {
 992         /*
 993          * Enable the idle clearing if SMT is active on CPUs which are
 994          * affected only by MSBDS and not any other MDS variant.
 995          *
 996          * The other variants cannot be mitigated when SMT is enabled, so
 997          * clearing the buffers on idle just to prevent the Store Buffer
 998          * repartitioning leak would be a window dressing exercise.
 999          */
1000         if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1001                 return;
1002 
1003         if (sched_smt_active())
1004                 static_branch_enable(&mds_idle_clear);
1005         else
1006                 static_branch_disable(&mds_idle_clear);
1007 }
1008 
1009 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1010 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1011 
1012 void cpu_bugs_smt_update(void)
1013 {
1014         mutex_lock(&spec_ctrl_mutex);
1015 
1016         switch (spectre_v2_user_stibp) {
1017         case SPECTRE_V2_USER_NONE:
1018                 break;
1019         case SPECTRE_V2_USER_STRICT:
1020         case SPECTRE_V2_USER_STRICT_PREFERRED:
1021                 update_stibp_strict();
1022                 break;
1023         case SPECTRE_V2_USER_PRCTL:
1024         case SPECTRE_V2_USER_SECCOMP:
1025                 update_indir_branch_cond();
1026                 break;
1027         }
1028 
1029         switch (mds_mitigation) {
1030         case MDS_MITIGATION_FULL:
1031         case MDS_MITIGATION_VMWERV:
1032                 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1033                         pr_warn_once(MDS_MSG_SMT);
1034                 update_mds_branch_idle();
1035                 break;
1036         case MDS_MITIGATION_OFF:
1037                 break;
1038         }
1039 
1040         switch (taa_mitigation) {
1041         case TAA_MITIGATION_VERW:
1042         case TAA_MITIGATION_UCODE_NEEDED:
1043                 if (sched_smt_active())
1044                         pr_warn_once(TAA_MSG_SMT);
1045                 break;
1046         case TAA_MITIGATION_TSX_DISABLED:
1047         case TAA_MITIGATION_OFF:
1048                 break;
1049         }
1050 
1051         mutex_unlock(&spec_ctrl_mutex);
1052 }
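
      /*
       * cpu_bugs_smt_update() is invoked via arch_smt_update() whenever the
       * SMT state changes, and once from check_bugs() above after the
       * boot-time mitigation selection.
       */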
1053 
1054 #undef pr_fmt
1055 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
1056 
1057 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1058 
1059 /* The kernel command line selection */
1060 enum ssb_mitigation_cmd {
1061         SPEC_STORE_BYPASS_CMD_NONE,
1062         SPEC_STORE_BYPASS_CMD_AUTO,
1063         SPEC_STORE_BYPASS_CMD_ON,
1064         SPEC_STORE_BYPASS_CMD_PRCTL,
1065         SPEC_STORE_BYPASS_CMD_SECCOMP,
1066 };
1067 
1068 static const char * const ssb_strings[] = {
1069         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
1070         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
1071         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
1072         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1073 };
1074 
1075 static const struct {
1076         const char *option;
1077         enum ssb_mitigation_cmd cmd;
1078 } ssb_mitigation_options[]  __initconst = {
1079         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1080         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1081         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1082         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1083         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1084 };
1085 
1086 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1087 {
1088         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1089         char arg[20];
1090         int ret, i;
1091 
1092         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1093             cpu_mitigations_off()) {
1094                 return SPEC_STORE_BYPASS_CMD_NONE;
1095         } else {
1096                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1097                                           arg, sizeof(arg));
1098                 if (ret < 0)
1099                         return SPEC_STORE_BYPASS_CMD_AUTO;
1100 
1101                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1102                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1103                                 continue;
1104 
1105                         cmd = ssb_mitigation_options[i].cmd;
1106                         break;
1107                 }
1108 
1109                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1110                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1111                         return SPEC_STORE_BYPASS_CMD_AUTO;
1112                 }
1113         }
1114 
1115         return cmd;
1116 }
1117 
1118 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1119 {
1120         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1121         enum ssb_mitigation_cmd cmd;
1122 
1123         if (!boot_cpu_has(X86_FEATURE_SSBD))
1124                 return mode;
1125 
1126         cmd = ssb_parse_cmdline();
1127         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1128             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1129              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1130                 return mode;
1131 
1132         switch (cmd) {
1133         case SPEC_STORE_BYPASS_CMD_AUTO:
1134         case SPEC_STORE_BYPASS_CMD_SECCOMP:
1135                 /*
1136                  * Choose prctl+seccomp as the default mode if seccomp is
1137                  * enabled.
1138                  */
1139                 if (IS_ENABLED(CONFIG_SECCOMP))
1140                         mode = SPEC_STORE_BYPASS_SECCOMP;
1141                 else
1142                         mode = SPEC_STORE_BYPASS_PRCTL;
1143                 break;
1144         case SPEC_STORE_BYPASS_CMD_ON:
1145                 mode = SPEC_STORE_BYPASS_DISABLE;
1146                 break;
1147         case SPEC_STORE_BYPASS_CMD_PRCTL:
1148                 mode = SPEC_STORE_BYPASS_PRCTL;
1149                 break;
1150         case SPEC_STORE_BYPASS_CMD_NONE:
1151                 break;
1152         }
1153 
1154         /*
1155          * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
1156          * bit in the mask to allow guests to use the mitigation even in the
1157          * case where the host does not enable it.
1158          */
1159         if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1160             static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1161                 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1162         }
1163 
1164         /*
1165          * We have three CPU feature flags that are in play here:
1166          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1167          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1168          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1169          */
1170         if (mode == SPEC_STORE_BYPASS_DISABLE) {
1171                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1172                 /*
1173                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1174                  * use a completely different MSR and bit dependent on family.
1175                  */
1176                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1177                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1178                         x86_amd_ssb_disable();
1179                 } else {
1180                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1181                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1182                 }
1183         }
1184 
1185         return mode;
1186 }
1187 
1188 static void ssb_select_mitigation(void)
1189 {
1190         ssb_mode = __ssb_select_mitigation();
1191 
1192         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1193                 pr_info("%s\n", ssb_strings[ssb_mode]);
1194 }
1195 
1196 #undef pr_fmt
1197 #define pr_fmt(fmt)     "Speculation prctl: " fmt
1198 
1199 static void task_update_spec_tif(struct task_struct *tsk)
1200 {
1201         /* Force the update of the real TIF bits */
1202         set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1203 
1204         /*
1205          * Immediately update the speculation control MSRs for the current
1206          * task, but for a non-current task delay setting the CPU
1207          * mitigation until it is scheduled next.
1208          *
1209          * This can only happen for SECCOMP mitigation. For PRCTL it's
1210          * always the current task.
1211          */
1212         if (tsk == current)
1213                 speculation_ctrl_update_current();
1214 }
1215 
1216 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1217 {
1218         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1219             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1220                 return -ENXIO;
1221 
1222         switch (ctrl) {
1223         case PR_SPEC_ENABLE:
1224                 /* If speculation is force disabled, enable is not allowed */
1225                 if (task_spec_ssb_force_disable(task))
1226                         return -EPERM;
1227                 task_clear_spec_ssb_disable(task);
1228                 task_clear_spec_ssb_noexec(task);
1229                 task_update_spec_tif(task);
1230                 break;
1231         case PR_SPEC_DISABLE:
1232                 task_set_spec_ssb_disable(task);
1233                 task_clear_spec_ssb_noexec(task);
1234                 task_update_spec_tif(task);
1235                 break;
1236         case PR_SPEC_FORCE_DISABLE:
1237                 task_set_spec_ssb_disable(task);
1238                 task_set_spec_ssb_force_disable(task);
1239                 task_clear_spec_ssb_noexec(task);
1240                 task_update_spec_tif(task);
1241                 break;
1242         case PR_SPEC_DISABLE_NOEXEC:
1243                 if (task_spec_ssb_force_disable(task))
1244                         return -EPERM;
1245                 task_set_spec_ssb_disable(task);
1246                 task_set_spec_ssb_noexec(task);
1247                 task_update_spec_tif(task);
1248                 break;
1249         default:
1250                 return -ERANGE;
1251         }
1252         return 0;
1253 }
1254 
1255 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1256 {
1257         switch (ctrl) {
1258         case PR_SPEC_ENABLE:
1259                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1260                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1261                         return 0;
 1262                 /*
 1263                  * Indirect branch speculation is always disabled in strict
 1264                  * mode. It cannot be enabled either if it was force-disabled
 1265                  * by a previous prctl call.
 1266                  */
 1267 
1268                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1269                     spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1270                     spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
1271                     task_spec_ib_force_disable(task))
1272                         return -EPERM;
1273                 task_clear_spec_ib_disable(task);
1274                 task_update_spec_tif(task);
1275                 break;
1276         case PR_SPEC_DISABLE:
1277         case PR_SPEC_FORCE_DISABLE:
1278                 /*
1279                  * Indirect branch speculation is always allowed when
1280                  * mitigation is force disabled.
1281                  */
1282                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1283                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1284                         return -EPERM;
1285                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1286                     spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1287                     spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1288                         return 0;
1289                 task_set_spec_ib_disable(task);
1290                 if (ctrl == PR_SPEC_FORCE_DISABLE)
1291                         task_set_spec_ib_force_disable(task);
1292                 task_update_spec_tif(task);
1293                 break;
1294         default:
1295                 return -ERANGE;
1296         }
1297         return 0;
1298 }
1299 
1300 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1301                              unsigned long ctrl)
1302 {
1303         switch (which) {
1304         case PR_SPEC_STORE_BYPASS:
1305                 return ssb_prctl_set(task, ctrl);
1306         case PR_SPEC_INDIRECT_BRANCH:
1307                 return ib_prctl_set(task, ctrl);
1308         default:
1309                 return -ENODEV;
1310         }
1311 }
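
      /*
       * A minimal userspace sketch of the interface implemented above
       * (assumes the PR_* constants from <linux/prctl.h>):
       *
       *      #include <sys/prctl.h>
       *      #include <linux/prctl.h>
       *
       *      // Opt the calling task out of Speculative Store Bypass.
       *      if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
       *                PR_SPEC_DISABLE, 0, 0))
       *              perror("PR_SET_SPECULATION_CTRL");
       */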
1312 
1313 #ifdef CONFIG_SECCOMP
1314 void arch_seccomp_spec_mitigate(struct task_struct *task)
1315 {
1316         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1317                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1318         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1319             spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1320                 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1321 }
1322 #endif
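
      /*
       * The seccomp core calls arch_seccomp_spec_mitigate() when a filter is
       * installed, unless the filter was loaded with
       * SECCOMP_FILTER_FLAG_SPEC_ALLOW, so filtered tasks get the
       * force-disable treatment above by default in the SECCOMP modes.
       */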
1323 
1324 static int ssb_prctl_get(struct task_struct *task)
1325 {
1326         switch (ssb_mode) {
1327         case SPEC_STORE_BYPASS_DISABLE:
1328                 return PR_SPEC_DISABLE;
1329         case SPEC_STORE_BYPASS_SECCOMP:
1330         case SPEC_STORE_BYPASS_PRCTL:
1331                 if (task_spec_ssb_force_disable(task))
1332                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1333                 if (task_spec_ssb_noexec(task))
1334                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
1335                 if (task_spec_ssb_disable(task))
1336                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1337                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1338         default:
1339                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1340                         return PR_SPEC_ENABLE;
1341                 return PR_SPEC_NOT_AFFECTED;
1342         }
1343 }
1344 
1345 static int ib_prctl_get(struct task_struct *task)
1346 {
1347         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1348                 return PR_SPEC_NOT_AFFECTED;
1349 
1350         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1351             spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1352                 return PR_SPEC_ENABLE;
1353         else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1354             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1355             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1356                 return PR_SPEC_DISABLE;
1357         else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1358             spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1359             spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1360             spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
1361                 if (task_spec_ib_force_disable(task))
1362                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1363                 if (task_spec_ib_disable(task))
1364                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1365                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1366         } else
1367                 return PR_SPEC_NOT_AFFECTED;
1368 }
1369 
1370 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1371 {
1372         switch (which) {
1373         case PR_SPEC_STORE_BYPASS:
1374                 return ssb_prctl_get(task);
1375         case PR_SPEC_INDIRECT_BRANCH:
1376                 return ib_prctl_get(task);
1377         default:
1378                 return -ENODEV;
1379         }
1380 }
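
     /*
      * Illustrative sketch of the read side, under the same assumptions as
      * the prctl(2) example above: the return value is a bitmask, with
      * PR_SPEC_PRCTL set when the state is controllable per task.
      *
      *   int state = prctl(PR_GET_SPECULATION_CTRL,
      *                     PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
      *
      *   if (state >= 0 && (state & PR_SPEC_PRCTL) &&
      *       (state & PR_SPEC_DISABLE))
      *           ;  // IB speculation is disabled for this task
      */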
1381 
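     /*
      * Called during secondary CPU bring-up (identify_secondary_cpu()) so
      * that APs start with the same SPEC_CTRL base value and SSBD
      * selection as the boot CPU.
      */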
1382 void x86_spec_ctrl_setup_ap(void)
1383 {
1384         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1385                 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1386 
1387         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1388                 x86_amd_ssb_disable();
1389 }
1390 
1391 bool itlb_multihit_kvm_mitigation;
1392 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1393 
1394 #undef pr_fmt
1395 #define pr_fmt(fmt)     "L1TF: " fmt
1396 
1397 /* Default mitigation for L1TF-affected CPUs */
1398 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1399 #if IS_ENABLED(CONFIG_KVM_INTEL)
1400 EXPORT_SYMBOL_GPL(l1tf_mitigation);
1401 #endif
1402 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1403 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1404 
1405 /*
1406  * These CPUs all support a 44-bit physical address space internally in
1407  * the cache, but CPUID can report a smaller number of physical address
1408  * bits.
1409  *
1410  * The L1TF mitigation uses the topmost address bit for the inversion
1411  * of non-present PTEs. When the installed memory reaches into that
1412  * topmost bit due to memory holes, as observed on machines which
1413  * report 36 physical address bits and have 32G RAM installed, the
1414  * mitigation range check in l1tf_select_mitigation() triggers. This is
1415  * a false positive because the mitigation is still possible, since the
1416  * cache uses 44 bits internally. Use the cache bits instead of the
1417  * reported bits and raise them to 44 on affected machines if lower.
1418  */
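     /*
      * Worked numbers for the above (derived from l1tf_pfn_limit(), which
      * permits RAM only below 1ULL << (x86_cache_bits - 1)): with 36
      * reported bits that limit is 1ULL << 35 = 32G, which 32G of RAM plus
      * remapped memory holes already crosses; with 44 cache bits it is
      * 1ULL << 43 = 8T, comfortably out of reach.
      */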
1419 static void override_cache_bits(struct cpuinfo_x86 *c)
1420 {
1421         if (c->x86 != 6)
1422                 return;
1423 
1424         switch (c->x86_model) {
1425         case INTEL_FAM6_NEHALEM:
1426         case INTEL_FAM6_WESTMERE:
1427         case INTEL_FAM6_SANDYBRIDGE:
1428         case INTEL_FAM6_IVYBRIDGE:
1429         case INTEL_FAM6_HASWELL:
1430         case INTEL_FAM6_HASWELL_L:
1431         case INTEL_FAM6_HASWELL_G:
1432         case INTEL_FAM6_BROADWELL:
1433         case INTEL_FAM6_BROADWELL_G:
1434         case INTEL_FAM6_SKYLAKE_L:
1435         case INTEL_FAM6_SKYLAKE:
1436         case INTEL_FAM6_KABYLAKE_L:
1437         case INTEL_FAM6_KABYLAKE:
1438                 if (c->x86_cache_bits < 44)
1439                         c->x86_cache_bits = 44;
1440                 break;
1441         }
1442 }
1443 
1444 static void __init l1tf_select_mitigation(void)
1445 {
1446         u64 half_pa;
1447 
1448         if (!boot_cpu_has_bug(X86_BUG_L1TF))
1449                 return;
1450 
1451         if (cpu_mitigations_off())
1452                 l1tf_mitigation = L1TF_MITIGATION_OFF;
1453         else if (cpu_mitigations_auto_nosmt())
1454                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1455 
1456         override_cache_bits(&boot_cpu_data);
1457 
1458         switch (l1tf_mitigation) {
1459         case L1TF_MITIGATION_OFF:
1460         case L1TF_MITIGATION_FLUSH_NOWARN:
1461         case L1TF_MITIGATION_FLUSH:
1462                 break;
1463         case L1TF_MITIGATION_FLUSH_NOSMT:
1464         case L1TF_MITIGATION_FULL:
1465                 cpu_smt_disable(false);
1466                 break;
1467         case L1TF_MITIGATION_FULL_FORCE:
1468                 cpu_smt_disable(true);
1469                 break;
1470         }
1471 
1472 #if CONFIG_PGTABLE_LEVELS == 2
1473         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1474         return;
1475 #endif
1476 
1477         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1478         if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1479                         e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1480                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1481                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1482                                 half_pa);
1483                 pr_info("However, doing so will make a part of your RAM unusable.\n");
1484                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1485                 return;
1486         }
1487 
1488         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1489 }
1490 
1491 static int __init l1tf_cmdline(char *str)
1492 {
1493         if (!boot_cpu_has_bug(X86_BUG_L1TF))
1494                 return 0;
1495 
1496         if (!str)
1497                 return -EINVAL;
1498 
1499         if (!strcmp(str, "off"))
1500                 l1tf_mitigation = L1TF_MITIGATION_OFF;
1501         else if (!strcmp(str, "flush,nowarn"))
1502                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1503         else if (!strcmp(str, "flush"))
1504                 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1505         else if (!strcmp(str, "flush,nosmt"))
1506                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1507         else if (!strcmp(str, "full"))
1508                 l1tf_mitigation = L1TF_MITIGATION_FULL;
1509         else if (!strcmp(str, "full,force"))
1510                 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1511 
1512         return 0;
1513 }
1514 early_param("l1tf", l1tf_cmdline);
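
     /*
      * Boot command line usage, matching the strings parsed above:
      *
      *   l1tf={off|flush,nowarn|flush|flush,nosmt|full|full,force}
      *
      * e.g. "l1tf=full,force" selects L1TF_MITIGATION_FULL_FORCE, which
      * also disables SMT irrevocably via cpu_smt_disable(true) in
      * l1tf_select_mitigation().
      */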
1515 
1516 #undef pr_fmt
1517 #define pr_fmt(fmt) fmt
1518 
1519 #ifdef CONFIG_SYSFS
1520 
1521 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1522 
1523 #if IS_ENABLED(CONFIG_KVM_INTEL)
1524 static const char * const l1tf_vmx_states[] = {
1525         [VMENTER_L1D_FLUSH_AUTO]                = "auto",
1526         [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
1527         [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
1528         [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
1529         [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
1530         [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
1531 };
1532 
1533 static ssize_t l1tf_show_state(char *buf)
1534 {
1535         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1536                 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1537 
1538         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
1539             (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
1540              sched_smt_active())) {
1541                 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
1542                                l1tf_vmx_states[l1tf_vmx_mitigation]);
1543         }
1544 
1545         return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
1546                        l1tf_vmx_states[l1tf_vmx_mitigation],
1547                        sched_smt_active() ? "vulnerable" : "disabled");
1548 }
1549 
1550 static ssize_t itlb_multihit_show_state(char *buf)
1551 {
1552         if (itlb_multihit_kvm_mitigation)
1553                 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
1554         else
1555                 return sprintf(buf, "KVM: Vulnerable\n");
1556 }
1557 #else
1558 static ssize_t l1tf_show_state(char *buf)
1559 {
1560         return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1561 }
1562 
1563 static ssize_t itlb_multihit_show_state(char *buf)
1564 {
1565         return sprintf(buf, "Processor vulnerable\n");
1566 }
1567 #endif
1568 
1569 static ssize_t mds_show_state(char *buf)
1570 {
1571         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1572                 return sprintf(buf, "%s; SMT Host state unknown\n",
1573                                mds_strings[mds_mitigation]);
1574         }
1575 
1576         if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
1577                 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1578                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
1579                                 sched_smt_active() ? "mitigated" : "disabled"));
1580         }
1581 
1582         return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1583                        sched_smt_active() ? "vulnerable" : "disabled");
1584 }
1585 
1586 static ssize_t tsx_async_abort_show_state(char *buf)
1587 {
1588         if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
1589             (taa_mitigation == TAA_MITIGATION_OFF))
1590                 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
1591 
1592         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1593                 return sprintf(buf, "%s; SMT Host state unknown\n",
1594                                taa_strings[taa_mitigation]);
1595         }
1596 
1597         return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
1598                        sched_smt_active() ? "vulnerable" : "disabled");
1599 }
1600 
1601 static char *stibp_state(void)
1602 {
1603         if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1604                 return "";
1605 
1606         switch (spectre_v2_user_stibp) {
1607         case SPECTRE_V2_USER_NONE:
1608                 return ", STIBP: disabled";
1609         case SPECTRE_V2_USER_STRICT:
1610                 return ", STIBP: forced";
1611         case SPECTRE_V2_USER_STRICT_PREFERRED:
1612                 return ", STIBP: always-on";
1613         case SPECTRE_V2_USER_PRCTL:
1614         case SPECTRE_V2_USER_SECCOMP:
1615                 if (static_key_enabled(&switch_to_cond_stibp))
1616                         return ", STIBP: conditional";
1617         }
1618         return "";
1619 }
1620 
1621 static char *ibpb_state(void)
1622 {
1623         if (boot_cpu_has(X86_FEATURE_IBPB)) {
1624                 if (static_key_enabled(&switch_mm_always_ibpb))
1625                         return ", IBPB: always-on";
1626                 if (static_key_enabled(&switch_mm_cond_ibpb))
1627                         return ", IBPB: conditional";
1628                 return ", IBPB: disabled";
1629         }
1630         return "";
1631 }
1632 
1633 static ssize_t srbds_show_state(char *buf)
1634 {
1635         return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
1636 }
1637 
1638 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1639                                char *buf, unsigned int bug)
1640 {
1641         if (!boot_cpu_has_bug(bug))
1642                 return sprintf(buf, "Not affected\n");
1643 
1644         switch (bug) {
1645         case X86_BUG_CPU_MELTDOWN:
1646                 if (boot_cpu_has(X86_FEATURE_PTI))
1647                         return sprintf(buf, "Mitigation: PTI\n");
1648 
1649                 if (hypervisor_is_type(X86_HYPER_XEN_PV))
1650                         return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
1651 
1652                 break;
1653 
1654         case X86_BUG_SPECTRE_V1:
1655                 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1656 
1657         case X86_BUG_SPECTRE_V2:
1658                 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1659                                ibpb_state(),
1660                                boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1661                                stibp_state(),
1662                                boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1663                                spectre_v2_module_string());
1664 
1665         case X86_BUG_SPEC_STORE_BYPASS:
1666                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1667 
1668         case X86_BUG_L1TF:
1669                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
1670                         return l1tf_show_state(buf);
1671                 break;
1672 
1673         case X86_BUG_MDS:
1674                 return mds_show_state(buf);
1675 
1676         case X86_BUG_TAA:
1677                 return tsx_async_abort_show_state(buf);
1678 
1679         case X86_BUG_ITLB_MULTIHIT:
1680                 return itlb_multihit_show_state(buf);
1681 
1682         case X86_BUG_SRBDS:
1683                 return srbds_show_state(buf);
1684 
1685         default:
1686                 break;
1687         }
1688 
1689         return sprintf(buf, "Vulnerable\n");
1690 }
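
     /*
      * The cpu_show_*() wrappers below back the files under
      * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1,
      * spectre_v2, spec_store_bypass, l1tf, mds, tsx_async_abort,
      * itlb_multihit, srbds).  Sample read; the exact string depends on
      * the mitigations assembled by cpu_show_common() above:
      *
      *   $ cat /sys/devices/system/cpu/vulnerabilities/mds
      *   Mitigation: Clear CPU buffers; SMT vulnerable
      */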
1691 
1692 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1693 {
1694         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1695 }
1696 
1697 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1698 {
1699         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1700 }
1701 
1702 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1703 {
1704         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1705 }
1706 
1707 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1708 {
1709         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1710 }
1711 
1712 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1713 {
1714         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1715 }
1716 
1717 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
1718 {
1719         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
1720 }
1721 
1722 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
1723 {
1724         return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
1725 }
1726 
1727 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
1728 {
1729         return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
1730 }
1731 
1732 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
1733 {
1734         return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
1735 }
1736 #endif
