bugs.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1994 Linus Torvalds
  4. *
  5. * Cyrix stuff, June 1998 by:
  6. * - Rafael R. Reilova (moved everything from head.S),
  7. * <rreilova@ececs.uc.edu>
  8. * - Channing Corn (tests & fixes),
  9. * - Andrew D. Balsa (code cleanup).
  10. */
  11. #include <linux/init.h>
  12. #include <linux/utsname.h>
  13. #include <linux/cpu.h>
  14. #include <linux/module.h>
  15. #include <linux/nospec.h>
  16. #include <linux/prctl.h>
  17. #include <linux/sched/smt.h>
  18. #include <asm/spec-ctrl.h>
  19. #include <asm/cmdline.h>
  20. #include <asm/bugs.h>
  21. #include <asm/processor.h>
  22. #include <asm/processor-flags.h>
  23. #include <asm/fpu/internal.h>
  24. #include <asm/msr.h>
  25. #include <asm/vmx.h>
  26. #include <asm/paravirt.h>
  27. #include <asm/alternative.h>
  28. #include <asm/pgtable.h>
  29. #include <asm/set_memory.h>
  30. #include <asm/intel-family.h>
  31. #include <asm/e820/api.h>
  32. #include <asm/hypervisor.h>
  33. #include "cpu.h"
  34. static void __init spectre_v1_select_mitigation(void);
  35. static void __init spectre_v2_select_mitigation(void);
  36. static void __init ssb_select_mitigation(void);
  37. static void __init l1tf_select_mitigation(void);
  38. static void __init mds_select_mitigation(void);
  39. static void __init mds_print_mitigation(void);
  40. static void __init taa_select_mitigation(void);
  41. static void __init srbds_select_mitigation(void);
  42. /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
  43. u64 x86_spec_ctrl_base;
  44. EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  45. static DEFINE_MUTEX(spec_ctrl_mutex);
  46. /*
  47. * The vendor and possibly platform specific bits which can be modified in
  48. * x86_spec_ctrl_base.
  49. */
  50. static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
  51. /*
  52. * AMD specific MSR info for Speculative Store Bypass control.
  53. * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
  54. */
  55. u64 __ro_after_init x86_amd_ls_cfg_base;
  56. u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
  57. /* Control conditional STIBP in switch_to() */
  58. DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
  59. /* Control conditional IBPB in switch_mm() */
  60. DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
  61. /* Control unconditional IBPB in switch_mm() */
  62. DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
  63. /* Control MDS CPU buffer clear before returning to user space */
  64. DEFINE_STATIC_KEY_FALSE(mds_user_clear);
  65. EXPORT_SYMBOL_GPL(mds_user_clear);
  66. /* Control MDS CPU buffer clear before idling (halt, mwait) */
  67. DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
  68. EXPORT_SYMBOL_GPL(mds_idle_clear);
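/*
 * Boot-time mitigation selection: identify the boot CPU, read the base
 * SPEC_CTRL value, pick the mitigation for each speculation issue and only
 * then patch the alternative instructions that depend on those choices.
 */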
  69. void __init check_bugs(void)
  70. {
  71. identify_boot_cpu();
  72. /*
  73. * identify_boot_cpu() initialized SMT support information, so let the
  74. * core code know.
  75. */
  76. cpu_smt_check_topology();
  77. if (!IS_ENABLED(CONFIG_SMP)) {
  78. pr_info("CPU: ");
  79. print_cpu_info(&boot_cpu_data);
  80. }
  81. /*
  82. * Read the SPEC_CTRL MSR to account for reserved bits which may
  83. * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
  84. * init code as it is not enumerated and depends on the family.
  85. */
  86. if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
  87. rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
  88. /* Allow STIBP in MSR_SPEC_CTRL if supported */
  89. if (boot_cpu_has(X86_FEATURE_STIBP))
  90. x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
  91. /* Select the proper CPU mitigations before patching alternatives: */
  92. spectre_v1_select_mitigation();
  93. spectre_v2_select_mitigation();
  94. ssb_select_mitigation();
  95. l1tf_select_mitigation();
  96. mds_select_mitigation();
  97. taa_select_mitigation();
  98. srbds_select_mitigation();
  99. /*
  100. * As MDS and TAA mitigations are inter-related, defer printing the MDS
  101. * mitigation until after TAA mitigation selection is done.
  102. */
  103. mds_print_mitigation();
  104. arch_smt_update();
  105. #ifdef CONFIG_X86_32
  106. /*
  107. * Check whether we are able to run this kernel safely on SMP.
  108. *
  109. * - i386 is no longer supported.
  110. * - In order to run on anything without a TSC, we need to be
  111. * compiled for an i486.
  112. */
  113. if (boot_cpu_data.x86 < 4)
  114. panic("Kernel requires i486+ for 'invlpg' and other features");
  115. init_utsname()->machine[1] =
  116. '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
  117. alternative_instructions();
  118. fpu__init_check_bugs();
  119. #else /* CONFIG_X86_64 */
  120. alternative_instructions();
  121. /*
  122. * Make sure the first 2MB area is not mapped by huge pages.
  123. * There are typically fixed-size MTRRs in there, and overlapping
  124. * MTRRs into large pages causes slowdowns.
  125. *
  126. * Right now we don't do that with gbpages because there seems
  127. * very little benefit for that case.
  128. */
  129. if (!direct_gbpages)
  130. set_memory_4k((unsigned long)__va(0), 1);
  131. #endif
  132. }
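/*
 * x86_virt_spec_ctrl - reconcile host and guest speculation control state
 * @guest_spec_ctrl:      the guest's MSR_IA32_SPEC_CTRL value
 * @guest_virt_spec_ctrl: the guest's virtual SPEC_CTRL value (SSBD only)
 * @setguest:             true when switching to the guest's values,
 *                        false when restoring the host's values
 */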
  133. void
  134. x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
  135. {
  136. u64 msrval, guestval, hostval = x86_spec_ctrl_base;
  137. struct thread_info *ti = current_thread_info();
  138. /* Is MSR_SPEC_CTRL implemented ? */
  139. if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
  140. /*
  141. * Restrict guest_spec_ctrl to supported values. Clear the
  142. * modifiable bits in the host base value and OR in the
  143. * modifiable bits from the guest value.
  144. */
  145. guestval = hostval & ~x86_spec_ctrl_mask;
  146. guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
  147. /* SSBD controlled in MSR_SPEC_CTRL */
  148. if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
  149. static_cpu_has(X86_FEATURE_AMD_SSBD))
  150. hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
  151. /* Conditional STIBP enabled? */
  152. if (static_branch_unlikely(&switch_to_cond_stibp))
  153. hostval |= stibp_tif_to_spec_ctrl(ti->flags);
  154. if (hostval != guestval) {
  155. msrval = setguest ? guestval : hostval;
  156. wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
  157. }
  158. }
  159. /*
  160. * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
  161. * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
  162. */
  163. if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
  164. !static_cpu_has(X86_FEATURE_VIRT_SSBD))
  165. return;
  166. /*
  167. * If the host has SSBD mitigation enabled, force it in the host's
  168. * virtual MSR value. If it's not permanently enabled, evaluate
  169. * current's TIF_SSBD thread flag.
  170. */
  171. if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
  172. hostval = SPEC_CTRL_SSBD;
  173. else
  174. hostval = ssbd_tif_to_spec_ctrl(ti->flags);
  175. /* Sanitize the guest value */
  176. guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
  177. if (hostval != guestval) {
  178. unsigned long tif;
  179. tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
  180. ssbd_spec_ctrl_to_tif(hostval);
  181. speculation_ctrl_update(tif);
  182. }
  183. }
  184. EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
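/*
 * Engage Speculative Store Bypass Disable on AMD CPUs that lack the
 * SPEC_CTRL based control, using either the virtualized MSR or LS_CFG.
 */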
  185. static void x86_amd_ssb_disable(void)
  186. {
  187. u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
  188. if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
  189. wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
  190. else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
  191. wrmsrl(MSR_AMD64_LS_CFG, msrval);
  192. }
  193. #undef pr_fmt
  194. #define pr_fmt(fmt) "MDS: " fmt
  195. /* Default mitigation for MDS-affected CPUs */
  196. static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
  197. static bool mds_nosmt __ro_after_init = false;
  198. static const char * const mds_strings[] = {
  199. [MDS_MITIGATION_OFF] = "Vulnerable",
  200. [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
  201. [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
  202. };
  203. static void __init mds_select_mitigation(void)
  204. {
  205. if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
  206. mds_mitigation = MDS_MITIGATION_OFF;
  207. return;
  208. }
  209. if (mds_mitigation == MDS_MITIGATION_FULL) {
  210. if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
  211. mds_mitigation = MDS_MITIGATION_VMWERV;
  212. static_branch_enable(&mds_user_clear);
  213. if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
  214. (mds_nosmt || cpu_mitigations_auto_nosmt()))
  215. cpu_smt_disable(false);
  216. }
  217. }
  218. static void __init mds_print_mitigation(void)
  219. {
  220. if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
  221. return;
  222. pr_info("%s\n", mds_strings[mds_mitigation]);
  223. }
  224. static int __init mds_cmdline(char *str)
  225. {
  226. if (!boot_cpu_has_bug(X86_BUG_MDS))
  227. return 0;
  228. if (!str)
  229. return -EINVAL;
  230. if (!strcmp(str, "off"))
  231. mds_mitigation = MDS_MITIGATION_OFF;
  232. else if (!strcmp(str, "full"))
  233. mds_mitigation = MDS_MITIGATION_FULL;
  234. else if (!strcmp(str, "full,nosmt")) {
  235. mds_mitigation = MDS_MITIGATION_FULL;
  236. mds_nosmt = true;
  237. }
  238. return 0;
  239. }
  240. early_param("mds", mds_cmdline);
  241. #undef pr_fmt
  242. #define pr_fmt(fmt) "TAA: " fmt
  243. /* Default mitigation for TAA-affected CPUs */
  244. static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
  245. static bool taa_nosmt __ro_after_init;
  246. static const char * const taa_strings[] = {
  247. [TAA_MITIGATION_OFF] = "Vulnerable",
  248. [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
  249. [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
  250. [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
  251. };
  252. static void __init taa_select_mitigation(void)
  253. {
  254. u64 ia32_cap;
  255. if (!boot_cpu_has_bug(X86_BUG_TAA)) {
  256. taa_mitigation = TAA_MITIGATION_OFF;
  257. return;
  258. }
  259. /* TSX previously disabled by tsx=off */
  260. if (!boot_cpu_has(X86_FEATURE_RTM)) {
  261. taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
  262. goto out;
  263. }
  264. if (cpu_mitigations_off()) {
  265. taa_mitigation = TAA_MITIGATION_OFF;
  266. return;
  267. }
  268. /*
  269. * TAA mitigation via VERW is turned off if both
  270. * tsx_async_abort=off and mds=off are specified.
  271. */
  272. if (taa_mitigation == TAA_MITIGATION_OFF &&
  273. mds_mitigation == MDS_MITIGATION_OFF)
  274. goto out;
  275. if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
  276. taa_mitigation = TAA_MITIGATION_VERW;
  277. else
  278. taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
  279. /*
  280. * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
  281. * A microcode update fixes this behavior to clear CPU buffers. It also
  282. * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
  283. * ARCH_CAP_TSX_CTRL_MSR bit.
  284. *
  285. * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
  286. * update is required.
  287. */
  288. ia32_cap = x86_read_arch_cap_msr();
  289. if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
  290. !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
  291. taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
  292. /*
  293. * TSX is enabled, select alternate mitigation for TAA which is
  294. * the same as MDS. Enable MDS static branch to clear CPU buffers.
  295. *
  296. * For guests that can't determine whether the correct microcode is
  297. * present on the host, enable the mitigation for UCODE_NEEDED as well.
  298. */
  299. static_branch_enable(&mds_user_clear);
  300. if (taa_nosmt || cpu_mitigations_auto_nosmt())
  301. cpu_smt_disable(false);
  302. /*
  303. * Update MDS mitigation, if necessary, as the mds_user_clear is
  304. * now enabled for TAA mitigation.
  305. */
  306. if (mds_mitigation == MDS_MITIGATION_OFF &&
  307. boot_cpu_has_bug(X86_BUG_MDS)) {
  308. mds_mitigation = MDS_MITIGATION_FULL;
  309. mds_select_mitigation();
  310. }
  311. out:
  312. pr_info("%s\n", taa_strings[taa_mitigation]);
  313. }
  314. static int __init tsx_async_abort_parse_cmdline(char *str)
  315. {
  316. if (!boot_cpu_has_bug(X86_BUG_TAA))
  317. return 0;
  318. if (!str)
  319. return -EINVAL;
  320. if (!strcmp(str, "off")) {
  321. taa_mitigation = TAA_MITIGATION_OFF;
  322. } else if (!strcmp(str, "full")) {
  323. taa_mitigation = TAA_MITIGATION_VERW;
  324. } else if (!strcmp(str, "full,nosmt")) {
  325. taa_mitigation = TAA_MITIGATION_VERW;
  326. taa_nosmt = true;
  327. }
  328. return 0;
  329. }
  330. early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
  331. #undef pr_fmt
  332. #define pr_fmt(fmt) "SRBDS: " fmt
  333. enum srbds_mitigations {
  334. SRBDS_MITIGATION_OFF,
  335. SRBDS_MITIGATION_UCODE_NEEDED,
  336. SRBDS_MITIGATION_FULL,
  337. SRBDS_MITIGATION_TSX_OFF,
  338. SRBDS_MITIGATION_HYPERVISOR,
  339. };
  340. static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
  341. static const char * const srbds_strings[] = {
  342. [SRBDS_MITIGATION_OFF] = "Vulnerable",
  343. [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
  344. [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
  345. [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
  346. [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
  347. };
  348. static bool srbds_off;
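/*
 * Program MSR_IA32_MCU_OPT_CTRL for the selected SRBDS mitigation: clear
 * RNGDS_MITG_DIS to keep the microcode mitigation active, or set it to opt
 * out when the mitigation is off or TSX is disabled. The MSR is left alone
 * under a hypervisor or when the required microcode is missing.
 */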
  349. void update_srbds_msr(void)
  350. {
  351. u64 mcu_ctrl;
  352. if (!boot_cpu_has_bug(X86_BUG_SRBDS))
  353. return;
  354. if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
  355. return;
  356. if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
  357. return;
  358. rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
  359. switch (srbds_mitigation) {
  360. case SRBDS_MITIGATION_OFF:
  361. case SRBDS_MITIGATION_TSX_OFF:
  362. mcu_ctrl |= RNGDS_MITG_DIS;
  363. break;
  364. case SRBDS_MITIGATION_FULL:
  365. mcu_ctrl &= ~RNGDS_MITG_DIS;
  366. break;
  367. default:
  368. break;
  369. }
  370. wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
  371. }
  372. static void __init srbds_select_mitigation(void)
  373. {
  374. u64 ia32_cap;
  375. if (!boot_cpu_has_bug(X86_BUG_SRBDS))
  376. return;
  377. /*
  378. * Check to see if this is one of the MDS_NO systems supporting
  379. * TSX that are only exposed to SRBDS when TSX is enabled.
  380. */
  381. ia32_cap = x86_read_arch_cap_msr();
  382. if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
  383. srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
  384. else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
  385. srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
  386. else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
  387. srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
  388. else if (cpu_mitigations_off() || srbds_off)
  389. srbds_mitigation = SRBDS_MITIGATION_OFF;
  390. update_srbds_msr();
  391. pr_info("%s\n", srbds_strings[srbds_mitigation]);
  392. }
  393. static int __init srbds_parse_cmdline(char *str)
  394. {
  395. if (!str)
  396. return -EINVAL;
  397. if (!boot_cpu_has_bug(X86_BUG_SRBDS))
  398. return 0;
  399. srbds_off = !strcmp(str, "off");
  400. return 0;
  401. }
  402. early_param("srbds", srbds_parse_cmdline);
  403. #undef pr_fmt
  404. #define pr_fmt(fmt) "Spectre V1 : " fmt
  405. enum spectre_v1_mitigation {
  406. SPECTRE_V1_MITIGATION_NONE,
  407. SPECTRE_V1_MITIGATION_AUTO,
  408. };
  409. static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
  410. SPECTRE_V1_MITIGATION_AUTO;
  411. static const char * const spectre_v1_strings[] = {
  412. [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
  413. [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
  414. };
  415. /*
  416. * Does SMAP provide full mitigation against speculative kernel access to
  417. * userspace?
  418. */
  419. static bool smap_works_speculatively(void)
  420. {
  421. if (!boot_cpu_has(X86_FEATURE_SMAP))
  422. return false;
  423. /*
  424. * On CPUs which are vulnerable to Meltdown, SMAP does not
  425. * prevent speculative access to user data in the L1 cache.
  426. * Consider SMAP to be non-functional as a mitigation on these
  427. * CPUs.
  428. */
  429. if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
  430. return false;
  431. return true;
  432. }
  433. static void __init spectre_v1_select_mitigation(void)
  434. {
  435. if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
  436. spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
  437. return;
  438. }
  439. if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
  440. /*
  441. * With Spectre v1, a user can speculatively control either
  442. * path of a conditional swapgs with a user-controlled GS
  443. * value. The mitigation is to add lfences to both code paths.
  444. *
  445. * If FSGSBASE is enabled, the user can put a kernel address in
  446. * GS, in which case SMAP provides no protection.
  447. *
  448. * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
  449. * FSGSBASE enablement patches have been merged. ]
  450. *
  451. * If FSGSBASE is disabled, the user can only put a user space
  452. * address in GS. That makes an attack harder, but still
  453. * possible if there's no SMAP protection.
  454. */
  455. if (!smap_works_speculatively()) {
  456. /*
  457. * Mitigation can be provided from SWAPGS itself or
  458. * PTI as the CR3 write in the Meltdown mitigation
  459. * is serializing.
  460. *
  461. * If neither is there, mitigate with an LFENCE to
  462. * stop speculation through swapgs.
  463. */
  464. if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
  465. !boot_cpu_has(X86_FEATURE_PTI))
  466. setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
  467. /*
  468. * Enable lfences in the kernel entry (non-swapgs)
  469. * paths, to prevent user entry from speculatively
  470. * skipping swapgs.
  471. */
  472. setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
  473. }
  474. }
  475. pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
  476. }
  477. static int __init nospectre_v1_cmdline(char *str)
  478. {
  479. spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
  480. return 0;
  481. }
  482. early_param("nospectre_v1", nospectre_v1_cmdline);
  483. #undef pr_fmt
  484. #define pr_fmt(fmt) "Spectre V2 : " fmt
  485. static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
  486. SPECTRE_V2_NONE;
  487. static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
  488. SPECTRE_V2_USER_NONE;
  489. static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
  490. SPECTRE_V2_USER_NONE;
  491. #ifdef CONFIG_RETPOLINE
  492. static bool spectre_v2_bad_module;
  493. bool retpoline_module_ok(bool has_retpoline)
  494. {
  495. if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
  496. return true;
  497. pr_err("System may be vulnerable to spectre v2\n");
  498. spectre_v2_bad_module = true;
  499. return false;
  500. }
  501. static inline const char *spectre_v2_module_string(void)
  502. {
  503. return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
  504. }
  505. #else
  506. static inline const char *spectre_v2_module_string(void) { return ""; }
  507. #endif
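/* True when the arglen-byte command line token @arg exactly matches @opt. */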
  508. static inline bool match_option(const char *arg, int arglen, const char *opt)
  509. {
  510. int len = strlen(opt);
  511. return len == arglen && !strncmp(arg, opt, len);
  512. }
  513. /* The kernel command line selection for spectre v2 */
  514. enum spectre_v2_mitigation_cmd {
  515. SPECTRE_V2_CMD_NONE,
  516. SPECTRE_V2_CMD_AUTO,
  517. SPECTRE_V2_CMD_FORCE,
  518. SPECTRE_V2_CMD_RETPOLINE,
  519. SPECTRE_V2_CMD_RETPOLINE_GENERIC,
  520. SPECTRE_V2_CMD_RETPOLINE_AMD,
  521. };
  522. enum spectre_v2_user_cmd {
  523. SPECTRE_V2_USER_CMD_NONE,
  524. SPECTRE_V2_USER_CMD_AUTO,
  525. SPECTRE_V2_USER_CMD_FORCE,
  526. SPECTRE_V2_USER_CMD_PRCTL,
  527. SPECTRE_V2_USER_CMD_PRCTL_IBPB,
  528. SPECTRE_V2_USER_CMD_SECCOMP,
  529. SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
  530. };
  531. static const char * const spectre_v2_user_strings[] = {
  532. [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
  533. [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
  534. [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
  535. [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
  536. [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
  537. };
  538. static const struct {
  539. const char *option;
  540. enum spectre_v2_user_cmd cmd;
  541. bool secure;
  542. } v2_user_options[] __initconst = {
  543. { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
  544. { "off", SPECTRE_V2_USER_CMD_NONE, false },
  545. { "on", SPECTRE_V2_USER_CMD_FORCE, true },
  546. { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
  547. { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
  548. { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
  549. { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
  550. };
  551. static void __init spec_v2_user_print_cond(const char *reason, bool secure)
  552. {
  553. if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
  554. pr_info("spectre_v2_user=%s forced on command line.\n", reason);
  555. }
  556. static enum spectre_v2_user_cmd __init
  557. spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
  558. {
  559. char arg[20];
  560. int ret, i;
  561. switch (v2_cmd) {
  562. case SPECTRE_V2_CMD_NONE:
  563. return SPECTRE_V2_USER_CMD_NONE;
  564. case SPECTRE_V2_CMD_FORCE:
  565. return SPECTRE_V2_USER_CMD_FORCE;
  566. default:
  567. break;
  568. }
  569. ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
  570. arg, sizeof(arg));
  571. if (ret < 0)
  572. return SPECTRE_V2_USER_CMD_AUTO;
  573. for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
  574. if (match_option(arg, ret, v2_user_options[i].option)) {
  575. spec_v2_user_print_cond(v2_user_options[i].option,
  576. v2_user_options[i].secure);
  577. return v2_user_options[i].cmd;
  578. }
  579. }
  580. pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
  581. return SPECTRE_V2_USER_CMD_AUTO;
  582. }
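/*
 * Select the user space protection mode (STIBP/IBPB) from spectre_v2_user=
 * and the main Spectre v2 command, then enable the matching static keys.
 */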
  583. static void __init
  584. spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
  585. {
  586. enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
  587. bool smt_possible = IS_ENABLED(CONFIG_SMP);
  588. enum spectre_v2_user_cmd cmd;
  589. if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
  590. return;
  591. if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
  592. cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
  593. smt_possible = false;
  594. cmd = spectre_v2_parse_user_cmdline(v2_cmd);
  595. switch (cmd) {
  596. case SPECTRE_V2_USER_CMD_NONE:
  597. goto set_mode;
  598. case SPECTRE_V2_USER_CMD_FORCE:
  599. mode = SPECTRE_V2_USER_STRICT;
  600. break;
  601. case SPECTRE_V2_USER_CMD_PRCTL:
  602. case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
  603. mode = SPECTRE_V2_USER_PRCTL;
  604. break;
  605. case SPECTRE_V2_USER_CMD_AUTO:
  606. case SPECTRE_V2_USER_CMD_SECCOMP:
  607. case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
  608. if (IS_ENABLED(CONFIG_SECCOMP))
  609. mode = SPECTRE_V2_USER_SECCOMP;
  610. else
  611. mode = SPECTRE_V2_USER_PRCTL;
  612. break;
  613. }
  614. /* Initialize Indirect Branch Prediction Barrier */
  615. if (boot_cpu_has(X86_FEATURE_IBPB)) {
  616. setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
  617. spectre_v2_user_ibpb = mode;
  618. switch (cmd) {
  619. case SPECTRE_V2_USER_CMD_FORCE:
  620. case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
  621. case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
  622. static_branch_enable(&switch_mm_always_ibpb);
  623. spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
  624. break;
  625. case SPECTRE_V2_USER_CMD_PRCTL:
  626. case SPECTRE_V2_USER_CMD_AUTO:
  627. case SPECTRE_V2_USER_CMD_SECCOMP:
  628. static_branch_enable(&switch_mm_cond_ibpb);
  629. break;
  630. default:
  631. break;
  632. }
  633. pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
  634. static_key_enabled(&switch_mm_always_ibpb) ?
  635. "always-on" : "conditional");
  636. }
  637. /*
  638. * If enhanced IBRS is enabled or SMT impossible, STIBP is not
  639. * required.
  640. */
  641. if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
  642. return;
  643. /*
  644. * At this point, an STIBP mode other than "off" has been set.
  645. * If STIBP support is not being forced, check if STIBP always-on
  646. * is preferred.
  647. */
  648. if (mode != SPECTRE_V2_USER_STRICT &&
  649. boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
  650. mode = SPECTRE_V2_USER_STRICT_PREFERRED;
  651. /*
  652. * If STIBP is not available, clear the STIBP mode.
  653. */
  654. if (!boot_cpu_has(X86_FEATURE_STIBP))
  655. mode = SPECTRE_V2_USER_NONE;
  656. spectre_v2_user_stibp = mode;
  657. set_mode:
  658. pr_info("%s\n", spectre_v2_user_strings[mode]);
  659. }
  660. static const char * const spectre_v2_strings[] = {
  661. [SPECTRE_V2_NONE] = "Vulnerable",
  662. [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
  663. [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
  664. [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
  665. };
  666. static const struct {
  667. const char *option;
  668. enum spectre_v2_mitigation_cmd cmd;
  669. bool secure;
  670. } mitigation_options[] __initconst = {
  671. { "off", SPECTRE_V2_CMD_NONE, false },
  672. { "on", SPECTRE_V2_CMD_FORCE, true },
  673. { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
  674. { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
  675. { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
  676. { "auto", SPECTRE_V2_CMD_AUTO, false },
  677. };
  678. static void __init spec_v2_print_cond(const char *reason, bool secure)
  679. {
  680. if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
  681. pr_info("%s selected on command line.\n", reason);
  682. }
  683. static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
  684. {
  685. enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
  686. char arg[20];
  687. int ret, i;
  688. if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
  689. cpu_mitigations_off())
  690. return SPECTRE_V2_CMD_NONE;
  691. ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
  692. if (ret < 0)
  693. return SPECTRE_V2_CMD_AUTO;
  694. for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
  695. if (!match_option(arg, ret, mitigation_options[i].option))
  696. continue;
  697. cmd = mitigation_options[i].cmd;
  698. break;
  699. }
  700. if (i >= ARRAY_SIZE(mitigation_options)) {
  701. pr_err("unknown option (%s). Switching to AUTO select\n", arg);
  702. return SPECTRE_V2_CMD_AUTO;
  703. }
  704. if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
  705. cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
  706. cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
  707. !IS_ENABLED(CONFIG_RETPOLINE)) {
  708. pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
  709. return SPECTRE_V2_CMD_AUTO;
  710. }
  711. if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
  712. boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
  713. pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
  714. return SPECTRE_V2_CMD_AUTO;
  715. }
  716. spec_v2_print_cond(mitigation_options[i].option,
  717. mitigation_options[i].secure);
  718. return cmd;
  719. }
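/*
 * Pick the Spectre v2 mitigation: prefer Enhanced IBRS when the CPU
 * enumerates it, otherwise fall back to a retpoline flavour, then derive
 * the RSB filling, firmware IBRS and user space IBPB/STIBP settings.
 */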
  720. static void __init spectre_v2_select_mitigation(void)
  721. {
  722. enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
  723. enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
  724. /*
  725. * If the CPU is not affected and the command line mode is NONE or AUTO
  726. * then there is nothing to do.
  727. */
  728. if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
  729. (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
  730. return;
  731. switch (cmd) {
  732. case SPECTRE_V2_CMD_NONE:
  733. return;
  734. case SPECTRE_V2_CMD_FORCE:
  735. case SPECTRE_V2_CMD_AUTO:
  736. if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
  737. mode = SPECTRE_V2_IBRS_ENHANCED;
  738. /* Force it so VMEXIT will restore correctly */
  739. x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
  740. wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
  741. goto specv2_set_mode;
  742. }
  743. if (IS_ENABLED(CONFIG_RETPOLINE))
  744. goto retpoline_auto;
  745. break;
  746. case SPECTRE_V2_CMD_RETPOLINE_AMD:
  747. if (IS_ENABLED(CONFIG_RETPOLINE))
  748. goto retpoline_amd;
  749. break;
  750. case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
  751. if (IS_ENABLED(CONFIG_RETPOLINE))
  752. goto retpoline_generic;
  753. break;
  754. case SPECTRE_V2_CMD_RETPOLINE:
  755. if (IS_ENABLED(CONFIG_RETPOLINE))
  756. goto retpoline_auto;
  757. break;
  758. }
  759. pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
  760. return;
  761. retpoline_auto:
  762. if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
  763. retpoline_amd:
  764. if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
  765. pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
  766. goto retpoline_generic;
  767. }
  768. mode = SPECTRE_V2_RETPOLINE_AMD;
  769. setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
  770. setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
  771. } else {
  772. retpoline_generic:
  773. mode = SPECTRE_V2_RETPOLINE_GENERIC;
  774. setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
  775. }
  776. specv2_set_mode:
  777. spectre_v2_enabled = mode;
  778. pr_info("%s\n", spectre_v2_strings[mode]);
  779. /*
  780. * If spectre v2 protection has been enabled, unconditionally fill
  781. * RSB during a context switch; this protects against two independent
  782. * issues:
  783. *
  784. * - RSB underflow (and switch to BTB) on Skylake+
  785. * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
  786. */
  787. setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
  788. pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
  789. /*
  790. * Retpoline means the kernel is safe because it has no indirect
  791. * branches. Enhanced IBRS protects firmware too, so enable restricted
  792. * speculation around firmware calls only when Enhanced IBRS isn't
  793. * supported.
  794. *
  795. * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
  796. * the user might select retpoline on the kernel command line and if
  797. * the CPU supports Enhanced IBRS, the kernel might unintentionally not
  798. * enable IBRS around firmware calls.
  799. */
  800. if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
  801. setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
  802. pr_info("Enabling Restricted Speculation for firmware calls\n");
  803. }
  804. /* Set up IBPB and STIBP depending on the general spectre V2 command */
  805. spectre_v2_user_select_mitigation(cmd);
  806. }
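/* IPI callback: write the updated SPEC_CTRL base value on each CPU. */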
  807. static void update_stibp_msr(void * __unused)
  808. {
  809. wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
  810. }
  811. /* Update x86_spec_ctrl_base in case SMT state changed. */
  812. static void update_stibp_strict(void)
  813. {
  814. u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
  815. if (sched_smt_active())
  816. mask |= SPEC_CTRL_STIBP;
  817. if (mask == x86_spec_ctrl_base)
  818. return;
  819. pr_info("Update user space SMT mitigation: STIBP %s\n",
  820. mask & SPEC_CTRL_STIBP ? "always-on" : "off");
  821. x86_spec_ctrl_base = mask;
  822. on_each_cpu(update_stibp_msr, NULL, 1);
  823. }
  824. /* Update the static key controlling the evaluation of TIF_SPEC_IB */
  825. static void update_indir_branch_cond(void)
  826. {
  827. if (sched_smt_active())
  828. static_branch_enable(&switch_to_cond_stibp);
  829. else
  830. static_branch_disable(&switch_to_cond_stibp);
  831. }
  832. #undef pr_fmt
  833. #define pr_fmt(fmt) fmt
  834. /* Update the static key controlling the MDS CPU buffer clear in idle */
  835. static void update_mds_branch_idle(void)
  836. {
  837. /*
  838. * Enable the idle clearing if SMT is active on CPUs which are
  839. * affected only by MSBDS and not any other MDS variant.
  840. *
  841. * The other variants cannot be mitigated when SMT is enabled, so
  842. * clearing the buffers on idle just to prevent the Store Buffer
  843. * repartitioning leak would be a window dressing exercise.
  844. */
  845. if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
  846. return;
  847. if (sched_smt_active())
  848. static_branch_enable(&mds_idle_clear);
  849. else
  850. static_branch_disable(&mds_idle_clear);
  851. }
  852. #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
  853. #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
  854. void arch_smt_update(void)
  855. {
  856. mutex_lock(&spec_ctrl_mutex);
  857. switch (spectre_v2_user_stibp) {
  858. case SPECTRE_V2_USER_NONE:
  859. break;
  860. case SPECTRE_V2_USER_STRICT:
  861. case SPECTRE_V2_USER_STRICT_PREFERRED:
  862. update_stibp_strict();
  863. break;
  864. case SPECTRE_V2_USER_PRCTL:
  865. case SPECTRE_V2_USER_SECCOMP:
  866. update_indir_branch_cond();
  867. break;
  868. }
  869. switch (mds_mitigation) {
  870. case MDS_MITIGATION_FULL:
  871. case MDS_MITIGATION_VMWERV:
  872. if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
  873. pr_warn_once(MDS_MSG_SMT);
  874. update_mds_branch_idle();
  875. break;
  876. case MDS_MITIGATION_OFF:
  877. break;
  878. }
  879. switch (taa_mitigation) {
  880. case TAA_MITIGATION_VERW:
  881. case TAA_MITIGATION_UCODE_NEEDED:
  882. if (sched_smt_active())
  883. pr_warn_once(TAA_MSG_SMT);
  884. break;
  885. case TAA_MITIGATION_TSX_DISABLED:
  886. case TAA_MITIGATION_OFF:
  887. break;
  888. }
  889. mutex_unlock(&spec_ctrl_mutex);
  890. }
  891. #undef pr_fmt
  892. #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
  893. static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
  894. /* The kernel command line selection */
  895. enum ssb_mitigation_cmd {
  896. SPEC_STORE_BYPASS_CMD_NONE,
  897. SPEC_STORE_BYPASS_CMD_AUTO,
  898. SPEC_STORE_BYPASS_CMD_ON,
  899. SPEC_STORE_BYPASS_CMD_PRCTL,
  900. SPEC_STORE_BYPASS_CMD_SECCOMP,
  901. };
  902. static const char * const ssb_strings[] = {
  903. [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
  904. [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
  905. [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
  906. [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
  907. };
  908. static const struct {
  909. const char *option;
  910. enum ssb_mitigation_cmd cmd;
  911. } ssb_mitigation_options[] __initconst = {
  912. { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
  913. { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
  914. { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
  915. { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
  916. { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
  917. };
  918. static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
  919. {
  920. enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
  921. char arg[20];
  922. int ret, i;
  923. if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
  924. cpu_mitigations_off()) {
  925. return SPEC_STORE_BYPASS_CMD_NONE;
  926. } else {
  927. ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
  928. arg, sizeof(arg));
  929. if (ret < 0)
  930. return SPEC_STORE_BYPASS_CMD_AUTO;
  931. for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
  932. if (!match_option(arg, ret, ssb_mitigation_options[i].option))
  933. continue;
  934. cmd = ssb_mitigation_options[i].cmd;
  935. break;
  936. }
  937. if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
  938. pr_err("unknown option (%s). Switching to AUTO select\n", arg);
  939. return SPEC_STORE_BYPASS_CMD_AUTO;
  940. }
  941. }
  942. return cmd;
  943. }
  944. static enum ssb_mitigation __init __ssb_select_mitigation(void)
  945. {
  946. enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
  947. enum ssb_mitigation_cmd cmd;
  948. if (!boot_cpu_has(X86_FEATURE_SSBD))
  949. return mode;
  950. cmd = ssb_parse_cmdline();
  951. if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
  952. (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
  953. cmd == SPEC_STORE_BYPASS_CMD_AUTO))
  954. return mode;
  955. switch (cmd) {
  956. case SPEC_STORE_BYPASS_CMD_AUTO:
  957. case SPEC_STORE_BYPASS_CMD_SECCOMP:
  958. /*
  959. * Choose prctl+seccomp as the default mode if seccomp is
  960. * enabled.
  961. */
  962. if (IS_ENABLED(CONFIG_SECCOMP))
  963. mode = SPEC_STORE_BYPASS_SECCOMP;
  964. else
  965. mode = SPEC_STORE_BYPASS_PRCTL;
  966. break;
  967. case SPEC_STORE_BYPASS_CMD_ON:
  968. mode = SPEC_STORE_BYPASS_DISABLE;
  969. break;
  970. case SPEC_STORE_BYPASS_CMD_PRCTL:
  971. mode = SPEC_STORE_BYPASS_PRCTL;
  972. break;
  973. case SPEC_STORE_BYPASS_CMD_NONE:
  974. break;
  975. }
  976. /*
  977. * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
  978. * bit in the mask to allow guests to use the mitigation even in the
  979. * case where the host does not enable it.
  980. */
  981. if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
  982. static_cpu_has(X86_FEATURE_AMD_SSBD)) {
  983. x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
  984. }
  985. /*
  986. * We have three CPU feature flags that are in play here:
  987. * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
  988. * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
  989. * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
  990. */
  991. if (mode == SPEC_STORE_BYPASS_DISABLE) {
  992. setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
  993. /*
  994. * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
  995. * use a completely different MSR and bit dependent on family.
  996. */
  997. if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
  998. !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
  999. x86_amd_ssb_disable();
  1000. } else {
  1001. x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
  1002. wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
  1003. }
  1004. }
  1005. return mode;
  1006. }
  1007. static void ssb_select_mitigation(void)
  1008. {
  1009. ssb_mode = __ssb_select_mitigation();
  1010. if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
  1011. pr_info("%s\n", ssb_strings[ssb_mode]);
  1012. }
  1013. #undef pr_fmt
  1014. #define pr_fmt(fmt) "Speculation prctl: " fmt
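/* Propagate an updated per-task speculation setting to the TIF bits/MSRs. */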
  1015. static void task_update_spec_tif(struct task_struct *tsk)
  1016. {
  1017. /* Force the update of the real TIF bits */
  1018. set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
  1019. /*
  1020. * Immediately update the speculation control MSRs for the current
  1021. * task, but for a non-current task delay setting the CPU
  1022. * mitigation until it is scheduled next.
  1023. *
  1024. * This can only happen for SECCOMP mitigation. For PRCTL it's
  1025. * always the current task.
  1026. */
  1027. if (tsk == current)
  1028. speculation_ctrl_update_current();
  1029. }
  1030. static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
  1031. {
  1032. if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
  1033. ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
  1034. return -ENXIO;
  1035. switch (ctrl) {
  1036. case PR_SPEC_ENABLE:
  1037. /* If speculation is force disabled, enable is not allowed */
  1038. if (task_spec_ssb_force_disable(task))
  1039. return -EPERM;
  1040. task_clear_spec_ssb_disable(task);
  1041. task_update_spec_tif(task);
  1042. break;
  1043. case PR_SPEC_DISABLE:
  1044. task_set_spec_ssb_disable(task);
  1045. task_update_spec_tif(task);
  1046. break;
  1047. case PR_SPEC_FORCE_DISABLE:
  1048. task_set_spec_ssb_disable(task);
  1049. task_set_spec_ssb_force_disable(task);
  1050. task_update_spec_tif(task);
  1051. break;
  1052. default:
  1053. return -ERANGE;
  1054. }
  1055. return 0;
  1056. }
  1057. static bool is_spec_ib_user_controlled(void)
  1058. {
  1059. return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
  1060. spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
  1061. spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
  1062. spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
  1063. }
  1064. static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
  1065. {
  1066. switch (ctrl) {
  1067. case PR_SPEC_ENABLE:
  1068. if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
  1069. spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
  1070. return 0;
  1071. /*
  1072. * With strict mode for both IBPB and STIBP, the instruction
  1073. * code paths avoid checking this task flag and instead,
  1074. * unconditionally run the instruction. However, STIBP and IBPB
  1075. * are independent and either can be set to conditionally
  1076. * enabled regardless of the mode of the other.
  1077. *
  1078. * If either is set to conditional, allow the task flag to be
  1079. * updated, unless it was force-disabled by a previous prctl
  1080. * call. Currently, this is possible on an AMD CPU which has the
  1081. * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
  1082. * kernel is booted with 'spectre_v2_user=seccomp', then
  1083. * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
  1084. * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
  1085. */
  1086. if (!is_spec_ib_user_controlled() ||
  1087. task_spec_ib_force_disable(task))
  1088. return -EPERM;
  1089. task_clear_spec_ib_disable(task);
  1090. task_update_spec_tif(task);
  1091. break;
  1092. case PR_SPEC_DISABLE:
  1093. case PR_SPEC_FORCE_DISABLE:
  1094. /*
  1095. * Indirect branch speculation is always allowed when
  1096. * mitigation is force disabled.
  1097. */
  1098. if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
  1099. spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
  1100. return -EPERM;
  1101. if (!is_spec_ib_user_controlled())
  1102. return 0;
  1103. task_set_spec_ib_disable(task);
  1104. if (ctrl == PR_SPEC_FORCE_DISABLE)
  1105. task_set_spec_ib_force_disable(task);
  1106. task_update_spec_tif(task);
  1107. break;
  1108. default:
  1109. return -ERANGE;
  1110. }
  1111. return 0;
  1112. }
  1113. int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
  1114. unsigned long ctrl)
  1115. {
  1116. switch (which) {
  1117. case PR_SPEC_STORE_BYPASS:
  1118. return ssb_prctl_set(task, ctrl);
  1119. case PR_SPEC_INDIRECT_BRANCH:
  1120. return ib_prctl_set(task, ctrl);
  1121. default:
  1122. return -ENODEV;
  1123. }
  1124. }
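/*
 * Seccomp hook: force-disable for @task the speculation features whose
 * mitigation mode was selected as SECCOMP.
 */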
  1125. #ifdef CONFIG_SECCOMP
  1126. void arch_seccomp_spec_mitigate(struct task_struct *task)
  1127. {
  1128. if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
  1129. ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
  1130. if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
  1131. spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
  1132. ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
  1133. }
  1134. #endif
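/* Report the PR_SPEC_* state of Speculative Store Bypass for @task. */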
  1135. static int ssb_prctl_get(struct task_struct *task)
  1136. {
  1137. switch (ssb_mode) {
  1138. case SPEC_STORE_BYPASS_DISABLE:
  1139. return PR_SPEC_DISABLE;
  1140. case SPEC_STORE_BYPASS_SECCOMP:
  1141. case SPEC_STORE_BYPASS_PRCTL:
  1142. if (task_spec_ssb_force_disable(task))
  1143. return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
  1144. if (task_spec_ssb_disable(task))
  1145. return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
  1146. return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
  1147. default:
  1148. if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
  1149. return PR_SPEC_ENABLE;
  1150. return PR_SPEC_NOT_AFFECTED;
  1151. }
  1152. }
  1153. static int ib_prctl_get(struct task_struct *task)
  1154. {
  1155. if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
  1156. return PR_SPEC_NOT_AFFECTED;
  1157. if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
  1158. spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
  1159. return PR_SPEC_ENABLE;
  1160. else if (is_spec_ib_user_controlled()) {
  1161. if (task_spec_ib_force_disable(task))
  1162. return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
  1163. if (task_spec_ib_disable(task))
  1164. return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
  1165. return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
  1166. } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
  1167. spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
  1168. spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
  1169. return PR_SPEC_DISABLE;
  1170. else
  1171. return PR_SPEC_NOT_AFFECTED;
  1172. }
  1173. int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
  1174. {
  1175. switch (which) {
  1176. case PR_SPEC_STORE_BYPASS:
  1177. return ssb_prctl_get(task);
  1178. case PR_SPEC_INDIRECT_BRANCH:
  1179. return ib_prctl_get(task);
  1180. default:
  1181. return -ENODEV;
  1182. }
  1183. }
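/* Apply the boot CPU's SPEC_CTRL base value and SSBD selection on an AP. */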
  1184. void x86_spec_ctrl_setup_ap(void)
  1185. {
  1186. if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
  1187. wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
  1188. if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
  1189. x86_amd_ssb_disable();
  1190. }
  1191. bool itlb_multihit_kvm_mitigation;
  1192. EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
  1193. #undef pr_fmt
  1194. #define pr_fmt(fmt) "L1TF: " fmt
  1195. /* Default mitigation for L1TF-affected CPUs */
  1196. enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
  1197. #if IS_ENABLED(CONFIG_KVM_INTEL)
  1198. EXPORT_SYMBOL_GPL(l1tf_mitigation);
  1199. #endif
  1200. enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
  1201. EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
  1202. /*
  1203. * These CPUs all support a 44-bit physical address space internally in the
  1204. * cache, but CPUID can report a smaller number of physical address bits.
  1205. *
  1206. * The L1TF mitigation uses the topmost address bit for the inversion of
  1207. * non-present PTEs. When the installed memory reaches into the topmost
  1208. * address bit due to memory holes, which has been observed on machines
  1209. * which report 36 physical address bits and have 32G RAM installed,
  1210. * then the mitigation range check in l1tf_select_mitigation() triggers.
  1211. * This is a false positive because the mitigation is still possible due to
  1212. * the fact that the cache uses 44 bits internally. Use the cache bits
  1213. * instead of the reported physical bits and adjust them on the affected
  1214. * machines to 44 bits if the reported bits are less than 44.
  1215. */
  1216. static void override_cache_bits(struct cpuinfo_x86 *c)
  1217. {
  1218. if (c->x86 != 6)
  1219. return;
  1220. switch (c->x86_model) {
  1221. case INTEL_FAM6_NEHALEM:
  1222. case INTEL_FAM6_WESTMERE:
  1223. case INTEL_FAM6_SANDYBRIDGE:
  1224. case INTEL_FAM6_IVYBRIDGE:
  1225. case INTEL_FAM6_HASWELL_CORE:
  1226. case INTEL_FAM6_HASWELL_ULT:
  1227. case INTEL_FAM6_HASWELL_GT3E:
  1228. case INTEL_FAM6_BROADWELL_CORE:
  1229. case INTEL_FAM6_BROADWELL_GT3E:
  1230. case INTEL_FAM6_SKYLAKE_MOBILE:
  1231. case INTEL_FAM6_SKYLAKE_DESKTOP:
  1232. case INTEL_FAM6_KABYLAKE_MOBILE:
  1233. case INTEL_FAM6_KABYLAKE_DESKTOP:
  1234. if (c->x86_cache_bits < 44)
  1235. c->x86_cache_bits = 44;
  1236. break;
  1237. }
  1238. }
  1239. static void __init l1tf_select_mitigation(void)
  1240. {
  1241. u64 half_pa;
  1242. if (!boot_cpu_has_bug(X86_BUG_L1TF))
  1243. return;
  1244. if (cpu_mitigations_off())
  1245. l1tf_mitigation = L1TF_MITIGATION_OFF;
  1246. else if (cpu_mitigations_auto_nosmt())
  1247. l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
  1248. override_cache_bits(&boot_cpu_data);
  1249. switch (l1tf_mitigation) {
  1250. case L1TF_MITIGATION_OFF:
  1251. case L1TF_MITIGATION_FLUSH_NOWARN:
  1252. case L1TF_MITIGATION_FLUSH:
  1253. break;
  1254. case L1TF_MITIGATION_FLUSH_NOSMT:
  1255. case L1TF_MITIGATION_FULL:
  1256. cpu_smt_disable(false);
  1257. break;
  1258. case L1TF_MITIGATION_FULL_FORCE:
  1259. cpu_smt_disable(true);
  1260. break;
  1261. }
  1262. #if CONFIG_PGTABLE_LEVELS == 2
  1263. pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
  1264. return;
  1265. #endif
  1266. half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
  1267. if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
  1268. e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
  1269. pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
  1270. pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
  1271. half_pa);
  1272. pr_info("However, doing so will make a part of your RAM unusable.\n");
  1273. pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
  1274. return;
  1275. }
  1276. setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
  1277. }
  1278. static int __init l1tf_cmdline(char *str)
  1279. {
  1280. if (!boot_cpu_has_bug(X86_BUG_L1TF))
  1281. return 0;
  1282. if (!str)
  1283. return -EINVAL;
  1284. if (!strcmp(str, "off"))
  1285. l1tf_mitigation = L1TF_MITIGATION_OFF;
  1286. else if (!strcmp(str, "flush,nowarn"))
  1287. l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
  1288. else if (!strcmp(str, "flush"))
  1289. l1tf_mitigation = L1TF_MITIGATION_FLUSH;
  1290. else if (!strcmp(str, "flush,nosmt"))
  1291. l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
  1292. else if (!strcmp(str, "full"))
  1293. l1tf_mitigation = L1TF_MITIGATION_FULL;
  1294. else if (!strcmp(str, "full,force"))
  1295. l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
  1296. return 0;
  1297. }
  1298. early_param("l1tf", l1tf_cmdline);
  1299. #undef pr_fmt
  1300. #define pr_fmt(fmt) fmt
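/* Mitigation state reporting for the CPU vulnerabilities sysfs interface. */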
  1301. #ifdef CONFIG_SYSFS
  1302. #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
  1303. #if IS_ENABLED(CONFIG_KVM_INTEL)
  1304. static const char * const l1tf_vmx_states[] = {
  1305. [VMENTER_L1D_FLUSH_AUTO] = "auto",
  1306. [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
  1307. [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
  1308. [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
  1309. [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
  1310. [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
  1311. };
  1312. static ssize_t l1tf_show_state(char *buf)
  1313. {
  1314. if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
  1315. return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
  1316. if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
  1317. (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
  1318. sched_smt_active())) {
  1319. return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
  1320. l1tf_vmx_states[l1tf_vmx_mitigation]);
  1321. }
  1322. return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
  1323. l1tf_vmx_states[l1tf_vmx_mitigation],
  1324. sched_smt_active() ? "vulnerable" : "disabled");
  1325. }
  1326. static ssize_t itlb_multihit_show_state(char *buf)
  1327. {
  1328. if (itlb_multihit_kvm_mitigation)
  1329. return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
  1330. else
  1331. return sprintf(buf, "KVM: Vulnerable\n");
  1332. }
  1333. #else
  1334. static ssize_t l1tf_show_state(char *buf)
  1335. {
  1336. return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
  1337. }
  1338. static ssize_t itlb_multihit_show_state(char *buf)
  1339. {
  1340. return sprintf(buf, "Processor vulnerable\n");
  1341. }
  1342. #endif
  1343. static ssize_t mds_show_state(char *buf)
  1344. {
  1345. if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
  1346. return sprintf(buf, "%s; SMT Host state unknown\n",
  1347. mds_strings[mds_mitigation]);
  1348. }
  1349. if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
  1350. return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
  1351. (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
  1352. sched_smt_active() ? "mitigated" : "disabled"));
  1353. }
  1354. return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
  1355. sched_smt_active() ? "vulnerable" : "disabled");
  1356. }
  1357. static ssize_t tsx_async_abort_show_state(char *buf)
  1358. {
  1359. if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
  1360. (taa_mitigation == TAA_MITIGATION_OFF))
  1361. return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
  1362. if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
  1363. return sprintf(buf, "%s; SMT Host state unknown\n",
  1364. taa_strings[taa_mitigation]);
  1365. }
  1366. return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
  1367. sched_smt_active() ? "vulnerable" : "disabled");
  1368. }
  1369. static char *stibp_state(void)
  1370. {
  1371. if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
  1372. return "";
  1373. switch (spectre_v2_user_stibp) {
  1374. case SPECTRE_V2_USER_NONE:
  1375. return ", STIBP: disabled";
  1376. case SPECTRE_V2_USER_STRICT:
  1377. return ", STIBP: forced";
  1378. case SPECTRE_V2_USER_STRICT_PREFERRED:
  1379. return ", STIBP: always-on";
  1380. case SPECTRE_V2_USER_PRCTL:
  1381. case SPECTRE_V2_USER_SECCOMP:
  1382. if (static_key_enabled(&switch_to_cond_stibp))
  1383. return ", STIBP: conditional";
  1384. }
  1385. return "";
  1386. }
  1387. static char *ibpb_state(void)
  1388. {
  1389. if (boot_cpu_has(X86_FEATURE_IBPB)) {
  1390. if (static_key_enabled(&switch_mm_always_ibpb))
  1391. return ", IBPB: always-on";
  1392. if (static_key_enabled(&switch_mm_cond_ibpb))
  1393. return ", IBPB: conditional";
  1394. return ", IBPB: disabled";
  1395. }
  1396. return "";
  1397. }
  1398. static ssize_t srbds_show_state(char *buf)
  1399. {
  1400. return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
  1401. }
  1402. static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
  1403. char *buf, unsigned int bug)
  1404. {
  1405. if (!boot_cpu_has_bug(bug))
  1406. return sprintf(buf, "Not affected\n");
  1407. switch (bug) {
  1408. case X86_BUG_CPU_MELTDOWN:
  1409. if (boot_cpu_has(X86_FEATURE_PTI))
  1410. return sprintf(buf, "Mitigation: PTI\n");
  1411. if (hypervisor_is_type(X86_HYPER_XEN_PV))
  1412. return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
  1413. break;
  1414. case X86_BUG_SPECTRE_V1:
  1415. return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
  1416. case X86_BUG_SPECTRE_V2:
  1417. return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
  1418. ibpb_state(),
  1419. boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
  1420. stibp_state(),
  1421. boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
  1422. spectre_v2_module_string());
  1423. case X86_BUG_SPEC_STORE_BYPASS:
  1424. return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
  1425. case X86_BUG_L1TF:
  1426. if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
  1427. return l1tf_show_state(buf);
  1428. break;
  1429. case X86_BUG_MDS:
  1430. return mds_show_state(buf);
  1431. case X86_BUG_TAA:
  1432. return tsx_async_abort_show_state(buf);
  1433. case X86_BUG_ITLB_MULTIHIT:
  1434. return itlb_multihit_show_state(buf);
  1435. case X86_BUG_SRBDS:
  1436. return srbds_show_state(buf);
  1437. default:
  1438. break;
  1439. }
  1440. return sprintf(buf, "Vulnerable\n");
  1441. }
  1442. ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
  1443. {
  1444. return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
  1445. }
  1446. ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
  1447. {
  1448. return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
  1449. }
  1450. ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
  1451. {
  1452. return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
  1453. }
  1454. ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
  1455. {
  1456. return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
  1457. }
  1458. ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
  1459. {
  1460. return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
  1461. }
  1462. ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
  1463. {
  1464. return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
  1465. }
  1466. ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
  1467. {
  1468. return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
  1469. }
  1470. ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
  1471. {
  1472. return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
  1473. }
  1474. ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
  1475. {
  1476. return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
  1477. }
  1478. #endif