security.c

// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>

unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
        COUNT_CACHE_FLUSH_NONE  = 0x1,
        COUNT_CACHE_FLUSH_SW    = 0x2,
        COUNT_CACHE_FLUSH_HW    = 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
static bool link_stack_flush_enabled;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
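
/*
 * Toggle the Spectre v1 speculation barrier by patching every
 * barrier_nospec fixup site in the kernel, and record the state for
 * reporting via sysfs and debugfs.
 */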
static void enable_barrier_nospec(bool enable)
{
        barrier_nospec_enabled = enable;
        do_barrier_nospec_fixups(enable);
}
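
/*
 * Decide at boot whether the barrier should be enabled, based on the
 * firmware-reported security features and any command-line override.
 */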
void setup_barrier_nospec(void)
{
        bool enable;

        /*
         * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
         * But there's a good reason not to. The two flags we check below are
         * both enabled by default in the kernel, so if the hcall is not
         * functional they will be enabled.
         * On a system where the host firmware has been updated (so the ori
         * functions as a barrier), but on which the hypervisor (KVM/QEMU) has
         * not been updated, we would like to enable the barrier. Dropping the
         * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
         * we potentially enable the barrier on systems where the host firmware
         * is not updated, but that's harmless as it's a no-op.
         */
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

        if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
        no_nospec = true;

        return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
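
/*
 * debugfs knob: the barrier can be flipped at runtime by writing 0 or 1
 * to barrier_nospec in the powerpc debugfs directory (typically
 * /sys/kernel/debug/powerpc/barrier_nospec).
 */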
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
        switch (val) {
        case 0:
        case 1:
                break;
        default:
                return -EINVAL;
        }

        if (!!val == !!barrier_nospec_enabled)
                return 0;

        enable_barrier_nospec(!!val);

        return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
        *val = barrier_nospec_enabled ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
                        barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
        debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
                            &fops_barrier_nospec);
        return 0;
}
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
        no_spectrev2 = true;

        return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
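/*
 * On Freescale Book3E the branch predictor (BTB) flush sites are present
 * by default: if the mitigation is disabled, patch them out; otherwise
 * just record that the flush is active for sysfs reporting.
 */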
void setup_spectre_v2(void)
{
        if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
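/*
 * Backs /sys/devices/system/cpu/vulnerabilities/meltdown: reports the RFI
 * flush state and whether the L1D cache is private to each thread.
 */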
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        bool thread_priv;

        thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

        if (rfi_flush) {
                struct seq_buf s;

                seq_buf_init(&s, buf, PAGE_SIZE - 1);

                seq_buf_printf(&s, "Mitigation: RFI Flush");
                if (thread_priv)
                        seq_buf_printf(&s, ", L1D private per thread");

                seq_buf_printf(&s, "\n");

                return s.len;
        }

        if (thread_priv)
                return sprintf(buf, "Vulnerable: L1D private per thread\n");

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_meltdown(dev, attr, buf);
}
#endif
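
/* Spectre v1 status for sysfs: __user pointer sanitization via barrier_nospec. */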
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
                if (barrier_nospec_enabled)
                        seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
                else
                        seq_buf_printf(&s, "Vulnerable");

                if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
                        seq_buf_printf(&s, ", ori31 speculation barrier enabled");

                seq_buf_printf(&s, "\n");
        } else
                seq_buf_printf(&s, "Not affected\n");

        return s.len;
}
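
/*
 * Spectre v2 status for sysfs: reports the branch-target mitigations in
 * effect (serialisation, count cache disable, software flushes).
 */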
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct seq_buf s;
        bool bcs, ccd;

        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

        if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");

                if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

                if (bcs && ccd)
                        seq_buf_printf(&s, ", ");

                if (ccd)
                        seq_buf_printf(&s, "Indirect branch cache disabled");

                if (link_stack_flush_enabled)
                        seq_buf_printf(&s, ", Software link stack flush");

        } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
                seq_buf_printf(&s, "Mitigation: Software count cache flush");

                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
                        seq_buf_printf(&s, " (hardware accelerated)");

                if (link_stack_flush_enabled)
                        seq_buf_printf(&s, ", Software link stack flush");

        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
                seq_buf_printf(&s, "Vulnerable");
        }

        seq_buf_printf(&s, "\n");

        return s.len;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
        pr_info("stf-barrier: disabled on command line.\n");
        no_stf_barrier = true;
        return 0;
}
early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
        if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
                /* Until firmware tells us, we have the barrier with auto */
                return 0;
        } else if (strncmp(p, "off", 3) == 0) {
                handle_no_stf_barrier(NULL);
                return 0;
        } else
                return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
        handle_no_stf_barrier(NULL);
        return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
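
/*
 * Patch the store-forwarding barrier sequences in or out at the kernel
 * entry/exit fixup sites, and record the state for reporting.
 */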
static void stf_barrier_enable(bool enable)
{
        if (enable)
                do_stf_barrier_fixups(stf_enabled_flush_types);
        else
                do_stf_barrier_fixups(STF_BARRIER_NONE);

        stf_barrier = enable;
}
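
/*
 * Pick the cheapest barrier the CPU supports (eieio on ISA v3.0, hwsync
 * on ISA v2.07, a fallback sequence on ISA v2.06), then enable it if
 * firmware requests the L1D flush and mitigations are not disabled.
 */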
void setup_stf_barrier(void)
{
        enum stf_barrier_type type;
        bool enable, hv;

        hv = cpu_has_feature(CPU_FTR_HVMODE);

        /* Default to fallback in case fw-features are not available */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                type = STF_BARRIER_EIEIO;
        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
                type = STF_BARRIER_SYNC_ORI;
        else if (cpu_has_feature(CPU_FTR_ARCH_206))
                type = STF_BARRIER_FALLBACK;
        else
                type = STF_BARRIER_NONE;

        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
                  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

        if (type == STF_BARRIER_FALLBACK) {
                pr_info("stf-barrier: fallback barrier available\n");
        } else if (type == STF_BARRIER_SYNC_ORI) {
                pr_info("stf-barrier: hwsync barrier available\n");
        } else if (type == STF_BARRIER_EIEIO) {
                pr_info("stf-barrier: eieio barrier available\n");
        }

        stf_enabled_flush_types = type;

        if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
}
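
/* Spec-store-bypass status for sysfs, naming the barrier type in use. */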
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
                const char *type;

                switch (stf_enabled_flush_types) {
                case STF_BARRIER_EIEIO:
                        type = "eieio";
                        break;
                case STF_BARRIER_SYNC_ORI:
                        type = "hwsync";
                        break;
                case STF_BARRIER_FALLBACK:
                        type = "fallback";
                        break;
                default:
                        type = "unknown";
                }
                return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
        }

        if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
            !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
                return sprintf(buf, "Not affected\n");

        return sprintf(buf, "Vulnerable\n");
}
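
/* debugfs knob: toggle the stf barrier at runtime, mirroring barrier_nospec above. */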
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        /* Only do anything if we're changing state */
        if (enable != stf_barrier)
                stf_barrier_enable(enable);

        return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
        *val = stf_barrier ? 1 : 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
        debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
        return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

static void no_count_cache_flush(void)
{
        count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
        pr_info("count-cache-flush: software flush disabled.\n");
}
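
/*
 * Enable or disable the software count cache and link stack flushes by
 * patching the call sites in the context-switch and KVM guest-exit paths,
 * using the hardware-assisted sequence when firmware advertises it.
 */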
static void toggle_count_cache_flush(bool enable)
{
        if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
            !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
                enable = false;

        if (!enable) {
                patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
#endif
                pr_info("link-stack-flush: software flush disabled.\n");
                link_stack_flush_enabled = false;
                no_count_cache_flush();
                return;
        }

        // This enables the branch from _switch to flush_count_cache
        patch_branch_site(&patch__call_flush_count_cache,
                          (u64)&flush_count_cache, BRANCH_SET_LINK);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        // This enables the branch from guest_exit_cont to kvm_flush_link_stack
        patch_branch_site(&patch__call_kvm_flush_link_stack,
                          (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
#endif

        pr_info("link-stack-flush: software flush enabled.\n");
        link_stack_flush_enabled = true;

        // If we just need to flush the link stack, patch an early return
        if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
                patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
                no_count_cache_flush();
                return;
        }

        if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
                count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
                pr_info("count-cache-flush: full software flush sequence enabled.\n");
                return;
        }

        patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
        count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
        pr_info("count-cache-flush: hardware assisted flush sequence enabled.\n");
}
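
/*
 * Boot-time entry point: decide whether the count cache flush should be
 * active, honouring nospectre_v2 and mitigations=off where possible.
 */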
void setup_count_cache_flush(void)
{
        bool enable = true;

        if (no_spectrev2 || cpu_mitigations_off()) {
                if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
                    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
                        pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

                enable = false;
        }

        /*
         * There's no firmware feature flag/hypervisor bit to tell us we need to
         * flush the link stack on context switch. So we set it here if we see
         * either of the Spectre v2 mitigations that aim to protect userspace.
         */
        if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
            security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
                security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

        toggle_count_cache_flush(enable);
}
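
/* debugfs knob: count_cache_flush can likewise be toggled at runtime. */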
#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
        bool enable;

        if (val == 1)
                enable = true;
        else if (val == 0)
                enable = false;
        else
                return -EINVAL;

        toggle_count_cache_flush(enable);

        return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
        if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
                *val = 0;
        else
                *val = 1;

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
                        count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
        debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
                            NULL, &fops_count_cache_flush);
        return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */