cet.c 4.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/ptrace.h>
  3. #include <asm/bugs.h>
  4. #include <asm/traps.h>
/*
 * #CP exception error-code layout: bits 14:0 (CP_EC) encode the kind of
 * control-protection violation; bit 15 (CP_ENCL) is set when the fault
 * was delivered from inside an enclave.
 */
enum cp_error_code {
	CP_EC        = (1 << 15) - 1,	/* mask for the violation-kind field */

	CP_RET       = 1,	/* near RET shadow-stack mismatch */
	CP_IRET      = 2,	/* far transfer / IRET mismatch */
	CP_ENDBR     = 3,	/* missing ENDBRANCH at an indirect-branch target */
	CP_RSTRORSSP = 4,	/* RSTORSSP token check failure */
	CP_SETSSBSY  = 5,	/* SETSSBSY token check failure */

	CP_ENCL      = 1 << 15,	/* fault originated inside an enclave */
};
/* Printable names for the CP_EC violation kinds, indexed by error code. */
static const char cp_err[][10] = {
	[0] = "unknown",
	[1] = "near ret",
	[2] = "far/iret",
	[3] = "endbranch",
	[4] = "rstorssp",
	[5] = "setssbsy",
};
  22. static const char *cp_err_string(unsigned long error_code)
  23. {
  24. unsigned int cpec = error_code & CP_EC;
  25. if (cpec >= ARRAY_SIZE(cp_err))
  26. cpec = 0;
  27. return cp_err[cpec];
  28. }
  29. static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
  30. {
  31. WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
  32. user_mode(regs) ? "user mode" : "kernel mode",
  33. cp_err_string(error_code));
  34. }
/* Ratelimit state for the user-mode #CP diagnostic in do_user_cp_fault(). */
static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);
/*
 * Handle a shadow-stack #CP taken from userspace: record the trap details
 * on the task, emit a rate-limited diagnostic, and deliver SIGSEGV with
 * si_code SEGV_CPERR.
 */
static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	unsigned long ssp;

	/*
	 * An exception was just taken from userspace. Since interrupts are disabled
	 * here, no scheduling should have messed with the registers yet and they
	 * will be whatever is live in userspace. So read the SSP before enabling
	 * interrupts so locking the fpregs to do it later is not required.
	 */
	rdmsrl(MSR_IA32_PL3_SSP, ssp);

	cond_local_irq_enable(regs);

	tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_CP;

	/* Ratelimit to prevent log spamming. */
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    __ratelimit(&cpf_rate)) {
		pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
			 tsk->comm, task_pid_nr(tsk),
			 regs->ip, regs->sp, ssp, error_code,
			 cp_err_string(error_code),
			 error_code & CP_ENCL ? " in enclave" : "");
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
	cond_local_irq_disable(regs);
}
/* Kernel missing-ENDBR #CPs BUG() by default; "ibt=warn" makes them warn-only. */
static __ro_after_init bool ibt_fatal = true;
/*
 * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
 *
 * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
 * the WFE state of the interrupted context needs to be cleared to let execution
 * continue. Otherwise when the CPU resumes from the instruction that just
 * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
 * enters a dead loop.
 *
 * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
 * set WFE. But FRED provides space on the entry stack (in an expanded CS area)
 * to save and restore the WFE state, thus the WFE state is no longer clobbered,
 * so software must clear it.
 */
static void ibt_clear_fred_wfe(struct pt_regs *regs)
{
	/*
	 * No need to do any FRED checks.
	 *
	 * For IDT event delivery, the high-order 48 bits of CS are pushed
	 * as 0s into the stack, and later IRET ignores these bits.
	 *
	 * For FRED, a test to check if fred_cs.wfe is set would be dropped
	 * by compilers.
	 */
	regs->fred_cs.wfe = 0;
}
/*
 * Handle a #CP taken in kernel mode. Only missing-ENDBR (IBT) faults are
 * expected here; the deliberate IBT-selftest fault is fixed up in place,
 * while a real violation either warns ("ibt=warn") or is fatal.
 */
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	/* Anything other than a missing-ENDBR code is unexpected in the kernel. */
	if ((error_code & CP_EC) != CP_ENDBR) {
		do_unexpected_cp(regs, error_code);
		return;
	}

	/*
	 * The IBT selftest deliberately faults at ibt_selftest_noendbr:
	 * signal success via regs->ax and clear any FRED-saved WFE state
	 * so execution can continue past the faulting instruction.
	 */
	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
		ibt_clear_fred_wfe(regs);
		return;
	}

	pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
	if (!ibt_fatal) {
		/* "ibt=warn": emit a WARN-style splat and resume execution. */
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
		ibt_clear_fred_wfe(regs);
		return;
	}
	BUG();
}
  114. static int __init ibt_setup(char *str)
  115. {
  116. if (!strcmp(str, "off"))
  117. setup_clear_cpu_cap(X86_FEATURE_IBT);
  118. if (!strcmp(str, "warn"))
  119. ibt_fatal = false;
  120. return 1;
  121. }
  122. __setup("ibt=", ibt_setup);
  123. DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
  124. {
  125. if (user_mode(regs)) {
  126. if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
  127. do_user_cp_fault(regs, error_code);
  128. else
  129. do_unexpected_cp(regs, error_code);
  130. } else {
  131. if (cpu_feature_enabled(X86_FEATURE_IBT))
  132. do_kernel_cp_fault(regs, error_code);
  133. else
  134. do_unexpected_cp(regs, error_code);
  135. }
  136. }