sdei.c 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271
  1. // SPDX-License-Identifier: GPL-2.0
  2. // Copyright (C) 2017 Arm Ltd.
  3. #define pr_fmt(fmt) "sdei: " fmt
  4. #include <linux/arm_sdei.h>
  5. #include <linux/hardirq.h>
  6. #include <linux/irqflags.h>
  7. #include <linux/sched/task_stack.h>
  8. #include <linux/uaccess.h>
  9. #include <asm/alternative.h>
  10. #include <asm/kprobes.h>
  11. #include <asm/mmu.h>
  12. #include <asm/ptrace.h>
  13. #include <asm/sections.h>
  14. #include <asm/stacktrace.h>
  15. #include <asm/sysreg.h>
  16. #include <asm/vmap_stack.h>
/*
 * How to return to firmware: SDEI_EXIT_HVC or SDEI_EXIT_SMC. Chosen from
 * the conduit reported to sdei_arch_get_entry_point().
 */
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks checking for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

/* The stacks themselves only exist when we must switch off the task stack. */
#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif
  33. static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
  34. {
  35. unsigned long *p;
  36. p = per_cpu(*ptr, cpu);
  37. if (p) {
  38. per_cpu(*ptr, cpu) = NULL;
  39. vfree(p);
  40. }
  41. }
  42. static void free_sdei_stacks(void)
  43. {
  44. int cpu;
  45. for_each_possible_cpu(cpu) {
  46. _free_sdei_stack(&sdei_stack_normal_ptr, cpu);
  47. _free_sdei_stack(&sdei_stack_critical_ptr, cpu);
  48. }
  49. }
  50. static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
  51. {
  52. unsigned long *p;
  53. p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
  54. if (!p)
  55. return -ENOMEM;
  56. per_cpu(*ptr, cpu) = p;
  57. return 0;
  58. }
  59. static int init_sdei_stacks(void)
  60. {
  61. int cpu;
  62. int err = 0;
  63. for_each_possible_cpu(cpu) {
  64. err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
  65. if (err)
  66. break;
  67. err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
  68. if (err)
  69. break;
  70. }
  71. if (err)
  72. free_sdei_stacks();
  73. return err;
  74. }
  75. static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
  76. {
  77. unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
  78. unsigned long high = low + SDEI_STACK_SIZE;
  79. if (!low)
  80. return false;
  81. if (sp < low || sp >= high)
  82. return false;
  83. if (info) {
  84. info->low = low;
  85. info->high = high;
  86. info->type = STACK_TYPE_SDEI_NORMAL;
  87. }
  88. return true;
  89. }
  90. static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
  91. {
  92. unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
  93. unsigned long high = low + SDEI_STACK_SIZE;
  94. if (!low)
  95. return false;
  96. if (sp < low || sp >= high)
  97. return false;
  98. if (info) {
  99. info->low = low;
  100. info->high = high;
  101. info->type = STACK_TYPE_SDEI_CRITICAL;
  102. }
  103. return true;
  104. }
  105. bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
  106. {
  107. if (!IS_ENABLED(CONFIG_VMAP_STACK))
  108. return false;
  109. if (on_sdei_critical_stack(sp, info))
  110. return true;
  111. if (on_sdei_normal_stack(sp, info))
  112. return true;
  113. return false;
  114. }
  115. unsigned long sdei_arch_get_entry_point(int conduit)
  116. {
  117. /*
  118. * SDEI works between adjacent exception levels. If we booted at EL1 we
  119. * assume a hypervisor is marshalling events. If we booted at EL2 and
  120. * dropped to EL1 because we don't support VHE, then we can't support
  121. * SDEI.
  122. */
  123. if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
  124. pr_err("Not supported on this hardware/boot configuration\n");
  125. return 0;
  126. }
  127. if (IS_ENABLED(CONFIG_VMAP_STACK)) {
  128. if (init_sdei_stacks())
  129. return 0;
  130. }
  131. sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
  132. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  133. if (arm64_kernel_unmapped_at_el0()) {
  134. unsigned long offset;
  135. offset = (unsigned long)__sdei_asm_entry_trampoline -
  136. (unsigned long)__entry_tramp_text_start;
  137. return TRAMP_VALIAS + offset;
  138. } else
  139. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  140. return (unsigned long)__sdei_asm_handler;
  141. }
/*
 * __sdei_handler() returns one of:
 *  SDEI_EV_HANDLED  -  success, return to the interrupted context.
 *  SDEI_EV_FAILED   -  failure, return this error code to firmware.
 *  virtual-address  -  success, return to this address.
 */
/*
 * Run the registered handler for this event, then decide where to return:
 * SDEI_EV_HANDLED/SDEI_EV_FAILED, or a vector address to pretend an IRQ
 * was taken. Called with the registers partially clobbered by the firmware
 * call; recovers them from firmware first.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	/* Firmware clobbered x0..x3; one more below if KPTI is in effect. */
	int clobbered_registers = 4;
	/* Snapshot ELR now so we can detect an exception taken by the handler. */
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	/* The trampoline entry path uses an extra register; recover it too. */
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, set PAN. UAO will be cleared
	 * by sdei_event_handler()s set_fs(USER_DS) call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	/* ELR changed: the handler itself took a synchronous exception. */
	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;	/* IRQ, current EL, SPx */
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* IRQ, lower EL, AArch32 */
	return vbar + 0x480;		/* IRQ, lower EL, AArch64 */
}
  201. asmlinkage __kprobes notrace unsigned long
  202. __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
  203. {
  204. unsigned long ret;
  205. bool do_nmi_exit = false;
  206. /*
  207. * nmi_enter() deals with printk() re-entrance and use of RCU when
  208. * RCU believed this CPU was idle. Because critical events can
  209. * interrupt normal events, we may already be in_nmi().
  210. */
  211. if (!in_nmi()) {
  212. nmi_enter();
  213. do_nmi_exit = true;
  214. }
  215. ret = _sdei_handler(regs, arg);
  216. if (do_nmi_exit)
  217. nmi_exit();
  218. return ret;
  219. }