callthunks.c

// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>
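
/*
 * Call depth tracking ("call thunks"): calls into core kernel text are
 * redirected to a small accounting thunk placed in the padding area in
 * front of the destination function. The thunk executes the
 * INCREMENT_CALL_DEPTH sequence and falls through into the function,
 * which allows the return thunk to refill the RSB before it underflows.
 */
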
static int __initdata_or_module debug_callthunks;

#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);
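
/*
 * Per-CPU debug counters, updated from the thunk templates: direct
 * calls, returns, RSB stuffing events and context switches.
 */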
#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};
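
/*
 * The accounting thunk template. A thunk consists solely of the
 * INCREMENT_CALL_DEPTH sequence; skl_call_thunk_tail marks its end so
 * that SKL_TMPL_SIZE below can compute the template size.
 */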
asm (
	".pushsection .rodata			\n"
	".global skl_call_thunk_template	\n"
	"skl_call_thunk_template:		\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail		\n"
	"skl_call_thunk_tail:			\n"
	".popsection				\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}
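
/*
 * Call destinations which must not be redirected through an accounting
 * thunk, either because they do their own accounting or because their
 * padding area must stay untouched.
 */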
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void *)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
	return false;
}
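
/*
 * Decode the instruction at @addr and return the call destination:
 * NULL if the call has been patched out or the destination is on the
 * skip list, an ERR_PTR() if decoding failed.
 */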
static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}
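
/* Expected content of an unpatched padding area: single byte NOPs. */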
static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
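
/*
 * Install the accounting template into the padding area in front of
 * @dest and return the start of the thunk. @direct indicates that the
 * text is not live yet, so it can be written with a plain memcpy()
 * instead of text_poke_copy_locked(). Returns NULL when the padding
 * area does not consist of NOPs.
 */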
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}
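
/*
 * Redirect the call at @addr: if both the call site and the destination
 * are core text, point the call at the accounting thunk in the
 * destination's padding area.
 */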
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px\n", addr, addr,
	      dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}
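
/*
 * Translate a call destination to its accounting thunk, e.g. for static
 * call updates. Returns @dest unchanged when no thunk is applicable.
 */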
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

#ifdef CONFIG_BPF_JIT
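/*
 * Check whether @addr points into an already installed accounting thunk,
 * i.e. whether the padding in front of the next aligned function start
 * holds the template.
 */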
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}
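
/*
 * Emit the accounting template into a BPF JIT buffer at *pprog, unless
 * the call target @func is itself a thunk. Returns the number of bytes
 * emitted.
 */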
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

#ifdef CONFIG_MODULES
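/* Patch the call sites of a freshly loaded module, using its text as core text. */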
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
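/*
 * Expose the per-CPU counters via debugfs, one file per possible CPU:
 * calls (C), returns (R), RSB stuffs (S) and context switches (X).
 */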
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif